text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
# Manage the EISCAT data
## Download and load eiscat data
EISCAT provides three kinds of data file formats. They are **mat**, **eiscat-hdf5**, and **madrigal-hdf5**. Currently, [GeospaceLab](https://github.com/JouleCai/geospacelab) supports the **eiscat-hdf5** and **madrigal-hdf5** files.
The package can download and load the data files (`load_mode="AUTO"`) from [EISCAT](https://madrigal.eiscat.se/madrigal/), or load the local files (`load_mode="asigned"` or `"dialog"`).
### Automatically download and load the data
#### Step 1. Import modules and create an EISCATDashboard object.
```
import datetime
import matplotlib.pyplot as plt
%matplotlib widget
import geospacelab.express.eiscat_dashboard as eiscat
# settings
dt_fr = datetime.datetime.strptime('20201209' + '1800', '%Y%m%d%H%M')
dt_to = datetime.datetime.strptime('20201210' + '0600', '%Y%m%d%H%M')
site = 'UHF' # facility attributes required, check from the eiscat schedule page
antenna = 'UHF'
modulation = '60'
load_mode = 'AUTO'
data_file_type = 'eiscat-hdf5' # or 'madrigal-hdf5'
dashboard = eiscat.EISCATDashboard(
dt_fr, dt_to,
site=site, antenna=antenna, modulation=modulation,
data_file_type=data_file_type, load_mode=load_mode, status_control=False,
residual_control=False
)
```
##### Some post processing actions ...
```
# Check the beams
dashboard.check_beams()
# Select beams, useful for multi-beam scanning
dashboard.select_beams(field_aligned=True)
# Mask bad values (status and residuals)
dashboard.status_mask()
```
#### Step 2. Assign variables from the EISCAT dataset
The returns are the GeospaceLab Variable objects.
```
n_e = dashboard.assign_variable('n_e')
T_i = dashboard.assign_variable('T_i')
T_e = dashboard.assign_variable('T_e')
v_i = dashboard.assign_variable('v_i_los')
az = dashboard.assign_variable('AZ')
el = dashboard.assign_variable('EL')
ptx = dashboard.assign_variable('P_Tx')
tsys = dashboard.assign_variable('T_SYS_1')
```
#### Step 3. Get values
```
n_e_arr = n_e.value
```
## Visualize the EISCAT data
### Set the panel layout and draw the plots.
```
layout = [[n_e], [T_e], [T_i], [v_i], [az, [el]], [ptx], [tsys]]
dashboard.set_layout(panel_layouts=layout, )
dashboard.draw()
```
### Utilities.
#### Add a title
```
dashboard.add_title()
```
#### Add panel labels
```
dashboard.add_panel_labels()
```
#### Add vertical lines and shadings as indicators
```
# add vertical lines
dt_fr_2 = datetime.datetime.strptime('20201209' + '2030', "%Y%m%d%H%M")
dt_to_2 = datetime.datetime.strptime('20201210' + '0130', "%Y%m%d%H%M")
dashboard.add_vertical_line(dt_fr_2, bottom_extend=0, top_extend=0.02, label='Line 1', label_position='top')
# add shading
dashboard.add_shading(dt_fr_2, dt_to_2, bottom_extend=0, top_extend=0.02, label='Shading 1', label_position='top')
# add top bar
dt_fr_3 = datetime.datetime.strptime('20201210' + '0130', "%Y%m%d%H%M")
dt_to_3 = datetime.datetime.strptime('20201210' + '0430', "%Y%m%d%H%M")
dashboard.add_top_bar(dt_fr_3, dt_to_3, bottom=0., top=0.02, label='Top bar 1')
```
### List all the datasets
```
dashboard.list_sourced_datasets()
```
| github_jupyter |
```
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
%load_ext autoreload
%autoreload 2
np.set_printoptions(precision=2)
import normal_pkg.normal as nor
import normal_pkg.adaptation as ada
from normal_pkg.adaptation import CholeskyModule
```
# Fix the convergence of Proximal algorithm
```
results_dir = 'normal_pkg/normal_results'
for k in [1]:
nsteps = 300
init = 'natural'
for intervention in ['cause', 'effect']:
plotname = f'{intervention}_{init}_k={k}'
filepath = os.path.join(results_dir, plotname + '.pkl')
if os.path.isfile(filepath):
with open(filepath, 'rb') as fin:
results = pickle.load(fin)
print(results[-2])
for r in results[:]:
# print('lr={lr}'.format(r))
plt.plot(r['kl_causal_average'].mean(1), label=r['lr'])
plt.legend()
plt.grid()
for r in results[:]:
# print('lr={lr}'.format(r))
plt.plot(r['scoredist_causal_average'].mean(1), label=r['lr'])
plt.legend()
plt.grid()
print(r['loss'].shape)
print(np.sum(np.isnan(r['loss'])))
from scipy.signal import savgol_filter
for r in results[:]:
y = r['loss'].mean(1)
y = savgol_filter(y, 51, 3)
plt.plot(y , label=r['lr'])
plt.legend()
plt.grid()
```
One way to debug is to plot all contributions.
```
models = r['models'][0] # models for one experiment
direction = 'causal'
endpoint = models[0][direction]
target = models[1][direction]
print('endpoint ',endpoint)
print('target ',target)
def cholesky_diff(p0, p1):
    """Elementwise difference between the joint parameters of two models.

    Both arguments are expected to expose ``joint_parameters()`` returning
    a ``(z, L)`` pair; the result is ``(z0 - z1, L0 - L1)``.
    """
    z0, chol0 = p0.joint_parameters()
    z1, chol1 = p1.joint_parameters()
    delta_z = z0 - z1
    delta_chol = chol0 - chol1
    return delta_z, delta_chol
import torch
torch.set_printoptions(precision=2, linewidth=200, profile='short')
np.set_printoptions(precision=2, linewidth=200)
u, v = cholesky_diff(target, endpoint)
print(u.detach().numpy())
print(v.detach().numpy())
print('KL')
print(ada.cholesky_kl(target, endpoint, decompose=True))
print(-torch.sum(torch.log(torch.diag(endpoint.joint_parameters()[1]))))
print('\n Distances')
print('total ',target.dist(endpoint))
print('vector ',torch.sum(u**2))
print('diags ',torch.sum(torch.diag(v)**2))
print('not diag ', torch.sum((v - torch.diag(torch.diag(v)))**2))
```
# Fix Priors
show me the average joint distributions
```
k = 5
xx = np.random.randn(10000,k,k) / np.sqrt(2*k)
np.mean(xx @ np.swapaxes(xx,1,2), 0)
def print_means(dim, mode, n=500):
    """Sample ``n`` natural-parameter distributions and print the average
    absolute value of their joint parameters.

    For each draw the eta vector, mean, precision and covariance are
    collected; the very first draw is printed verbatim so one concrete
    sample can be inspected alongside the averages. Output goes to
    stdout; the function returns None.

    NOTE(review): relies on the notebook-level ``nor`` (normal_pkg.normal)
    module — assumes ``sample_natural`` returns an object with
    ``to_joint()``/``to_mean()``; confirm against normal_pkg.
    """
    samples = []
    for trial in range(n):
        joint = nor.sample_natural(dim, mode).to_joint()
        moments = joint.to_mean()
        params = [joint.eta, moments.mean, joint.precision, moments.cov]
        if trial == 0:
            # Show one raw draw before averaging.
            print('\n'.join([str(h) for h in params]))
        samples.append([np.abs(u) for u in params])
    averages = [np.mean([row[i] for row in samples], 0) for i in range(4)]
    print('dim=', dim, mode)
    for i, name in enumerate(['eta', 'mean', 'precision', 'covariance']):
        print(name)
        print(averages[i])
means = print_means(2,'conjugate')
print_means(2,'naive')
```
# Intervention interpolation
```
from collections import defaultdict
from normal_pkg.distances import intervention_distances
def interpolation_behavior(intervention, d=10, n=1000, alpha=.2):
    """Plot the anti-causal / causal distance ratio along an interpolation
    path between a reference distribution and an intervened one.

    Parameters
    ----------
    intervention : str
        Intervention kind passed to ``intervention_distances``
        (e.g. 'cause', 'effect', 'mechanism').
    d : int
        Model dimension.
    n : int
        Number of interpolation points (linearly spaced in (0, 1]).
    alpha : float
        Marker transparency for the scatter plot.
    """
    steps = np.linspace(0.001, 1, n)
    distances = intervention_distances(d, n, intervention=intervention,
                                       interpolation=steps)
    distances = {name: np.array(vals) for name, vals in distances.items()}
    plt.figure()
    for unit in ['nat', 'cho']:
        # Ratio of anti-causal to causal distance in each parametrization.
        ratio = distances['anti_' + unit] / distances['causal_' + unit]
        plt.scatter(steps, ratio, label=unit, alpha=alpha)
    plt.ylabel('ratio anti/causal')
    plt.grid()
    plt.legend()
    plt.show()
interpolation_behavior('cause')
interpolation_behavior('effect')
```
NOTE: both curves do not start at 0 because I am still making variables independent
```
interpolation_behavior('mechanism')
for inter in ['cause','effect','mechanism']:
for d in [3,10,30]:
print('dimension ', d, inter)
interpolation_behavior(inter,d,400)
```
| github_jupyter |
# Introdução ao Python - Ana Beatriz Macedo<img src="https://octocat-generator-assets.githubusercontent.com/my-octocat-1626096942740.png" width="324" height="324" align="right">
## Link para download: https://github.com/AnabeatrizMacedo241/Python-101
## Github: https://github.com/AnabeatrizMacedo241
## Linkedin: www.linkedin.com/in/anabeatriz-macedo
<img src="https://cdn.jsdelivr.net/gh/devicons/devicon/icons/python/python-original.svg" alt="rails" width='150' height='150' style='max-width: 100%;'></img>

## Nessa oitava parte veremos:
- Filas
- Pilhas
### Filas
Em uma fila, seja ela do ônibus ou mercado, o primeiro da fila será o primeiro a sair. Faz sentido, não é? O termo de Filas em programação é igual, chamamos de `FIFO`: first in, first out. Ela servirá como uma estrutura de armazenamento de dados temporária.
```
#Um exemplo simples
fila = []
fila.append('Ana')
fila.append('Maria')
fila.append('Carlos')
fila.append('Marcelo')
fila
#Como 'Ana' foi a primeira a entrar, ela seria a primeira a sair.
fila.pop() #Ana
print(fila)
fila.pop() #Maria
print(fila)
```
Não foi isso que esperávamos... Isso acontece porque listas não são as melhores estruturas para criar Filas. O recomendado seria usar **deques**.
Suas operações principais são:
- `enqueue`: para inserir na fila
- `dequeue`: retirar da fila
```
#Exemplo
class Fila(object):
    """FIFO queue backed by a Python list.

    The first element inserted is the first one removed, matching the
    `FIFO` behavior described above.
    """

    def __init__(self):
        self.dados = []  # storage; index 0 is the front of the queue

    def insere(self, elemento):
        """Insert an element at the back of the queue (enqueue)."""
        self.dados.append(elemento)

    def retira(self):
        """Remove and return the FIRST element (dequeue).

        BUG fixed: ``pop()`` with no argument removed the LAST element
        (stack/LIFO behavior); ``pop(0)`` removes the front, as a queue
        must. Note pop(0) is O(n) on a list — a deque would be O(1).
        """
        return self.dados.pop(0)

    def vazia(self):
        """Return True when the queue holds no elements."""
        return len(self.dados) == 0
class EmptyQueueException(Exception):
    """Raised when reading from an empty queue.

    Defined here because FilaDeque raises it, but it did not exist
    anywhere in the notebook (the raise would fail with NameError).
    """
    pass


class FilaDeque:
    """Circular-buffer FIFO queue over a fixed-size list.

    The buffer starts with ``Capacidade`` slots and doubles whenever it
    fills; ``front`` tracks the index of the oldest element so both
    enqueue and dequeue are O(1).
    """

    Capacidade = 25  # initial number of storage slots

    def __init__(self):
        self.dados = [None] * FilaDeque.Capacidade
        self.size = 0   # number of elements currently stored
        self.front = 0  # index of the first (oldest) element

    def __len__(self):
        """Return the number of stored elements."""
        return self.size

    def vazia(self):
        """Return True when the queue is empty."""
        return self.size == 0

    def primeiro(self):
        """Return (without removing) the first element.

        Raises
        ------
        EmptyQueueException
            If the queue is empty.
        """
        if (self.vazia()):
            raise EmptyQueueException('A Fila está vazia')
        return self.dados[self.front]

    def dequeue(self):
        """Remove and return the first element (FIFO).

        Raises
        ------
        EmptyQueueException
            If the queue is empty.
        """
        if (self.vazia()):
            raise EmptyQueueException('A Fila está vazia')
        answer = self.dados[self.front]
        self.dados[self.front] = None  # free the slot for garbage collection
        self.front = (self.front + 1) % len(self.dados)
        self.size -= 1
        return answer

    def enqueue(self, elemento):
        """Add an element at the back, growing the buffer when full."""
        if (self.size == len(self.dados)):
            # BUG fixed: resize() was called here but never defined,
            # so filling the queue raised AttributeError.
            self.resize(2 * len(self.dados))
        avail = (self.front + self.size) % len(self.dados)
        self.dados[avail] = elemento
        self.size += 1

    def resize(self, capacidade):
        """Copy the elements, in order, into a new buffer of ``capacidade`` slots."""
        antigos = self.dados
        self.dados = [None] * capacidade
        walk = self.front
        for k in range(self.size):
            self.dados[k] = antigos[walk]
            walk = (walk + 1) % len(antigos)
        self.front = 0  # elements are now packed from index 0

    def __str__(self):
        """Show the raw buffer, including unused (None) slots."""
        return str(self.dados)
Elementos = FilaDeque()
Elementos.enqueue(10)
Elementos.enqueue(9)
Elementos.enqueue(8)
print(Elementos.dequeue()) #10 é retirado e depois o 9 (FIFO)
print(Elementos.dequeue())
Elementos.enqueue(7)
print(Elementos) #25 espaços, apenas 8 e 7 restam em suas posições de entrada.
Elementos.vazia()
```
### Pilhas
Um exemplo do cotidiano para explicar **Pilhas** seriam pilhas de papéis, por exemplo. Quando vamos empilhando papéis, o último a ser colocado será o primeiro a sair, por ele estar em cima. Esse termo em programação seria `LIFO`: last in, first out.
Suas operações principais são:
- `push`: para inserir no topo da pilha
- `pop`: retirar do topo
- `top`: checar qual o elemento que está no topo
```
#Exemplo
class Pilha(object):
    """LIFO stack backed by a Python list (top of stack = end of list)."""

    def __init__(self):
        # Internal storage; the last item appended is the stack top.
        self.dados = []

    def empilha(self, elemento):
        """Push an element onto the top of the stack."""
        self.dados.append(elemento)

    def desempilha(self):
        """Pop and return the top element, or None when the stack is empty."""
        if self.vazia():
            return None
        return self.dados.pop()

    def vazia(self):
        """Return True when the stack holds no elements."""
        return len(self.dados) == 0
```
Onde as pilhas são usadas?
Podem ser usadas em gerenciamento de chamadas de função de um programa com a finalidade de manter informações sobre as funções de um programa que estejam ativas, aguardando serem terminadas.
```
class Emptyexception(Exception):
    """Raised when top()/pop() is called on an empty stack.

    Defined here because Pilha raises it, but it was never defined in
    the notebook (the raise would fail with NameError).
    """
    pass


class Pilha:
    """LIFO stack with explicit empty-stack errors."""

    def __init__(self):
        self.dados = []  # storage; the last element is the stack top

    def vazio(self):
        """Return True when the stack is empty."""
        return len(self.dados) == 0

    def push(self, elemento):
        """Insert a new element on top of the stack."""
        self.dados.append(elemento)

    def pop(self):
        """Remove and return the top element (LIFO).

        Raises
        ------
        Emptyexception
            If the stack is empty.
        """
        if self.vazio():
            raise Emptyexception('Pilha vazia')
        return self.dados.pop()

    def top(self):
        """Return (without removing) the last element added.

        Raises
        ------
        Emptyexception
            If the stack is empty.

        BUG fixed: this read ``self.data`` (a nonexistent attribute) and
        raised AttributeError; the storage list is ``self.dados``.
        """
        if self.vazio():
            raise Emptyexception('Pilha vazia')
        return self.dados[-1]

    def len_(self):
        """Return the number of stored elements (name kept for compatibility)."""
        return len(self.dados)

    def __len__(self):
        """Support the built-in len()."""
        return len(self.dados)

    def __str__(self):
        """Show the stack contents, bottom to top."""
        return str(self.dados)
Dados = Pilha()
Dados.push(10)
print(Dados)
Dados.push(9)
Dados.push(8)
Dados.push(7)
print(Dados)
Dados.pop()
print(Dados) #Retirou o último(LIFO)
```
### Conclusão:
**Filas e Pilhas** são usadas para a implementação de listas como estruturas para armazenamento dos dados.
### Faça seus próprios exemplos para praticar e bons estudos!
## Ana Beatriz Macedo

| github_jupyter |
# Formal Simulated Inference
1. Define F (i.e. your model and assumptions)
2. Formalize test
3. Describe test statistic
4. A. Sample data from F∈ℱ0
B. Sample data from F∈ℱA
5. A. Plot power vs n (i.e. perspective power analysis)
B. Plot power vs n (i.e. perspective power analysis)
6. Apply to data
## Step 1: Define model and assumptions
**Model**
F(X,Y) | ~ Histogram Data
a) $F(I,k) = \{ F(I|k)*F(k) \}$
$F(k)= \{0,1\}$
* 0 = Control Image
* 1 = Cocaine Image
b) $F(I,k) = \{ F(I|k)*F(k) \}$
$F(k)= \{0,1\}$
**Assumption**
## Step 2: Formalize test
H0:
1. $F(I,0) > || = F(I,1)$
2. $F(I,0) < ||= F(I,1)$
1. The maximum gray value for Cocaine stimulated brain is equal to the control brain.
2. The maximum gray value for fear induced brain is equal the control brain.
HA:
1. $F(I,0) < || != F(I,1)$
2. $F(I,0) > || != F(I,1)$
1. The maximum gray value for Cocaine stimulated brain is greater than the control brain.
2. The maximum gray value for fear induced brain is lower than the control brain.
## Step 3. Test Statistic
$$F(i,j) = floor((L-1)sum_{n=0}^{F(i,j)} {P_n} )$$
$P_n$ = Number of pixels of intensity n / Total number of pixels
$F(I) = mean (F(i,j))$ % mean gray value in the image
## Requirements to run the program
In order to run the program, we need to install the packages in python
> [PyQt4](https://www.riverbankcomputing.com/software/pyqt/download) install from tar file
>
> [SIP](https://www.riverbankcomputing.com/software/sip/download) install from tar file
>
> matplotlib,numpy,jgraph,vispy,[nibabel](http://nipy.org/nibabel/installation.html) (use pip install or pip2 install)
>
> If you have trouble downloading PyQt please try to pip install pyglet
```
import os
PATH="/Users/david/Desktop/CourseWork/TheArtOfDataScience/claritycontrol/code/scripts/" # use your own path
os.chdir(PATH)
import clarity as cl # I wrote this module for easier operations on data
import matplotlib.pyplot as plt
import jgraph as ig
%matplotlib inline
# create a instance
c = cl.Clarity("Fear199")
# load image, to points # takes a while to process the data
# * threshold rate is used to cut off some noise data.
# * sample is sampling rate
# show histogram
c.loadImg().imgToPoints(threshold=0.02,sample=0.3).showHistogram(bins=256)
b = cl.Clarity("Cocaine174")
b.loadImg(info=False).imgToPoints(threshold=0.08,sample=0.1).showHistogram(bins=256)
a = cl.Clarity("Control239")
a.loadImg(info=False).imgToPoints(threshold=0.04,sample=0.3).showHistogram(bins=256)
# save points to csv file
# load directly from points data, instead of the origin data
c.loadImg().imgToPoints(threshold=0.04,sample=0.5).savePoints()
# load from points data file
# show on 3D graph, GPU intensive
c.loadPoints().show()
# A screen shot
from IPython.display import Image
from IPython.core.display import HTML
Image(url= "https://raw.githubusercontent.com/Upward-Spiral-Science/claritycontrol/master/figs/a04/3dvisualdemo.png")
```
## Reflect
We can infer from the histogram plots that the maximum gray values in the cocaine-stimulated brain are higher than in the control brain. This is consistent with the alternative hypothesis. It also agrees with literature indicating that the brain is more active when stimulated by cocaine, thereby lighting up more than the control brain.
| github_jupyter |
<center>
<table style="border:none">
<tr style="border:none">
<th style="border:none">
<a href='https://colab.research.google.com/github/AmirMardan/ml_course/blob/main/3_pandas/0_intro_to_pandas.ipynb'><img src='https://colab.research.google.com/assets/colab-badge.svg'></a>
</th>
<th style="border:none">
<a href='https://github1s.com/AmirMardan/ml_course/blob/main/3_pandas/0_intro_to_pandas.ipynb'><img src='../imgs/open_vscode.svg' height=20px width=115px></a>
</th>
</tr>
</table>
</center>
This notebook is created by <a href='https://amirmardan.github.io'> Amir Mardan</a>. For any feedback or suggestion, please contact me via my <a href="mailto:mardan.amir.h@gmail.com">email</a>, (mardan.amir.h@gmail.com).
<center>
<img id='PYTHON' src='img/pandas.svg' width='300px'>
</center>
<a name='top'></a>
# Introduction to pandas
This notebook will cover the following topics:
- [Introduction](#introduction)
- [1. Introducing Pandas objects](#objects)
- [The pandas `Series` object](#series)
- [The pandas `DataFrame` object](#dataframe)
- [2. Data indexing and selection](#indexing)
- [Data selection in Series](#index_series)
- [Data selection in DataFrame](#index_df)
- [3. Handling missing data](#missing)
- [Detecting the missing values](#check_missing)
- [Dealing with missing values](#deal_missing)
- [4. IO in pandas](#import)
<a name='introduction'></a>
## Introduction
pandas is a library for data manipulation and analysis.
Created by **Wes McKinney**, first time released in January 2008.
<center><img src='./img/wes.png' alter='tavis' width=300px></center>
In this notebook, we learn the basic pandas. We learn
- What the pandas' objects are and how to create them,
- Data selection and indexing
- Handling missing data
```
# Ignore this cell
def letter_generator(n, random=True):
    """Return a NumPy array of ``n`` lowercase letters.

    Parameters
    ----------
    n : int
        Number of characters to produce.
    random : bool, optional
        If True (default) the letters are drawn uniformly at random;
        otherwise the first ``n`` letters of the alphabet are returned
        in order.

    Returns
    -------
    numpy.ndarray
        Array of single-character strings.
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    letters = np.array([ch for ch in alphabet])
    # The random indices are drawn unconditionally so the NumPy RNG state
    # advances the same way whichever branch is taken (matches original).
    picks = np.random.randint(0, 26, n)
    chosen = letters[picks[:n]] if random else letters[:n]
    return chosen
```
<a name='objects'></a>
## 1. Introducing pandas objects
At a basic level, pandas objects can be thought of as NumPy structured arrays in which the rows and columns are identified with labels rather than integer indices. There are three fundamental pandas structures:
- `Series`
- `DataFrame`
- `Index`
Let's import pandas and NumPy and discuss the mentioned structures.
```
import numpy as np
import pandas as pd
```
<a name='series'></a>
### 1.1 The pandas `Series` object
A pandas `Series` is a one-dimensional array.
```
# Creating a series from list
data = pd.Series([2, 1, 3.4, -8])
data
```
As we see, `Series` makes a sequence of values and a sequence of indices.
```
pd.Series(['k', 3, 2])
```
We can define the index
```
pd.Series([1, 2, 4], index=['a', 'x', 't'])
# Creating a series from dictionary
courses = {
'Math': 3.4,
'Literatur': 4,
'French': 3
}
pd.Series(courses)
# Creating a series from NumPy array
df = pd.Series(np.arange(3, 9, 1.2), index=['a', 'b', 'c', 'd', 'e'])
df
```
We have access to values and the indices using `values` and `index`
```
# Get the indices
df.index
# Get the values
df.values
```
Values are accessible using indices
```
# Creating homogenous series
pd.Series(50, index=[1, 2, 3])
```
<a name='dataframe'></a>
### 1.2 The pandas `DataFrame` object
A pandas `DataFrame` can be thought of NumPy array of a dictionary.
```
# Let's prepare some data
population_dict = {
'China': 1439323776,
'India': 1380004385,
'US': 331002651,
'Indonesia': 273523615,
'Pakistan': 220892340
}
land_area_dict = {
'China': 9388211,
'India': 2973190,
'US': 9147420,
'Indonesia': 1811570,
'Pakistan': 770880
}
# Creating DataFrame using Series
# 1. Creating Series
population = pd.Series(population_dict)
land_area = pd.Series(land_area_dict)
# 2. Combine the Series
countries = pd.DataFrame({'Population': population, 'Land Area': land_area})
countries
# Creating DataFrame using list
# 1. Creating the list
countries_list = []
population_list = []
land_area_list = []
for param in land_area_dict:
countries_list.append(param)
population_list.append(population_dict[param])
land_area_list.append(land_area_dict[param])
countries_list
# 2. Combine the lists
df = pd.DataFrame({"Population": population_list,
"Land Area": land_area_list},
index=countries_list)
df
# Adding another column.
# For example, let's calculate the density
df['Density']= df['Population'] /df['Land Area']
df
```
We use `index` and `columns` attributes to get the index and the name of columns.
```
df.index
df.columns
# Attribute values
df.values
# Creating DataFrame with missing values
pd.DataFrame([{'a': 0, 'b': 1},
{'a': 2, 'f':3, 'g':6}])
# Creating with 2-D NumPy array
pd.DataFrame(np.random.random((3, 4)),
columns=['col1', 'col2', 'col3', 'col4'],
index=[2, 4, 6])
```
<a name='indexing'></a>
## 2. Data indexing and selection
In this part, we learn how to get access to a part of data and modify it.
<a name='index_series'></a>
### 2.1 Data selection in Series
```
# Creating a series from NumPy array
a = np.arange(2.5, 12, 1.5)
df = pd.Series(a, index=letter_generator(len(a), False))
df
```
We can get a part of a `Series` with different methods:
- slicing
- masking
- fancy masking
For slicing, the data is accessible either with explicit indexing or implicit indexing.
```
# Explicit indexing to one element
df['a']
# Implicit indexing to one element
df[0]
# Explicit indexing
df['a': 'c']
# Explicit indexing
df[['a', 'd']]
# Masking
# Let's vreate a mask
mask = (df > 1) & (df % 2 == 0)
print("The create mask is:\n{}".format(mask))
# Index using the mask
masked = df[mask]
print("\nThe masked DataFrame is:\n{}".format(masked))
# Fancy indexing
df[[0, 3]]
```
#### Indexers, loc and iloc
Let's imagine a `Series` have integer indexing that doesn't start from zero. This can be the source of lots of confusion for explicit and implicit indexing.
```
df = pd.Series(letter_generator(5, random=False),
index=[4, 2, 3, 1, 6])
df
```
<hr>
<div>
<span style="color:#151D3B; font-weight:bold">Question: 🤔</span><p>
What's the result of
<code>df[2]</code>
Explicit indexing: 'b'
implicit indexing: 'c'
</div>
<hr>
```
# Answer
```
To avoid confusion, pandas provides some special *indexer*
- `loc`
- `iloc`
```
# loc for explicit indexing
df.loc[2]
# iloc for implicit indexing
df.iloc[2]
# Implicit slicing
df.iloc[2: 4]
# Implicit fancy indexing
df.loc[2: 1]
# Explicit fancy indexing
df.loc[[2, 4]]
```
<a name='index_df'></a>
### 2.2 Data selection in DataFrame
```
# Let's create a DataFrame
countries_list = ['China', 'India', 'US','Indonesia', 'Pakistan']
population_list = [1439323776, 1380004385, 331002651, 273523615, 220892340]
land_area_list = [9388211, 2973190, 9147420, 1811570, 770880]
density = list(map(np.divide, population_list, land_area_list))
df = pd.DataFrame({"Population": population_list,
"Land Area": land_area_list,
"Density": density},
index=countries_list)
df
```
An individual `Series` of the DataFrame can be accessed in attribute-style indexing.
```
df.Population
```
However, this might cause some confusion if the DataFrame has a column whose name is a reserved key. In this case, it's better to use dictionary-style indexing.
```
df['Population']
```
The other advantage of dictionary-style indexing is its functionality for picking more than one column.
```
df[['Population', 'Density']]
```
We can also use `loc` and `iloc`.
```
# Explicit indexing for DataFrame
df.loc['India', ['Population' ,'Density']]
```
<hr>
<div>
<span style="color:#151D3B; font-weight:bold">Question: 🤔</span><p>
Select the population and land area of Pakistan and India using explicit indexing.
</div>
<hr>
```
# Answer
df.loc[['Pakistan', 'India'],['Population', 'Land Area']]
# Answer using implicit indexing
df.iloc[[4, 1], [0, 1]]
```
#### Conditional indexing
```
# Get all columns based on a condition
df[df['Density'] < 120]
```
<hr>
<div>
<span style="color:#151D3B; font-weight:bold">Question: 🤔</span><p>
Get the population and land area of the countries with the density of at least twice the density of the US?
</div>
<hr>
```
# Answer
# df.loc[df['Density'] >= 2 * df.loc['US', 'Density'], ['Population', 'Land Area']]
```
$\color{red}{\text{Note:}}$
- Indexing refers to columns
- Slicing refers to rows
- Masking operations are row-wise
<a name='missing'></a>
## 3. Handling missing data
The data in the real world is rarely clean and homogenous. There are usually missing values in datasets. More complicated, there are different ways to indicate the missing data.
```
# Let's create a dataframe
population_dict = {
'China': 1439323776,
'India': 1380004385,
'US': 331002651,
'Indonesia': 273523615,
'Pakistan': 220892340,
}
land_area_dict = {
'China': 9388211,
'US': 9147420,
'Indonesia': 1811570,
'Pakistan': 770880,
'Brazil': 8358140
}
# 1. Creating Series
population = pd.Series(population_dict)
land_area = pd.Series(land_area_dict)
# 2. Combine the Series
df_missing = pd.DataFrame({'Population': population, 'Land Area': land_area})
df_missing
```
<a name='check_missing'></a>
### 3.1 Detecting the missing values
```
# Find the missing values using isna
df_missing.isna()
# Find the missing values using isnull
df_missing.isnull()
# Check the missing value
df_missing.notna()
# Number of missing values
df_missing.isnull().sum()
# Percentage of missing values
100 * df_missing.isnull().sum() / len(df_missing.isnull())
```
<a name='deal_missing'></a>
### 3.2 Dealing with missing values
Missing values can be either *ignored*, *dropped*, or *filled*.
#### Dropping
```
# Dropping the misisng values with axis = 0
df_missing.dropna()
# Dropping the misisng values with axis = 1
df_missing.dropna(axis=1)
# Dropping the misisng values with axis = 'rows'
df_missing.dropna(axis='rows')
# Drop specific column
df_missing['Population'].dropna()
df_missing
```
#### Filling
```
# Filling with a specific value
df_missing['Population'].fillna(df_missing['Population'].mean())
# Filling using forward or backward fill (ffill / bfill)
df_missing.fillna(method='ffill')
# Filling using forward or backward fill (ffill / bfill)
df_missing.fillna(method='bfill')
# Filling with axis
df_missing.fillna(method='bfill', axis='columns')
```
<a name='import'></a>
## 4. IO in pandas
Pandas has powerful functionality for dealing with different file formats. Here, we see how to import data files with CSV format or files from Excel
```
#Let's download the data
!curl -O https://raw.githubusercontent.com/AmirMardan/ml_course/main/data/house_intro_pandas.csv
!curl -O https://raw.githubusercontent.com/AmirMardan/ml_course/main/data/house_intro_pandas.xlsx
# Loading CSV file
df_csv = pd.read_csv('./house_intro_pandas.csv')
# We can use the method head() to see the five first rows of a dataframe
df_csv.head()
# Saving CSV file
df_csv.to_csv('./house_intro_pandas1.csv', index=False)
# Loading Excel file
df_xlsx = pd.read_excel('./house_intro_pandas.xlsx')
df_xlsx.head()
# Saving Excel file
df_csv.to_excel('./house_intro_pandas1.xlsx', index=False)
```
### [TOP ☝️](#top)
| github_jupyter |
##### Copyright 2019 The TensorFlow Hub Authors.
Licensed under the Apache License, Version 2.0 (the "License");
```
# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
```
# Wiki40B Language Models
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/hub/tutorials/wiki40b_lm"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/wiki40b_lm.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/hub/blob/master/examples/colab/wiki40b_lm.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/hub/examples/colab/wiki40b_lm.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
Generate Wikipedia-like text using the **Wiki40B language models** from [TensorFlow Hub](https://tfhub.dev)!
This notebook illustrates how to:
* Load the 41 monolingual and 2 multilingual language models that are part of the [Wiki40b-LM collection](https://tfhub.dev/google/collections/wiki40b-lm/1) on TF-Hub
* Use the models to obtain perplexity, per layer activations, and word embeddings for a given piece of text
* Generate text token-by-token from a piece of seed text
The language models are trained on the newly published, cleaned-up [Wiki40B dataset](https://www.tensorflow.org/datasets/catalog/wiki40b) available on TensorFlow Datasets. The training setup is based on the paper [“Wiki-40B: Multilingual Language Model Dataset”](https://research.google/pubs/pub49029/).
## Setup
```
#@title Installing Dependencies
!pip install --quiet tensorflow_text
#@title Imports
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
import tensorflow_text as tf_text
tf.disable_eager_execution()
tf.logging.set_verbosity(tf.logging.WARN)
```
## Choose Language
Let's choose **which language model** to load from TF-Hub and the **length of text** to be generated.
```
#@title { run: "auto" }
language = "en" #@param ["en", "ar", "zh-cn", "zh-tw", "nl", "fr", "de", "it", "ja", "ko", "pl", "pt", "ru", "es", "th", "tr", "bg", "ca", "cs", "da", "el", "et", "fa", "fi", "he", "hi", "hr", "hu", "id", "lt", "lv", "ms", "no", "ro", "sk", "sl", "sr", "sv", "tl", "uk", "vi", "multilingual-64k", "multilingual-128k"]
hub_module = "https://tfhub.dev/google/wiki40b-lm-{}/1".format(language)
max_gen_len = 20 #@param
print("Using the {} model to generate sequences of max length {}.".format(hub_module, max_gen_len))
```
## Build the Model
Okay, now that we've configured which pre-trained model to use, let's configure it to generate text up to `max_gen_len`. We will need to load the language model from TF-Hub, feed in a piece of starter text, and then iteratively feed in tokens as they are generated.
```
#@title Load the language model pieces
g = tf.Graph()
n_layer = 12
model_dim = 768
with g.as_default():
text = tf.placeholder(dtype=tf.string, shape=(1,))
# Load the pretrained model from TF-Hub
module = hub.Module(hub_module)
# Get the word embeddings, activations at each layer, negative log likelihood
# of the text, and calculate the perplexity.
embeddings = module(dict(text=text), signature="word_embeddings", as_dict=True)["word_embeddings"]
activations = module(dict(text=text), signature="activations", as_dict=True)["activations"]
neg_log_likelihood = module(dict(text=text), signature="neg_log_likelihood", as_dict=True)["neg_log_likelihood"]
ppl = tf.exp(tf.reduce_mean(neg_log_likelihood, axis=1))
#@title Construct the per-token generation graph
def feedforward_step(module, inputs, mems):
    """Run a single generation step of the language model.

    Args:
        module: the TF-Hub language-model module (callable with signatures).
        inputs: token ids to feed for this step.
        mems: per-layer memory tensors from the previous step (length n_layer).

    Returns:
        A (probs, new_mems) pair: next-token probabilities and the updated
        per-layer memories to feed into the following step.
    """
    # The "prediction" signature expects int64 token ids plus one memory
    # tensor per transformer layer.
    step_inputs = dict(input_tokens=tf.dtypes.cast(inputs, tf.int64))
    step_inputs.update({"mem_{}".format(idx): mems[idx] for idx in range(n_layer)})

    # Run the language model for one step.
    outputs = module(step_inputs, signature="prediction", as_dict=True)

    # Collect the probabilities and the refreshed memories for the next step.
    next_mems = [outputs["new_mem_{}".format(idx)] for idx in range(n_layer)]
    return outputs["probs"], next_mems
#@title Build the statically unrolled graph for `max_gen_len` tokens
with g.as_default():
    # Tokenization with the sentencepiece model.
    token_ids = module(dict(text=text), signature="tokenization", as_dict=True)["token_ids"]
    inputs_np = token_ids

    # Generate text by statically unrolling the computational graph
    # Start with empty per-layer memories (sequence length 0).
    mems_np = [np.zeros([1, 0, model_dim], dtype=np.float32) for _ in range(n_layer)]

    # Generate up to `max_gen_len` tokens
    sampled_ids = []
    for step in range(max_gen_len):
        probs, mems_np = feedforward_step(module, inputs_np, mems_np)
        # Sample the next token id from the model's output distribution.
        sampled_id = tf.random.categorical(tf.math.log(probs[0]), num_samples=1, dtype=tf.int32)
        sampled_id = tf.squeeze(sampled_id)
        sampled_ids.append(sampled_id)
        # Feed only the newly sampled token at the next step; the memories
        # carry the context of everything generated so far.
        inputs_np = tf.reshape(sampled_id, [1, 1])

    # Transform the ids into text
    sampled_ids = tf.expand_dims(sampled_ids, axis=0)
    generated_text = module(dict(token_ids=sampled_ids), signature="detokenization", as_dict=True)["text"]

    # Initializer for variables and lookup tables (run once per session).
    init_op = tf.group([tf.global_variables_initializer(), tf.tables_initializer()])
```
## Generate some text
Let's generate some text! We'll set a text `seed` to prompt the language model.
You can use one of the **predefined** seeds or _optionally_ **enter your own**. This text will be used as seed for the language model to help prompt the language model for what to generate next.
The following special tokens precede special parts of the generated article. Use **`_START_ARTICLE_`** to indicate the beginning of the article, **`_START_SECTION_`** to indicate the beginning of a section, and **`_START_PARAGRAPH_`** to introduce paragraph text within the article.
```
#@title Predefined Seeds
lang_to_seed = {"en": "\n_START_ARTICLE_\n1882 Prince Edward Island general election\n_START_PARAGRAPH_\nThe 1882 Prince Edward Island election was held on May 8, 1882 to elect members of the House of Assembly of the province of Prince Edward Island, Canada.",
"ar": "\n_START_ARTICLE_\nأوليفيا كوك\n_START_SECTION_\nنشأتها والتعلي \n_START_PARAGRAPH_\nولدت أوليفيا كوك في أولدهام في مانشستر الكبرى لأسرة تتكون من أب يعمل كظابط شرطة، وأمها تعمل كممثلة مبيعات. عندما كانت صغيرة بدأت تأخذ دروساً في الباليه الجمباز. وفي المدرسة شاركت في المسرحيات المدرسية، إضافةً إلى عملها في مسرح سندريلا . وفي سن الرابعة عشر عاماً، حصلت على وكيلة لها في مانشستر وهي وقعت عقداً مع وكالة الفنانين المبدعين في مانشستر،",
"zh-cn": "\n_START_ARTICLE_\n上尾事件\n_START_SECTION_\n日本国铁劳资关系恶化\n_START_PARAGRAPH_\n由于日本国铁财政恶化,管理层开始重整人手安排,令工会及员工感到受威胁。但日本国铁作为公营企业,其雇员均受公营企业等劳资关系法规管——该法第17条规定公营企业员工不得发动任何罢工行为。为了规避该法例",
"zh-tw": "\n_START_ARTICLE_\n乌森\n_START_PARAGRAPH_\n烏森(法語:Houssen,發音:[usən];德語:Hausen;阿爾薩斯語:Hüse)是法國上萊茵省的一個市鎮,位於該省北部,屬於科爾馬-里博維萊區(Colmar-Ribeauvillé)第二科爾馬縣(Colmar-2)。該市鎮總面積6.7平方公里,2009年時的人口為",
"nl": "\n_START_ARTICLE_\n1001 vrouwen uit de Nederlandse geschiedenis\n_START_SECTION_\nSelectie van vrouwen\n_START_PARAGRAPH_\nDe 'oudste' biografie in het boek is gewijd aan de beschermheilige",
"fr": "\n_START_ARTICLE_\nꝹ\n_START_SECTION_\nUtilisation\n_START_PARAGRAPH_\nLe d insulaire est utilisé comme lettre additionnelle dans l’édition de 1941 du recueil de chroniques galloises Brut y Tywysogion",
"de": "\n_START_ARTICLE_\nÜnal Demirkıran\n_START_SECTION_\nLaufbahn\n_START_PARAGRAPH_\nDemirkıran debütierte als junges Talent am 25. September 1999 im Auswärtsspiel des SSV Ulm 1846 bei Werder Bremen (2:2) in der Bundesliga, als er kurz",
"it": "\n_START_ARTICLE_\n28th Street (linea IRT Lexington Avenue)\n_START_SECTION_\nStoria\n_START_PARAGRAPH_\nLa stazione, i cui lavori di costruzione ebbero inizio nel 1900, venne aperta il 27 ottobre 1904, come",
"ja": "\n_START_ARTICLE_\nしのぶ・まさみshow'05 恋してラララ\n_START_SECTION_\n概要\n_START_PARAGRAPH_\n『上海ルーキーSHOW』の打ち切り後に放送された年末特番で、同番組MCの大竹しのぶと久本雅美が恋愛にまつわるテーマでトークや音楽企画を展開していた。基本は女",
"ko": "\n_START_ARTICLE_\n녹턴, Op. 9 (쇼팽)\n_START_SECTION_\n녹턴 3번 나장조\n_START_PARAGRAPH_\n쇼팽의 녹턴 3번은 세도막 형식인 (A-B-A)형식을 취하고 있다. 첫 부분은 알레그레토(Allegretto)의 빠르기가 지시되어 있으며 물 흐르듯이 부드럽게 전개되나",
"pl": "\n_START_ARTICLE_\nAK-176\n_START_SECTION_\nHistoria\n_START_PARAGRAPH_\nPod koniec lat 60 XX w. w ZSRR dostrzeżono potrzebę posiadania lekkiej armaty uniwersalnej średniego kalibru o stosunkowo dużej mocy ogniowej, która",
"pt": "\n_START_ARTICLE_\nÁcido ribonucleico\n_START_SECTION_\nIntermediário da transferência de informação\n_START_PARAGRAPH_\nEm 1957 Elliot Volkin e Lawrence Astrachan fizeram uma observação significativa. Eles descobriram que uma das mais marcantes mudanças",
"ru": "\n_START_ARTICLE_\nАрнольд, Ремо\n_START_SECTION_\nКлубная карьера\n_START_PARAGRAPH_\nАрнольд перешёл в академию «Люцерна» в 12 лет. С 2014 года выступал за вторую команду, где провёл пятнадцать встреч. С сезона 2015/2016 находится в составе основной команды. 27 сентября 2015 года дебютировал",
"es": "\n_START_ARTICLE_\n(200012) 2007 LK20\n_START_SECTION_\nDesignación y nombre\n_START_PARAGRAPH_\nDesignado provisionalmente como 2007 LK20.\n_START_SECTION_\nCaracterísticas orbitales\n_START_PARAGRAPH_\n2007 LK20",
"th": "\n_START_ARTICLE_\nการนัดหยุดเรียนเพื่อภูมิอากาศ\n_START_SECTION_\nเกรียตา ทืนแบร์ย\n_START_PARAGRAPH_\nวันที่ 20 สิงหาคม 2561 เกรียตา ทืนแบร์ย นักกิจกรรมภูมิอากาศชาวสวีเดน ซึ่งขณะนั้นศึกษาอยู่ในชั้นเกรด 9 (เทียบเท่ามัธยมศึกษาปีที่ 3) ตัดสินใจไม่เข้าเรียนจนกระทั่งการเลือกตั้งทั่วไปในประเทศสวีเดนปี",
"tr": "\n_START_ARTICLE_\nİsrail'in Muhafazakar Dostları\n_START_SECTION_\nFaaliyetleri\n_START_PARAGRAPH_\nGrubun 2005 stratejisi ile aşağıdaki faaliyet alanları tespit edilmiştir:_NEWLINE_İsrail'i destekleme",
"bg": "\n_START_ARTICLE_\nАвтомобил с повишена проходимост\n_START_SECTION_\nОсобености на конструкцията\n_START_PARAGRAPH_\nВ исторически план леки автомобили с висока проходимост се произвеждат и имат военно",
"ca": "\n_START_ARTICLE_\nAuchy-la-Montagne\n_START_SECTION_\nPoblació\n_START_PARAGRAPH_\nEl 2007 la població de fet d'Auchy-la-Montagne era de 469 persones. Hi havia 160 famílies de les quals 28",
"cs": "\n_START_ARTICLE_\nŘemeslo\n_START_PARAGRAPH_\nŘemeslo je určitý druh manuální dovednosti, provozovaný za účelem obživy, resp. vytváření zisku. Pro řemeslné práce je charakteristický vysoký podíl ruční práce, spojený s používáním specializovaných nástrojů a pomůcek. Řemeslné práce",
"da": "\n_START_ARTICLE_\nÖrenäs slot\n_START_PARAGRAPH_\nÖrenäs slot (svensk: Örenäs slott) er et slot nær Glumslöv i Landskrona stad tæt på Øresunds-kysten i Skåne i Sverige._NEWLINE_Örenäs ligger",
"el": "\n_START_ARTICLE_\nΆλβαρο Ρεκόμπα\n_START_SECTION_\nΒιογραφικά στοιχεία\n_START_PARAGRAPH_\nΟ Άλβαρο Ρεκόμπα γεννήθηκε στις 17 Μαρτίου 1976 στο Μοντεβίδεο της Ουρουγουάης από",
"et": "\n_START_ARTICLE_\nAus deutscher Geistesarbeit\n_START_PARAGRAPH_\nAus deutscher Geistesarbeit (alapealkiri Wochenblatt für wissenschaftliche und kulturelle Fragen der Gegenwart) oli ajakiri, mis 1924–1934 ilmus Tallinnas. Ajakirja andis 1932–1934",
"fa": "\n_START_ARTICLE_\nتفسیر بغوی\n_START_PARAGRAPH_\nایرانی حسین بن مسعود بغوی است. این کتاب خلاصه ای از تفسیر الکشف و البیان عن تفسیر القرآن ابواسحاق احمد ثعلبی میباشد. این کتاب در ۴ جلد موجود میباش",
"fi": "\n_START_ARTICLE_\nBovesin verilöyly\n_START_SECTION_\nVerilöyly\n_START_PARAGRAPH_\n19. syyskuuta 1943 partisaaniryhmä saapui Bovesiin tarkoituksenaan ostaa leipää kylästä. Kylässä sattui olemaan kaksi SS-miestä, jotka",
"he": "\n_START_ARTICLE_\nאוגדה 85\n_START_SECTION_\nהיסטוריה\n_START_PARAGRAPH_\nהאוגדה הוקמה בהתחלה כמשלט העמקים בשנות השבעים. בשנות השמונים הפכה להיות אוגדה מרחבית עם שתי",
"hi": "\n_START_ARTICLE_\nऑडी\n_START_SECTION_\nऑडी इंडिया\n_START_PARAGRAPH_\nऑडी इंडिया की स्थापना मार्च 2007 में फोक्सवैगन ग्रुप सेल्स इंडिया के एक विभाजन के रूप में की गई थी। दुनिया भर में 110",
"hr": "\n_START_ARTICLE_\nČimariko (jezična porodica)\n_START_PARAGRAPH_\nChimarikan.-porodica sjevernoameričkih indijanskih jezika koja prema Powersu obuhvaća jezike Indijanaca Chimariko (Chemaŕeko) sa rijeke Trinity i Chimalakwe",
"hu": "\n_START_ARTICLE_\nÁllami Politikai Igazgatóság\n_START_PARAGRAPH_\nAz Állami Politikai Igazgatóság (rövidítve: GPU, oroszul: Государственное политическое управление), majd később Egyesített Állami Politikai Igazgatóság Szovjet-Oroszország",
"id": "\n_START_ARTICLE_\n(257195) 2008 QY41\n_START_SECTION_\nPembentukan\n_START_PARAGRAPH_\nSeperti asteroid secara keseluruhan, asteroid ini terbentuk dari nebula matahari primordial sebagai pecahan planetisimal, sesuatu di",
"lt": "\n_START_ARTICLE_\nŠavijos–Uardigo regionas\n_START_SECTION_\nGeografija\n_START_PARAGRAPH_\nŠavijos-Uardigo regionas yra Atlanto vandenynu pakrantės lygumoje",
"lv": "\n_START_ARTICLE_\nApatīts\n_START_SECTION_\nĪpašības\n_START_PARAGRAPH_\nApatīta kopējā ķīmiskā formula ir Ca₁₀(PO₄)₆(OH,F,Cl)₂, ir trīs atšķirīgi apatīta veidi: apatīts: Ca₁₀(PO₄)₆(OH)₂, fluorapatīts Ca₁₀(PO₄)₆(F)₂ un hlorapatīts: Ca₁₀(PO₄)₆(Cl)₂. Pēc sastāva",
"ms": "\n_START_ARTICLE_\nEdward C. Prescott\n_START_PARAGRAPH_\nEdward Christian Prescott (lahir 26 Disember 1940) ialah seorang ahli ekonomi Amerika. Beliau menerima Hadiah Peringatan Nobel dalam Sains Ekonomi pada tahun 2004, berkongsi",
"no": "\n_START_ARTICLE_\nAl-Minya\n_START_SECTION_\nEtymologi\n_START_PARAGRAPH_\nDet er sprikende forklaringer på bynavnet. Det kan komme fra gammelegyptisk Men'at Khufu, i betydning byen hvor Khufu ble ammet, noe som knytter byen til farao Khufu (Keops), som",
"ro": "\n_START_ARTICLE_\nDealurile Cernăuțiului\n_START_PARAGRAPH_\nDealurile Cernăuțiului sunt un lanț deluros striat, care se întinde în partea centrală a interfluviului dintre Prut și Siret, în cadrul regiunii Cernăuți din",
"sk": "\n_START_ARTICLE_\n10. peruť RAAF\n_START_PARAGRAPH_\n10. peruť RAAF je námorná hliadkovacia peruť kráľovských austrálskych vzdušných síl (Royal Australian Air Force – RAAF) založená na základni Edinburgh v Južnej Austrálii ako súčasť 92",
"sl": "\n_START_ARTICLE_\n105 Artemida\n_START_SECTION_\nOdkritje\n_START_PARAGRAPH_\nAsteroid je 16. septembra 1868 odkril James Craig Watson (1838 – 1880). Poimenovan je po Artemidi, boginji Lune iz grške",
"sr": "\n_START_ARTICLE_\nЉанос Морелос 1. Сексион (Истапангахоја)\n_START_SECTION_\nСтановништво\n_START_PARAGRAPH_\nПрема подацима из 2010. године у насељу је живело 212",
"sv": "\n_START_ARTICLE_\nÖstra Torps landskommun\n_START_SECTION_\nAdministrativ historik\n_START_PARAGRAPH_\nKommunen bildades i Östra Torps socken i Vemmenhögs härad i Skåne när 1862 års kommunalförordningar trädde i kraft. _NEWLINE_Vid kommunreformen",
"tl": "\n_START_ARTICLE_\nBésame Mucho\n_START_PARAGRAPH_\nAng Bésame Mucho ay isang awit na nasa Kastila. Isinulat ito ng Mehikanang si Consuelo Velázquez noong 1940, bago sumapit ang kanyang ika-16 na",
"uk": "\n_START_ARTICLE_\nІслам та інші релігії\n_START_PARAGRAPH_\nПротягом багатовікової ісламської історії мусульманські правителі, ісламські вчені і звичайні мусульмани вступали у різні відносини з представниками інших релігій. Стиль цих",
"vi": "\n_START_ARTICLE_\nĐường tỉnh 316\n_START_PARAGRAPH_\nĐường tỉnh 316 hay tỉnh lộ 316, viết tắt ĐT316 hay TL316, là đường tỉnh ở các huyện Thanh Sơn, Thanh Thủy, Tam Nông tỉnh Phú Thọ ._NEWLINE_ĐT316 bắt đầu từ xã Tinh Nhuệ",
"multilingual-64k": "\n_START_ARTICLE_\n1882 Prince Edward Island general election\n_START_PARAGRAPH_\nThe 1882 Prince Edward Island election was held on May 8, 1882 to elect members of the House of Assembly of the province of Prince Edward Island, Canada.",
"multilingual-128k": "\n_START_ARTICLE_\n1882 Prince Edward Island general election\n_START_PARAGRAPH_\nThe 1882 Prince Edward Island election was held on May 8, 1882 to elect members of the House of Assembly of the province of Prince Edward Island, Canada."}
seed = lang_to_seed[language]
#@title Enter your own seed (Optional).
user_seed = "" #@param { type: "string" }
if user_seed.strip():
seed = user_seed.strip()
# The seed must start with "_START_ARTICLE_" or the generated text will be gibberish
START_ARTICLE = "_START_ARTICLE_"
if START_ARTICLE not in seed:
seed = "\n{}\n{}".format(START_ARTICLE, seed)
print("Generating text from seed:\n{}".format(seed))
#@title Initialize session.
# Create the session and run the variable/table initializers once.
with tf.Session(graph=g).as_default() as session:
    session.run(init_op)

#@title Generate text
# NOTE(review): `Session.as_default()` does not close the session on exiting
# the `with` block in TF1, so reusing `session` here is valid — confirm if
# porting this code.
with session.as_default():
    results = session.run([embeddings, neg_log_likelihood, ppl, activations, token_ids, generated_text], feed_dict={text: [seed]})
    embeddings_result, neg_log_likelihood_result, ppl_result, activations_result, token_ids_result, generated_text_result = results
    # The model returns UTF-8 bytes; decode for printing.
    generated_text_output = generated_text_result[0].decode('utf-8')
    print(generated_text_output)
```
We can also look at the other outputs of the model - the perplexity, the token ids, the intermediate activations, and the embeddings
```
ppl_result
token_ids_result
activations_result.shape
embeddings_result
```
| github_jupyter |
## Task #2
In the following task, we will train a Restricted Boltzmann Machine (RBM) on data from 100 Rydberg atoms. We will compare the energy of our simulated system against the exact known energy. In order to do this, it is necessary to explore some parameters of the Boltzmann machine. The number of hidden nodes and the number of samples are important for obtaining good results.
Imports and loading in data:
```
import numpy as np
import torch
import Rydberg_energy_calculator
from RBM_helper import RBM
training_data = torch.from_numpy(np.loadtxt("Rydberg_data.txt"))
```
The binary data in ```Rydberg\_data.txt``` corresponds to 100 atoms. An exact solution of the system via diagonalization requires around $2^N$ terms, which in this case is beyond any feasible calculation. Nonetheless, RBMs allow us to heavily compress this problem, trading exponentially growing complexity for linearly growing complexity. To recover the wavefunction of a system with 100 atoms, we only require $100 + n_h + n_h \times 100$ numbers, where $n_h$ is the number of hidden nodes.
We will evaluate the energy during training and compare it to the exact energy. This can be done with ```Rydberg\_energy\_calculator.py```. We will arbitrarily select a learning criterion, i.e. a limit at which to stop our training with satisfactory results.
We selected as learning criteria $\vert E_{RBM} - E_{exact} \vert \leq 0.0002$, where $E_{exact} = -4.1203519096$.
This problem relies heavily on the size of the sample we take from our data. The more samples we use, the more complex the network needed to generalize. We will first consider the entire dataset and find the minimum number of hidden nodes required to reach the learning criterion.
Each iteration will change the number of hidden nodes, and will have at most 1000 epochs.
```
# Search for the smallest number of hidden units that reaches the learning
# criterion |E_RBM - E_exact| < 0.0002 using the full dataset.
flag = 0
i = 0
epochs = 1000
num_samples = 20000  # number of chains used when sampling from the trained RBM
n_vis = training_data.shape[1]
exact_energy = -4.1203519096
print("Exact energy: ",exact_energy)

while flag == 0 :
    # Try one more hidden unit on each outer iteration.
    i = i + 1
    n_hin = i
    rbm = RBM(n_vis, n_hin)
    print("\n The number of hidden units is: ", n_hin)
    e = 0
    while (e < epochs):
        e = e + 1
        rbm.train(training_data)
        # Every 100 epochs, sample from the RBM and estimate its energy.
        if e % 100 == 0:
            init_state = torch.zeros(num_samples, n_vis)
            # NOTE(review): 1000 here is presumably the number of Gibbs steps
            # taken by RBM_helper.draw_samples — confirm against RBM_helper.
            RBM_samples = rbm.draw_samples(1000, init_state)
            energies = Rydberg_energy_calculator.energy(RBM_samples, rbm.wavefunction)
            print("Epoch:", e,". Energy from RBM samples:", energies.item(),". Error:", abs(exact_energy - energies.item()))
            # Once the criterion is met, force both loops to terminate.
            if (abs(exact_energy - energies.item()) < 0.0002):
                print("FINAL NUMBER OF HIDDEN UNITS:", n_hin)
                print("FINAL NUMBER OF EPOCHS:", e)
                print("ERROR:", abs(exact_energy - energies.item()))
                e = epochs
                flag = 1
```
We will now double the number of hidden units (and hence the complexity of the network), while lowering the number of samples to find the minimum number required to achieve the same amount of precision.
```
# Fix the hidden-unit count (double the converged value from above) and
# search for the smallest sample count that reaches the learning criterion.
flag = 0
i = 0
epochs = 1000
n_hin = 3 * 2 # in the previous case it converged with 3 units
n_vis = training_data.shape[1]
exact_energy = -4.1203519096
print("Exact energy: ",exact_energy,". Hidden units:",n_hin,".")

while flag == 0 :
    i = i + 1
    # Grow the sample count by 10 on each outer iteration.
    num_samples = 10 * i
    rbm = RBM(n_vis, n_hin)
    print("\nThe number of samples is: ", num_samples)
    e = 0
    while (e < epochs):
        e = e + 1
        rbm.train(training_data)
        # Every 100 epochs, sample from the RBM and estimate its energy.
        if e % 100 == 0:
            init_state = torch.zeros(num_samples, n_vis)
            # NOTE(review): 1000 here is presumably the number of Gibbs steps
            # taken by RBM_helper.draw_samples — confirm against RBM_helper.
            RBM_samples = rbm.draw_samples(1000, init_state)
            energies = Rydberg_energy_calculator.energy(RBM_samples, rbm.wavefunction)
            print("Epoch:", e,". Energy from RBM samples:", energies.item(),". Error:", abs(exact_energy - energies.item()))
            # Once the criterion is met, force both loops to terminate.
            if (abs(exact_energy - energies.item()) < 0.0002):
                print("NUMBER OF SAMPLES:", num_samples)
                print("FINAL NUMBER OF EPOCHS:", e)
                print("ERROR:", abs(exact_energy - energies.item()))
                e = epochs
                flag = 1
```
For a better precision with the whole data set, such as $\vert E_{RBM} - E_{exact} \vert \leq 0.0001$, the required number of hidden nodes is extremely hard to find. Iterations with up to 100 hidden nodes did not find any case that met the learning criterion, even when we tried a greater number of hidden units. As the number of samples decreases, the complexity of the network necessary to meet the learning criterion decreases as well.
More information on these kind of systems with RBM can be found in the work [Integrating Neural Networks with a Quantum Simulator for State Reconstruction](https://arxiv.org/pdf/1904.08441.pdf)
| github_jupyter |
# ModelList (Multi-Output) GP Regression
## Introduction
This notebook demonstrates how to wrap independent GP models into a convenient Multi-Output GP model using a ModelList.
Unlike in the Multitask case, this does not model correlations between outcomes, but treats outcomes independently. This is equivalent to setting up a separate GP for each outcome, but can be much more convenient to handle; in particular, it does not require manually looping over models when fitting or predicting.
This type of model is useful
- when the number of training / test points is different for the different outcomes
- when using different covariance modules and / or likelihoods for each outcome
For block designs (i.e. when the above points do not apply), you should instead use a batch mode GP as described in the [batch independent multioutput example](./Batch_Independent_Multioutput_GP.ipynb). This will be much faster because it uses additional parallelism.
```
import math
import torch
import gpytorch
from matplotlib import pyplot as plt
%matplotlib inline
%load_ext autoreload
%autoreload 2
```
### Set up training data
In the next cell, we set up the training data for this example. We'll be using a different number of training examples for the different GPs.
```
train_x1 = torch.linspace(0, 0.95, 50) + 0.05 * torch.rand(50)
train_x2 = torch.linspace(0, 0.95, 25) + 0.05 * torch.rand(25)
train_y1 = torch.sin(train_x1 * (2 * math.pi)) + 0.2 * torch.randn_like(train_x1)
train_y2 = torch.cos(train_x2 * (2 * math.pi)) + 0.2 * torch.randn_like(train_x2)
```
## Set up the sub-models
Each individual model uses the `ExactGP` model from the [simple regression example](../01_Exact_GPs/Simple_GP_Regression.ipynb).
```
class ExactGPModel(gpytorch.models.ExactGP):
    """Exact GP with a constant mean and an output-scaled RBF kernel."""

    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        # Constant mean function; RBF kernel wrapped in a ScaleKernel to
        # learn an output scale alongside the lengthscale.
        self.mean_module = gpytorch.means.ConstantMean()
        base_kernel = gpytorch.kernels.RBFKernel()
        self.covar_module = gpytorch.kernels.ScaleKernel(base_kernel)

    def forward(self, x):
        """Return the prior multivariate normal at inputs ``x``."""
        return gpytorch.distributions.MultivariateNormal(
            self.mean_module(x), self.covar_module(x)
        )
likelihood1 = gpytorch.likelihoods.GaussianLikelihood()
model1 = ExactGPModel(train_x1, train_y1, likelihood1)
likelihood2 = gpytorch.likelihoods.GaussianLikelihood()
model2 = ExactGPModel(train_x2, train_y2, likelihood2)
```
We now collect the submodels in an `IndependentMultiOutputGP`, and the respective likelihoods in a `MultiOutputLikelihood`. These are container modules that make it easy to work with multiple outputs. In particular, they will take in and return lists of inputs / outputs and delegate the data to / from the appropriate sub-model (it is important that the order of the inputs / outputs corresponds to the order of models with which the containers were instantiated).
```
model = gpytorch.models.IndependentModelList(model1, model2)
likelihood = gpytorch.likelihoods.LikelihoodList(model1.likelihood, model2.likelihood)
```
### Set up overall Marginal Log Likelihood
Assuming independence, the MLL for the container model is simply the sum of the MLLs for the individual models. `SumMarginalLogLikelihood` is a convenient container for this (by default it uses an `ExactMarginalLogLikelihood` for each submodel)
```
from gpytorch.mlls import SumMarginalLogLikelihood
mll = SumMarginalLogLikelihood(likelihood, model)
```
### Train the model hyperparameters
With the containers in place, the models can be trained in a single loop on the container (note that this means that optimization is performed jointly, which can be an issue if the individual submodels require training via very different step sizes).
```
# this is for running the notebook in our testing framework
import os
smoke_test = ('CI' in os.environ)
training_iterations = 2 if smoke_test else 50

# Find optimal model hyperparameters
model.train()
likelihood.train()

# Use the Adam optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)  # Includes GaussianLikelihood parameters

for i in range(training_iterations):
    optimizer.zero_grad()
    # The model list forwards each submodel's own training inputs and
    # returns one output per submodel.
    output = model(*model.train_inputs)
    # Negative summed marginal log likelihood over all submodels.
    loss = -mll(output, model.train_targets)
    loss.backward()
    print('Iter %d/%d - Loss: %.3f' % (i + 1, training_iterations, loss.item()))
    optimizer.step()
```
### Make predictions with the model
```
# Set into eval mode
model.eval()
likelihood.eval()
# Initialize plots
f, axs = plt.subplots(1, 2, figsize=(8, 3))
# Make predictions (use the same test points)
with torch.no_grad(), gpytorch.settings.fast_pred_var():
test_x = torch.linspace(0, 1, 51)
# This contains predictions for both outcomes as a list
predictions = likelihood(*model(test_x, test_x))
for submodel, prediction, ax in zip(model.models, predictions, axs):
mean = prediction.mean
lower, upper = prediction.confidence_region()
tr_x = submodel.train_inputs[0].detach().numpy()
tr_y = submodel.train_targets.detach().numpy()
# Plot training data as black stars
ax.plot(tr_x, tr_y, 'k*')
# Predictive mean as blue line
ax.plot(test_x.numpy(), mean.numpy(), 'b')
# Shade in confidence
ax.fill_between(test_x.numpy(), lower.detach().numpy(), upper.detach().numpy(), alpha=0.5)
ax.set_ylim([-3, 3])
ax.legend(['Observed Data', 'Mean', 'Confidence'])
ax.set_title('Observed Values (Likelihood)')
None
```
| github_jupyter |
```
#hide
from utils import *
```
# Collaborative Filtering Deep Dive
## A First Look at the Data
```
from fastai.collab import *
from fastai.tabular.all import *
path = untar_data(URLs.ML_100k)
ratings = pd.read_csv(path/'u.data', delimiter='\t', header=None,
names=['user','movie','rating','timestamp'])
ratings.head()
last_skywalker = np.array([0.98,0.9,-0.9])
user1 = np.array([0.9,0.8,-0.6])
(user1*last_skywalker).sum()
casablanca = np.array([-0.99,-0.3,0.8])
(user1*casablanca).sum()
```
## Learning the Latent Factors
## Creating the DataLoaders
```
movies = pd.read_csv(path/'u.item', delimiter='|', encoding='latin-1',
usecols=(0,1), names=('movie','title'), header=None)
movies.head()
ratings = ratings.merge(movies)
ratings.head()
dls = CollabDataLoaders.from_df(ratings, item_name='title', bs=64)
dls.show_batch()
dls.classes
n_users = len(dls.classes['user'])
n_movies = len(dls.classes['title'])
n_factors = 5
user_factors = torch.randn(n_users, n_factors)
movie_factors = torch.randn(n_movies, n_factors)
one_hot_3 = one_hot(3, n_users).float()
user_factors.t() @ one_hot_3
user_factors[3]
```
## Collaborative Filtering from Scratch
```
class Example:
    """Tiny demo class: stores a name and greets with it."""

    def __init__(self, a):
        # Keep the name for later greetings.
        self.a = a

    def say(self, x):
        """Return a greeting combining the stored name with ``x``."""
        return "Hello {}, {}.".format(self.a, x)
ex = Example('Sylvain')
ex.say('nice to meet you')
class DotProduct(Module):
    """Collaborative-filtering model: rating = dot(user factors, movie factors)."""

    def __init__(self, n_users, n_movies, n_factors):
        # One learnable latent-factor vector per user and per movie.
        self.user_factors = Embedding(n_users, n_factors)
        self.movie_factors = Embedding(n_movies, n_factors)

    def forward(self, x):
        # x[:,0] holds user indices and x[:,1] movie indices for the batch.
        users = self.user_factors(x[:,0])
        movies = self.movie_factors(x[:,1])
        # Per-example dot product of the two factor vectors.
        return (users * movies).sum(dim=1)
x,y = dls.one_batch()
x.shape
model = DotProduct(n_users, n_movies, 50)
learn = Learner(dls, model, loss_func=MSELossFlat())
learn.fit_one_cycle(5, 5e-3)
class DotProduct(Module):
    """Dot-product collaborative filtering with the prediction squashed
    into ``y_range`` by a scaled sigmoid."""

    def __init__(self, n_users, n_movies, n_factors, y_range=(0,5.5)):
        # Latent-factor tables for users and movies.
        self.user_factors = Embedding(n_users, n_factors)
        self.movie_factors = Embedding(n_movies, n_factors)
        self.y_range = y_range

    def forward(self, x):
        # Column 0 indexes users, column 1 indexes movies.
        user_emb = self.user_factors(x[:,0])
        movie_emb = self.movie_factors(x[:,1])
        dot = (user_emb * movie_emb).sum(dim=1)
        # Map the raw score into the rating range.
        return sigmoid_range(dot, *self.y_range)
model = DotProduct(n_users, n_movies, 50)
learn = Learner(dls, model, loss_func=MSELossFlat())
learn.fit_one_cycle(5, 5e-3)
class DotProductBias(Module):
    """Dot-product model with per-user and per-movie bias terms; the raw
    score is squashed into ``y_range`` with a scaled sigmoid."""

    def __init__(self, n_users, n_movies, n_factors, y_range=(0,5.5)):
        self.user_factors = Embedding(n_users, n_factors)
        self.user_bias = Embedding(n_users, 1)
        self.movie_factors = Embedding(n_movies, n_factors)
        self.movie_bias = Embedding(n_movies, 1)
        self.y_range = y_range

    def forward(self, x):
        users = self.user_factors(x[:,0])
        movies = self.movie_factors(x[:,1])
        # keepdim=True keeps the result (batch, 1) so the bias embeddings,
        # which are also (batch, 1), can be added elementwise.
        res = (users * movies).sum(dim=1, keepdim=True)
        res += self.user_bias(x[:,0]) + self.movie_bias(x[:,1])
        return sigmoid_range(res, *self.y_range)
model = DotProductBias(n_users, n_movies, 50)
learn = Learner(dls, model, loss_func=MSELossFlat())
learn.fit_one_cycle(5, 5e-3)
```
### Weight Decay
```
x = np.linspace(-2,2,100)
a_s = [1,2,5,10,50]
ys = [a * x**2 for a in a_s]
_,ax = plt.subplots(figsize=(8,6))
for a,y in zip(a_s,ys): ax.plot(x,y, label=f'a={a}')
ax.set_ylim([0,5])
ax.legend();
model = DotProductBias(n_users, n_movies, 50)
learn = Learner(dls, model, loss_func=MSELossFlat())
learn.fit_one_cycle(5, 5e-3, wd=0.1)
```
### Creating Our Own Embedding Module
```
class T(Module):
def __init__(self): self.a = torch.ones(3)
L(T().parameters())
class T(Module):
def __init__(self): self.a = nn.Parameter(torch.ones(3))
L(T().parameters())
class T(Module):
def __init__(self): self.a = nn.Linear(1, 3, bias=False)
t = T()
L(t.parameters())
type(t.a.weight)
def create_params(size):
    """Return a trainable ``nn.Parameter`` of the given shape, initialised
    in place from N(0, 0.01)."""
    weights = torch.zeros(*size)
    weights.normal_(0, 0.01)
    return nn.Parameter(weights)
class DotProductBias(Module):
    """Same model as the Embedding version, but built from raw
    ``nn.Parameter`` tensors; plain indexing replaces the embedding lookup."""

    def __init__(self, n_users, n_movies, n_factors, y_range=(0,5.5)):
        self.user_factors = create_params([n_users, n_factors])
        self.user_bias = create_params([n_users])
        self.movie_factors = create_params([n_movies, n_factors])
        self.movie_bias = create_params([n_movies])
        self.y_range = y_range

    def forward(self, x):
        # Tensor indexing instead of calling an Embedding module.
        users = self.user_factors[x[:,0]]
        movies = self.movie_factors[x[:,1]]
        # Biases are indexed as 1-D here, so no keepdim is needed.
        res = (users*movies).sum(dim=1)
        res += self.user_bias[x[:,0]] + self.movie_bias[x[:,1]]
        return sigmoid_range(res, *self.y_range)
model = DotProductBias(n_users, n_movies, 50)
learn = Learner(dls, model, loss_func=MSELossFlat())
learn.fit_one_cycle(5, 5e-3, wd=0.1)
```
## Interpreting Embeddings and Biases
```
movie_bias = learn.model.movie_bias.squeeze()
idxs = movie_bias.argsort()[:5]
[dls.classes['title'][i] for i in idxs]
idxs = movie_bias.argsort(descending=True)[:5]
[dls.classes['title'][i] for i in idxs]
g = ratings.groupby('title')['rating'].count()
top_movies = g.sort_values(ascending=False).index.values[:1000]
top_idxs = tensor([learn.dls.classes['title'].o2i[m] for m in top_movies])
movie_w = learn.model.movie_factors[top_idxs].cpu().detach()
movie_pca = movie_w.pca(3)
fac0,fac1,fac2 = movie_pca.t()
idxs = np.random.choice(len(top_movies), 50, replace=False)
idxs = list(range(50))
X = fac0[idxs]
Y = fac2[idxs]
plt.figure(figsize=(12,12))
plt.scatter(X, Y)
for i, x, y in zip(top_movies[idxs], X, Y):
plt.text(x,y,i, color=np.random.rand(3)*0.7, fontsize=11)
plt.show()
```
### Using fastai.collab
```
learn = collab_learner(dls, n_factors=50, y_range=(0, 5.5))
learn.fit_one_cycle(5, 5e-3, wd=0.1)
learn.model
movie_bias = learn.model.i_bias.weight.squeeze()
idxs = movie_bias.argsort(descending=True)[:5]
[dls.classes['title'][i] for i in idxs]
```
### Embedding Distance
```
movie_factors = learn.model.i_weight.weight
idx = dls.classes['title'].o2i['Silence of the Lambs, The (1991)']
distances = nn.CosineSimilarity(dim=1)(movie_factors, movie_factors[idx][None])
idx = distances.argsort(descending=True)[1]
dls.classes['title'][idx]
```
## Bootstrapping a Collaborative Filtering Model
## Deep Learning for Collaborative Filtering
```
embs = get_emb_sz(dls)
embs
class CollabNN(Module):
    """Neural collaborative filtering: concatenated user/item embeddings fed
    through a small MLP, with the output squashed into ``y_range``."""

    def __init__(self, user_sz, item_sz, y_range=(0,5.5), n_act=100):
        # user_sz / item_sz are (vocab_size, embedding_dim) pairs, e.g. as
        # returned by fastai's get_emb_sz.
        self.user_factors = Embedding(*user_sz)
        self.item_factors = Embedding(*item_sz)
        self.layers = nn.Sequential(
            nn.Linear(user_sz[1]+item_sz[1], n_act),
            nn.ReLU(),
            nn.Linear(n_act, 1))
        self.y_range = y_range

    def forward(self, x):
        embs = self.user_factors(x[:,0]),self.item_factors(x[:,1])
        # Concatenate the two embeddings along the feature axis.
        x = self.layers(torch.cat(embs, dim=1))
        return sigmoid_range(x, *self.y_range)
model = CollabNN(*embs)
learn = Learner(dls, model, loss_func=MSELossFlat())
learn.fit_one_cycle(5, 5e-3, wd=0.01)
learn = collab_learner(dls, use_nn=True, y_range=(0, 5.5), layers=[100,50])
learn.fit_one_cycle(5, 5e-3, wd=0.1)
@delegates(TabularModel)
class EmbeddingNN(TabularModel):
    """A TabularModel specialised for collaborative filtering: no continuous
    variables and a single regression output.

    The ``@delegates`` decorator copies TabularModel's remaining keyword
    arguments into this class's signature for docs and autocompletion.
    """

    def __init__(self, emb_szs, layers, **kwargs):
        super().__init__(emb_szs, layers=layers, n_cont=0, out_sz=1, **kwargs)
```
### Sidebar: kwargs and Delegates
### End sidebar
## Conclusion
## Questionnaire
1. What problem does collaborative filtering solve?
1. How does it solve it?
1. Why might a collaborative filtering predictive model fail to be a very useful recommendation system?
1. What does a crosstab representation of collaborative filtering data look like?
1. Write the code to create a crosstab representation of the MovieLens data (you might need to do some web searching!).
1. What is a latent factor? Why is it "latent"?
1. What is a dot product? Calculate a dot product manually using pure Python with lists.
1. What does `pandas.DataFrame.merge` do?
1. What is an embedding matrix?
1. What is the relationship between an embedding and a matrix of one-hot-encoded vectors?
1. Why do we need `Embedding` if we could use one-hot-encoded vectors for the same thing?
1. What does an embedding contain before we start training (assuming we're not using a pretained model)?
1. Create a class (without peeking, if possible!) and use it.
1. What does `x[:,0]` return?
1. Rewrite the `DotProduct` class (without peeking, if possible!) and train a model with it.
1. What is a good loss function to use for MovieLens? Why?
1. What would happen if we used cross-entropy loss with MovieLens? How would we need to change the model?
1. What is the use of bias in a dot product model?
1. What is another name for weight decay?
1. Write the equation for weight decay (without peeking!).
1. Write the equation for the gradient of weight decay. Why does it help reduce weights?
1. Why does reducing weights lead to better generalization?
1. What does `argsort` do in PyTorch?
1. Does sorting the movie biases give the same result as averaging overall movie ratings by movie? Why/why not?
1. How do you print the names and details of the layers in a model?
1. What is the "bootstrapping problem" in collaborative filtering?
1. How could you deal with the bootstrapping problem for new users? For new movies?
1. How can feedback loops impact collaborative filtering systems?
1. When using a neural network in collaborative filtering, why can we have different numbers of factors for movies and users?
1. Why is there an `nn.Sequential` in the `CollabNN` model?
1. What kind of model should we use if we want to add metadata about users and items, or information such as date and time, to a collaborative filtering model?
### Further Research
1. Take a look at all the differences between the `Embedding` version of `DotProductBias` and the `create_params` version, and try to understand why each of those changes is required. If you're not sure, try reverting each change to see what happens. (NB: even the type of brackets used in `forward` has changed!)
1. Find three other areas where collaborative filtering is being used, and find out what the pros and cons of this approach are in those areas.
1. Complete this notebook using the full MovieLens dataset, and compare your results to online benchmarks. See if you can improve your accuracy. Look on the book's website and the fast.ai forum for ideas. Note that there are more columns in the full dataset—see if you can use those too (the next chapter might give you ideas).
1. Create a model for MovieLens that works with cross-entropy loss, and compare it to the model in this chapter.
| github_jupyter |
# Fashion MNIST
-[Rishit Dagli](rishit.tech)
## About Me
[Twitter](https://twitter.com/rishit_dagli)
[GitHub](https://github.com/Rishit-dagli)
[Medium](https://medium.com/@rishit.dagli)
Note: Please unzip the files with the code in the cell below before you move forward
```
# !unzip /Fashion MNIST/fashionmnist.zip
```
# Some imports
```
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
```
# Load the data
Let's now load the data using `pandas`
```
# Load the train/test splits shipped as CSVs
# (column 0 = class label, columns 1..784 = the 28x28 pixel values).
data_train = pd.read_csv('Fashion MNIST/fashion-mnist_train.csv')
data_test = pd.read_csv('Fashion MNIST/fashion-mnist_test.csv')
```
# Some preprocessing
Let's now specify the size of our image
```
# Fashion-MNIST images are 28x28, single channel (grayscale).
img_rows, img_cols = 28, 28
input_shape = (img_rows, img_cols, 1)
```
Now we will split the features and labels
```
# Column 0 is the label; the remaining 784 columns are raw pixel values.
x_train = np.array(data_train.iloc[:, 1:])
y_train = np.array(data_train.iloc[:, 0])
y_train  # bare expression: displays the label array in the notebook
x_test = np.array(data_test.iloc[:, 1:])
y_test = np.array(data_test.iloc[:, 0])
```
It is important to reshape our data; we have 60000 train samples and 10000 test samples
```
# Reshape to (N, 28, 28, 1) -- infer N with -1 instead of hard-coding
# 60000/10000, so the cell still works if the CSV row counts change --
# and scale pixel values into [0, 1].
x_train = x_train.reshape(-1, 28, 28, 1)
x_train = x_train / 255.0
x_test = x_test.reshape(-1, 28, 28, 1)
x_test = x_test / 255.0
```
# Model
Let's define a few hyper parameters
```
# Hyper-parameters.  Labels are plain integer class ids (not one-hot),
# hence sparse_categorical_crossentropy below.
num_classes = 10
epochs = 10
img_rows, img_cols = 28, 28  # re-declared; same values as earlier cell
optimizer = 'adam'
loss = 'sparse_categorical_crossentropy'
```
And finally the model now
```
# Small CNN: one conv + pool stage, then a dense head with a softmax
# over the 10 clothing classes.
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(num_classes, activation='softmax')
])
```
You can try experimenting with different optimizers
```
# Compile and train; `history` keeps the per-epoch loss/accuracy curves.
model.compile(optimizer = optimizer,
              loss = loss,
              metrics=['accuracy'])
history = model.fit(x_train,
                    y_train,
                    epochs = epochs)
```
# Evaluating the model
Let's see the test accuracy of our model
```
# Held-out evaluation on the 10k test images.
test_loss, test_acc = model.evaluate(x_test,
                                     y_test)
test_acc  # displayed in the notebook
```
# Seeing inside convolutions
Have fun with this code adapted from [Laurence Moroney](http://www.laurencemoroney.com/) which enables us to see an image being processed inside a CNN
```
import matplotlib.pyplot as plt
f, axarr = plt.subplots(3,4)  # 3 sample images x 4 layers
FIRST_IMAGE=0
SECOND_IMAGE=7
THIRD_IMAGE=26
CONVOLUTION_NUMBER = 1  # which feature map (channel) to display
from tensorflow.keras import models
# Multi-output model exposing every layer's activations for one input.
# NOTE(review): only the Conv2D and MaxPooling2D outputs are 4-D; the
# Flatten/Dense outputs reached at x=2,3 are 2-D, so the [0,:,:,ch]
# indexing below looks like it would fail for them on current Keras --
# confirm, or filter layer_outputs down to 4-D tensors.
layer_outputs = [layer.output for layer in model.layers]
activation_model = tf.keras.models.Model(inputs = model.input, outputs = layer_outputs)
for x in range(0,4):
    f1 = activation_model.predict(x_test[FIRST_IMAGE].reshape(1, 28, 28, 1))[x]
    axarr[0,x].imshow(f1[0, : , :, CONVOLUTION_NUMBER], cmap='inferno')
    axarr[0,x].grid(False)
    f2 = activation_model.predict(x_test[SECOND_IMAGE].reshape(1, 28, 28, 1))[x]
    axarr[1,x].imshow(f2[0, : , :, CONVOLUTION_NUMBER], cmap='inferno')
    axarr[1,x].grid(False)
    f3 = activation_model.predict(x_test[THIRD_IMAGE].reshape(1, 28, 28, 1))[x]
    axarr[2,x].imshow(f3[0, : , :, CONVOLUTION_NUMBER], cmap='inferno')
    axarr[2,x].grid(False)
```

```
# `Sequential.predict_classes` was deprecated and removed in TF 2.6;
# the argmax of the softmax probabilities is the identical replacement.
predicted_classes = np.argmax(model.predict(x_test), axis=-1)
y_true = data_test.iloc[:, 0]
# Index arrays of correctly / incorrectly classified test samples.
correct = np.nonzero(predicted_classes==y_true)
incorrect = np.nonzero(predicted_classes!=y_true)
correct
```
A sample image
```
plt.imshow(x_test[0].reshape(28,28))  # first test image, flattened back to 2-D for imshow
```
## Conclusion
We performed extremely well with a train accuracy of 97% and a test accuracy of 93%. We did this in just 10 epochs, which took as little as 80 seconds to train! This is some pretty good success
| github_jupyter |
```
%matplotlib inline
%pdb on
from pprint import pprint
import itertools
import numpy
from metrics import wer, cew, ssr, average, hreff
import montecarlo
import market
import dms
import withdrawal
import mortality
from portfolio import Portfolio
import harvesting
from decimal import Decimal as D
import plot
from matplotlib import pyplot as plt
import pandas
# Survivorship curve built from the Annuity 2000 mortality table; used to
# draw random retirement lifespans in the bootstrap runs below.
survival_function = mortality.make_mortality(mortality.ANNUITY_2000)
HREFF_FLOOR = 3  # floor (in percent) passed to the HREFF metric
def run_one_new(dataset, strategy, hreff_floor=HREFF_FLOOR, debug_cashflows=False):
    """Simulate one retirement and score it with WER and HREFF.

    dataset: iterable of annual market returns to replay.
    strategy: harvesting-strategy class, instantiated on the portfolio.
    hreff_floor: HREFF floor in percent.
    debug_cashflows: when True, dump per-year cashflows to a CSV file.

    Returns (wer, hreff, returns): both efficiency metrics plus the list
    of realized annual returns.
    """
    # Portfolio(600000, 400000) -- presumably $600k stocks / $400k bonds;
    # confirm against the Portfolio constructor.
    p = Portfolio(600000, 400000)
    h = strategy(p).harvest()
    h.send(None)  # prime the harvesting coroutine
    #w = withdrawal.VPW(p, h, years_left=35).withdrawals()
    w = withdrawal.ConstantDollar(p, h).withdrawals()

    def add_debug_log(annual):
        # Collect one row per simulated year (only when debugging).
        if debug_cashflows:
            debug.append({
                'Returns' : annual.returns,
                'Withdraw' : annual.withdraw_n,
                'Portfolio' : annual.portfolio_n,
                'Withdraw Orig' : annual.withdraw_pct_orig,
            })

    cashflows = []
    returns = []
    debug = []

    # Initial withdrawal at start of retirement
    annual = w.send(None)
    add_debug_log(annual)
    cashflows.append(annual.withdraw_pct_orig)

    # Feed each year's market return into the withdrawal coroutine.
    for i in dataset:
        annual = w.send(i)
        add_debug_log(annual)
        returns.append(annual.returns)
        cashflows.append(annual.withdraw_pct_orig)

    if debug_cashflows:
        pandas.DataFrame(data=debug).to_csv('WERCOMP-cashflows.csv')

    # Note: w and h are rebound here from coroutines to metric scores.
    w = wer(cashflows, returns)
    h = hreff(cashflows, returns, floor=D(hreff_floor)/100)
    return (w, h, returns)
def run_bootstrap(strategies, runs=1000, dataset=montecarlo.historical[60]):
    """Monte-Carlo comparison of harvesting strategies.

    For each of *runs* trials: draw a random retiree lifespan, sample that
    many annual returns from *dataset*, and score every strategy on the
    identical return sequence (itertools.tee gives each strategy its own
    copy of the stream).

    Returns (wer_df, hreff_df, returns_df): per-run WER and HREFF scores
    per strategy, plus the sampled returns padded to 60 columns.
    """
    wer_df = pandas.DataFrame(index=numpy.arange(0, runs), columns=[s.__name__ for s in strategies])
    hreff_df = pandas.DataFrame(index=numpy.arange(0, runs), columns=[s.__name__ for s in strategies])
    returns_df = pandas.DataFrame(index=numpy.arange(0, runs), columns=[i for i in range(60)])
    for i in range(runs):
        # First generate a lifespan, so we know how many years of returns we need to generate
        lifespan = mortality.gen_lifespan(mortality.DEFAULT_COUPLE, survival_function)
        returns = (dataset.random_year() for y in range(lifespan))
        pairs = zip(strategies, itertools.tee(returns, len(strategies)))
        for (s, r) in pairs:
            # Locals renamed (wer_score/hreff_score) so they no longer
            # shadow the imported metric functions wer() and hreff().
            (wer_score, hreff_score, realized) = run_one_new(r, s)
            # Single .loc[row, col] assignment instead of the original
            # chained `df.loc[i][col] = ...`, which writes through an
            # intermediate object and is unreliable in modern pandas.
            wer_df.loc[i, s.__name__] = wer_score
            hreff_df.loc[i, s.__name__] = hreff_score
            # NOTE(review): assumes lifespan <= 60 -- confirm gen_lifespan
            # cannot exceed the 60 return columns.
            returns_df.loc[i] = realized + [None for _ in range(60-lifespan)]
    return (wer_df, hreff_df, returns_df)
def run_sequential(strategies, dataset):
    """Score every strategy over each rolling 30-year historical window.

    One row per retirement start year; every strategy sees the identical
    30-year return slice via itertools.tee.

    Returns (wer_df, hreff_df, returns_df) as in run_bootstrap.
    """
    runs = len(dataset)
    wer_df = pandas.DataFrame(index=numpy.arange(0, runs), columns=[s.__name__ for s in strategies])
    hreff_df = pandas.DataFrame(index=numpy.arange(0, runs), columns=[s.__name__ for s in strategies])
    returns_df = pandas.DataFrame(index=numpy.arange(0, runs), columns=[i for i in range(60)])
    lifespan = 30
    for i in range(runs - lifespan):
        returns = itertools.islice(dataset.iter_from(dataset.start_year + i), lifespan)
        pairs = zip(strategies, itertools.tee(returns, len(strategies)))
        for (s, r) in pairs:
            # Locals renamed to avoid shadowing the imported wer()/hreff().
            (wer_score, hreff_score, realized) = run_one_new(r, s)
            # Single .loc[row, col] assignment instead of chained
            # `df.loc[i][col] = ...` (unreliable in modern pandas).
            wer_df.loc[i, s.__name__] = wer_score
            hreff_df.loc[i, s.__name__] = hreff_score
            returns_df.loc[i] = realized + [None for _ in range(60-lifespan)]
    return (wer_df, hreff_df, returns_df)
# Harvesting strategies under comparison: fixed-allocation rebalancing,
# Prime/AltPrime, bonds-first, age-based rules, glidepaths, etc.
strategies = [
    harvesting.N_60_RebalanceHarvesting,
    harvesting.N_100_RebalanceHarvesting,
    harvesting.PrimeHarvesting,
    harvesting.AltPrimeHarvesting,
    harvesting.BondsFirst,
    harvesting.OmegaNot,
    harvesting.Weiss,
    harvesting.AgeBased_100,
    harvesting.AgeBased_110,
    harvesting.AgeBased_120,
    harvesting.Glidepath,
    harvesting.InverseGlidepath,
    harvesting.ActuarialHarvesting,
]
def make_report(df):
    """Return the per-column (per-strategy) means of *df*, sorted ascending.

    Fix: the original used `DataFrame.iteritems`, which was deprecated and
    removed in pandas 2.0; `items` is the long-supported equivalent.
    """
    means = {col: series.mean() for (col, series) in df.items()}
    report = pandas.Series(means)
    report.sort_values(inplace=True)
    return report
# Historical (rolling-window) comparisons, one market per chart.
wer_df, hreff_df, returns_df = run_sequential(strategies, market.Returns_US_1871())
#make_report(wer_df).plot(kind='bar', title='US - Historical - WER')
make_report(hreff_df).plot(kind='bar', title='US - Historical - HREFF-%d' % HREFF_FLOOR)
wer_df, hreff_df, returns_df = run_sequential(strategies, market.UK1900())
#make_report(wer_df).plot(kind='bar', title='UK - Historical - WER')
make_report(hreff_df).plot(kind='bar', title='UK - Historical - HREFF-%d' % HREFF_FLOOR)
wer_df, hreff_df, returns_df = run_sequential(strategies, market.Japan_1957())
#make_report(wer_df).plot(kind='bar', title='Japan - Historical - WER')
make_report(hreff_df).plot(kind='bar', title='Japan - Historical - HREFF-%d' % HREFF_FLOOR)
# Monte-Carlo (bootstrap) comparisons over random lifespans.
wer_df, hreff_df, returns_df = run_bootstrap(strategies, runs=1000, dataset=market.Returns_US_1871())
#make_report(wer_df).plot(kind='bar', title='US - Monte Carlo - WER')
make_report(hreff_df).plot(kind='bar', title='US - Monte Carlo - HREFF-%d' % HREFF_FLOOR)
wer_df, hreff_df, returns_df = run_bootstrap(strategies, runs=1000, dataset=market.UK1900())
#make_report(wer_df).plot(kind='bar', title='UK - Monte Carlo - WER')
make_report(hreff_df).plot(kind='bar', title='UK - Monte Carlo - HREFF-%d' % HREFF_FLOOR)
wer_df, hreff_df, returns_df = run_bootstrap(strategies, runs=1000, dataset=market.Japan_1957())
#make_report(wer_df).plot(kind='bar', title='Japan - Monte Carlo - WER')
make_report(hreff_df).plot(kind='bar', title='Japan - Monte Carlo - HREFF-%d' % HREFF_FLOOR)
```
| github_jupyter |
# M-Estimators for Robust Linear Modeling
```
%matplotlib inline
from __future__ import print_function
from statsmodels.compat import lmap
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import statsmodels.api as sm
```
* An M-estimator minimizes the function
$$Q(e_i, \rho) = \sum_i~\rho \left (\frac{e_i}{s}\right )$$
where $\rho$ is a symmetric function of the residuals
* The effect of $\rho$ is to reduce the influence of outliers
* $s$ is an estimate of scale.
* The robust estimates $\hat{\beta}$ are computed by the iteratively re-weighted least squares algorithm
* We have several choices available for the weighting functions to be used
```
norms = sm.robust.norms  # shorthand for the robust norm classes used below

def plot_weights(support, weights_func, xlabels, xticks):
    """Plot a robust norm's weight function evaluated over *support*.

    support: x-values at which to evaluate the weight function.
    weights_func: callable mapping residuals to weights.
    xlabels / xticks: tick labels and positions for the x-axis.
    Returns the Axes so callers can customise the plot further.
    """
    figure = plt.figure(figsize=(12, 8))
    axis = figure.add_subplot(111)
    axis.plot(support, weights_func(support))
    axis.set_xticks(xticks)
    axis.set_xticklabels(xlabels, fontsize=16)
    axis.set_ylim(-.1, 1.1)
    return axis
```
### Andrew's Wave
```
# Andrew's wave: weights vanish outside [-pi*a, pi*a].
help(norms.AndrewWave.weights)
a = 1.339
support = np.linspace(-np.pi*a, np.pi*a, 100)
andrew = norms.AndrewWave(a=a)
# Raw strings for the TeX labels: '\p' is an invalid escape sequence
# (DeprecationWarning today, SyntaxError in future Python); the rendered
# labels are unchanged.
plot_weights(support, andrew.weights, [r'$-\pi*a$', '0', r'$\pi*a$'], [-np.pi*a, 0, np.pi*a]);
```
### Hampel's 17A
```
# Hampel's 17A: piecewise weight with breakpoints a, b, c.
help(norms.Hampel.weights)
c = 8
support = np.linspace(-3*c, 3*c, 1000)
hampel = norms.Hampel(a=2., b=4., c=c)
# Bug fix: the left tick label read '3*c' but marks the -3*c position.
plot_weights(support, hampel.weights, ['-3*c', '0', '3*c'], [-3*c, 0, 3*c]);
```
### Huber's t
```
# Huber's t: full weight near zero, downweighted beyond the threshold t.
help(norms.HuberT.weights)
t = 1.345
support = np.linspace(-3*t, 3*t, 1000)
huber = norms.HuberT(t=t)
plot_weights(support, huber.weights, ['-3*t', '0', '3*t'], [-3*t, 0, 3*t]);
```
### Least Squares
```
# Ordinary least squares: constant weight of 1 everywhere (no downweighting).
help(norms.LeastSquares.weights)
support = np.linspace(-3, 3, 1000)
lst_sq = norms.LeastSquares()
plot_weights(support, lst_sq.weights, ['-3', '0', '3'], [-3, 0, 3]);
```
### Ramsay's Ea
```
# Ramsay's Ea weight function with tuning constant a.
help(norms.RamsayE.weights)
a = .3
support = np.linspace(-3*a, 3*a, 1000)
ramsay = norms.RamsayE(a=a)
plot_weights(support, ramsay.weights, ['-3*a', '0', '3*a'], [-3*a, 0, 3*a]);
```
### Trimmed Mean
```
# Trimmed mean: full weight inside the cutoff c, zero outside.
help(norms.TrimmedMean.weights)
c = 2
support = np.linspace(-3*c, 3*c, 1000)
trimmed = norms.TrimmedMean(c=c)
plot_weights(support, trimmed.weights, ['-3*c', '0', '3*c'], [-3*c, 0, 3*c]);
```
### Tukey's Biweight
```
# Tukey's biweight: smooth redescending weights, zero beyond c.
help(norms.TukeyBiweight.weights)
c = 4.685
support = np.linspace(-3*c, 3*c, 1000)
tukey = norms.TukeyBiweight(c=c)
plot_weights(support, tukey.weights, ['-3*c', '0', '3*c'], [-3*c, 0, 3*c]);
```
### Scale Estimators
* Robust estimates of the location
```
x = np.array([1, 2, 3, 4, 500])  # 500 is a deliberate outlier
```
* The mean is not a robust estimator of location
```
x.mean()  # (1+2+3+4+500)/5 = 102: pulled far toward the outlier
```
* The median, on the other hand, is a robust estimator with a breakdown point of 50%
```
np.median(x)  # 3.0: unaffected by the single outlier
```
* Analogously for the scale
* The standard deviation is not robust
```
x.std()  # inflated by the outlier
```
Median Absolute Deviation
$$ median_i |X_i - median_j(X_j)|) $$
Standardized Median Absolute Deviation is a consistent estimator for $\hat{\sigma}$
$$\hat{\sigma}=K \cdot MAD$$
where $K$ depends on the distribution. For the normal distribution for example,
$$K = \Phi^{-1}(.75)$$
```
stats.norm.ppf(.75)  # Phi^{-1}(0.75) ~= 0.6745, the normal consistency constant K
print(x)
sm.robust.scale.mad(x)  # standardized MAD of the outlier sample
np.array([1,2,3,4,5.]).std()  # compare: std of outlier-free data
```
* The default for Robust Linear Models is MAD
* another popular choice is Huber's proposal 2
```
# Draw a fat-tailed sample (Student's t, 6 df) and compare location/scale
# estimators on it.
np.random.seed(12345)
fat_tails = stats.t(6).rvs(40)
kde = sm.nonparametric.KDEUnivariate(fat_tails)
kde.fit()
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(kde.support, kde.density);
print(fat_tails.mean(), fat_tails.std())
print(stats.norm.fit(fat_tails))
print(stats.t.fit(fat_tails, f0=6))  # df fixed at 6
# Huber's proposal 2: joint robust estimate of location and scale.
huber = sm.robust.scale.Huber()
loc, scale = huber(fat_tails)
print(loc, scale)
sm.robust.mad(fat_tails)
sm.robust.mad(fat_tails, c=stats.t(6).ppf(.75))  # MAD with a t-consistent constant
sm.robust.scale.mad(fat_tails)
```
### Duncan's Occupational Prestige data - M-estimation for outliers
```
from statsmodels.graphics.api import abline_plot
from statsmodels.formula.api import ols, rlm
# Duncan occupational prestige data, fetched from the R 'car' package.
prestige = sm.datasets.get_rdataset("Duncan", "car", cache=True).data
print(prestige.head(10))
fig = plt.figure(figsize=(12,12))
ax1 = fig.add_subplot(211, xlabel='Income', ylabel='Prestige')
ax1.scatter(prestige.income, prestige.prestige)
# Label the 'minister' observation, annotated as an outlier below.
xy_outlier = prestige.loc['minister', ['income','prestige']]
ax1.annotate('Minister', xy_outlier, xy_outlier+1, fontsize=16)
ax2 = fig.add_subplot(212, xlabel='Education',
                      ylabel='Prestige')
ax2.scatter(prestige.education, prestige.prestige);
# OLS fit, outlier diagnostics (studentized residuals, influence,
# multiple-testing-adjusted outlier tests), then a robust RLM refit.
ols_model = ols('prestige ~ income + education', prestige).fit()
print(ols_model.summary())
infl = ols_model.get_influence()
student = infl.summary_frame()['student_resid']
print(student)
print(student.loc[np.abs(student) > 2])  # flag |studentized residual| > 2
print(infl.summary_frame().loc['minister'])
sidak = ols_model.outlier_test('sidak')
sidak.sort_values('unadj_p', inplace=True)
print(sidak)
fdr = ols_model.outlier_test('fdr_bh')
fdr.sort_values('unadj_p', inplace=True)
print(fdr)
rlm_model = rlm('prestige ~ income + education', prestige).fit()
print(rlm_model.summary())
print(rlm_model.weights)  # observations with small weights were downweighted
```
### Hertzprung Russell data for Star Cluster CYG 0B1 - Leverage Points
* Data is on the luminosity and temperature of 47 stars in the direction of Cygnus.
```
# Hertzsprung-Russell data for star cluster CYG OB1 (R 'robustbase' pkg).
dta = sm.datasets.get_rdataset("starsCYG", "robustbase", cache=True).data
from matplotlib.patches import Ellipse
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111, xlabel='log(Temp)', ylabel='log(Light)', title='Hertzsprung-Russell Diagram of Star Cluster CYG OB1')
ax.scatter(*dta.values.T)
# highlight outliers
e = Ellipse((3.5, 6), .2, 1, alpha=.25, color='r')
ax.add_patch(e);
ax.annotate('Red giants', xy=(3.6, 6), xytext=(3.8, 6),
            arrowprops=dict(facecolor='black', shrink=0.05, width=2),
            horizontalalignment='left', verticalalignment='bottom',
            clip_on=True, # clip to the axes bounding box
            fontsize=16,
            )
# annotate these with their index
for i,row in dta.loc[dta['log.Te'] < 3.8].iterrows():
    ax.annotate(i, row, row + .01, fontsize=14)
xlim, ylim = ax.get_xlim(), ax.get_ylim()
from IPython.display import Image
Image(filename='star_diagram.png')
# Fit OLS and a trimmed-mean RLM and overlay both regression lines.
y = dta['log.light']
X = sm.add_constant(dta['log.Te'], prepend=True)
ols_model = sm.OLS(y, X).fit()
abline_plot(model_results=ols_model, ax=ax)
rlm_mod = sm.RLM(y, X, sm.robust.norms.TrimmedMean(.5)).fit()
abline_plot(model_results=rlm_mod, ax=ax, color='red')
```
* Why? Because M-estimators are not robust to leverage points.
```
# Leverage diagnostics: hat values above 2*(p+1)/n flag high-leverage points.
infl = ols_model.get_influence()
h_bar = 2*(ols_model.df_model + 1 )/ols_model.nobs
hat_diag = infl.summary_frame()['hat_diag']
hat_diag.loc[hat_diag > h_bar]
sidak2 = ols_model.outlier_test('sidak')
sidak2.sort_values('unadj_p', inplace=True)
print(sidak2)
fdr2 = ols_model.outlier_test('fdr_bh')
fdr2.sort_values('unadj_p', inplace=True)
print(fdr2)
```
* Let's delete that line
```
# Remove the last-drawn regression line from the axes.
l = ax.lines[-1]
l.remove()
del l
# Zero-weight the low-temperature (red giant) stars and refit with WLS.
weights = np.ones(len(X))
# NOTE(review): the `- 1` assumes the DataFrame index is 1-based -- verify
# against the actual starsCYG index before trusting this mask.
weights[X[X['log.Te'] < 3.8].index.values - 1] = 0
wls_model = sm.WLS(y, X, weights=weights).fit()
abline_plot(model_results=wls_model, ax=ax, color='green')
```
* MM estimators are good for this type of problem, unfortunately, we don't yet have these yet.
* It's being worked on, but it gives a good excuse to look at the R cell magics in the notebook.
```
# Fit an MM-estimator via R's robustbase::lmrob through the rpy2 cell magics,
# then overlay its fitted line.
yy = y.values[:,None]
xx = X['log.Te'].values[:,None]
%load_ext rpy2.ipython
%R library(robustbase)
%Rpush yy xx
%R mod <- lmrob(yy ~ xx);
%R params <- mod$coefficients;
%Rpull params
%R print(mod)
print(params)
abline_plot(intercept=params[0], slope=params[1], ax=ax, color='red')
```
### Exercise: Breakdown points of M-estimator
```
# Monte Carlo: contaminate 25% of the responses with huge noise and measure
# how far the RLM coefficient estimates drift from the truth.
np.random.seed(12345)
nobs = 200
beta_true = np.array([3, 1, 2.5, 3, -4])
X = np.random.uniform(-20,20, size=(nobs, len(beta_true)-1))
# stack a constant in front
X = sm.add_constant(X, prepend=True) # np.c_[np.ones(nobs), X]
mc_iter = 500
contaminate = .25 # percentage of response variables to contaminate
all_betas = []
for i in range(mc_iter):
    y = np.dot(X, beta_true) + np.random.normal(size=200)
    random_idx = np.random.randint(0, nobs, size=int(contaminate * nobs))
    # NOTE(review): np.random.uniform here returns a single scalar, so all
    # contaminated points in one iteration share the same value -- confirm
    # whether size=len(random_idx) was intended.
    y[random_idx] = np.random.uniform(-750, 750)
    beta_hat = sm.RLM(y, X).fit().params
    all_betas.append(beta_hat)
all_betas = np.asarray(all_betas)
se_loss = lambda x : np.linalg.norm(x, ord=2)**2  # squared L2 error
se_beta = lmap(se_loss, all_betas - beta_true)
```
#### Squared error loss
```
np.array(se_beta).mean()  # mean squared-error loss across MC iterations
all_betas.mean(0)  # average estimated coefficients
beta_true  # true coefficients, for comparison
se_loss(all_betas.mean(0) - beta_true)  # loss of the averaged estimate
```
| github_jupyter |
# Doubly Robust Models
Basically, different ensemble models that utilize a weight model to augment the outcome model.
This notebook presents different combinations of mixing outcome and propensity models,
but since the possible combination are a lot, it does not intend to show all of them.
```
%matplotlib inline
from sklearn.linear_model import LogisticRegression, LinearRegression
from causallib.datasets import load_smoking_weight
from causallib.estimation import IPW, Standardization, StratifiedStandardization
from causallib.estimation import DoublyRobustVanilla, DoublyRobustIpFeature, DoublyRobustJoffe
from causallib.evaluation import PropensityEvaluator, OutcomeEvaluator
```
#### Data:
The effect of quitting to smoke on weight loss.
Data example is taken from [Hernan and Robins Causal Inference Book](https://www.hsph.harvard.edu/miguel-hernan/causal-inference-book/)
```
data = load_smoking_weight()
data.X.join(data.a).join(data.y).head()
```
## Vanilla Doubly Robust
Used for average outcomes.
Its individual outcome estimation is directly its outcome model one's,
but for population outcome, it corrects the observed outcome using the individual outcome prediction before taking weighted average.
```
# IPW weight model (propensities truncated at 0.05) + stratified outcome
# model, combined into a vanilla doubly-robust estimator.
ipw = IPW(LogisticRegression(solver="liblinear"), truncate_eps=0.05)
std = StratifiedStandardization(LinearRegression())
dr = DoublyRobustVanilla(std, ipw)
dr.fit(data.X, data.a, data.y)
```
Doubly-robust corrected population outcomes:
```
# Doubly-robust corrected population outcomes per treatment arm,
# then the treated-vs-control effect.
pop_outcome = dr.estimate_population_outcome(data.X, data.a, data.y)
pop_outcome
effect = dr.estimate_effect(pop_outcome[1], pop_outcome[0])
effect
```
## Doubly Robust IP-Feature
Trains a weight model, and then use its output (predicted weights) as additional features to the outcome model.
If possible (like in IPW) the entire weight-matrix (weight of each individual for each treatment value) is used,
but usually, only a weight vector (according to the actual treatment assignment) is used.
```
# IP-feature flavour: the predicted weights are fed to the outcome model
# as additional covariates.
ipw = IPW(LogisticRegression(solver="liblinear"))
std = Standardization(LinearRegression())
dr = DoublyRobustIpFeature(std, ipw)
dr.fit(data.X, data.a, data.y)
ind_outcomes = dr.estimate_individual_outcome(data.X, data.a)
ind_outcomes.head()
effect = dr.estimate_effect(ind_outcomes[1], ind_outcomes[0],
                            effect_types=["diff", "ratio"])
effect
```
## Doubly Robust Joffe
This uses an importance sampling using the estimated weights.
On the first step weight model is trained and used to predict weights.
These predicted weights are then provided as `sample_weights` to the outcome model.
```
# Joffe flavour: predicted weights are passed to the outcome model as
# sample weights (importance sampling).
ipw = IPW(LogisticRegression(solver="liblinear"))
std = Standardization(LinearRegression())
dr = DoublyRobustJoffe(std, ipw)
dr.fit(data.X, data.a, data.y)
ind_outcomes = dr.estimate_individual_outcome(data.X, data.a)
ind_outcomes.head()
pop_outcome = dr.estimate_population_outcome(data.X, data.a)
pop_outcome
effect = dr.estimate_effect(pop_outcome[1], pop_outcome[0])
effect
```
## Confounders, Instruments and Effect Modifiers
On general there are three main types of covariates in a graphical causal model:
1. Confounders: variables that affect both the outcome and treatment
2. Instruments: variables that affect the treatment assignment but not the outcome.
3. Effect mods: variables that affect the outcome but not the treatment assignment
For a Doubly Robust model that holds both outcome model and weight (treatment assignment prediction) model,
These can specified by a list of covariates `outcome_covariates` and `weight_covariates`,
whose intersection corresponds to _confounders_ and whose symmetric difference gives the effect modifiers and instruments, respectively.
```
# Restrict the weight model to a subset of covariates; columns only in the
# outcome model then act as effect modifiers.
# Say smoke quitting does not depend on your weight and on your age
weight_covariates = [col for col in data.X.columns
                     if not col.startswith("wt") and not col.startswith("age")]
ipw = IPW(LogisticRegression(solver="liblinear"))
std = Standardization(LinearRegression())
dr = DoublyRobustIpFeature(std, ipw,
                           weight_covariates=weight_covariates)
# By not specifying `outcome_covariates` the model will use all covariates
dr.fit(data.X, data.a, data.y);
pop_outcome = dr.estimate_population_outcome(data.X, data.a)
pop_outcome
dr.estimate_effect(pop_outcome[1], pop_outcome[0])
```
## Refitting weight model
The doubly robust model has an outcome model and a weight model.
As noted, the weight model is used to augment the outcome model,
implying the outcome model is dependent on the weight model but not vice versa.
This allows us to save computation power when having a multi-outcome problem.
Since the weight model will be the same throughout, there's no need to refit it every time the model is trained for a different outcome.
The `refit_weight_model` can be turned off by providing `False`.
This way if provided with an already fitted weight model, it won't be refitted upon repeating `fit()` calls on the Doubly Robust object.
```
# Fresh estimator for the refit-weight-model demonstration below.
ipw = IPW(LogisticRegression(solver="liblinear"), truncate_eps=0.05)
std = Standardization(LinearRegression(), encode_treatment=True)
dr = DoublyRobustVanilla(std, ipw)
```
Let's imagine we have different outcomes, `y1` and `y2`.
Calling the first fit with whatever outcome will fit the weight model, as it is not fitted yet.
However, on the second call, it will not be fitted as we provide `refit_weight_model=False`.
```
y1, y2 = data.y, data.y  # stand-ins for two different outcome series
dr.fit(data.X, data.a, y1) # weight model is fitted since it is not yet fitted
dr.fit(data.X, data.a, y2) # weight model is fitted since we did not specify otherwise
dr.fit(data.X, data.a, y1, refit_weight_model=False); # weight model is not fitted.
```
## Evaluation
Evaluation is performed for the inner outcome model and weight model separately
```
# Evaluate the weight model and the outcome model separately.
ipw = IPW(LogisticRegression(solver="liblinear"))
std = Standardization(LinearRegression())
dr = DoublyRobustIpFeature(std, ipw)
dr.fit(data.X, data.a, data.y);
prp_evaluator = PropensityEvaluator(dr.weight_model)
results = prp_evaluator.evaluate_simple(data.X, data.a, data.y,
                                        plots=["roc_curve", "weight_distribution"])
results.scores.prediction_scores
out_evaluator = OutcomeEvaluator(dr)
# NOTE(review): reaches into a private attribute -- fragile across
# causallib versions.
out_evaluator._regression_metrics.pop("msle") # Outcome has negative values, so log-error is not appropriate
results = out_evaluator.evaluate_simple(data.X, data.a, data.y,
                                        plots=["common_support", "continuous_accuracy"])
results.scores
```
| github_jupyter |
```
#import necessary libraries
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
%matplotlib inline
```
### Business Understanding
As a soccer lover, I'm fascinated to explore on FIFA 18 complete player dataset. I took the dataset from Kaggle (https://www.kaggle.com/thec03u5/fifa-18-demo-player-dataset)
The dataset contains player personal attributes (such as Nationality, club, photo, age, value, etc.); Player performance attributes (Overall, Potential, Aggression, Agility) and Player preferred position and ratings at all positions
##### Project Motivation
The motivation behind the project is to study and understand the soccer players collected in FIFA 18 and analyze which Club or National Team has the best-rated players, correlate age with overall rating, nationality, potential, etc.; the results could add value to fantasy premier league enthusiasts. I would like to address the following questions
1. Which Country has the maximum number of Soccer Players collected in FIFA 18 and List the top 20 countries?
2. What is the age distribution of the FIFA 18 Players?
3. Identify the top 10 clubs with the highest total player market value, and the highest average player wage?
4. Identify the best Squad?
5. Do Correlation between Age, Overall, Potential, Position, Club, Nationality, Special vs Value/Wage
### Data Understanding
I will use FIFA 18 Complete Player Dataset from kaggle. For this project, I will use the CompleteDataset.csv which contains all the information of the Players in FIFA 18.
```
# Read in the Complete Dataset
CompleteDataset = pd.read_csv('./CompleteDataset.csv')
CompleteDataset.head()
# Get the Basic info of the dataset
CompleteDataset.describe()
CompleteDataset.info()
num_rows = CompleteDataset.shape[0] #Provide the number of rows in the dataset
num_cols = CompleteDataset.shape[1] #Provide the number of columns in the dataset
print("Row number: {}".format(num_rows))
print("Column number: {}".format(num_cols))
# To check the column names in the dataset
CompleteDataset.columns
```
### Data Cleaning and Preparation
There are a few steps need to be adopted before using the dataset for exploration. The steps include are the following:
1. Leaving or dropping unused columns
2. Checking columns with missing values
3. Transforming string values into numbers for Value & Wage
4. One-hot encoding for Categorical variables such as Club, Nationality, Preferred Positions etc.,
```
# Data Preparation Step 1: drop the columns this analysis never uses
# (image/link columns and the raw ID).
unused_columns = ['Photo', 'Flag', 'Club Logo', 'ID']
for column in unused_columns:
    CompleteDataset.drop(column, axis = 1,inplace=True)
CompleteDataset.head()
# Data Preparation Step 2: Check whether any column has missing values
# (isnull().mean() != 0 selects columns with at least one NaN).
columns_with_missing_values = set(CompleteDataset.columns[CompleteDataset.isnull().mean()!=0])
print(columns_with_missing_values)
```
Coincidentally most of these columns with missing values are ratings at all positions. These columns are not used for my objectives except Club. For a player with a missing value in 'Club', the most likely explanation is that this player doesn't belong to any club at the moment, meaning he is still available for transfer. Any club interested in him may sign this player without paying any transfer fee
```
# Supporting function to convert string values into numbers
def str2number(amount):
    """Convert a FIFA money string (e.g. '€110.5M', '€565K', '€0') to a float.

    The leading currency symbol is stripped; an 'M' or 'K' suffix scales
    the value by one million or one thousand respectively.

    Parameter:
        amount(str): currency-prefixed amount, optionally M/K suffixed
    Returns:
        float: the numerical value of *amount*
    """
    scale_by_suffix = {'M': 1000000, 'K': 1000}
    suffix = amount[-1]
    if suffix in scale_by_suffix:
        return float(amount[1:-1]) * scale_by_suffix[suffix]
    return float(amount[1:])
# Data Preparation Step 3: Convert string values into numbers for Value & Wage
# Create New Wage_Number column to store numerical type Wage info
CompleteDataset['Wage_Number'] = CompleteDataset['Wage'].map(lambda x: str2number(x))
#Create New Value_Number column to store numerical type Value info
CompleteDataset['Value_Number'] = CompleteDataset['Value'].map(lambda x: str2number(x))
# Data Preparation Step 4: integer-encode the categorical variables.
# NOTE: despite the '*_onehot_encode' column names, LabelEncoder performs
# *label* encoding (one integer per category), not one-hot encoding.
# Select only one preferred position (first one) and stored in New 'Preferred Position' column
CompleteDataset['Preferred Position'] = CompleteDataset['Preferred Positions'].str.split().str[0]
# Reusing one LabelEncoder is fine here: fit_transform refits on each call.
le = LabelEncoder()
CompleteDataset['Club_onehot_encode'] = le.fit_transform(CompleteDataset['Club'].astype(str))
CompleteDataset['Nationality_onehot_encode'] = le.fit_transform(CompleteDataset['Nationality'].astype(str))
CompleteDataset['Preferred_Position_onehot_encode'] = le.fit_transform(CompleteDataset['Preferred Position'].astype(str))
```
### Addressing my objectives
Post the data cleaning and processing, I would like to attempt the key business questions jotted above
```
# 1. Which Country has the maximum number of Soccer Players collected in FIFA 18 and List the top 20 countries?
nationality_vals = CompleteDataset.Nationality.value_counts()  # descending count per country
print(nationality_vals.head(20))
# Plot the top 20 as a share of the full player pool.
(nationality_vals.head(20)/CompleteDataset.shape[0]).plot(kind="bar");
plt.title("Top 20 FIFA 18 Players Nationality Distribution(in percentage)");
```
From the above result and plot; England, Germany, Spain, and France are the top 4 countries that have a maximum number of players in FIFA 18.
It’s sensible to see the results considering Barclays Premier League, La Liga, Bundesliga were among the 5 Football Leagues in Europe.
These leagues signify the finest football in Europe drawing maximum football stars and a lot attention, fuel soccer growth and fascination with the sports. The fifth and sixth ranking is Argentina and Brazil. My favorite players belong to Argentina and Brazil.
```
# 2. What is the age distribution of the FIFA 18 Players?
age_vals = CompleteDataset.Age.value_counts()  # descending count per age
print(age_vals.head(20))
# Plot the 20 most common ages as a share of all players.
(age_vals.head(20)/CompleteDataset.shape[0]).plot(kind="bar");
plt.title("FIFA 18 Players Age Distribution (in percentage)");
```
It’s evident that the maximum number of players is at 25 years of age. The count of players older than thirty declines, and it makes sense that this particular sport requires more fitness than other sports. Thus the number of players older than 30 drops as age increases.
```
# 3. Identify the top clubs with the highest total player market value, and the highest average player wage?
Value_Wage_DF = CompleteDataset[["Name", "Club", "Value_Number", "Wage_Number"]]
Value_Wage_DF.head()
# Find out the top 10 clubs with the highest average wage
Value_Wage_DF.groupby("Club")["Wage_Number"].mean().sort_values(ascending=False).head(10).plot(kind="bar");
plt.title("Top 10 clubs with the highest average wage");
# Find out the top 10 clubs with the highest total player market value
Value_Wage_DF.groupby("Club")["Value_Number"].sum().sort_values(ascending=False).head(10).plot(kind="bar");
plt.title("Top 10 clubs with the highest total Value");
```
FC Barcelona, Real Madrid CF, and FC Bayern Munich are the highest-earning players comparing to any other clubs.
```
# 4. Identify the best squad
# Working view with just the columns squad selection needs.
BestSquad_DF = CompleteDataset[['Name', 'Age', 'Overall', 'Potential', 'Preferred Position']]
BestSquad_DF.head()
```
I feel that the above analysis would be beneficial in choosing the best squad based on the player overall value.
I chose the best squad for two formations, Formation 4–3–3 and Formation 3–4–1–2
In addition, I remember for example in FIFA Ultimate Team Mode, the gamer needs to choose their team squad and try to collect the best players to win the matches. This sort of analytics could be potential gamechanger.
```
def find_best_squad(position, squad_df=None):
    """
    Select the player with the highest Overall value for each position
    of a football formation.

    Each selected player is removed from the candidate pool before the
    next position is filled, so duplicated positions (e.g. two 'CB')
    receive distinct players.

    Parameters:
        position (list[str]): positions of a football formation
        squad_df (DataFrame, optional): candidate pool with 'Name',
            'Overall' and 'Preferred Position' columns; defaults to the
            module-level BestSquad_DF.

    Returns:
        str: table with Position / Player / Overall, one row per
        requested position
    """
    pool = (BestSquad_DF if squad_df is None else squad_df).copy()
    best_squad = []
    for pos in position:
        candidates = pool[pool['Preferred Position'] == pos]['Overall']
        # Compute the best row index once and reuse it for the name,
        # the score and the drop (it was previously recomputed).
        best_idx = candidates.idxmax()
        best_squad.append([
            pos,
            pool.loc[[best_idx]]['Name'].to_string(index=False),
            candidates.max(),
        ])
        pool.drop(best_idx, inplace=True)
    # len(position) rows instead of a hard-coded 11 so any formation size works.
    return pd.DataFrame(
        np.array(best_squad).reshape(len(position), 3),
        columns=['Position', 'Player', 'Overall'],
    ).to_string(index=False)
# Formation 433
squad_Formation433 = ['GK', 'LB', 'CB', 'CB', 'RB', 'LM', 'CDM', 'RM', 'LW', 'ST', 'RW']
print ('Best Squad of Formation 4-3-3')
print (find_best_squad(squad_Formation433))
# Formation 3412
squad_Formation3412 = ['GK', 'CB', 'CB', 'CB', 'LM', 'CM', 'CM', 'RM', 'CAM', 'ST', 'ST']
print ('Best Squad of Formation 3-4-1-2')
print (find_best_squad(squad_Formation3412))
# 5. Do Correlation between Age, Overall, Potential, Position, Club, Nationality, Special vs Value/Wage
# NOTE(review): the *_onehot_encode columns are assumed to be numeric label
# encodings created earlier in the notebook — not visible here, confirm.
Correlation_DF = CompleteDataset[['Name', 'Age', 'Overall', 'Potential', 'Preferred_Position_onehot_encode', 'Club_onehot_encode', 'Nationality_onehot_encode', 'Special', 'Value_Number', 'Wage_Number']]
Correlation_DF.corr()
colormap = plt.cm.inferno
plt.figure(figsize=(16,12))
plt.title('Correlation between Age, Overall, Potential, Position, Club, Nationality, Special vs Value/Wage', y=1.05, size=15)
# Heatmap of the pairwise correlation matrix (corr() is recomputed here).
sns.heatmap(Correlation_DF.corr(),linewidths=0.1,vmax=1.0,
            square=True, cmap=colormap, linecolor='white', annot=True)
```
As per the above heatmap, Overall and Potential are positively related to Wage & Value. Special also has a positive correlation with Wage & Value.
On the other hand, Club, Nationality, and Position are not such important characteristic features when it comes to correlation with Wage & Value.
Additionally, Wage and Value are highly correlated with each other, which is quite rational.
| github_jupyter |
```
# Install and load the BERT-based semantic-text-similarity models.
!pip install semantic-text-similarity
from semantic_text_similarity.models import WebBertSimilarity
from semantic_text_similarity.models import ClinicalBertSimilarity
# NOTE(review): device='cpu' contradicts the trailing comment — confirm which
# device is intended.
web_model = WebBertSimilarity(device='cpu', batch_size=10) #defaults to GPU prediction
# clinical_model = ClinicalBertSimilarity(device='cuda', batch_size=10) #defaults to GPU prediction
import numpy as np
import pdb
import json
class GFG:
    """Maximum bipartite matching via augmenting paths (Kuhn's algorithm).

    graph[u][v] is truthy when left-side vertex u ("person") may be
    matched to right-side vertex v ("job").
    """

    def __init__(self, graph):
        self.graph = graph
        self.ppl = len(graph)      # number of left-side vertices (rows)
        self.jobs = len(graph[0])  # number of right-side vertices (columns)

    def bpm(self, u, matchR, seen):
        """Try to match left vertex u, possibly reassigning earlier matches.

        matchR[v] holds the left vertex currently matched to right vertex v
        (-1 when unmatched); seen marks right vertices visited in this DFS.
        Returns True when u ends up matched.
        """
        for v in range(self.jobs):
            if self.graph[u][v] and not seen[v]:
                seen[v] = True
                # v is free, or its current partner can be moved elsewhere.
                if matchR[v] == -1 or self.bpm(matchR[v], matchR, seen):
                    matchR[v] = u
                    return True
        return False

    # Returns maximum number of matching
    def maxBPM(self):
        """Return (matching size, matchR) where matchR[v] is the matched
        left vertex for each right vertex v, or -1."""
        matchR = [-1] * self.jobs
        result = 0
        for i in range(self.ppl):
            seen = [False] * self.jobs
            if self.bpm(i, matchR, seen):
                result += 1
        return result, matchR
def my_lcs(string, sub):
    """Length of the longest common subsequence of two sequences.

    Works on any indexable sequences (strings or token lists).
    """
    # Make `string` refer to the longer of the two inputs.
    if(len(string)<= len(sub)):
        sub, string = string, sub
    rows = len(string)
    cols = len(sub)
    # table[i][j] = LCS length of string[:i] and sub[:j]
    table = [[0] * (cols + 1) for _ in range(rows + 1)]
    for j in range(1, cols + 1):
        for i in range(1, rows + 1):
            if string[i - 1] == sub[j - 1]:
                table[i][j] = table[i - 1][j - 1] + 1
            else:
                table[i][j] = max(table[i - 1][j], table[i][j - 1])
    return table[rows][cols]
class Rouge():
    """ROUGE-L scorer: LCS-based F-measure between candidates and references."""

    def __init__(self):
        # beta weights recall over precision in the F-score (ROUGE convention).
        self.beta = 1.2

    def calc_score(self, candidate, refs):
        """Score one candidate sentence against one or more references.

        candidate: list containing exactly one string; refs: non-empty list
        of reference strings. Returns the ROUGE-L F-score.
        """
        assert(len(candidate)==1)
        assert(len(refs)>0)
        precisions = []
        recalls = []
        # split into tokens
        cand_tokens = candidate[0].split(" ")
        for ref in refs:
            # split into tokens
            ref_tokens = ref.split(" ")
            # compute the longest common subsequence
            lcs_len = my_lcs(ref_tokens, cand_tokens)
            if lcs_len is None:  # defensive: my_lcs always returns an int
                precisions.append(0)
                recalls.append(0)
            else:
                precisions.append(lcs_len / float(len(cand_tokens)))
                recalls.append(lcs_len / float(len(ref_tokens)))
        best_prec = max(precisions)
        best_rec = max(recalls)
        if best_prec != 0 and best_rec != 0:
            score = ((1 + self.beta**2) * best_prec * best_rec) / float(best_rec + self.beta**2 * best_prec)
        else:
            score = 0.0
        return score

    def compute_score(self, refs, test):
        """Average ROUGE-L over paired lists; returns (mean, per-item array).

        Empty/blank hypotheses score 0 without calling the LCS.
        """
        scores = []
        for idx in range(len(refs)):
            hypo = test[idx]
            ref = refs[idx]
            if hypo == " " or hypo == "":
                scores.append(0)
            else:
                scores.append(self.calc_score([hypo], [ref]))
        return np.mean(np.array(scores)), np.array(scores)

    def method(self):
        return "Rouge"
```
For evaluation of property generation models XGP and XGP-W
```
# Load the model output to evaluate (gold vs generated property lists).
# NOTE(review): the file handle is never closed; json was already imported
# above, so the redundant `import json` below is harmless but misplaced.
f = open('./GPT_Multiple_Output/gpt2_raw_output.json')
data = json.load(f)
name = 'gpt2_raw_output'
import json
# Accumulators for the STS-based matching evaluation below.
sts_predictions_array = []
sts_predictions_array2 = []
positive_indices = []
negative_indices = []
sts_recall = 0.0
sts_precision = 0.0
sts_fscore = 0.0
count = 0
sts_threshold = 3   # STS scores are on a 0-5 scale; >= 3 counts as a match
counter = []
# For every example: score all gold/output property pairs with BERT STS,
# build a bipartite graph, and compute matching-based recall/precision/F1.
# NOTE(review): the inner `for k in range(len(division_list))` loops shadow
# the outer example index `k`; iteration still proceeds correctly because
# the range iterator is unaffected, but this is fragile — rename if edited.
for k in range(len(data["Input"])):
    if (k % 500 == 0):
        print(k)
    l1 = data["Gold"][k]
    l2 = data["Output"][k]
    if data["Correctness"][k]:
        positive_indices.append(k)
        # l2 = data["Output"][k]
    else:
        negative_indices.append(k)
        # l2 = [data["Output"][k][0]]
    # bipartite_graph: thresholded 0/1 edges; _double keeps raw STS scores.
    bipartite_graph = np.zeros((len(l1), len(l2)))
    bipartite_graph_double = np.zeros((len(l1), len(l2)))
    for i in range(len(l1)):
        for j in range(len(l2)):
            sts_score = web_model.predict([(l1[i], l2[j])])[0]
            bipartite_graph_double[i][j] = sts_score
            if (sts_score >= sts_threshold):
                bipartite_graph[i][j] = 1
            else:
                bipartite_graph[i][j] = 0
    # Soft matching on raw scores: average matched STS per gold property.
    g = GFG(bipartite_graph_double)
    number, division_list = g.maxBPM()
    # property i will be matched with division_list[i] change this comment
    score0 = 0
    for i in range(len(l1)):
        j = -1
        for k in range(len(division_list)):
            if(division_list[k] == i):
                j = k
                break
        if (j != -1):
            sts_score = bipartite_graph_double[i][j]
            score0 += sts_score
            count += 1
            counter.append(count)
            sts_predictions_array2.append(sts_score)
        else:
            count += 1
            sts_predictions_array2.append(0)
    sts_predictions_array.append(score0/len(l1))
    # Hard matching on thresholded edges: count matched gold properties.
    g = GFG(bipartite_graph)
    number, division_list = g.maxBPM()
    # property i will be matched with division_list[i] change this comment
    score_recall0 = 0
    score_precision0 = 0
    for i in range(len(l1)):
        j = -1
        for k in range(len(division_list)):
            if(division_list[k] == i):
                j = k
                break
        if (j != -1):
            score_recall0 += 1
            score_precision0 += 1
            count += 1
        else:
            count += 1
    # Recall normalized by gold count, precision by output count.
    sts_recall += score_recall0/len(l1)
    sts_precision += score_precision0/len(l2)
    a0 = score_recall0/len(l1)
    b0 = score_precision0/len(l2)
    if (a0+b0 != 0):
        sts_fscore += 2*a0*b0/(a0+b0)
# print(count)
# print(len(counter))
# Report macro-averaged scores over all examples.
x = len(data["Input"])
print("STS Score==============")
print(sts_recall/x)
print(sts_precision/x)
print(sts_fscore/x)
# print(np.average(sts_predictions_array)/5)
# predictions_positive = [sts_predictions_array[i] for i in positive_indices]
# predictions_negative = [sts_predictions_array[i] for i in negative_indices]
# print(np.average(predictions_positive)/5)
# print(np.average(predictions_negative)/5)
# Build flat pair lists (every gold x output property combination) in the
# input formats expected by the CIDEr, SPICE and METEOR scorers. The pair
# order defined here fixes the count1 indexing used by the scoring loop below.
spice_thresholder = []
cider_refs_thresholder = []
cider_test_thresholder = []
count = 0
for k in range(len(data["Input"])):
    # if (k % 500 == 0):
    #     print(k)
    l1 = data["Gold"][k]
    l2 = data["Output"][k]
    # correctness = data["Correctness"][k]
    # if not correctness:
    #     l2 = [data["Output"][k][0]]
    # else:
    #     l2 = data["Output"][k]
    for i in range(len(l1)):
        for j in range(len(l2)):
            struct = {
                "image_id": count,
                "caption": l2[j]
            }
            cider_test_thresholder.append(struct)
            struct = {
                "image_id": count,
                "caption": l1[i]
            }
            cider_refs_thresholder.append(struct)
            struct = {
                "image_id": count,
                "test": l2[j],
                "refs": [l1[i]]
            }
            spice_thresholder.append(struct)
            count += 1
# Run the external CIDEr evaluator (python2 script in the cider/ checkout).
%cd cider/
with open('./data/cider_' + name + '_refs.json', 'w') as outfile:
    json.dump(cider_refs_thresholder, outfile)
with open('./data/cider_' + name + '_test.json', 'w') as outfile:
    json.dump(cider_test_thresholder, outfile)
with open('../spice/spice_' + name + '.json', 'w') as outfile:
    json.dump(spice_thresholder, outfile)
params = {
    "pathToData" : "data/",
    "refName" : 'cider_' + name + '_refs.json',
    "candName" : 'cider_' + name + '_test.json',
    "resultFile" : 'cider_' + name + '_results.json',
    "idf" : "coco-val-df"
}
with open('params.json', 'w') as outfile:
    json.dump(params, outfile)
!python2 cidereval.py
file2 = open('./cider_' + name + '_results.json')
cider_output = json.load(file2)
# METEOR needs plain-text files, one sentence per line, refs/test aligned.
%cd ../meteor/
write_file = open(name + "_meteor_refs", "w")
for i in range(len(cider_refs_thresholder)):
    new_line = cider_refs_thresholder[i]['caption'].replace("\n", " ") + " \n"
    write_file.write(new_line)
write_file.close()
write_file2 = open(name + "_meteor_test", "w")
for i in range(len(cider_test_thresholder)):
    # METEOR cannot score empty lines; substitute a placeholder token.
    if (cider_test_thresholder[i]['caption'] == "" or cider_test_thresholder[i]['caption'] == " "):
        new_line = "empty \n"
    else:
        new_line = cider_test_thresholder[i]['caption'].replace("\n", " ") + " \n"
    write_file2.write(new_line)
write_file2.close()
# -q prints one score per line; the last line is the aggregate score.
meteor_scores = !java -Xmx2G -jar meteor-1.5.jar ./gpt2_raw_output_meteor_test ./gpt2_raw_output_meteor_refs -l en -norm -a data/paraphrase-en.gz -q
meteor_scores = [float(meteor_scores[i]) for i in range(len(meteor_scores))]
meteor_scores[-1]
%cd ../spice/
#in spice directory
!java -Xmx8G -jar spice-1.0.jar spice_gpt2_raw_output.json -cache ./cache -out spice_gpt2_raw_output_output.json
file2 = open('./spice_' + name + '_output.json')
spice_output = json.load(file2)
len(spice_output)
# ROUGE-L is computed in-process with the Rouge class defined earlier.
rouge_test = [cider_test_thresholder[i]['caption'] for i in range(len(cider_test_thresholder))]
rouge_refs = [cider_refs_thresholder[i]['caption'] for i in range(len(cider_refs_thresholder))]
r = Rouge()
rouge_scores = r.compute_score(rouge_refs, rouge_test)
rouge_scores[0]
import json
# Matching-based recall/precision/F1 for SPICE, CIDEr, METEOR and ROUGE.
# count1 walks the flat per-pair score arrays in the exact order the
# thresholder lists were built above — do not reorder either loop.
spice_recall = 0.0
spice_precision = 0.0
spice_fscore = 0.0
cider_recall = 0.0
cider_precision = 0.0
cider_fscore = 0.0
meteor_recall = 0.0
meteor_precision = 0.0
meteor_fscore = 0.0
rouge_recall = 0.0
rouge_precision = 0.0
rouge_fscore = 0.0
count1 = 0
count = 0
# Per-metric thresholds above which a gold/output pair counts as matched.
spice_threshold = 0.4
cider_threshold = 3
meteor_threshold = 0.3
rouge_threshold = 0.3
counter = []
# NOTE(review): the inner `for k in range(len(division_list))` loops shadow
# the outer example index `k` (iteration still proceeds correctly).
for k in range(len(data["Input"])):
    if (k % 500 == 0):
        print(k)
    l1 = data["Gold"][k]
    l2 = data["Output"][k]
    # One thresholded 0/1 bipartite graph per metric.
    bipartite_graph = np.zeros((len(l1), len(l2)))
    bipartite_graph_double_spice = np.zeros((len(l1), len(l2)))
    bipartite_graph_double_meteor = np.zeros((len(l1), len(l2)))
    bipartite_graph_double_rouge = np.zeros((len(l1), len(l2)))
    for i in range(len(l1)):
        for j in range(len(l2)):
            cider_score = cider_output['CIDEr'][count1]
            meteor_score = meteor_scores[count1]
            rouge_score = rouge_scores[1][count1]
            spice_score = spice_output[count1]['scores']['All']['f']
            count1 += 1
            if (spice_score >= spice_threshold):
                bipartite_graph_double_spice[i][j] = 1
            else:
                bipartite_graph_double_spice[i][j] = 0
            if (cider_score >= cider_threshold):
                bipartite_graph[i][j] = 1
            else:
                bipartite_graph[i][j] = 0
            if (meteor_score >= meteor_threshold):
                bipartite_graph_double_meteor[i][j] = 1
            else:
                bipartite_graph_double_meteor[i][j] = 0
            if (rouge_score >= rouge_threshold):
                bipartite_graph_double_rouge[i][j] = 1
            else:
                bipartite_graph_double_rouge[i][j] = 0
    # --- SPICE matching ---
    g = GFG(bipartite_graph_double_spice)
    number, division_list = g.maxBPM()
    score_recall1 = 0
    score_precision1 = 0
    for i in range(len(l1)):
        j = -1
        for k in range(len(division_list)):
            if (division_list[k] == i):
                j = k
                break
        if (j != -1):
            score_recall1 += 1
            score_precision1 += 1
            count += 1
        else:
            count += 1
    spice_recall += score_recall1/len(l1)
    spice_precision += score_precision1/len(l2)
    a1 = score_recall1/len(l1)
    b1 = score_precision1/len(l2)
    if (a1+b1 != 0):
        spice_fscore += 2*a1*b1/(a1+b1)
    # --- CIDEr matching ---
    g = GFG(bipartite_graph)
    number, division_list = g.maxBPM()
    score_recall2 = 0
    score_precision2 = 0
    for i in range(len(l1)):
        j = -1
        for k in range(len(division_list)):
            if (division_list[k] == i):
                j = k
                break
        if (j != -1):
            score_recall2 += 1
            score_precision2 += 1
            count += 1
        else:
            count += 1
    cider_recall += score_recall2/len(l1)
    cider_precision += score_precision2/len(l2)
    a2 = score_recall2/len(l1)
    b2 = score_precision2/len(l2)
    if (a2+b2 != 0):
        cider_fscore += 2*a2*b2/(a2+b2)
    # --- METEOR matching (a2/b2 are reused as scratch variables here) ---
    g = GFG(bipartite_graph_double_meteor)
    number, division_list = g.maxBPM()
    score_recall3 = 0
    score_precision3 = 0
    for i in range(len(l1)):
        j = -1
        for k in range(len(division_list)):
            if (division_list[k] == i):
                j = k
                break
        if (j != -1):
            score_recall3 += 1
            score_precision3 += 1
            count += 1
        else:
            count += 1
    meteor_recall += score_recall3/len(l1)
    meteor_precision += score_precision3/len(l2)
    a2 = score_recall3/len(l1)
    b2 = score_precision3/len(l2)
    if (a2+b2 != 0):
        meteor_fscore += 2*a2*b2/(a2+b2)
    # --- ROUGE matching ---
    g = GFG(bipartite_graph_double_rouge)
    number, division_list = g.maxBPM()
    score_recall4 = 0
    score_precision4 = 0
    for i in range(len(l1)):
        j = -1
        for k in range(len(division_list)):
            if (division_list[k] == i):
                j = k
                break
        if (j != -1):
            score_recall4 += 1
            score_precision4 += 1
            count += 1
        else:
            count += 1
    rouge_recall += score_recall4/len(l1)
    rouge_precision += score_precision4/len(l2)
    a2 = score_recall4/len(l1)
    b2 = score_precision4/len(l2)
    if (a2+b2 != 0):
        rouge_fscore += 2*a2*b2/(a2+b2)
# Macro-averaged per-metric scores over all examples.
x = len(data["Input"])
print("SPICE==============")
print(spice_recall/x)
print(spice_precision/x)
print(spice_fscore/x)
print("CIDEr==============")
print(cider_recall/x)
print(cider_precision/x)
print(cider_fscore/x)
print("METEOR==============")
print(meteor_recall/x)
print(meteor_precision/x)
print(meteor_fscore/x)
print("ROUGE==============")
print(rouge_recall/x)
print(rouge_precision/x)
print(rouge_fscore/x)
```
For evaluation of free-flow generation models XGF-I and XGF-II
```
import json
# Free-flow evaluation: gold and output are single strings per example
# (not property lists), so each example contributes exactly one pair.
f = open('../GPT_Multiple_Output/gpt2_raw_freeflow_output.json')
data = json.load(f)
name = "gpt2_raw_freeflow_output"
spice_thresholder = []
cider_refs_thresholder = []
cider_test_thresholder = []
count = 0
for k in range(len(data["input"])):
    # if (k % 500 == 0):
    #     print(k)
    # Strip the end-of-sequence marker from the gold text.
    l1 = data["gold"][k].replace("<EOS>","")
    l2 = data["output"][k]
    # l1 = gold[k]
    # l2 = prime[k]
    struct = {
        "image_id": count,
        "caption": l2
    }
    cider_test_thresholder.append(struct)
    struct = {
        "image_id": count,
        "caption": l1
    }
    cider_refs_thresholder.append(struct)
    struct = {
        "image_id": count,
        "test": l2,
        "refs": [l1]
    }
    spice_thresholder.append(struct)
    count += 1
# Run the external CIDEr evaluator on the dumped pair files.
%cd ../cider
with open('./data/cider_' + name + '_refs.json', 'w') as outfile:
    json.dump(cider_refs_thresholder, outfile)
with open('./data/cider_' + name + '_test.json', 'w') as outfile:
    json.dump(cider_test_thresholder, outfile)
with open('../spice/spice_' + name + '.json', 'w') as outfile:
    json.dump(spice_thresholder, outfile)
params = {
    "pathToData" : "data/",
    "refName" : 'cider_' + name + '_refs.json',
    "candName" : 'cider_' + name + '_test.json',
    "resultFile" : 'cider_' + name + '_results.json',
    "idf" : "coco-val-df"
}
with open('params.json', 'w') as outfile:
    json.dump(params, outfile)
!python2 cidereval.py
file2 = open('./cider_' + name + '_results.json')
cider_output = json.load(file2)
%cd ../spice/
# Write aligned refs/test text files for METEOR (one sentence per line).
write_file = open(name + "_meteor_refs", "w")
for i in range(len(cider_refs_thresholder)):
    new_line = cider_refs_thresholder[i]['caption'].replace("\n", " ") + " \n"
    write_file.write(new_line)
write_file.close()
write_file2 = open(name + "_meteor_test", "w")
for i in range(len(cider_test_thresholder)):
    # METEOR cannot score empty lines; substitute a placeholder token.
    if (cider_test_thresholder[i]['caption'] == "" or cider_test_thresholder[i]['caption'] == " "):
        new_line = "empty \n"
    else:
        new_line = cider_test_thresholder[i]['caption'].replace("\n", " ") + " \n"
    write_file2.write(new_line)
write_file2.close()
meteor_scores = !java -Xmx2G -jar meteor-1.5.jar ./gpt2_raw_freeflow_output_meteor_test ./gpt2_raw_freeflow_output_meteor_refs -l en -norm -a data/paraphrase-en.gz -q
meteor_scores = [float(meteor_scores[i]) for i in range(len(meteor_scores))]
meteor_scores[-1]
#in spice directory
!java -Xmx8G -jar spice-1.0.jar spice_gpt2_raw_freeflow_output.json -cache ./cache2 -out spice_gpt2_raw_freeflow_output_output.json
file2 = open('./spice_' + name + '_output.json')
spice_output = json.load(file2)
len(spice_output)
# ROUGE-L computed in-process with the Rouge class defined earlier.
rouge_test = [cider_test_thresholder[i]['caption'] for i in range(len(cider_test_thresholder))]
rouge_refs = [cider_refs_thresholder[i]['caption'] for i in range(len(cider_refs_thresholder))]
r = Rouge()
rouge_scores = r.compute_score(rouge_refs, rouge_test)
rouge_scores[0]
import json
import numpy as np
# Collect per-example scores (one pair per example, no matching needed)
# and report the simple averages per metric.
spice_predictions_array = []
cider_predictions_array = []
meteor_predictions_array = []
rouge_predictions_array = []
counter = []
for k in range(len(spice_output)):
    if (k % 500 == 0):
        print(k)
    # l1 = data["Gold"][k]
    # l2 = data["Output"][k]
    # if data["Correctness"][k]:
    #     positive_indices.append(k)
    # else:
    #     negative_indices.append(k)
    spice_predictions_array.append(spice_output[k]['scores']['All']['f'])
    cider_predictions_array.append(cider_output["CIDEr"][k])
    meteor_predictions_array.append(meteor_scores[k])
    rouge_predictions_array.append(rouge_scores[1][k])
# print(count)
# print(len(counter))
# # x = len(data["q_text"])
print("SPICE==============")
print(np.average(spice_predictions_array))
print("CIDEr==============")
# CIDEr is on a 0-10 scale; divide by 10 to normalize to [0, 1].
print(np.average(cider_predictions_array)/10)
print("METEOR==============")
print(np.average(meteor_predictions_array))
print("ROUGE==============")
print(np.average(rouge_predictions_array))
# STS-BERT over the full gold/output strings (0-5 scale, normalized by 5).
sts_bert_output = [web_model.predict([(data['gold'][i], data['output'][i])])[0] for i in range(len(data['input']))]
print("STS-BERT===========")
print(np.average(sts_bert_output)/5)
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import os,glob
from matplotlib import pyplot as plt
def modify_topics(t):
    '''
    Create a field/column with a list of the topics for that talk
    Modify the topics csv so that the title of the Talk will be split into the appropriate dates
    create a one hot encoding for the topics
    - so it becomes easy to search for specific topics, or topics for specific speakers
    '''
    # just remove the NaN
    # topics_columns = t.fillna(0).iloc[:,range(1,24)].values
    # Columns 1.. hold the per-talk topic labels; drop the NaN fillers.
    topics_columns = t.fillna(0).iloc[:,1:].values
    topics_columns = [[x for x in row if x != 0] for row in topics_columns]
    t['topic_lists'] = topics_columns
    # Titles are assumed to end with ' :YYYY-MM' (colon at index -8);
    # the assert guards that assumption for every row.
    colon = np.array([x[-8] == ':' for x in t[t.columns[0]]])
    assert (colon.astype(int).sum() / t.shape[0]) == 1.0,'there are places where the colon isnt the -8th element in string'
    t['Title'] = [x[:-8] for x in t[t.columns[0]]]
    t['Year'] = [int(x[-7:-3]) for x in t[t.columns[0]]]
    t['Month'] = [int(x[-2:]) for x in t[t.columns[0]]]
    #create one hot encoding for topics
    # this was a really fast process
    # NOTE(review): hard-codes 23 topic columns named '0'..'22' — confirm
    # against the CSV schema if it changes.
    all_topics = set()
    for i in range(0,23):
        all_topics = all_topics.union(set(t[str(i)].unique()))
    #print(len(all_topics))
    all_topics.remove(np.nan)
    #print(len(all_topics))
    z = np.zeros((t.shape[0],len(all_topics)))
    Z = pd.DataFrame(z,columns = sorted(list(all_topics)))
    #print(t.shape,Z.shape)
    t = pd.concat([t,Z],axis=1)
    #print(t.shape)
    # my algorithm for the one hot encoding will be to iterate through the rows,
    #an for the list of topics for that row to add one in the appropriate column
    # this ran in just a few seconds
    column_names = list(t.columns)
    for i in range(t.shape[0]):
        individual_topics = t.iloc[i]['topic_lists']
        for topic in individual_topics:
            t.iloc[i,column_names.index(topic)] += 1
    #note that temples and temple have the exact same talks associated, so it's useless to keep both
    t.drop(columns='temples',inplace=True)
    return t
def cols(temp):
    """Print the DataFrame's column labels as a plain Python list."""
    print(list(temp.columns))
#t for topics
t = pd.read_csv('data/topic_data.csv')
print(t.shape)
print(t.columns)
t = modify_topics(t)
print('t shape after one hot encoding topics',t.shape)
# t.tail(10)
#s for summary
s = pd.read_csv('summary2.csv',index_col=0)
print(s.shape)
# 'Month' in the summary is a letter code; keep it and derive a numeric month.
s.rename(columns={'Month':'Month_letter'},inplace=True)
print(s.columns)
s['Month'] = s['Month_letter'].map({'A':4,'O':10})
#s for summary
# s2 = pd.read_csv('data/wrongsummary.csv',index_col=0)
# print(s2.shape)
# s2.rename(columns={'Month':'Month_letter'},inplace=True)
# print(s2.columns)
# s2['Month'] = s2['Month_letter'].map({'A':4,'O':10})
# s2.equals(s)
#check to se if the talk by elder eyring "try,try,try" appears as such instead of "try"
# s.loc[(s.Year == 2018) & (s.Month == 10)]
s.Year.min(),t.Year.min()
#drop the columns where s is less than 1971 to prep for merge between s and t
s.drop(index=s.loc[s.Year < 1971].index,inplace=True)
# Drop the raw per-topic label columns; the one-hot columns replace them.
t.drop(columns=[str(x) for x in range(23)],inplace=True)
print('t.shape',t.shape,'s.shape',s.shape)
# Inner-join summary and topics on the (Title, Year, Month) key.
new = s.merge(t,how='inner',left_on=['Title','Year','Month'],right_on=['Title','Year','Month'])
print(new.shape)
```
# there would be 303 + 7 = 310 columns but there are 3 columns repeated
so 310 - 3 = 307 columns makes sense
```
# new.loc[(new.Year == 2018) & (new.Month == 10) & (new.Speaker == 'Henry B. Eyring')]
# Persist the merged frame and verify the CSV round-trip is lossless.
new.to_csv('merged_summary_topics.csv',index=False)
new2 = pd.read_csv('merged_summary_topics.csv')
# new2.loc[(new2.Year == 2018) & (new2.Month == 10) & (new2.Speaker == 'Henry B. Eyring')]
new.equals(new2)
new2.shape
new.Speaker.value_counts().iloc[:20]
# Horizontal bar chart of the 20 most frequent speakers.
x = new.Speaker.value_counts().iloc[:20]
x.sort_values(ascending=True,inplace=True)
fig = plt.figure(figsize=(4*2,7))
x.plot(kind='barh')
plt.xlabel('Number of Talks')
plt.title('Most Common Speakers')
plt.savefig('Most Common Speakers.png',dpi=300)
plt.tight_layout()
plt.show()
def most_common_talks(speaker):
    """Horizontal bar chart of the 20 topics `speaker` addressed most often.

    Looks the speaker up in the module-level `new` DataFrame; the one-hot
    topic columns are assumed to start at column index 9.
    """
    talks = new.loc[new.Speaker == speaker]
    # the topics are the 9th column and on
    topic_counts = talks.iloc[:, 9:].sum().sort_values(ascending=False)
    fig = plt.figure(figsize=(5, 7))
    top20 = topic_counts.head(20).sort_values(ascending=True)
    top20.plot(kind='barh')
    plt.title(f'Most Common Topics for {speaker}')
    plt.show()
# Show the 20 most frequent speakers again (notebook display expression).
new.Speaker.value_counts().iloc[:20]
# most_common_talks('Thomas S. Monson')
# most_common_talks('Russell M. Nelson')
def plot_two_speaker_topics(list_speakers, figure_title):
    """Plot the 20 most common talk topics for two speakers side by side.

    Parameters:
        list_speakers: two speaker names (must match values in `new.Speaker`)
        figure_title: filename the combined figure is saved to (dpi=300)
    """
    fig, ax = plt.subplots(1, 2, figsize=(4*2, 4*2))
    # Identical logic for both speakers; the body was previously copy-pasted
    # once per subplot.
    for axis, speaker in zip(ax, list_speakers):
        y = new.loc[new.Speaker == speaker]
        # the topics are the 9th column and on
        tcounts = y.iloc[:, 9:].sum()
        tcounts.sort_values(ascending=False, inplace=True)
        temp = tcounts.head(20)
        temp.sort_values(ascending=True, inplace=True)
        temp.plot(kind='barh', ax=axis)
        axis.set_title(f'Most Common Topics for\n{speaker}')
    plt.tight_layout()
    plt.savefig(figure_title, dpi=300)
    plt.show()
# List working-directory contents, then render and save the two comparisons.
os.listdir()
plot_two_speaker_topics(['Thomas S. Monson','Russell M. Nelson'],'Monson_Nelson_most_common_talks.png')
plot_two_speaker_topics(['Jeffrey R. Holland','Dieter F. Uchtdorf'],'Holland_Uchtdorf_most_common_talks.png')
```
# look at some individual talks and the topics for that talk
```
# dict(new.Speaker.value_counts())
# I recently just listened to a talk by Elder Christofferson called "when thou art converted"
# Show one speaker's talks, newest year first, to locate a row of interest.
keep = ['Year','Month_letter','Month','Title','topic_lists']
new.loc[(new.Speaker == 'D. Todd Christofferson')][keep].sort_values(by=['Year','Month']
    ,ascending=[False,True],inplace=False)
# Row index picked manually from the listing above.
index_ = 2373
print(new.iloc[index_,:9])
# print('\n\n',new.iloc[index_,:9].Title)
print('\n\n',new.iloc[index_,:9].Kicker)
new.iloc[index_,:9].topic_lists
```
| github_jupyter |
#RepresentationSpace - Discovering Interpretable GAN Controls for Architectural Image Synthesis
Using [Ganspace](https://github.com/armaank/archlectures/ganspace) to find latent directions in a StyleGAN2 model trained on the [ArchML dataset](http://165.227.182.79/)
## Instructions and Setup
1) Click the play button of the blocks titled "Initialization" and wait for it to finish the initialization.
2) In the Run PCA Analysis section, choose a model, the number of PCA components and the intermediate network layer in the 'Model Specification' cell. Then click the play button to run. The defaults are ok as is. This block will take a while (~5-10 mins) to run.
3) In the Explore Directions block, generate samples, play with the sliders, and name what you find. In the next block, compare the directions and generate videos.
```
%%capture
#@title Initialization - Setup
# Clone git
%tensorflow_version 1.x
%rm -rf archlectures
!git clone https://github.com/armaank/archlectures
%cd archlectures/generative/
%ls
#@title Initialization - Download Models
%%capture
%%sh
chmod 755 get_models.sh
./get_models.sh
ls
#@title Initialization - Install Requirements
%%capture
from IPython.display import Javascript
display(Javascript('''google.colab.output.setIframeHeight(0, true, {maxHeight: 200})'''))
!pip install fbpca boto3
!git submodule update --init --recursive
!python -c "import nltk; nltk.download('wordnet')"
%cd ./ganspace/
# Project and third-party imports used by the cells below.
from IPython.utils import io
import torch
import PIL
import numpy as np
import ipywidgets as widgets
from PIL import Image
import imageio
from models import get_instrumented_model
from decomposition import get_or_compute
from config import Config
from skimage import img_as_ubyte
# Speed up computation
torch.autograd.set_grad_enabled(False)
torch.backends.cudnn.benchmark = True
# Custom OPs no longer required
#!pip install Ninja
#%cd models/stylegan2/stylegan2-pytorch/op
#!python setup.py install
#!python -c "import torch; import upfirdn2d_op; import fused; print('OK')"
#%cd "/content/ganspace"
```
## Run PCA Analysis
```
#@title Model Specification
# Colab form parameters: model checkpoint, PCA component count, GAN layer.
model = "Adaily_B" #@param ["Adaily_A", "Adaily_B"]
num_components = 80#@param {type:"number"}
layer = 'style'#@param ["style","input","convs","upsamples","noises"]
model_class = model # this is the name of model
model_name = 'StyleGAN2'
# Run the GANSpace PCA decomposition; results are cached for reuse below.
!python visualize.py --model $model_name --class $model_class --use_w --layer=style -c $num_components
```
## Explore RepresentationSpace
After running the previous cell, your components will be stored in an npz file in `/content/ganspace/cache/components/` - below the npz file is unpacked, and a component/direction is chosen at random.
Using the UI, you can explore the latent direction and give it a name, which will be appended to the named_directions dictionary and saved as `direction_name.npy` for later use.
The variable `seed` controls the starting image
The `Truncation` slider controls the quality of the image sample, .7 is a good starting point
`Distance` is the main slider, it controls the strength/emphasis of the component
`start layer` and `end layer` control the number of layers used in the calculations, using all of them (0, 18) is a good start
```
#@title Load Model
# Build the GANSpace config matching the PCA run above, instrument the
# generator, and load (or compute) the cached principal components.
config = Config(
    model='StyleGAN2',
    layer=layer,
    output_class=model_class,
    components=num_components,
    use_w=True,
    batch_size=5_000, # style layer quite small
)
inst = get_instrumented_model(config.model, config.output_class,
    config.layer, torch.device('cuda'), use_w=config.use_w)
path_to_components = get_or_compute(config, inst)
model = inst.model
named_directions = {} #init named_directions dict to save directions
comps = np.load(path_to_components)
lst = comps.files
# Unpack either activation-space or latent-space components and their
# standard deviations from the npz archive.
latent_dirs = []
latent_stdevs = []
load_activations = False
for item in lst:
    if load_activations:
        if item == 'act_comp':
            for i in range(comps[item].shape[0]):
                latent_dirs.append(comps[item][i])
        if item == 'act_stdev':
            for i in range(comps[item].shape[0]):
                latent_stdevs.append(comps[item][i])
    else:
        if item == 'lat_comp':
            for i in range(comps[item].shape[0]):
                latent_dirs.append(comps[item][i])
        if item == 'lat_stdev':
            for i in range(comps[item].shape[0]):
                latent_stdevs.append(comps[item][i])
#@title Load Random Component
#load one at random
# NOTE(review): only samples the first 20 components even though
# num_components defaults to 80 — confirm whether that is intentional.
num = np.random.randint(20)
# NOTE(review): named_directions values become [num, start, end] lists once a
# direction is saved, so this membership test only matches bare ints — verify.
if num in named_directions.values():
    print(f'Direction already named: {list(named_directions.keys())[list(named_directions.values()).index(num)]}')
random_dir = latent_dirs[num]
random_dir_stdev = latent_stdevs[num]
print(f'Loaded Component No. {num}')
#@title Run UI (save component with Enter key)
from ipywidgets import fixed
# Taken from https://github.com/alexanderkuk/log-progress
def log_progress(sequence, every=1, size=None, name='Items'):
    """Yield items from `sequence` while rendering an ipywidgets progress bar.

    every: update frequency (auto-derived when None and size is known);
    size: length hint for iterables without len(); name: bar label.
    """
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display
    is_iterator = False
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = int(size / 200)     # every 0.5%
    else:
        assert every is not None, 'sequence is iterator, set every'
    # Unknown length: show an indeterminate (info-styled) bar.
    if is_iterator:
        progress = IntProgress(min=0, max=1, value=1)
        progress.bar_style = 'info'
    else:
        progress = IntProgress(min=0, max=size, value=0)
    label = HTML()
    box = VBox(children=[label, progress])
    display(box)
    index = 0
    try:
        for index, record in enumerate(sequence, 1):
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = '{name}: {index} / ?'.format(
                        name=name,
                        index=index
                    )
                else:
                    progress.value = index
                    label.value = u'{name}: {index} / {size}'.format(
                        name=name,
                        index=index,
                        size=size
                    )
            yield record
    except:
        # Mark the bar red on any failure, then re-raise to the caller.
        progress.bar_style = 'danger'
        raise
    else:
        progress.bar_style = 'success'
        progress.value = index
        label.value = "{name}: {index}".format(
            name=name,
            index=str(index or '?')
        )
def name_direction(sender):
    """Text-submit callback: record the current component under the typed name.

    Stores [component index, start layer, end layer] in named_directions and
    saves the direction vector to disk via save_direction.
    """
    if not text.value:
        print('Please name the direction before saving')
        return
    # NOTE(review): values are [num, start, end] lists after the first save,
    # so `num in named_directions.values()` only matches bare ints — verify.
    if num in named_directions.values():
        target_key = list(named_directions.keys())[list(named_directions.values()).index(num)]
        print(f'Direction already named: {target_key}')
        print(f'Overwriting... ')
        del(named_directions[target_key])
    named_directions[text.value] = [num, start_layer.value, end_layer.value]
    save_direction(random_dir, text.value)
    for item in named_directions:
        print(item, named_directions[item])
def save_direction(direction, filename):
    """Save a latent direction vector to `<filename>.npy` in the current directory.

    Parameters:
        direction: array-like latent direction to persist
        filename: base name (without extension) for the .npy file
    """
    filename += ".npy"
    np.save(filename, direction, allow_pickle=True, fix_imports=True)
    # Report the actual output path: the f-string previously contained no
    # placeholder and printed a literal instead of the filename.
    print(f'Latent direction saved as {filename}')
def display_sample_pytorch(seed, truncation, direction, distance, start, end, disp=True, save=None, noise_spec=None, scale=2,):
    """Render one sample with `direction * distance * scale` added to the
    w latents of layers [start, end), optionally displaying and/or saving it.
    """
    # blockPrint()
    # Suppress model chatter during generation (captured output is discarded).
    with io.capture_output() as captured:
        w = model.sample_latent(1, seed=seed).cpu().numpy()
        model.truncation = truncation
        w = [w]*model.get_max_latents() # one per layer
        for l in range(start, end):
            w[l] = w[l] + direction * distance * scale
        #save image and display
        out = model.sample_np(w)
        final_im = Image.fromarray((out * 255).astype(np.uint8)).resize((500,500),Image.LANCZOS)
    if disp:
        display(final_im)
    if save is not None:
        if disp == False:
            print(save)
        final_im.save(f'out/{seed}_{save:05}.png')
def generate_mov(seed, truncation, direction_vec, layers, n_frames, out_name = 'out', scale = 2, noise_spec = None, loop=True):
    """Generates a mov moving back and forth along the chosen direction vector"""
    # Example of reading a generated set of images, and storing as MP4.
    # NOTE: %mkdir is an IPython magic — this function only works in a notebook.
    %mkdir out
    movieName = f'out/{out_name}.mp4'
    # Sweep the direction offset from -10 to +10 across n_frames frames.
    offset = -10
    step = 20 / n_frames
    imgs = []
    for i in log_progress(range(n_frames), name = "Generating frames"):
        print(f'\r{i} / {n_frames}', end='')
        w = model.sample_latent(1, seed=seed).cpu().numpy()
        model.truncation = truncation
        w = [w]*model.get_max_latents() # one per layer
        for l in layers:
            if l <= model.get_max_latents():
                w[l] = w[l] + direction_vec * offset * scale
        #save image and display
        out = model.sample_np(w)
        final_im = Image.fromarray((out * 255).astype(np.uint8))
        imgs.append(out)
        #increase offset
        offset += step
    # Append the reversed frames so the animation loops back smoothly.
    if loop:
        imgs += imgs[::-1]
    with imageio.get_writer(movieName, mode='I') as writer:
        for image in log_progress(list(imgs), name = "Creating animation"):
            writer.append_data(img_as_ubyte(image))
# Build the interactive exploration UI: sliders feed display_sample_pytorch.
seed = np.random.randint(0,100000)
style = {'description_width': 'initial'}
# Note: `seed` is immediately rebound from an int to the slider widget.
seed = widgets.IntSlider(min=0, max=100000, step=1, value=seed, description='Seed: ', continuous_update=False)
truncation = widgets.FloatSlider(min=0, max=2, step=0.1, value=0.7, description='Truncation: ', continuous_update=False)
distance = widgets.FloatSlider(min=-10, max=10, step=0.1, value=0, description='Distance: ', continuous_update=False, style=style)
# scale = widgets.FloatSlider(min=0, max=10, step=0.05, value=1, description='Scale: ', continuous_update=False)
start_layer = widgets.IntSlider(min=0, max=model.get_max_latents(), step=1, value=0, description='start layer: ', continuous_update=False)
end_layer = widgets.IntSlider(min=0, max=model.get_max_latents(), step=1, value=18, description='end layer: ', continuous_update=False)
# Make sure layer range is valid
def update_range_start(*args):
    end_layer.min = start_layer.value
def update_range_end(*args):
    start_layer.max = end_layer.value
start_layer.observe(update_range_start, 'value')
end_layer.observe(update_range_end, 'value')
text = widgets.Text(description="Name component here", style=style, width=200)
bot_box = widgets.HBox([seed, truncation, distance, start_layer, end_layer, text])
ui = widgets.VBox([bot_box])
out = widgets.interactive_output(display_sample_pytorch, {'seed': seed, 'truncation': truncation, 'direction': fixed(random_dir), 'distance': distance,'start': start_layer, 'end': end_layer})
display(ui, out)
# Pressing Enter in the text box saves the current direction under that name.
text.on_submit(name_direction)
#@title Select from named directions
from IPython.display import display, clear_output

# Dropdown listing every direction saved so far via the naming text box.
vardict = list(named_directions.keys())
select_variable = widgets.Dropdown(
    options=vardict,
    value=vardict[0],
    description='Select variable:',
    disabled=False,
    button_style=''
)

def set_direction(b):
    # Callback: rebuild the interactive view for the newly selected direction.
    # NOTE(review): relies on the module-level `seed`, `truncation`, `distance`,
    # `scale`, `ui` widgets defined below having been created already.
    clear_output()
    random_dir = latent_dirs[named_directions[select_variable.value][0]]
    start_layer = named_directions[select_variable.value][1]
    end_layer = named_directions[select_variable.value][2]
    print(start_layer, end_layer)
    out = widgets.interactive_output(display_sample_pytorch, {'seed': seed, 'truncation': truncation, 'direction': fixed(random_dir), 'distance': distance, 'scale': scale, 'start': fixed(start_layer), 'end': fixed(end_layer)})
    display(select_variable)
    display(ui, out)

# Initial view: the first named direction with its stored layer range.
random_dir = latent_dirs[named_directions[select_variable.value][0]]
start_layer = named_directions[select_variable.value][1]
end_layer = named_directions[select_variable.value][2]
seed = np.random.randint(0,100000)
style = {'description_width': 'initial'}
seed = widgets.IntSlider(min=0, max=100000, step=1, value=seed, description='Seed: ', continuous_update=False)
truncation = widgets.FloatSlider(min=0, max=2, step=0.1, value=0.7, description='Truncation: ', continuous_update=False)
distance = widgets.FloatSlider(min=-10, max=10, step=0.1, value=0, description='Distance: ', continuous_update=False, style=style)
scale = widgets.FloatSlider(min=0, max=10, step=0.05, value=1, description='Scale: ', continuous_update=False)
bot_box = widgets.HBox([seed, truncation, distance])
ui = widgets.VBox([bot_box])
out = widgets.interactive_output(display_sample_pytorch, {'seed': seed, 'truncation': truncation, 'direction': fixed(random_dir), 'distance': distance, 'scale': scale, 'start': fixed(start_layer), 'end': fixed(end_layer)})
display(select_variable)
display(ui, out)
select_variable.observe(set_direction, names='value')
#@title Generate Video from Representation (Optional)
direction_name = "c" #@param {type:"string"}
num_frames = 5 #@param {type:"number"}
truncation = 0.8 #@param {type:"number"}
num_samples = num_frames
assert direction_name in named_directions, \
    f'"{direction_name}" not found, please save it first using the cell above.'
# index of the saved direction inside latent_dirs
loc = named_directions[direction_name][0]
# Render `num_samples` clips along the saved direction, each from a random seed.
for i in range(num_samples):
    s = np.random.randint(0, 10000)
    # BUG FIX: pass the `truncation` form value instead of a hard-coded 0.8,
    # so editing the form field actually takes effect.
    # NOTE(review): `num_frames` controls the number of clips, while each clip
    # has a fixed n_frames=20 — confirm this is intended.
    generate_mov(seed = s, truncation = truncation, direction_vec = latent_dirs[loc], scale = 2, layers=range(named_directions[direction_name][1], named_directions[direction_name][2]), n_frames = 20, out_name = f'{model_class}_{direction_name}_{i}', loop=True)
print('Video saved to ./ganspace/out/')
```
| github_jupyter |
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/PreferredAI/tutorials/blob/master/recommender-systems/08_retrieval.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/PreferredAI/tutorials/blob/master/recommender-systems/08_retrieval.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
# Recommendation Retrieval
A typical recommender system has two phases. In the first phase (preference learning), which can be done offline, we learn the model from observations. In the second phase (retrieval), upon the appearance of a user, we retrieve the top-ranked recommendations for that user. This latter phase needs to be done online. In this tutorial, we investigate ways to speed up the retrieval phase by avoiding an exhaustive search over all items. The focus is on matrix factorization-based recommender systems.
## 1. Setup
```
!pip install --quiet cornac==1.6.1 torch>=0.4.1
import os
import sys
import time
import random
import pickle
from operator import itemgetter
from collections import defaultdict
import tqdm.auto as tqdm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import cornac
from cornac.utils import cache
from cornac.datasets import movielens
from cornac.eval_methods import RatioSplit
from cornac.models import PMF, BPR, IBPR
print(f"System version: {sys.version}")
print(f"Cornac version: {cornac.__version__}")
SEED = 42
VERBOSE = True
```
## 2. Recommendation Retrieval is a Similarity Search Problem
The two phases of a matrix factorization-based recommender system are:
1. **Learning phase:** derive a $d$-dimensional latent vector $\mathbf{u}_i$ for each user $i$ and a $d$-dimensional latent vector $\mathbf{v}_j$ for each item $j$. The user-item relationship is modeled using the inner product kernel, i.e., the user preference of user $i$ for an item $j$ is given by:
$$\hat{r}_{i,j} = \mathbf{u}_i^T \mathbf{v}_j$$
2. **Retrieval phase:** given a user vector $\mathbf{u}_i$, construct a recommendation list of $K$ items with the highest preference scores. This step requires $\mathcal{O}(d \times M)$ preference score computations for $M$ number of items.
### Probabilistic Matrix Factorization
For the retrieval phase, the straightforward approach is to exhaustively compute the preference score for every item and maintain the top-$K$. Let us take a closer look through an example based on Probabilistic Matrix Factorization (PMF) [1].
For PMF, the objective is to learn for each user $i$ a vector $\mathbf{u}_i$ and each item $j$ a vector $\mathbf{v}_j$, such that the following error function is minimized over the set of observed ratings $\mathbf{R}$:
\begin{equation}
\mathcal{L} = \sum_{r_{ui}\in \mathbf{R}}(r_{ui} - \mathbf{u}_i^T\mathbf{v}_j)^2 + \lambda \sum_{i = 1}^{N}||\mathbf{u}_i||^2 + \lambda\sum_{j = 1}^{M}||\mathbf{v}_j||^2,
\end{equation}
where $\lambda$ is a regularization term.
We begin by running PMF on the Movielens 1M dataset.
```
# Load the MovieLens 1M dataset
ml_1m = movielens.load_feedback(variant='1M')
# Instantiate an evaluation method: 60/40 train/test split
ratio_split = RatioSplit(
    data=ml_1m, test_size=0.4, rating_threshold=1.0, exclude_unknowns=False
)
# Instantiate a PMF recommender model with d=100 latent factors
pmf = PMF(k=100, max_iter=100, learning_rate=0.01, seed=SEED, verbose=VERBOSE)
# Instantiate evaluation metrics.
rec_10 = cornac.metrics.Recall(k=10)
pre_10 = cornac.metrics.Precision(k=10)
# Instantiate and then run an experiment.
cornac.Experiment(
    eval_method=ratio_split, models=[pmf], metrics=[rec_10, pre_10]
).run()
```
### Exhaustive Search
To retrieve the top-$K$ recommendation for a user $i$, we need to compute the inner product between the user's latent vector $\mathbf{u}_i$ and that of every item $\mathbf{v}_j$. This is directly proportional to the number of items $M$ as well as the number of latent factors (dimensionality of the item vector) $d$, as we will see below.
**Observation #1: Retrieval time scales linearly with the number of items**
```
comp_time = []
# For each user vector, we compute the inner product score to each item vector
pmf_item_vect = pmf.V  # item latent factors
pmf_user_vect = pmf.U  # user latent factors
# Time the full score matrix while growing the item count in steps of 10.
for incr_size in tqdm.trange(0, pmf_item_vect.shape[0], 10, disable=not VERBOSE):
    start_time = time.time()
    pred = np.matmul(pmf_item_vect[:incr_size, :], pmf_user_vect.T)
    end_time = time.time()
    # per-user average time for this item count
    comp_time.append((end_time - start_time) / pmf_user_vect.shape[0])

# plot the computation time as the number of items increases
plt.plot(range(0, pmf_item_vect.shape[0],10), comp_time, marker='o',
         label='Computation_time')
plt.xlabel('Number of Items')
plt.ylabel('Computation Time (seconds)')
plt.title('Inner product computation time (seconds) vs. number of items')
plt.grid()
plt.gcf().set_size_inches(10, 5)
plt.tight_layout()
plt.show()
```
**Observation #2: Retrieval time scales linearly with the number of latent factors**
```
comp_time = []
# Time the inner product at every latent dimensionality d = 0..k-1.
# NOTE(review): `incr_size` carries over from the previous cell (its final
# loop value), so the item count here is fixed — confirm this is intended.
for d in tqdm.trange(pmf.k, disable=not VERBOSE):
    start_time = time.time()
    pred = np.matmul(pmf_item_vect[:incr_size, :d], pmf_user_vect[:,:d].T)
    end_time = time.time()
    # average the computation time
    comp_time.append((end_time - start_time) / pmf_user_vect.shape[0])

# plot the computation time as the number of dimension increases
plt.plot(range(pmf.k), comp_time, marker='o', label='Computation_time')
plt.title('Inner product computation time (seconds) vs. number of features')
plt.xlabel('Number of features')
plt.ylabel('Computation Time (seconds)')
plt.grid()
plt.gcf().set_size_inches(10, 5)
plt.tight_layout()
plt.show()
```
## 3. Scalable Recommendation Retrieval with Locality Sensitive Hashing
From the previous example, we can see that the cost of **exhaustive search** scales linearly with the number of items, i.e., $M$, and the number of features, i.e., $d$. For modern real-world systems, the number of items could go up to the scale of millions. Therefore, it would be fruitful to make the search more efficient.
### Locality Sensitive Hashing
One way to do so is to use Locality Sensitive Hashing (LSH), which is a probabilistic space-partitioning indexing technique. One important element of LSH is the hashing function $h(.)$, which maps a data point into a hash value. This hashing function is usually designed with the *locality-sensitive* property, stated as: *two similar data points are more likely to get the same hash values as compared to two distant data points.*
One of the most popular search protocols using LSH is hash table look-up. In a nutshell, we first derive the hash codes for all items and construct a hash table that links a hash code to items of that code. When a user appears, we hash the user's latent vector into a hash code. We then look up items of that hash code and compute the top-$K$ based on inner product *only among those items*, thus drastically reducing the number of items we need to consider. To increase the search space, we can also use multiple hash tables, in which case we look up the multiple hash tables and compute the top-$K$ across the tables.
```
# Fix the RNG so the random hyperplane draws (random.gauss in CosineHashFamily) are reproducible.
random.seed(SEED)
# Implementation of Locality Sensitive Hashing for Maximum Cosine Similarity Search
class LSHIndex:
    """Multi-table LSH index over a list of points.

    Each of the L tables uses k hash functions drawn from `hash_family`;
    the per-function hashes are combined into one bucket key per table.
    """

    def __init__(self, hash_family, k, L):
        self.hash_family = hash_family
        self.k = k  # hash functions (bits) per table
        self.L = 0  # current number of tables; resize() brings it to L
        self.hash_tables = []
        self.resize(L)

    def resize(self, L):
        """ update the number of hash tables to be used """
        if L < self.L:
            self.hash_tables = self.hash_tables[:L]
        else:
            # initialise a new hash table for each hash function
            hash_funcs = [[self.hash_family.create_hash_func() for h in range(self.k)]
                          for l in range(self.L, L)]
            # defaultdict(list): each bucket maps a combined hash key -> point indices
            self.hash_tables.extend([(g, defaultdict(list)) for g in hash_funcs])
        # BUG FIX: self.L was never updated, so a second resize() would
        # rebuild tables from 0 and duplicate (or mis-truncate) tables.
        self.L = L

    def hash(self, g, p):
        # combine the k individual hashes of point p into one bucket key
        return self.hash_family.combine([h.hash(p) for h in g])

    def index(self, points):
        """ index the supplied points """
        self.points = points
        for g, table in self.hash_tables:
            for ix, p in enumerate(self.points):
                table[self.hash(g, p)].append(ix)
        # reset stats
        self.tot_touched = 0
        self.num_queries = 0

    def query(self, q, metric, max_results):
        """ find the max_results closest indexed points to q according to the supplied metric """
        candidates = set()
        for g, table in self.hash_tables:
            matches = table.get(self.hash(g, q), [])
            candidates.update(matches)
        # update stats
        self.tot_touched += len(candidates)
        self.num_queries += 1
        # rerank candidates by the metric (ascending) and keep the best
        candidates = [(ix, metric(q, self.points[ix])) for ix in candidates]
        candidates.sort(key=itemgetter(1))
        return candidates[:max_results]

    def get_avg_touched(self):
        """ mean number of candidates inspected per query """
        return self.tot_touched / self.num_queries
#################################################################################################
#--------------------------------- Cosine LSH Hash Family --------------------------------------#
#################################################################################################
class CosineHashFamily:
    """Family of random-hyperplane (SimHash) functions for cosine similarity."""

    def __init__(self,d):
        self.d = d  # dimensionality of the vectors being hashed

    def create_hash_func(self):
        # one hash function = one random Gaussian projection vector
        return CosineHash(self.rand_vec())

    def rand_vec(self):
        # sample a d-dimensional standard-normal direction
        vec = []
        for _ in range(self.d):
            vec.append(random.gauss(0, 1))
        return vec

    def combine(self, hashes):
        """combine by treating as a bitvector"""
        code = 0
        for position, h in enumerate(hashes):
            if h > 0:
                code += 2 ** position
        return code
class CosineHash:
    """A single random-hyperplane hash for cosine LSH."""

    def __init__(self, r):
        # r: the random projection vector (hyperplane normal)
        self.r = r

    def hash(self, vec):
        # bit indicating which side of the hyperplane `vec` falls on
        projection = np.dot(vec, self.r)
        return self.sgn(projection)

    def sgn(self,x):
        # 1 for strictly positive values, 0 otherwise
        if x > 0:
            return 1
        return 0
#--- inner product ---
def dot(u,v):
    """Inner product of the two vectors u and v."""
    inner = np.dot(u, v)
    return inner
#-- cosine distance ---
def cosine_distance(u,v):
    """Cosine distance: 1 minus the cosine similarity of u and v."""
    numerator = np.dot(u, v)
    denominator = (np.dot(u, u) * np.dot(v, v)) ** 0.5
    return 1 - numerator / denominator
# helper function to measure precision, recall, and touched values
# of using LSH for top-k recommendation retrieval
# ------------------------------------------------------------------------
def evaluate_LSHTopK(test_data, item_vect, user_vect, lsh_index, sim_metric, topK):
    """Average precision@topK, recall@topK and the fraction of items
    inspected ("touched") when retrieving top-K recommendations via LSH.

    test_data: object exposing `csr_matrix` (user x item ground truth).
    item_vect, user_vect: latent factor matrices (items/users x d).
    lsh_index: an LSHIndex instance; sim_metric: metric passed to its query().
    """
    lsh_prec = 0.0
    lsh_recall = 0.0
    # build index over all item vectors
    lsh_index.index(item_vect.tolist())
    # renamed from `id` to avoid shadowing the builtin
    for uid in range(user_vect.shape[0]):
        # ground-truth relevant items for this user from the test set
        test_item_ids = list(test_data.csr_matrix.getrow(uid).nonzero()[1])
        if len(test_item_ids) > 0:
            # top-k by LSH
            lsh_rec_list = list(map(itemgetter(0), lsh_index.query(user_vect[uid, :], sim_metric, topK)))
            lsh_overlaps = list(set(lsh_rec_list).intersection(test_item_ids))
            lsh_prec += len(lsh_overlaps) * 1.0 / topK
            lsh_recall += len(lsh_overlaps) * 1.0 / len(test_item_ids)
    # average candidates inspected per query, as a fraction of the catalogue
    touched = lsh_index.get_avg_touched() * 1.0 / item_vect.shape[0]
    return lsh_prec / user_vect.shape[0], lsh_recall / user_vect.shape[0], touched
```
### Issue with Using Locality Sensitive Hashing for Recommendation Retrieval
Recommendation retrieval, which relies on inner product computation is not directly compatible with Locality Sensitive Hashing, which is designed for nearest neighbor search or maximum cosine similarity search. This might lead to the significant degeneration of recommendation accuracy after indexing [2].
In the following, we introduce two approaches to this issue:
1. Vector Augmentation
2. Indexable Representation
### Solution I: Vector Augmentation
In this solution, we first apply the vector transformation for both user and item vectors, as introduced in the paper [3].
For each item vector $\mathbf{v}_j$:
\begin{equation}
\mathbf{\tilde{v}}_j = [\mathbf{v}_j; \sqrt{\phi^2 - ||\mathbf{v}_j||^2}] \hspace{1cm}(\mathrm{where}\ \phi = \max\{||\mathbf{v}_j||\})
\end{equation}
For each user vector $\mathbf{u}_i$:
\begin{equation}
\mathbf{\tilde{u}}_i = [\mathbf{u}_i; 0]
\end{equation}
We have the following observation:
\begin{equation}
\frac{\mathbf{\tilde{u}}_i^T\mathbf{\tilde{v}}_j}{||\mathbf{\tilde{u}}_i||\times ||\mathbf{\tilde{v}}_j||} = \frac{\mathbf{\tilde{u}}_i^T\mathbf{\tilde{v}}_j}{\phi ||\mathbf{\tilde{u}}_i||}
\end{equation}
i.e.,
\begin{equation}
\arg\max_{1\leq j\leq M}{\mathbf{{u}}_i^T\mathbf{{v}}_j} = \arg\max_{1\leq j\leq M}\frac{\mathbf{\tilde{u}}_i^T\mathbf{\tilde{v}}_j}{||\mathbf{\tilde{u}}_i||\times ||\mathbf{\tilde{v}}_j||}
\end{equation}
With this vector augmentation, we effectively convert a Maximum Inner Product Search (MIPS) problem to a Maximum Cosine Similarity Search (MCCS), which can be solved with LSH. Hypothetically, this will make the user and item latent vectors more compatible with LSH.
```
#apply vector augmentation
M = np.linalg.norm(pmf_item_vect, axis=1) # compute item vector norms
max_norm = max(M) # max item norm (phi in the Xbox transform)
# Xbox transform [3]: append sqrt(phi^2 - ||v||^2) to each item vector so all
# augmented item vectors have equal norm phi ...
xbox_item_vect = np.concatenate(
    (pmf_item_vect, np.sqrt(max_norm**2 - pow(M, 2)).reshape(pmf_item_vect.shape[0], -1)),
    axis=1
)
# ... and append a 0 to each user vector; inner-product ranking is preserved.
xbox_user_vect = np.concatenate(
    (pmf_user_vect, np.zeros((pmf_user_vect.shape[0], 1))),
    axis=1
)
```
#### Effectiveness of Vector Augmentation
In this experiment, we build LSH index on the output of PMF algorithm and compare the latent vectors before and after vector augmentation for LSH recommendation retrieval.
Since the augmented user and item vectors are in $(d+1)$-dimensional space, for comparison purposes we append 0s to the original non-augmented user and item vectors (to make them the same length), i.e.,
\begin{equation}
\mathbf{\tilde{v}}_j = [\mathbf{v}_j; 0]
\end{equation}
\begin{equation}
\mathbf{\tilde{u}}_i = [\mathbf{u}_i; 0]
\end{equation}
With this non-augmented "transformation", we have no change to the original inner product:
\begin{equation}
\mathbf{\tilde{u}}_i^T\mathbf{\tilde{v}}_j = \mathbf{{u}}_i^T\mathbf{{v}}_j
\end{equation}
```
# Non-augmented baseline: pad a single 0 so the vectors match the (d+1)-dim
# augmented vectors in length without changing any inner product.
padded_user_vect = np.concatenate((pmf_user_vect, np.zeros((pmf_user_vect.shape[0], 1))), axis=1)
padded_item_vect = np.concatenate((pmf_item_vect, np.zeros((pmf_item_vect.shape[0], 1))), axis=1)
```
Specifically, we report three evaluation metrics as follows:
1. lsh_prec@10 = $\text{precision@10 of LSH Indexing}$
2. lsh_rec@10 = $\text{recall@10 of LSH Indexing}$
3. touched = $\frac{\text{Average number of investigated items by LSH}}{\text{Total number of items}}$
```
topK = 10 # @param - top-K value
b_vals = [4, 8] # @param - number of hash function
L_vals = [10, 20] # @param - number of hashtables
test_data = ratio_split.test_set # testing data
print('#table \t#bit \t ?Augmented \t lsh_prec@{0} \t lsh_recall@{0} \t touched'.format(topK))
# Grid over (number of hash tables, number of hash bits per table).
for nt in L_vals:
    for b in b_vals:
        #init lsh index:
        #------ hash-family: the LSH scheme/family
        #------ k : number of hash functions
        #------ L : number of hash tables
        lsh_index = LSHIndex(hash_family = CosineHashFamily(padded_item_vect.shape[1]), k = b, L=nt)
        #performance without employing vector augmentation
        print('---------------------------------------------------------------------------------')
        # User vectors are negated because LSHIndex.query sorts candidates
        # ascending by the metric; negation puts the largest inner products
        # first. Only the first 1000 users are evaluated, for speed.
        prec_1, recall_1, touched_1 = evaluate_LSHTopK(
            test_data, padded_item_vect, -padded_user_vect[:1000], lsh_index, dot, topK
        )
        print("{}\t{}\t{}\t{:.4f}\t{:.4f}\t{:.4f}".format(nt, b, 'No', prec_1, recall_1, touched_1))
        #performance with vector augmentation
        # NOTE(review): the same lsh_index is re-indexed here; LSHIndex.index
        # appends into existing buckets without clearing them — confirm this
        # double-indexing is intended.
        prec_2, recall_2, touched_2 = evaluate_LSHTopK(
            test_data, xbox_item_vect, -xbox_user_vect[:1000], lsh_index, dot, topK
        )
        print("{}\t{}\t{}\t{:.4f}\t{:.4f}\t{:.4f}".format(nt, b, 'Yes', prec_2, recall_2, touched_2))
```
We observe from the results above that with vector augmentation, we can achieve higher recommendation accuracy after using Locality Sensitive Hashing.
What is the effect of increasing the number of bits, or the number of hash tables? Why?
### Solution II: Indexable Representation Learning
Another solution is "Indexable representation", which refers to recommendation algorithms whose user/item vectors are immediately sublinearly searchable. In this tutorial, we are going to experiment with one such model, namely Indexable Bayesian Personalized Ranking or IBPR [5] for short. IBPR is an extension of the popular framework for implicit feedback: Bayesian Personalized Ranking (BPR) [4].
#### Bayesian Personalized Ranking - BPR
Let us see the performance of BPR with exhaustive search.
```
# Train BPR (k=20 latent factors) and evaluate it with exhaustive retrieval.
rec_bpr = BPR(k=20, max_iter=100, learning_rate=0.001, seed=SEED, verbose=VERBOSE)
cornac.Experiment(
    eval_method=ratio_split, models=[rec_bpr], metrics=[rec_10, pre_10],
).run()
```
We now measure the performance of BPR after LSH indexing.
```
bpr_user_vect = rec_bpr.u_factors  # learned BPR user latent vectors
bpr_item_vect = rec_bpr.i_factors  # learned BPR item latent vectors
# apply Xbox transformation (same augmentation as for PMF above)
M = np.linalg.norm(bpr_item_vect, axis=1) # compute item vector norms
max_norm = max(M) # max item norm
xbox_bpr_item_vect = np.concatenate(
    (bpr_item_vect, np.sqrt(max_norm**2 - pow(M, 2)).reshape(bpr_item_vect.shape[0], -1)),
    axis=1
)
xbox_bpr_user_vect = np.concatenate(
    (bpr_user_vect, np.zeros((bpr_user_vect.shape[0], 1))),
    axis = 1
)
topK = 10 # @param
b_vals = [4, 8] # @param
L_vals = [10, 20] # @param
test_data = ratio_split.test_set # testing data
print('#table\t #bit \t model \t lsh_prec@{0} \t lsh_recall@{0} \t touched'.format(topK))
for nt in L_vals:
    print('------------------------------------------------------------------------------')
    for b in b_vals:
        # lsh index:
        #------ hash-family: the LSH scheme/family
        #------ k : number of hash functions
        #------ L : number of hash tables
        lsh_index = LSHIndex(hash_family = CosineHashFamily(xbox_bpr_item_vect.shape[1]), k = b, L = nt)
        # user vectors negated so ascending sort in query() yields max inner
        # products first; only the first 1000 users for speed
        lsh_bpr_prec, lsh_bpr_recall, touched_bpr = evaluate_LSHTopK(
            test_data, xbox_bpr_item_vect, -xbox_bpr_user_vect[:1000, :], lsh_index, dot, topK
        )
        print("{}\t{}\t{}\t{:.4f}\t{:.4f}\t{:.4f}".format(
            nt, b, 'BPR', lsh_bpr_prec, lsh_bpr_recall, touched_bpr)
        )
```
The results above show that in relative terms, the performance degenerates for both precision and recall.
#### Indexable Bayesian Personalized Ranking - IBPR
We will now test the effectiveness of indexable model IBPR. In this experiment, we train IBPR with the same data we use for BPR and measure the performance after LSH indexing.
```
# Train IBPR and evaluate with exhaustive retrieval first.
rec_ibpr = IBPR(k=20, max_iter=15, learning_rate=0.001, verbose=VERBOSE)
cornac.Experiment(
    eval_method=ratio_split, models =[rec_ibpr], metrics=[rec_10, pre_10]
).run()
# IBPR vectors are used directly — no Xbox augmentation is applied here.
ibpr_user_vect = rec_ibpr.U
ibpr_item_vect = rec_ibpr.V
topK = 10 # @param
b_vals = [4, 8] # @param
L_vals = [10, 20] # @param
test_data = ratio_split.test_set # testing data
print('#table\t #bit \t model \t lsh_prec@{0} \t lsh_recall@{0} \t touched'.format(topK))
for nt in L_vals:
    print('------------------------------------------------------------------------------')
    for b in b_vals:
        # lsh index:
        #------ hash-family: the LSH scheme/family
        #------ k : number of hash functions
        #------ L : number of hash tables
        lsh_index = LSHIndex(hash_family = CosineHashFamily(ibpr_item_vect.shape[1]), k = b, L = nt)
        # negated user vectors: ascending sort in query() => max inner products first
        lsh_ibpr_prec, lsh_ibpr_recall, touched_ibpr = evaluate_LSHTopK(
            test_data, ibpr_item_vect, -ibpr_user_vect[:1000,:], lsh_index, dot, topK
        )
        print("{}\t{}\t{}\t{:.4f}\t{:.4f}\t{:.4f}".format(
            nt, b, 'IBPR', lsh_ibpr_prec, lsh_ibpr_recall, touched_ibpr)
        )
```
We observe that the performance after LSH of IBPR (without vector augmentation) could be better than that of BPR (with vector augmentation), which may be attributed to having representations more compatible with indexing. As a result, we could speed up top-$K$ recommendation retrieval, while better preserving the accuracy.
## References
1. Mnih, Andriy, and Russ R. Salakhutdinov. "Probabilistic matrix factorization." Advances in neural information processing systems. 2008.
2. Stochastically Robust Personalized Ranking for LSH Recommendation Retrieval, Dung D. Le and Hady W. Lauw
AAAI Conference on Artificial Intelligence (AAAI-20)
3. Bachrach, Yoram, et al. "Speeding up the xbox recommender system using a euclidean transformation for inner-product spaces." Proceedings of the 8th ACM Conference on Recommender systems. 2014.
4. Rendle, Steffen, et al. "BPR: Bayesian personalized ranking from implicit feedback." arXiv preprint arXiv:1205.2618 (2012).
5. Le, Dung D., and Hady W. Lauw. "Indexable Bayesian personalized ranking for efficient top-k recommendation." Proceedings of the 2017 ACM on Conference on Information and Knowledge Management. 2017.
6. Cornac - A Comparative Framework for Multimodal Recommender Systems (https://cornac.preferred.ai/)
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
import urban_dictionary_scraper
import logging
import pickle
from scipy import stats
import pandas as pd
import stanza
from tqdm.notebook import tqdm
from collections import OrderedDict
from functools import partial
from concurrent.futures import ThreadPoolExecutor
from multiprocessing.pool import ThreadPool
import io
import itertools
import numpy as np
import re
import random
logging.basicConfig(level=logging.INFO)
# BUG FIX: the module is imported as `urban_dictionary_scraper`;
# `urban_dictionary` was an undefined name.
session = urban_dictionary_scraper.get_session(throttle=0.1, expiry = (7*24*3600))
stanza.download('en')
# One-off bootstrap that produced all_urls.pickle (kept for reference):
"""
all_urls = urban_dictionary_scraper.fetch_all_word_urls(session)
with open("all_urls.pickle", "wb") as f:
    pickle.dump(all_urls, f, pickle.HIGHEST_PROTOCOL)
"""
with open("all_urls.pickle", "rb") as f:
    to_fetch = pickle.load(f)
with open("all_words.pickle", "rb") as f:
    already_done = pickle.load(f)
# Skip words already fetched in a previous run.
for key in already_done.keys():
    del to_fetch[key]
done = 100 * len(already_done) / (len(already_done) + len(to_fetch))
print(f"Done {done:.2f} percent")
t = ThreadPool(5)
#with ThreadPoolExecutor(max_workers=5) as executor:
try:
    # BUG FIX: fetch_all_definitions lives in urban_dictionary_scraper;
    # the bare name was undefined in this module.
    urban_dictionary_scraper.fetch_all_definitions(session, to_fetch, already_done, save_interval=10000, executor=t)
finally:
    t.terminate()
    t.join()
with open("data/all_words.pickle", "rb") as f:
    words = pickle.load(f)
def is_clean(word, min_upvotes=20, max_word_length=40, max_symbols=2, allow_upper=False, min_word_length=4):
    """Return True if a definition passes the quality filters.

    Filters: minimum upvote count, word length within bounds, at most
    `max_symbols` characters outside [A-Za-z0-9_ .], and (optionally) no
    all-uppercase words.
    """
    text = word.word
    if word.upvotes < min_upvotes:
        return False
    if not (min_word_length <= len(text) <= max_word_length):
        return False
    # count characters that are neither word chars, spaces, nor dots
    if len(re.findall(r"[^\w .]", text)) > max_symbols:
        return False
    if text.isupper() and not allow_upper:
        return False
    return True
# Keep only words with at least one clean definition; within each kept word,
# drop the definitions that fail the is_clean filters.
clean_list = [
    (k, urban_dictionary_scraper.UrbanDictionaryWord(
        title=e.title,
        url=e.url,
        definitions=[x for x in e.definitions if is_clean(x)],
    ))
    for k,e in words.items() if any(is_clean(x) for x in e.definitions)
]
# Shuffle so downstream splits are order-independent.
random.shuffle(clean_list)
cleaned_words = OrderedDict(clean_list)
# NOTE(review): this prints the fraction of words *kept*, not the reduction.
print(f"Words reduced by {len(cleaned_words) / len(words)}")
with open("data/cleaned_words_all_def_min_upvotes_20_max_len_40_min_len_4_no_upper_randomized.pickle", "wb") as f:
    pickle.dump(cleaned_words, f, pickle.HIGHEST_PROTOCOL)
# Stanza pipeline used by proper_noun_guess below (tokenization + POS tagging).
nlp = stanza.Pipeline(processors="tokenize,pos")
def proper_noun_guess(word):
    """Heuristically decide whether `word` names a proper noun.

    POS-tags the first usage example of each definition with the global
    `nlp` stanza pipeline and returns True if the word's title appears as
    a run of consecutive PROPN tokens; returns False on a definition with
    no examples, and None if no match is found.
    """
    query = word.title.upper().strip().strip("\"").strip()
    for definition in word.definitions:
        try:
            doc = nlp(definition.examples[0])
        except IndexError:
            # definition has no usage examples
            print(f"{query}: INDEX ERROR")
            return False
        for sentence in doc.sentences:
            last_prop = []
            # renamed from `word`, which shadowed the function parameter
            for token in sentence.words:
                if token.upos == "PROPN":
                    last_prop.append(token.text.upper())
                    if query == " ".join(last_prop):
                        return True
                else:
                    # a non-proper-noun token ends the current run
                    last_prop = []
# Spot-check the proper-noun heuristic on the first ~1000 cleaned words.
pbar = tqdm(total=len(cleaned_words.values()))
for i, item in enumerate(cleaned_words.values()):
    t = proper_noun_guess(item)
    if t:
        print(f"{item.title}: {t}")
    pbar.update()
    if i > 1000:
        break
# Single sanity-check call on the first raw word.
proper_noun_guess(next(iter(words.values())))
# Flatten all definitions into a DataFrame, one row per definition.
# NOTE(review): e.examples[0] assumes every definition has at least one
# example — confirm, or rows with empty examples will raise IndexError.
defns = pd.DataFrame(
    [
        [e.word, e.meaning, e.examples[0], e.creation_epoch, e.upvotes, e.downvotes]
        for e in itertools.chain.from_iterable(e.definitions for e in words.values())
    ],
    columns=["word", "meaning", "example", "creation_epoch", "upvotes", "downvotes"]
)
# Smoothed upvote ratio with an additive prior of 20 pseudo-votes.
smoothing_prior = 20
defns["smoothed_upvotes"] = defns["upvotes"] / (defns["upvotes"] + defns["downvotes"] + smoothing_prior)
defns["smoothed_upvotes"].quantile(np.linspace(0.1, 1, 10))
# Apply the same filters as is_clean, but vectorised on the DataFrame.
cleaned_defs = defns[:]
# cleaned_defs = cleaned_defs[cleaned_defs["smoothed_upvotes"] >= 0.2]
cleaned_defs = cleaned_defs[cleaned_defs["upvotes"] >= 20]
cleaned_defs = cleaned_defs[cleaned_defs.word.str.len() <= 40]
cleaned_defs = cleaned_defs[cleaned_defs.word.str.len() >= 4]
cleaned_defs = cleaned_defs[~cleaned_defs.word.str.isupper()]
# FIX: raw strings for regex patterns — "\w" in a plain literal is an
# invalid escape sequence (DeprecationWarning, future SyntaxError).
cleaned_defs = cleaned_defs[cleaned_defs.word.str.count(r"[^\w .]") <= 2]
print(f"Reduction from {len(defns)} to {len(cleaned_defs)} ({len(cleaned_defs) / len(defns)})")
# Exploratory checks on what the filters remove.
cleaned_defs[cleaned_defs.word.str.upper().str.contains(",")].sample(20)
defns.word.str.count(r"[^\w ].").describe()
defns[defns.word.str.len() > 40].sample(n=20)
defns[defns.word.str.count(r"[^\w .]") > 2].sample(n=20)
# Distribution of combined meaning+example length, and vote counts for
# definitions above/below the ~99th-percentile meaning length (985 chars).
(defns["meaning"].str.len() + defns["example"].str.len()).quantile(np.linspace(0.01, 1, 100))
lng_defs = defns[defns["meaning"].str.len() > 985]
(lng_defs["upvotes"] + lng_defs["downvotes"]).describe()
lng_defs = defns[defns["meaning"].str.len() < 985]
(lng_defs["upvotes"] + lng_defs["downvotes"]).describe()
```
| github_jupyter |
# Representing Qubit States
You now know something about bits, and about how our familiar digital computers work. All the complex variables, objects and data structures used in modern software are basically all just big piles of bits. Those of us who work on quantum computing call these *classical variables.* The computers that use them, like the one you are using to read this article, we call *classical computers*.
In quantum computers, our basic variable is the _qubit:_ a quantum variant of the bit. These have exactly the same restrictions as normal bits do: they can store only a single binary piece of information, and can only ever give us an output of `0` or `1`. However, they can also be manipulated in ways that can only be described by quantum mechanics. This gives us new gates to play with, allowing us to find new ways to design algorithms.
To fully understand these new gates, we first need to understand how to write down qubit states. For this we will use the mathematics of vectors, matrices, and complex numbers. Though we will introduce these concepts as we go, it would be best if you are comfortable with them already. If you need a more in-depth explanation or a refresher, you can find the guide [here](../ch-prerequisites/linear_algebra.html).
## Contents
1. [Classical vs Quantum Bits](#cvsq)
1.1 [Statevectors](#statevectors)
1.2 [Qubit Notation](#notation)
1.3 [Exploring Qubits with Qiskit](#exploring-qubits)
2. [The Rules of Measurement](#rules-measurement)
2.1 [A Very Important Rule](#important-rule)
2.2 [The Implications of this Rule](#implications)
3. [The Bloch Sphere](#bloch-sphere)
3.1 [Describing the Restricted Qubit State](#bloch-sphere-1)
3.2 [Visually Representing a Qubit State](#bloch-sphere-2)
## 1. Classical vs Quantum Bits <a id="cvsq"></a>
### 1.1 Statevectors<a id="statevectors"></a>
In quantum physics we use _statevectors_ to describe the state of our system. Say we wanted to describe the position of a car along a track, this is a classical system so we could use a number $x$:

$$ x=4 $$
Alternatively, we could instead use a collection of numbers in a vector called a _statevector._ Each element in the statevector contains the probability of finding the car in a certain place:

$$
|x\rangle = \begin{bmatrix} 0\\ \vdots \\ 0 \\ 1 \\ 0 \\ \vdots \\ 0 \end{bmatrix}
\begin{matrix} \\ \\ \\ \leftarrow \\ \\ \\ \\ \end{matrix}
\begin{matrix} \\ \\ \text{Probability of} \\ \text{car being at} \\ \text{position 4} \\ \\ \\ \end{matrix}
$$
This isn’t limited to position, we could also keep a statevector of all the possible speeds the car could have, and all the possible colours the car could be. With classical systems (like the car example above), this is a silly thing to do as it requires keeping huge vectors when we only really need one number. But as we will see in this chapter, statevectors happen to be a very good way of keeping track of quantum systems, including quantum computers.
### 1.2 Qubit Notation <a id="notation"></a>
Classical bits always have a completely well-defined state: they are either `0` or `1` at every point during a computation. There is no more detail we can add to the state of a bit than this. So to write down the state of a classical bit (`c`), we can just use these two binary values. For example:
c = 0
This restriction is lifted for quantum bits. Whether we get a `0` or a `1` from a qubit only needs to be well-defined when a measurement is made to extract an output. At that point, it must commit to one of these two options. At all other times, its state will be something more complex than can be captured by a simple binary value.
To see how to describe these, we can first focus on the two simplest cases. As we saw in the last section, it is possible to prepare a qubit in a state for which it definitely gives the outcome `0` when measured.
We need a name for this state. Let's be unimaginative and call it $0$ . Similarly, there exists a qubit state that is certain to output a `1`. We'll call this $1$. These two states are completely mutually exclusive. Either the qubit definitely outputs a ```0```, or it definitely outputs a ```1```. There is no overlap. One way to represent this with mathematics is to use two orthogonal vectors.
$$
|0\rangle = \begin{bmatrix} 1 \\ 0 \end{bmatrix} \, \, \, \, |1\rangle =\begin{bmatrix} 0 \\ 1 \end{bmatrix}.
$$
This is a lot of notation to take in all at once. First, let's unpack the weird $|$ and $\rangle$. Their job is essentially just to remind us that we are talking about the vectors that represent qubit states labelled $0$ and $1$. This helps us distinguish them from things like the bit values ```0``` and ```1``` or the numbers 0 and 1. It is part of the bra-ket notation, introduced by Dirac.
If you are not familiar with vectors, you can essentially just think of them as lists of numbers which we manipulate using certain rules. If you are familiar with vectors from your high school physics classes, you'll know that these rules make vectors well-suited for describing quantities with a magnitude and a direction. For example, the velocity of an object is described perfectly with a vector. However, the way we use vectors for quantum states is slightly different to this, so don't hold on too hard to your previous intuition. It's time to do something new!
With vectors we can describe more complex states than just $|0\rangle$ and $|1\rangle$. For example, consider the vector
$$
|q_0\rangle = \begin{bmatrix} \tfrac{1}{\sqrt{2}} \\ \tfrac{i}{\sqrt{2}} \end{bmatrix} .
$$
To understand what this state means, we'll need to use the mathematical rules for manipulating vectors. Specifically, we'll need to understand how to add vectors together and how to multiply them by scalars.
<p>
<details>
<summary>Reminder: Matrix Addition and Multiplication by Scalars (Click here to expand)</summary>
<p>To add two vectors, we add their elements together:
$$|a\rangle = \begin{bmatrix}a_0 \\ a_1 \\ \vdots \\ a_n \end{bmatrix}, \quad
|b\rangle = \begin{bmatrix}b_0 \\ b_1 \\ \vdots \\ b_n \end{bmatrix}$$
$$|a\rangle + |b\rangle = \begin{bmatrix}a_0 + b_0 \\ a_1 + b_1 \\ \vdots \\ a_n + b_n \end{bmatrix} $$
</p>
<p>And to multiply a vector by a scalar, we multiply each element by the scalar:
$$x|a\rangle = \begin{bmatrix}x \times a_0 \\ x \times a_1 \\ \vdots \\ x \times a_n \end{bmatrix}$$
</p>
<p>These two rules are used to rewrite the vector $|q_0\rangle$ (as shown above):
$$
\begin{aligned}
|q_0\rangle & = \tfrac{1}{\sqrt{2}}|0\rangle + \tfrac{i}{\sqrt{2}}|1\rangle \\
& = \tfrac{1}{\sqrt{2}}\begin{bmatrix}1\\0\end{bmatrix} + \tfrac{i}{\sqrt{2}}\begin{bmatrix}0\\1\end{bmatrix}\\
& = \begin{bmatrix}\tfrac{1}{\sqrt{2}}\\0\end{bmatrix} + \begin{bmatrix}0\\\tfrac{i}{\sqrt{2}}\end{bmatrix}\\
& = \begin{bmatrix}\tfrac{1}{\sqrt{2}} \\ \tfrac{i}{\sqrt{2}} \end{bmatrix}\\
\end{aligned}
$$
</details>
</p>
<p>
<details>
<summary>Reminder: Orthonormal Bases (Click here to expand)</summary>
<p>
It was stated before that the two vectors $|0\rangle$ and $|1\rangle$ are orthonormal, this means they are both <i>orthogonal</i> and <i>normalised</i>. Orthogonal means the vectors are at right angles:
</p><p><img src="images/basis.svg"></p>
<p>And normalised means their magnitudes (length of the arrow) is equal to 1. The two vectors $|0\rangle$ and $|1\rangle$ are <i>linearly independent</i>, which means we cannot describe $|0\rangle$ in terms of $|1\rangle$, and vice versa. However, using both the vectors $|0\rangle$ and $|1\rangle$, and our rules of addition and multiplication by scalars, we can describe all possible vectors in 2D space:
</p><p><img src="images/basis2.svg"></p>
<p>Because the vectors $|0\rangle$ and $|1\rangle$ are linearly independent, and can be used to describe any vector in 2D space using vector addition and scalar multiplication, we say the vectors $|0\rangle$ and $|1\rangle$ form a <i>basis</i>. In this case, since they are both orthogonal and normalised, we call it an <i>orthonormal basis</i>.
</details>
</p>
Since the states $|0\rangle$ and $|1\rangle$ form an orthonormal basis, we can represent any 2D vector with a combination of these two states. This allows us to write the state of our qubit in the alternative form:
$$ |q_0\rangle = \tfrac{1}{\sqrt{2}}|0\rangle + \tfrac{i}{\sqrt{2}}|1\rangle $$
This vector, $|q_0\rangle$ is called the qubit's _statevector,_ it tells us everything we could possibly know about this qubit. For now, we are only able to draw a few simple conclusions about this particular example of a statevector: it is not entirely $|0\rangle$ and not entirely $|1\rangle$. Instead, it is described by a linear combination of the two. In quantum mechanics, we typically describe linear combinations such as this using the word 'superposition'.
Though our example state $|q_0\rangle$ can be expressed as a superposition of $|0\rangle$ and $|1\rangle$, it is no less a definite and well-defined qubit state than they are. To see this, we can begin to explore how a qubit can be manipulated.
### 1.3 Exploring Qubits with Qiskit <a id="exploring-qubits"></a>
First, we need to import all the tools we will need:
```
from qiskit import QuantumCircuit, assemble, Aer
from qiskit.visualization import plot_histogram, plot_bloch_vector
from math import sqrt, pi
```
In Qiskit, we use the `QuantumCircuit` object to store our circuits, this is essentially a list of the quantum operations on our circuit and the qubits they are applied to.
```
qc = QuantumCircuit(1) # Create a quantum circuit with one qubit
```
In our quantum circuits, our qubits always start out in the state $|0\rangle$. We can use the `initialize()` method to transform this into any state. We give `initialize()` the vector we want in the form of a list, and tell it which qubit(s) we want to initialise in this state:
```
qc = QuantumCircuit(1) # Create a quantum circuit with one qubit
initial_state = [0,1] # Define initial_state as |1>
qc.initialize(initial_state, 0) # Apply initialisation operation to the 0th qubit
qc.draw() # Let's view our circuit
```
We can then use one of Qiskit’s simulators to view the resulting state of our qubit. To begin with we will use the statevector simulator, but we will explain the different simulators and their uses later.
```
svsim = Aer.get_backend('statevector_simulator') # Tell Qiskit how to simulate our circuit
```
To get the results from our circuit, we use `execute` to run our circuit, giving the circuit and the backend as arguments. We then use `.result()` to get the result of this:
```
qc = QuantumCircuit(1) # Create a quantum circuit with one qubit
initial_state = [0,1] # Define initial_state as |1>
qc.initialize(initial_state, 0) # Apply initialisation operation to the 0th qubit
qobj = assemble(qc) # Create a Qobj from the circuit for the simulator to run
result = svsim.run(qobj).result() # Do the simulation and return the result
```
from `result`, we can then get the final statevector using `.get_statevector()`:
```
out_state = result.get_statevector()
print(out_state) # Display the output state vector
```
**Note:** Python uses `j` to represent $i$ in complex numbers. We see a vector with two complex elements: `0.+0.j` = 0, and `1.+0.j` = 1.
Let’s now measure our qubit as we would in a real quantum computer and see the result:
```
qc.measure_all()
qc.draw()
```
This time, instead of the statevector we will get the counts for the `0` and `1` results using `.get_counts()`:
```
qobj = assemble(qc)
result = svsim.run(qobj).result()
counts = result.get_counts()
plot_histogram(counts)
```
We can see that we (unsurprisingly) have a 100% chance of measuring $|1\rangle$. This time, let’s instead put our qubit into a superposition and see what happens. We will use the state $|q_0\rangle$ from earlier in this section:
$$ |q_0\rangle = \tfrac{1}{\sqrt{2}}|0\rangle + \tfrac{i}{\sqrt{2}}|1\rangle $$
We need to add these amplitudes to a python list. To add a complex amplitude, Python uses `j` for the imaginary unit (we normally call it "$i$" mathematically):
```
initial_state = [1/sqrt(2), 1j/sqrt(2)] # Define state |q_0>
```
And we then repeat the steps for initialising the qubit as before:
```
qc = QuantumCircuit(1) # Must redefine qc
qc.initialize(initial_state, 0) # Initialise the 0th qubit in the state `initial_state`
qobj = assemble(qc)
state = svsim.run(qobj).result().get_statevector() # Execute the circuit
print(state) # Print the result
qobj = assemble(qc)
results = svsim.run(qobj).result().get_counts()
plot_histogram(results)
```
We can see we have equal probability of measuring either $|0\rangle$ or $|1\rangle$. To explain this, we need to talk about measurement.
## 2. The Rules of Measurement <a id="rules-measurement"></a>
### 2.1 A Very Important Rule <a id="important-rule"></a>
There is a simple rule for measurement. To find the probability of measuring a state $|\psi \rangle$ in the state $|x\rangle$ we do:
$$p(|x\rangle) = | \langle x| \psi \rangle|^2$$
The symbols $\langle$ and $|$ tell us $\langle x |$ is a row vector. In quantum mechanics we call the column vectors _kets_ and the row vectors _bras._ Together they make up _bra-ket_ notation. Any ket $|a\rangle$ has a corresponding bra $\langle a|$, and we convert between them using the conjugate transpose.
<details>
<summary>Reminder: The Inner Product (Click here to expand)</summary>
<p>There are different ways to multiply vectors, here we use the <i>inner product</i>. The inner product is a generalisation of the <i>dot product</i> which you may already be familiar with. In this guide, we use the inner product between a bra (row vector) and a ket (column vector), and it follows this rule:
$$\langle a| = \begin{bmatrix}a_0^*, & a_1^*, & \dots & a_n^* \end{bmatrix}, \quad
|b\rangle = \begin{bmatrix}b_0 \\ b_1 \\ \vdots \\ b_n \end{bmatrix}$$
$$\langle a|b\rangle = a_0^* b_0 + a_1^* b_1 \dots a_n^* b_n$$
</p>
<p>We can see that the inner product of two vectors always gives us a scalar. A useful thing to remember is that the inner product of two orthogonal vectors is 0, for example if we have the orthogonal vectors $|0\rangle$ and $|1\rangle$:
$$\langle1|0\rangle = \begin{bmatrix} 0 , & 1\end{bmatrix}\begin{bmatrix}1 \\ 0\end{bmatrix} = 0$$
</p>
<p>Additionally, remember that the vectors $|0\rangle$ and $|1\rangle$ are also normalised (magnitudes are equal to 1):
$$
\begin{aligned}
\langle0|0\rangle & = \begin{bmatrix} 1 , & 0\end{bmatrix}\begin{bmatrix}1 \\ 0\end{bmatrix} = 1 \\
\langle1|1\rangle & = \begin{bmatrix} 0 , & 1\end{bmatrix}\begin{bmatrix}0 \\ 1\end{bmatrix} = 1
\end{aligned}
$$
</p>
</details>
In the equation above, $|x\rangle$ can be any qubit state. To find the probability of measuring $|x\rangle$, we take the inner product of $|x\rangle$ and the state we are measuring (in this case $|\psi\rangle$), then square the magnitude. This may seem a little convoluted, but it will soon become second nature.
If we look at the state $|q_0\rangle$ from before, we can see the probability of measuring $|0\rangle$ is indeed $0.5$:
$$
\begin{aligned}
|q_0\rangle & = \tfrac{1}{\sqrt{2}}|0\rangle + \tfrac{i}{\sqrt{2}}|1\rangle \\
\langle 0| q_0 \rangle & = \tfrac{1}{\sqrt{2}}\langle 0|0\rangle + \tfrac{i}{\sqrt{2}}\langle 0|1\rangle \\
& = \tfrac{1}{\sqrt{2}}\cdot 1 + \tfrac{i}{\sqrt{2}} \cdot 0\\
& = \tfrac{1}{\sqrt{2}}\\
|\langle 0| q_0 \rangle|^2 & = \tfrac{1}{2}
\end{aligned}
$$
You should verify the probability of measuring $|1\rangle$ as an exercise.
This rule governs how we get information out of quantum states. It is therefore very important for everything we do in quantum computation. It also immediately implies several important facts.
### 2.2 The Implications of this Rule <a id="implications"></a>
### #1 Normalisation
The rule shows us that amplitudes are related to probabilities. If we want the probabilities to add up to 1 (which they should!), we need to ensure that the statevector is properly normalised. Specifically, we need the magnitude of the state vector to be 1.
$$ \langle\psi|\psi\rangle = 1 \\ $$
Thus if:
$$ |\psi\rangle = \alpha|0\rangle + \beta|1\rangle $$
Then:
$$ \sqrt{|\alpha|^2 + |\beta|^2} = 1 $$
This explains the factors of $\sqrt{2}$ you have seen throughout this chapter. In fact, if we try to give `initialize()` a vector that isn’t normalised, it will give us an error:
```
vector = [1,1]
qc.initialize(vector, 0)
```
#### Quick Exercise
1. Create a state vector that will give a $1/3$ probability of measuring $|0\rangle$.
2. Create a different state vector that will give the same measurement probabilities.
3. Verify that the probability of measuring $|1\rangle$ for these two states is $2/3$.
You can check your answer in the widget below (accepts answers ±1% accuracy, you can use numpy terms such as '`pi`' and '`sqrt()`' in the vector):
```
# Run the code in this cell to interact with the widget
from qiskit_textbook.widgets import state_vector_exercise
state_vector_exercise(target=1/3)
```
### #2 Alternative measurement
The measurement rule gives us the probability $p(|x\rangle)$ that a state $|\psi\rangle$ is measured as $|x\rangle$. Nowhere does it tell us that $|x\rangle$ can only be either $|0\rangle$ or $|1\rangle$.
The measurements we have considered so far are in fact only one of an infinite number of possible ways to measure a qubit. For any orthogonal pair of states, we can define a measurement that would cause a qubit to choose between the two.
This possibility will be explored more in the next section. For now, just bear in mind that $|x\rangle$ is not limited to being simply $|0\rangle$ or $|1\rangle$.
### #3 Global Phase
We know that measuring the state $|1\rangle$ will give us the output `1` with certainty. But we are also able to write down states such as
$$\begin{bmatrix}0 \\ i\end{bmatrix} = i|1\rangle.$$
To see how this behaves, we apply the measurement rule.
$$ |\langle x| (i|1\rangle) |^2 = | i \langle x|1\rangle|^2 = |\langle x|1\rangle|^2 $$
Here we find that the factor of $i$ disappears once we take the magnitude of the complex number. This effect is completely independent of the measured state $|x\rangle$. It does not matter what measurement we are considering, the probabilities for the state $i|1\rangle$ are identical to those for $|1\rangle$. Since measurements are the only way we can extract any information from a qubit, this implies that these two states are equivalent in all ways that are physically relevant.
More generally, we refer to any overall factor $\gamma$ on a state for which $|\gamma|=1$ as a 'global phase'. States that differ only by a global phase are physically indistinguishable.
$$ |\langle x| ( \gamma |a\rangle) |^2 = | \gamma \langle x|a\rangle|^2 = |\langle x|a\rangle|^2 $$
Note that this is distinct from the phase difference _between_ terms in a superposition, which is known as the 'relative phase'. This becomes relevant once we consider different types of measurement and multiple qubits.
### #4 The Observer Effect
We know that the amplitudes contain information about the probability of us finding the qubit in a specific state, but once we have measured the qubit, we know with certainty what the state of the qubit is. For example, if we measure a qubit in the state:
$$ |q\rangle = \alpha|0\rangle + \beta|1\rangle$$
And find it in the state $|0\rangle$, if we measure again, there is a 100% chance of finding the qubit in the state $|0\rangle$. This means the act of measuring _changes_ the state of our qubits.
$$ |q\rangle = \begin{bmatrix} \alpha \\ \beta \end{bmatrix} \xrightarrow{\text{Measure }|0\rangle} |q\rangle = |0\rangle = \begin{bmatrix} 1 \\ 0 \end{bmatrix}$$
We sometimes refer to this as _collapsing_ the state of the qubit. It is a potent effect, and so one that must be used wisely. For example, were we to constantly measure each of our qubits to keep track of their value at each point in a computation, they would always simply be in a well-defined state of either $|0\rangle$ or $|1\rangle$. As such, they would be no different from classical bits and our computation could be easily replaced by a classical computation. To achieve truly quantum computation we must allow the qubits to explore more complex states. Measurements are therefore only used when we need to extract an output. This means that we often place all the measurements at the end of our quantum circuit.
We can demonstrate this using Qiskit’s statevector simulator. Let's initialise a qubit in superposition:
```
qc = QuantumCircuit(1) # We are redefining qc
initial_state = [0.+1.j/sqrt(2),1/sqrt(2)+0.j]
qc.initialize(initial_state, 0)
qc.draw()
```
This should initialise our qubit in the state:
$$ |q\rangle = \tfrac{i}{\sqrt{2}}|0\rangle + \tfrac{1}{\sqrt{2}}|1\rangle $$
We can verify this using the simulator:
```
qobj = assemble(qc)
state = svsim.run(qobj).result().get_statevector()
print("Qubit State = " + str(state))
```
We can see here the qubit is initialised in the state `[0.+0.70710678j 0.70710678+0.j]`, which is the state we expected.
Let’s now measure this qubit:
```
qc.measure_all()
qc.draw()
```
When we simulate this entire circuit, we can see that one of the amplitudes is _always_ 0:
```
qobj = assemble(qc)
state = svsim.run(qobj).result().get_statevector()
print("State of Measured Qubit = " + str(state))
```
You can re-run this cell a few times to reinitialise the qubit and measure it again. You will notice that either outcome is equally probable, but that the state of the qubit is never a superposition of $|0\rangle$ and $|1\rangle$. Somewhat interestingly, the global phase on the state $|0\rangle$ survives, but since this is global phase, we can never measure it on a real quantum computer.
### A Note about Quantum Simulators
We can see that writing down a qubit’s state requires keeping track of two complex numbers, but when using a real quantum computer we will only ever receive a yes-or-no (`0` or `1`) answer for each qubit. The output of a 10-qubit quantum computer will look like this:
`0110111110`
Just 10 bits, no superposition or complex amplitudes. When using a real quantum computer, we cannot see the states of our qubits mid-computation, as this would destroy them! This behaviour is not ideal for learning, so Qiskit provides different quantum simulators: The `qasm_simulator` behaves as if you are interacting with a real quantum computer, and will not allow you to use `.get_statevector()`. Alternatively, `statevector_simulator`, (which we have been using in this chapter) does allow peeking at the quantum states before measurement, as we have seen.
## 3. The Bloch Sphere <a id="bloch-sphere"></a>
### 3.1 Describing the Restricted Qubit State <a id="bloch-sphere-1"></a>
We saw earlier in this chapter that the general state of a qubit ($|q\rangle$) is:
$$
|q\rangle = \alpha|0\rangle + \beta|1\rangle
$$
$$
\alpha, \beta \in \mathbb{C}
$$
(The second line tells us $\alpha$ and $\beta$ are complex numbers). The first two implications in section 2 tell us that we cannot differentiate between some of these states. This means we can be more specific in our description of the qubit.
Firstly, since we cannot measure global phase, we can only measure the difference in phase between the states $|0\rangle$ and $|1\rangle$. Instead of having $\alpha$ and $\beta$ be complex, we can confine them to the real numbers and add a term to tell us the relative phase between them:
$$
|q\rangle = \alpha|0\rangle + e^{i\phi}\beta|1\rangle
$$
$$
\alpha, \beta, \phi \in \mathbb{R}
$$
Finally, since the qubit state must be normalised, i.e.
$$
\sqrt{\alpha^2 + \beta^2} = 1
$$
we can use the trigonometric identity:
$$
\sqrt{\sin^2{x} + \cos^2{x}} = 1
$$
to describe the real $\alpha$ and $\beta$ in terms of one variable, $\theta$:
$$
\alpha = \cos{\tfrac{\theta}{2}}, \quad \beta=\sin{\tfrac{\theta}{2}}
$$
From this we can describe the state of any qubit using the two variables $\phi$ and $\theta$:
$$
|q\rangle = \cos{\tfrac{\theta}{2}}|0\rangle + e^{i\phi}\sin{\tfrac{\theta}{2}}|1\rangle
$$
$$
\theta, \phi \in \mathbb{R}
$$
### 3.2 Visually Representing a Qubit State <a id="bloch-sphere-2"></a>
We want to plot our general qubit state:
$$
|q\rangle = \cos{\tfrac{\theta}{2}}|0\rangle + e^{i\phi}\sin{\tfrac{\theta}{2}}|1\rangle
$$
If we interpret $\theta$ and $\phi$ as spherical co-ordinates ($r = 1$, since the magnitude of the qubit state is $1$), we can plot any single qubit state on the surface of a sphere, known as the _Bloch sphere._
Below we have plotted a qubit in the state $|{+}\rangle$. In this case, $\theta = \pi/2$ and $\phi = 0$.
(Qiskit has a function to plot a bloch sphere, `plot_bloch_vector()`, but at the time of writing it only takes cartesian coordinates. We have included a function that does the conversion automatically).
```
from qiskit_textbook.widgets import plot_bloch_vector_spherical
coords = [pi/2,0,1] # [Theta, Phi, Radius]
plot_bloch_vector_spherical(coords) # Bloch Vector with spherical coordinates
```
#### Warning!
When first learning about qubit states, it's easy to confuse the qubit's _statevector_ with its _Bloch vector_. Remember the statevector is the vector discussed in [1.2](#notation), that holds the amplitudes for the two states our qubit can be in. The Bloch vector is a visualisation tool that maps the 2D, complex statevector onto real, 3D space.
#### Quick Exercise
Use `plot_bloch_vector()` or `plot_bloch_vector_spherical()` to plot a qubit in the states:
1. $|0\rangle$
2. $|1\rangle$
3. $\tfrac{1}{\sqrt{2}}(|0\rangle + |1\rangle)$
4. $\tfrac{1}{\sqrt{2}}(|0\rangle - i|1\rangle)$
5. $\tfrac{1}{\sqrt{2}}\begin{bmatrix}i\\1\end{bmatrix}$
We have also included below a widget that converts from spherical co-ordinates to cartesian, for use with `plot_bloch_vector()`:
```
from qiskit_textbook.widgets import bloch_calc
bloch_calc()
import qiskit
qiskit.__qiskit_version__
```
| github_jupyter |
# Ensemble
```
import numpy as np
import math
import torch
import import_ipynb
from . import dist
class Ensemble:
    """Aggregate the predictions of several networks into one model.

    The ensemble calls every member network on the same input, stacks the
    outputs, and reduces them with an aggregation function (`mode`) weighted
    by per-network `reputations`.
    """

    def __init__(
        self,
        nets: list,
        mode: callable = None,
        reputations=None
    ):
        """Create an ensemble model and its methods.

        Parameters
        ----------
        nets : list
            List of neural networks (models).
        mode : function
            Function. Inputs of the function are outputs from each model.
            Outputs of the function are the aggregated results.
            Defaults to the module-level `avg`.
        reputations : np.array
            Sum of `reputations` SHOULD be 1.
            `len(reputations)` SHOULD be same as number of `nets`.
            Defaults to a uniform distribution over the nets.
        """
        self.nets = nets
        self.num_nets = len(self.nets)
        # Explicit None check: `mode or avg` would also silently replace any
        # falsy (but intentional) value with the default.
        if mode is None:
            mode = avg  # module-level weighted average, defined below
        if reputations is None:
            reputations = dist.uniform(self.num_nets)
        self.set_mode(mode)
        self.update_reputations(reputations)

    def set_mode(self, new_mode: callable):
        """Replace the aggregation function used by `forward`."""
        self.mode = new_mode

    def update_reputations(self, new_reputations):
        """Validate and store the per-network weights as an `np.ndarray`."""
        assert len(new_reputations) == self.num_nets, \
            "dim of `reputations` SHOULD be same as len(nets)"
        assert math.isclose(sum(new_reputations), 1.), \
            "sum of `reputations` SHOULD be 1."
        # `isinstance` instead of a direct type comparison: accepts
        # np.ndarray subclasses and converts lists et al.
        if not isinstance(new_reputations, np.ndarray):
            new_reputations = np.asarray(new_reputations)
        self.reputations = new_reputations

    def __call__(self, inputs):
        return self.forward(inputs)

    def forward(self, inputs):
        """Calculate inference result of this (self) ensemble model.

        Runs every member network on `inputs`, stacks the results along a
        new leading dimension, and aggregates them with `self.mode`.
        """
        outputs = torch.stack([net(inputs) for net in self.nets])
        return self.mode(outputs, self.reputations)

    def eval(self):
        """Put every member network into evaluation mode."""
        for net in self.nets:
            net.eval()
def avg(outputs, reputations=None):
    """Calculate the reputation-weighted average of stacked outputs.

    Parameters
    ----------
    outputs : torch.Tensor
        Stacked network outputs, shape ``(num_nets, *)``.
    reputations : array-like, optional
        Per-network weights summing to 1; uniform when omitted.

    Returns
    -------
    torch.Tensor
        Weighted sum over the net dimension, shape ``outputs.shape[1:]``.
    """
    if reputations is None:
        reputations = dist.uniform(len(outputs))
    # Vectorized in torch instead of a Python loop over nets:
    # reshape the weights to (num_nets, 1, 1, ...) so they broadcast
    # over the trailing dimensions of `outputs`.
    weights = torch.as_tensor(
        reputations, dtype=outputs.dtype, device=outputs.device
    )
    weights = weights.reshape(-1, *([1] * (outputs.dim() - 1)))
    return torch.sum(outputs * weights, dim=0)
def med(outputs, reputations=None):
    """Calculate weighted median.

    See https://en.wikipedia.org/wiki/Weighted_median for weighted median.
    For each (batch, class) entry, returns the output of the net at the
    first position — in ascending value order — where the cumulative
    reputation reaches 0.5.

    Parameters
    ----------
    outputs : torch.Tensor
        Stacked network outputs, shape ``(num_nets, batch_size, num_classes)``.
    reputations : array-like, optional
        Per-network weights summing to 1; uniform when omitted.

    Returns
    -------
    torch.Tensor
        Weighted medians, shape ``(batch_size, num_classes)``.
    """
    if reputations is None:
        reputations = dist.uniform(len(outputs))
    # calculates sorted `outputs`' indexes
    selectors = outputs.data.sort(dim=0)[1]  # [0]: values, [1]: indexes
    # shape: (num_nets, batch_size, num_classes)
    # value: which network (index of net)
    # Reputations rearranged into the same sorted order.
    # `as_tensor` (vs `from_numpy`) also accepts plain lists.
    sorted_repus = torch.as_tensor(reputations)[selectors]
    # Vectorized replacement for the former triple Python loop:
    # count the sorted positions whose cumulative reputation is still
    # below 0.5 — that count is the index of the first position where
    # the cumulative sum reaches 0.5. Reputations sum to 1, so such a
    # position always exists.
    median_pos = (sorted_repus.cumsum(dim=0) < 0.5).sum(dim=0)
    # shape: (batch_size, num_classes), dtype int64
    # Map the sorted position back to the owning net, then pick that
    # net's (unsorted) output for each entry.
    net_idx = selectors.gather(0, median_pos.unsqueeze(0))
    return outputs.gather(0, net_idx).squeeze(0)
def max(outputs, reputations=None):  # noqa: A001 - public name kept; shadows builtins.max
    """Calculate weighted max.

    For each (batch, class) entry, returns the (unscaled) output of the
    net whose reputation-scaled output is largest.

    Parameters
    ----------
    outputs : torch.Tensor
        Stacked network outputs, shape ``(num_nets, batch_size, num_classes)``.
    reputations : array-like, optional
        Per-network weights summing to 1; uniform when omitted.

    Returns
    -------
    torch.Tensor
        Selected outputs, shape ``(batch_size, num_classes)``.
    """
    if reputations is None:
        reputations = dist.uniform(len(outputs))
    # Scale each net's output by its reputation in one broadcasted
    # multiply (replaces the former per-net Python loop): reshape the
    # weights to (num_nets, 1, 1, ...) to broadcast over trailing dims.
    weights = torch.as_tensor(
        reputations, dtype=outputs.dtype, device=outputs.device
    )
    weights = weights.reshape(-1, *([1] * (outputs.dim() - 1)))
    selectors = (outputs * weights).data.max(dim=0)[1]  # [0]: values, [1]: indexes
    # shape: (batch_size, num_classes)
    # value: which network (index of net)
    # Pick the winning net's original output per entry (replaces the
    # former double Python loop).
    return outputs.gather(0, selectors.unsqueeze(0)).squeeze(0)
```
# main
```
if __name__ == "__main__":
    import os
    import torch.nn as nn
    import torch.optim as optim
    import torchvision.datasets as dset
    import torchvision.transforms as transforms
    from torch.utils.data import DataLoader  # TODO: DistributedDataParallel
    import import_ipynb
    from ml import train, test
    import nets
    """Hyperparams"""
    numNets = 5
    numWorkers = 4
    cuda = True
    base_path = './ensemble_test'
    # One train/test CSV log per member network, under ./ensemble_test/<i>/.
    trainFiles = [None for _ in range(numNets)]
    testFiles = [None for _ in range(numNets)]
    for i in range(numNets):
        path = os.path.join(base_path, str(i))
        os.makedirs(path, exist_ok=True)
        trainFiles[i] = open(os.path.join(path, 'train.csv'), 'w')
        testFiles[i] = open(os.path.join(path, 'test.csv'), 'w')
    epochs = 2
    batchSz = 256
    """Datasets"""
    # # gets mean and std
    # transform = transforms.Compose([transforms.ToTensor()])
    # dataset = dset.CIFAR10(root='cifar', train=True, download=True, transform=transform)
    # normMean, normStd = dist.get_norm(dataset)
    # Precomputed CIFAR-10 per-channel mean/std (see commented-out code above).
    normMean = [0.49139968, 0.48215841, 0.44653091]
    normStd = [0.24703223, 0.24348513, 0.26158784]
    normTransform = transforms.Normalize(normMean, normStd)
    # Train-time augmentation: random crop + horizontal flip.
    trainTransform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normTransform
    ])
    # Deterministic transform for evaluation.
    testTransform = transforms.Compose([
        transforms.ToTensor(),
        normTransform
    ])
    trainset = dset.CIFAR10(root='cifar', train=True, download=True, transform=trainTransform)
    # BUGFIX: the test set previously used `trainTransform`, applying random
    # augmentation at evaluation time; use the deterministic `testTransform`.
    testset = dset.CIFAR10(root='cifar', train=False, download=True, transform=testTransform)
    # splits datasets: Pareto-distributed shard sizes across the nets
    splited_trainset = dist.random_split_by_dist(
        trainset,
        size=numNets,
        dist=dist.pareto,
        alpha=2.
    )
    splited_testset = dist.random_split_by_dist(
        testset,
        size=numNets,
        dist=dist.pareto,
        alpha=2.
    )
    # num_workers: number of CPU cores to use for data loading
    # pin_memory: being able to speed up the host to device transfer by enabling
    kwargs = {'num_workers': numWorkers, 'pin_memory': cuda}
    # loaders
    trainLoaders = [DataLoader(
        splited_trainset[i], batch_size=batchSz, shuffle=True, **kwargs
    ) for i in range(numNets)]
    testLoaders = [DataLoader(
        splited_testset[i], batch_size=batchSz, shuffle=True, **kwargs
    ) for i in range(numNets)]
    """Nets"""
    num_classes = 10
    resnets = [nets.resnet18(num_classes=num_classes) for _ in range(numNets)]
    criterions = [nn.CrossEntropyLoss() for _ in range(numNets)]
    optimizers = [optim.SGD(net.parameters(), lr=1e-1, momentum=0.9) for net in resnets]
    if cuda:
        for i, net in enumerate(resnets):
            # if multi-gpus
            # BUGFIX: rebind the (possibly DataParallel-wrapped) net back into
            # `resnets`; previously the wrapper was created and then discarded,
            # so multi-GPU training never actually used it.
            if torch.cuda.device_count() > 1:
                net = nn.DataParallel(net)
            resnets[i] = net.cuda()
    """Train & Test models"""
    for i in range(numNets):
        for epoch in range(epochs):
            train(
                resnets[i], criterions[i], optimizers[i], trainLoaders[i],
                epoch=epoch, cuda=cuda, log=True, log_file=trainFiles[i]
            )
            test(
                resnets[i], criterions[i], testLoaders[i],
                epoch=epoch, cuda=cuda, log=True, log_file=testFiles[i]
            )
    """Test the ensemble model"""
    ensemble = Ensemble(resnets, mode=med, reputations=[0.05, 0.2, 0.3, 0.4, 0.05])
    testFile = open(os.path.join(base_path, 'test.csv'), 'w')
    for i in range(numNets):
        test(
            ensemble, criterions[i], testLoaders[i],
            epoch=0, cuda=cuda, log=True, log_file=testFile
        )
    # Flush and close all CSV logs (previously left open until interpreter exit).
    for f in trainFiles + testFiles + [testFile]:
        f.close()
```
| github_jupyter |
# Working with NetCDF files
One of the most common file formats within environmental science is NetCDF ([Network Common Data Form](https://www.unidata.ucar.edu/software/netcdf/)).
This format allows for storage of multiple variables, over multiple dimensions (i.e. N-dimensional arrays).
Files also contain the associated history and variable attributes.
Example dataset format: (from http://xarray.pydata.org/en/stable/data-structures.html)
<br>
<img src="../figures/dataset-diagram.png">
<br>
If you're not familiar with NetCDF, and would like to know more, there is a bit more general information at the bottom of this notebook.
For now, we'll simply focus on how to access and work with these files in python ...
# NetCDF in python
There are a few different packages that can be used to access data from NetCDF files.
These include:
* [netCDF4](https://unidata.github.io/netcdf4-python/netCDF4/index.html)
* Core NetCDF package within python.
* [iris](https://scitools.org.uk/iris/docs/latest/index.html)
* Developed for earth system data.
* Data and metadata read into and stored within "cubes".
* [xarray](http://xarray.pydata.org/en/stable/)
* A higher-level package, with a pandas-like interface for netCDF.
* What we'll focus on here today...
## netCDF4
Contains everything you need to read/modify/create netCDF files. e.g.
```python
from netCDF4 import Dataset
import numpy as np
openfile = Dataset('../data/cefas_GETM_nwes.nc4')
bathymetry = openfile.variables['bathymetry'][:]
```
Variables are read into NumPy arrays (masked arrays if missing values specified).
## xarray
* Alternative to plain netCDF4 access from python.
* Brings the power of pandas to environmental sciences, by providing N-dimensional variants of the core pandas data structures.
* Worth using for N-dimensional data, even when not reading netCDF files?
| Pandas | xarray |
|---|---|
| 1-D Series | DataArray |
| DataFrame | Dataset |
DataArray uses names for each dimension, making it easier to track than by just using axis numbers.
For example, if you want to average your DataArray (da) over time, it is possible to write `da.mean(dim='time')`
You don't have to remember the index of the time axis.
Compare:
```python
# xarray style
>>> da.sel(time='2018-01-12').max(dim='ensemble')
# standard numpy style
>>> array[3, :, :].max(axis=2)
```
Without xarray, you need to first check which row refers to `time='2018-01-12'`, and which dimension is relevant for the ensemble.
In the NumPy example, these choices are also not obvious to anyone reading the code at a later date.
#### The main advantages of using xarray versus plain netCDF4 are:
* intelligent selection along labelled dimensions (and also indices)
* [groupby operations](http://xarray.pydata.org/en/stable/generated/xarray.DataArray.groupby.html)
* data alignment across named axes
* IO (netcdf)
* Attributes/metadata held with the dataset.
* conversion from and to Pandas.DataFrames
## Xarray as Pandas for N dimensions
```
# Import everything that we are going to need... but not more
import pandas as pd
import xarray as xr
import numpy as np
pd_s = pd.Series(range(3), index=list('abc'))
print(pd_s)
#convert 1D series to ND aware DataArray
da = xr.DataArray(pd_s)
print(da)
#convert 2D DataFrame to ND aware Dataset
df = pd.DataFrame.from_dict({'A': [1, 2, 3], 'B': [4, 5, 6]},
orient='index', columns=['one', 'two', 'three'])
df
ds = xr.Dataset.from_dataframe(df)
ds
```
---
# Let's open a netCDF file
Xarray allows you to open both local and remote datasets.
Remote datasets can be accessed through [OpenDAP](http://xarray.pydata.org/en/stable/io.html#opendap), allowing you to download (and subset) data available online.
e.g. you can read ocean colour data directly into python (from a dataset accessible online [here](earthdata.nasa.gov/collaborate/open-data-services-and-software/api/opendap/opendap-servers)):
```python
remote_data = xr.open_dataset(
'https://oceandata.sci.gsfc.nasa.gov:443/opendap/MODISA/L3SMI/2020/176/A2020176.L3m_DAY_CHL_chlor_a_9km.nc')
```
Here we'll use a file available locally on your machine (find in your data folder): `cefas_GETM_nwes.nc4`
This is output from a 3D ocean model, GETM, from the European NW Shelf.
### Open our dataset
```
GETM = xr.open_dataset('../data/cefas_GETM_nwes.nc4')
GETM
```
We can extract information on the dimensions, coordinates and attributes of the dataset
```
# List dimensions
GETM.dims
```
* latc = latitude
* lonc = longitude
* time = time!
* level = depth surface within the 3D model (terrain-following vertical coordinate).
```
# Extract coordinates
print(type(GETM.coords['latc']))
GETM.coords['latc'].shape
# List name of dataset attributes
GETM.attrs.keys()
# List variables
#GETM.data_vars
for var in GETM.data_vars:
print(var)
```
Note that time is automatically read/stored in datetime format - xarray understands the units and reference time.
* This makes for easier subsetting or slicing, as you'll find out later...
### Extract variable from dataset
```
temp = GETM['temp']
print(temp.shape)
temp
# Can also use:
# GETM.temp
```
Check variable attributes, in the same way we access DataSet attributes
```
print(temp.attrs)
print( f"Variable { temp.attrs['long_name'] } has units { temp.attrs['units'] }" )
```
### Accessing data values
Data can be subset using standard indexing methods.
```
temp[0, 0, 90, 100]
```
Note that the DataArray subset keeps track of the associated coordinates, as well as other attributes.
Behind the scenes, data values are still stored as NumPy arrays.
```
print(type(temp.values))
temp.values[0, 0, 90, 100]
```
---
## Xarray Indexing and selecting data
Xarray offers a variety of ways to subset your data.
From http://xarray.pydata.org/
<br>
<img src="../figures/xarray_indexing_table.png">
Subsets of our temperature variable, `temp`:
```
#positional by integer
print(temp.dims)
temp[0, 2, :, :].shape
# positional by label (coordinate value)
print( temp.loc['1996-02-02T01:00:00', 6, :, :].shape )
# by name and integer - note that we use round brackets here
print( temp.isel(level=1, latc=90, lonc=100).shape )
# by name and label
print( temp.sel(time='1996-02-02T01:00:00').shape )
```
Using axes names, it's also possible to make a subset of an entire Dataset (across all variables)
```
GETM.sel(time='1996-02-02T01:00:00', level=6)
```
### Define selection using nearest value
In examples above, you use the coordinate values to make the selection by label.
If the value you want doesn't exist, it is possible to interpolate e.g. to the nearest index:
```
temp.sel(level=2, lonc=-5.0, latc=50.0, method='nearest')
```
Tolerance limits can be set for "nearest" coordinate values.
```
# e.g. latc=-50 should not yield data
lat = -50
limit = 0.5
try:
print(temp.sel(level=1, lonc=-5.0, latc=lat, method='nearest', tolerance=limit))
except KeyError:
print(f'ERROR: {lat} outside tolerance of {limit}')
```
Note: Other `method` options available are:
* `backfill` / `bfill` - propagate values backward
* `pad` / `ffill` - propagate values forward
* `None` - default, exact matches only
More information can be found in the xarray docs [here](http://xarray.pydata.org/en/stable/indexing.html).
You can also interpolate between values, as discussed [here](xarray.pydata.org/en/stable/interpolation.html).
### Slicing data
You can also extract **slices** or subsets over ranges of axes values, e.g.
```
GETM.sel(time=slice('1996-02-02','1996-02-04'), lonc=slice(-5, 10))
```
Slices do not need to match specific values; they will find all options within the given range (inclusive).
---
# Exercise 1
From our GETM dataset (loaded above), can you extract the following data for the ocean conditions off Great Yarmouth?
The coordinates here are 52.6 deg N, 1.75 deg E.
a) the bathymetry (ocean depth)
b) the temperature profile (i.e. all levels) at the same location, on 1st February 1996?
```
# Your code here:
# Hint: can you match the latitude and longitude exactly, or do you need to find the nearest value?
#a)
# b)
```
---
---
# Plotting is easy
Xarray enables simple plotting, to easily view your data.
```
GETM['temp'].isel(time=0, level=0).plot()
```
It will automatically plot 2D shading or 1D lines, dependent on the shape of the DataArray.
```
GETM.temp.sel(lonc=1.75, latc=52.6, level=1, method='nearest').plot()
```
## Other plotting packages are still available
You may still want to tailor plots to your own design e.g. creating figures for publication or presentation.
For example, let's look an example with cartopy.
```
%matplotlib inline
import matplotlib.pyplot as plt
import cartopy
import cartopy.crs as ccrs
```
Define a general mapping function
```
#def make_map(ds, var='', title=None, units=None):
def make_map():
    """Create a Stereographic-projection map of the European NW shelf.

    Returns the matplotlib figure and the cartopy GeoAxes so that data can
    be plotted on top afterwards (with ``transform=ccrs.PlateCarree()``).
    """
    # create figure and axes instances
    fig = plt.figure(figsize=(8,4))
    ax = fig.add_subplot(111, projection=ccrs.Stereographic(central_latitude=60))
    # Extent given in lon/lat degrees, hence the PlateCarree crs here
    ax.set_extent([-10, 15, 49, 60], crs=ccrs.PlateCarree())
    gl = ax.gridlines(draw_labels=False)
    # 50m-resolution Natural Earth coastline, grey land fill
    feature = cartopy.feature.NaturalEarthFeature(name='coastline',
                                                  category='physical',
                                                  scale='50m',
                                                  edgecolor='0.5',
                                                  facecolor='0.8')
    ax.add_feature(feature)
    return fig, ax
make_map();
```
We can plot our chosen data on the map, and use attributes to annotate the figure.
```
# Extract our chosen data and coordinates
latc = GETM.coords['latc']
lonc = GETM.coords['lonc']
var = GETM['temp'].sel(time='1996-02-02T01:00:00', level=21)
# Create the figure (using function above)
fig, ax = make_map()
# draw filled contours onto the map axes (ax).
h = ax.contourf(lonc, latc, var, 50, cmap=plt.cm.coolwarm, transform=ccrs.PlateCarree())
# add colorbar.
cbar = fig.colorbar(h)
# with unit label
cbar.set_label(var.units)
# add a title
ax.set_title(f'A slice of {var.long_name}');
```
---
#### Reminder: Choice of colormaps
The "default" colormap in python is viridis. However, colormaps can (and should) be varied to suit the data being shown.
For example, you would likely prefer a *sequential* scale for bathymetry, as opposed to a *diverging* scale for rainfall anomalies?
There is a large variety of maps to choose from in matplotlib, as shown [here](https://matplotlib.org/2.0.1/users/colormaps.html).
**You should always choose *perceptually uniform* shading to ensure that data is not misrepresented.**
There are a large number of articles explaining why you should avoid using rainbow/jet e.g.
* [The end of the rainbow](http://www.climate-lab-book.ac.uk/2014/end-of-the-rainbow/)
* [A dangerous rainbow: Why colormaps matter](https://blogs.mathworks.com/headlines/2018/10/10/a-dangerous-rainbow-why-colormaps-matter/)
---
## Arithmetic operations
You can work with DataArrays in the same way as a NumPy array.
Benefit here is that calculations using DataArrays will give a result that is also a DataArray.
```
top = GETM['temp'].isel(time=0, level=4)
bottom = GETM['temp'].isel(time=0, level=0)
diff = top - bottom
print(type(diff))
diff.plot()
```
### Available methods and statistics
Methods available in pandas, are also available in xarray.
When performing calculations, can refer to dimensions by name or axis number.
```
# average over time (using axis number)
time_ave = GETM['temp'].mean(axis=0)
print(time_ave.shape)
# average over time and level (vertical)
timelev_ave = GETM['temp'].mean(['time','level'])
timelev_ave.plot()
# average over time and longitude
# i.e. zonal average (meridional section)
timelon_ave = GETM['temp'].mean(['time','lonc']).isel(level=4)
timelon_ave.plot()
```
---
## Dataset can easily be saved to a new netCDF file
Let's create a new dataset, containing just the average temperature, over time and level.
```
# Create a subset of the dataset, averaged over the time and level axes.
# NOTE: the dims must be passed as a single list; calling .mean('time','level')
# would bind 'level' to the second positional parameter (skipna) instead of
# averaging over it. This matches the usage of .mean(['time','level']) above.
ds_temp = GETM[['temp']].mean(['time', 'level'])
# output to netcdf
ds_temp.to_netcdf('../data/temp_avg_level_time.nc')
```
Note the extra brackets used to extract the temperature variable:
```
# When variable names are passed in a list, this produces a new Dataset:
print(type( GETM[['temp']]) )
# Passing just a string extracts the variable into a DataArray
print(type( GETM['temp']) )
```
---
# Exercise 2
From our GETM dataset again, we want to investigate the variability of temperature with depth across the seabed.
a) Extract bathymetry from the dataset.
b) Extract temperature at the seabed (level index = 0), and average over time.
c) Produce a scatter plot of depth (bathymetry) vs. seabed temperature.
```
# Your code here:
# a)
# b)
# c)
```
---
# Bonus Exercise
For those who have finished the exercises above, and want more...
Earlier we mentioned that you can also access remote data sets online. e.g.
```
remote_data = xr.open_dataset(
'https://oceandata.sci.gsfc.nasa.gov:443/opendap/MODISA/L3SMI/2020/176/A2020176.L3m_DAY_CHL_chlor_a_9km.nc')
```
From this remote dataset:
a) Extract the chlorophyll concentration, covering just the North Sea (or another region of your choice).
b) Plot a map to show your result - check you've made a subset of the right region!
```
# Your code here:
# a)
# Hint: You will need to extract the relevant variable over a range of latitude and longitude values.
# * Find the relevant variable name to extract from the data set.
# * Extract coordinate values if needed?
# * Subset over your chosen range of latitude and longitude.
# b)
# Note: data is only downloaded when you make the plot
```
---
---
---
# More on the netCDF file format
## History
* netCDF is a collection of formats for storing arrays
* popular scientific file format for gridded datasets
* netCDF classic
* more widespread
* 2 GB file limit (if you don't use the unlimited dimension)
* often preferred for distributing products
* netCDF 64 bit offset
* supports larger files
* NetCDF4
* based on HDF5
* compression
* multiple unlimited variables
* new types inc. user defined
* hierarchical groups
* Developed by Unidata-UCAR with the aim of storing climate model data (3D+time)
* Auxiliary information about each variable can be added
* Readable text equivalent called CDL (use ncdump/ncgen)
* Can be used with Climate and Forecast (CF) data convention
http://cfconventions.org/
## Data model
* Dimensions: describe the axes of the data arrays.
* Variables: N-dimensional arrays of data.
* Attributes: annotate variables or files with small notes or supplementary metadata.
Example for an ocean model dataset:
* Dimensions
* lat
* lon
* depth
* time
* Variable
* Temperature
* Salinity
* Global Attributes
* Geographic grid type
* History
* Variable attributes (Temperature)
* Long_name: "sea water temperature"
* Missing_value: 1.09009E36
* Units: deg. C
* range: -2:50
## Tools for working with netCDF files
### Readable by many software tools
NetCDF can be read by many different software tools e.g. ArcGIS, QGIS, Surfer, Ferret, Paraview etc.
It can also be read by many different languages (one of the key motivations behind its use).
### C and Fortran libraries
These are used to underpin interfaces to other languages such as python (e.g. python package netCDF4)
Include in these are ncdump/ncgen software, used to convert to and from human-readable format.
### nco tools
An *extremely useful* set of tools, to process netCDF files directly from the command line.
For example, files can be subset, concatenated, averaged, or variables processed with simple arithmetic.
Full documentation, showing the wide range of functionality, can be found here: http://nco.sourceforge.net/nco.html.
### cdo tools
Another powerful command line tool: https://code.mpimet.mpg.de/projects/cdo/
## Quick viewers?
To view the file contents quickly and easily (without reading into python or elsewhere), there are a few different options.
e.g. ncdump, ncview, panoply, pyncview, etc.
### ncdump
This program should be available through your python installation, and is a useful way to quickly check the contents or attributes of a netCDF file.
You can peek inside your netcdf file from the prompt window (or terminal) using `ncdump -h <filename>`
Be sure to use the `-h` option, otherwise it will literally dump the entire contents of your file into the screen in front of you (not what you normally want!).
e.g.:
```
$ ncdump -h data/cefas_GETM_nwes.nc4
netcdf cefas_GETM_nwes {
dimensions:
latc = 360 ;
lonc = 396 ;
time = UNLIMITED ; // (6 currently)
level = 5 ;
variables:
double bathymetry(latc, lonc) ;
bathymetry:units = "m" ;
bathymetry:long_name = "bathymetry" ;
bathymetry:valid_range = -5., 4000. ;
bathymetry:_FillValue = -10. ;
bathymetry:missing_value = -10. ;
float h(time, level, latc, lonc) ;
h:units = "m" ;
h:long_name = "layer thickness" ;
h:_FillValue = -9999.f ;
h:missing_value = -9999.f ;
double latc(latc) ;
latc:units = "degrees_north" ;
double level(level) ;
level:units = "level" ;
double lonc(lonc) ;
lonc:units = "degrees_east" ;
float temp(time, level, latc, lonc) ;
temp:units = "degC" ;
temp:long_name = "temperature" ;
temp:valid_range = -2.f, 40.f ;
temp:_FillValue = -9999.f ;
temp:missing_value = -9999.f ;
double time(time) ;
time:long_name = "time" ;
time:units = "seconds since 1996-01-01 00:00:00" ;
...
```
# References
* xarray [docs](http://xarray.pydata.org/en/stable/)
* netCDF4 [docs](https://unidata.github.io/netcdf4-python/netCDF4/index.html)
* Stephan Hoyer's [ECMWF talk](https://docs.google.com/presentation/d/16CMY3g_OYr6fQplUZIDqVtG-SKZqsG8Ckwoj2oOqepU/edit#slide=id.g2b68f9254d_1_27)
| github_jupyter |
### Setup
```
%load_ext autoreload
%autoreload 2
# Set file paths
import os
import os.path as op
from pathlib import Path
import Buzznauts as buzz
buzz_root = Path(buzz.__path__[0]).parent.absolute()
# Data paths
fmri_dir = op.join(buzz_root, "data", "fmri")
stimuli = op.join(buzz_root, "data", "stimuli")
videos_dir = op.join(stimuli, "videos")
frames_dir = op.join(stimuli, "frames")
annotation_file = op.join(frames_dir, 'annotations.txt')
pretrained_dir = op.join(buzz_root, "data", "pretrained")
pretrained_vaegan = op.join(pretrained_dir, "vaegan_enc_weights.pickle")
# Visualizations path
viz_dir = op.join(buzz_root, "visualizations")
viz_vae_dir = op.join(viz_dir, "vae")
# Model path
models_dir = op.join(buzz_root, "models")
model_vae_dir = op.join(models_dir, "vae")
# Results paths
results_dir = op.join(buzz_root, "results", "vae")
# Import interactive tools
from tqdm.notebook import tqdm, trange
# Import pytorch
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, SubsetRandomSampler
import torchvision
from torchvision import datasets, transforms
from Buzznauts.utils import set_seed, set_device, seed_worker, set_generator
from Buzznauts.data.utils import plot_video_frames
from Buzznauts.data.videodataframe import VideoFrameDataset, ImglistToTensor, FrameDataset
```
## Model Architecture
```
class ConvVarAutoEncoder(nn.Module):
    """Convolutional variational autoencoder.

    Encoder: learnable input bias, 5 conv layers with ELU activations, then
    a dense layer producing phi = [mu (K values), log sigma (1 shared value)].
    Decoder: dense upsampling, 5 transposed-conv layers (ELU, except the
    last), then a learnable output bias.

    Parameters
    ----------
    K : int
        Dimensionality of the latent space.
    data_shape : tuple of int, optional
        Input shape as (channels, height, width).
    num_filters : sequence of int, optional
        Output channels of each of the 5 conv layers.
    filter_size : int, optional
        Square kernel size shared by every (de)conv layer.
    """

    def __init__(self, K, data_shape=(3, 128, 128), num_filters=(192, 256, 384, 512, 768), filter_size=3):
        super(ConvVarAutoEncoder, self).__init__()
        ## 5 Conv Layers
        # Feature-map shape after the conv stack, and its flattened size;
        # both are needed to size the dense layers on either side.
        # (Removed: unused local `filter_reduction` and a duplicate
        # flattened-size computation in the original.)
        self.shape_after_conv = calc_output_size(data_shape, filter_size, num_filters)
        self.flat_shape = self.shape_after_conv[0] * self.shape_after_conv[1] * self.shape_after_conv[2]

        # ENCODER
        self.q_bias = BiasLayer(data_shape)
        self.q_conv_1 = nn.Conv2d(data_shape[0], num_filters[0], filter_size)
        self.q_conv_2 = nn.Conv2d(num_filters[0], num_filters[1], filter_size)
        self.q_conv_3 = nn.Conv2d(num_filters[1], num_filters[2], filter_size)
        self.q_conv_4 = nn.Conv2d(num_filters[2], num_filters[3], filter_size)
        self.q_conv_5 = nn.Conv2d(num_filters[3], num_filters[4], filter_size)
        self.q_flatten = nn.Flatten()
        # K latent means plus one shared log standard deviation
        self.q_fc_phi = nn.Linear(self.flat_shape, K+1)

        # DECODER
        self.p_fc_upsample = nn.Linear(K, self.flat_shape)
        self.p_unflatten = nn.Unflatten(-1, self.shape_after_conv)
        self.p_deconv_1 = nn.ConvTranspose2d(num_filters[4], num_filters[3], filter_size)
        self.p_deconv_2 = nn.ConvTranspose2d(num_filters[3], num_filters[2], filter_size)
        self.p_deconv_3 = nn.ConvTranspose2d(num_filters[2], num_filters[1], filter_size)
        self.p_deconv_4 = nn.ConvTranspose2d(num_filters[1], num_filters[0], filter_size)
        self.p_deconv_5 = nn.ConvTranspose2d(num_filters[0], data_shape[0], filter_size)
        self.p_bias = BiasLayer(data_shape)

        # Define a special extra parameter to learn scalar sig_x for all pixels
        self.log_sig_x = nn.Parameter(torch.zeros(()))

    def infer(self, x):
        """Map (batch of) x to (batch of) phi which can then be passed to
        rsample to get z.
        """
        s = self.q_bias(x)
        s = F.elu(self.q_conv_1(s))
        s = F.elu(self.q_conv_2(s))
        s = F.elu(self.q_conv_3(s))
        s = F.elu(self.q_conv_4(s))
        s = F.elu(self.q_conv_5(s))
        flat_s = s.view(s.size()[0], -1)
        phi = self.q_fc_phi(flat_s)
        return phi

    def generate(self, zs):
        """Map [b,n,k] sized samples of z to [b,n,p] sized images."""
        # Note that for the purposes of passing through the generator, we need
        # to reshape zs to be size [b*n,k]
        b, n, k = zs.size()
        s = zs.view(b*n, -1)
        s = F.elu(self.p_fc_upsample(s)).view((b*n,) + self.shape_after_conv)
        s = F.elu(self.p_deconv_1(s))
        s = F.elu(self.p_deconv_2(s))
        s = F.elu(self.p_deconv_3(s))
        s = F.elu(self.p_deconv_4(s))
        s = self.p_deconv_5(s)
        s = self.p_bias(s)
        mu_xs = s.view(b, n, -1)
        return mu_xs

    def decode(self, zs):
        """Decode latent samples; included for compatibility with conv-AE code."""
        return self.generate(zs.unsqueeze(0))

    def forward(self, x):
        """Reconstruct x via a single sampled z (classic-autoencoder style)."""
        # VAE.forward() is not used for training, but we'll treat it like a
        # classic autoencoder by taking a single sample of z ~ q
        phi = self.infer(x)
        zs = rsample(phi, 1)
        return self.generate(zs).view(x.size())

    def elbo(self, x, n=1):
        """Run input end to end through the VAE and compute the ELBO using n
        samples of z.
        """
        phi = self.infer(x)
        zs = rsample(phi, n)
        mu_xs = self.generate(zs)
        return log_p_x(x, mu_xs, self.log_sig_x.exp()) - kl_q_p(zs, phi)

    def load_my_state_dict(self, state_dict):
        """Copy into this model only those entries of `state_dict` whose
        names match an existing parameter/buffer; extra keys are ignored."""
        curr_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in curr_state:
                continue
            if isinstance(param, torch.Tensor):
                param = param.data
            curr_state[name].copy_(param)
class BiasLayer(nn.Module):
    """Adds a learnable per-element bias (initialized to zero) to its input."""

    def __init__(self, shape):
        super(BiasLayer, self).__init__()
        # Trainable offset with the same shape as the expected input.
        self.bias = nn.Parameter(torch.zeros(shape), requires_grad=True)

    def forward(self, x):
        return self.bias + x
def calc_output_size(input_size, kernel_size, kchannels, padding=0, stride=1):
    """Return the (channels, height, width) of the feature map produced by
    applying one conv layer per entry of `kchannels`, each using the given
    square kernel, padding and stride."""
    channels, height, width = input_size
    for out_channels in kchannels:
        height = int((height + 2 * padding - kernel_size) / stride + 1)
        width = int((width + 2 * padding - kernel_size) / stride + 1)
        channels = out_channels
    return (channels, height, width)
```
### ELBO loss helper functions
```
def kl_q_p(zs, phi):
    """Monte-Carlo estimate of KL(q||p) from [b,n,k] samples zs drawn from q.

    phi must be size [b,k+1]: the first k columns are mu_q and the last is
    log(sigma_q). The prior p is a standard normal (mu_p=0, sigma_p=1), so
    log p(z) simplifies to -z**2/2 (up to a constant shared with log q).
    """
    batch, n_samples, k = zs.size()
    mu_q = phi[:, :-1].view(batch, 1, k)
    log_sig_q = phi[:, -1]
    var_q = log_sig_q.exp().view(batch, 1, 1) ** 2
    log_prior = -0.5 * zs ** 2
    log_posterior = -0.5 * (zs - mu_q) ** 2 / var_q - log_sig_q.view(batch, 1, -1)
    # Both log terms are [b,n,k]: sum over the latent dim [k], then take the
    # expectation as the mean over batch and samples [b,n].
    return (log_posterior - log_prior).sum(dim=2).mean(dim=(0, 1))
def log_p_x(x, mu_xs, sig_x):
    """Pixel-wise log Gaussian probability of x under N(mu_xs, sig_x**2).

    x is [batch, ...] and mu_xs is [batch, n, ...] reconstructions.
    Sum over pixel dimensions, but mean over batch and samples.
    """
    batch, n_samples = mu_xs.size()[:2]
    # Flatten out the pixels and insert a singleton sample axis so that x
    # broadcasts against the n reconstructions held in mu_xs.
    flat_x = x.reshape(batch, 1, -1)
    sq_err = (flat_x - mu_xs.view(batch, n_samples, -1)) ** 2 / (2 * sig_x ** 2)
    # sq_err is [b,n,p]: the log prob is by definition a sum over pixels [p];
    # the expected value is a mean over [n]; different batch sizes are
    # handled by the mean over [b].
    return -(sq_err + torch.log(sig_x)).sum(dim=2).mean(dim=(0, 1))
def rsample(phi, n_samples):
    """Draw z ~ q(z; phi) via the reparameterization trick.

    Given phi of shape [b, K+1] — the first K columns are the mean of q and
    phi[:,-1] is the log standard deviation — the output has shape
    [b, n_samples, K].
    """
    batch, k_plus_1 = phi.size()
    k = k_plus_1 - 1
    mu = phi[:, :-1].view(batch, 1, k)
    sig = phi[:, -1].exp().view(batch, 1, 1)
    noise = torch.randn(batch, n_samples, k, device=phi.device)
    return noise * sig + mu
```
### Model Weights
```
import pickle
def load_vaegan_weights(model, pretrained_path):
    """Load pretrained VAE-GAN encoder weights and adapt them to `model`.

    The pickle at `pretrained_path` holds TensorFlow-style weights. The i-th
    pickled entry (in insertion order) is renamed to the i-th 'q_conv*' key
    of `model.state_dict()`, and every 4-D conv kernel is permuted from the
    TF layout [filter_height, filter_width, in_channels, out_channels] to
    the PyTorch layout (out_channels, in_channels, kernel_h, kernel_w),
    i.e. permute [3, 2, 0, 1].

    Parameters
    ----------
    model : torch.nn.Module
        Model whose 'q_conv*' state-dict keys define the target names.
    pretrained_path : str or Path
        Path to the pickled weight dictionary (trusted local file).

    Returns
    -------
    dict
        Mapping of model key names to torch.Tensor weights.
    """
    # Context manager guarantees the file handle is closed
    # (the original opened it and never closed it).
    with open(pretrained_path, 'rb') as pretrained_file:
        pretrained = pickle.load(pretrained_file)
    # Keys in the pickle, and the matching encoder keys of our model
    old_keynames = list(pretrained.keys())
    new_keynames = [key for key in model.state_dict() if key.startswith('q_conv')]
    # Rename the pretrained entries, in order, to match our model's keys
    for i, old_key in enumerate(old_keynames):
        pretrained[new_keynames[i]] = pretrained.pop(old_key)
    # Convert to torch tensors; permute 4-D conv kernels from TF to PyTorch
    for key, value in pretrained.items():
        new_val = torch.tensor(value)
        if len(value.shape) == 4:
            new_val = new_val.permute(3, 2, 0, 1)
        pretrained[key] = new_val
    return pretrained
def reset_weights(model):
    """Re-initialize every direct child layer that supports it.

    Used between cross-validation folds to avoid weight leakage.

    Parameters
    ----------
    model: torch.nn.Module
    """
    for child in model.children():
        reset_fn = getattr(child, 'reset_parameters', None)
        if reset_fn is not None:
            reset_fn()
```
### Main
```
from sklearn.model_selection import KFold

# Configuration
# Set seed to the random generators to ensure reproducibility
seed = set_seed()
# Set computational device (cuda if GPU is available, else cpu)
device = set_device()
# Number of folds for cross-validation
k_folds = 5
# Define the K-fold Cross Validator
kfold = KFold(n_splits=k_folds, shuffle=True)
# Number of epochs
num_epochs = 10
# Batch size
batch_size = 32
# Size of the VAE's latent space
K_VAE = 128

#---------------
# Create Dataset
#---------------
# Number of splits in each video
num_segments = 5
# Number of frames per split
frames_per_segment = 6
# Total number of training frames per video
total_frames = num_segments * frames_per_segment
# Frame size (square frames, frame_size x frame_size pixels)
frame_size = 32
width = frame_size
height = frame_size
# Num of channels
num_channels = 3
# Data shape
data_shape = (num_channels, frame_size, frame_size)
# Input size
input_size = (batch_size, num_channels, frame_size, frame_size)

# Tensorize: convert PIL images to tensors and resize each frame to frame_size
tensorize = transforms.Compose([
    ImglistToTensor(),  # list of PIL images to (FRAMES x CHANNELS x HEIGHT x WIDTH) tensor
    transforms.Resize(frame_size),  # image batch, resize smaller edge to frame_size
])

# Preprocess: center crop to frame_size x frame_size, normalize with
# ImageNet statistics, and apply random affine transforms and horizontal
# flips to each frame (data augmentation)
preprocess = transforms.Compose([
    transforms.CenterCrop((frame_size, frame_size)),  # image batch, center crop to frame_size square
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    transforms.RandomAffine(degrees=15, translate=(0.05, 0.05), scale=(0.78125, 1.0)),
    transforms.RandomHorizontalFlip(p=0.5)
])

# Videoframe dataset: each sample is of size (FRAMES X CHANNELS X HEIGHT X WIDTH)
videoframe_dataset = VideoFrameDataset(
    root_path=frames_dir,
    annotationfile_path=annotation_file,
    num_segments=num_segments,
    frames_per_segment=frames_per_segment,
    imagefile_template='img_{:05d}.jpg',
    transform=tensorize,
    random_shift=False,
    test_mode=False
)

# Frame dataset: each sample is of size (CHANNELS X HEIGHT X WIDTH)
frame_dataset = FrameDataset(
    videoframedataset=videoframe_dataset,
    transform=preprocess
)

# Model summary
from torchinfo import summary
convVAE = ConvVarAutoEncoder(data_shape=data_shape, K=K_VAE)
summary(convVAE, input_size=input_size)
```
#### Train!
```
# Save loss values during training for each fold
loss_train = {f'Fold_{i}': [] for i in range(1, k_folds+1)}
# Save loss during validation for each fold
loss_val = {f'Fold_{i}': [] for i in range(1, k_folds+1)}
# Save overall loss during validation for each fold
loss_val_overall = {f'Fold_{i}': None for i in range(1, k_folds+1)}

# K-fold Cross Validation model evaluation
for fold, (train_idx, val_idx) in enumerate(kfold.split(frame_dataset)):
    print(f'FOLD {fold+1}')
    print('-------------------------')

    # Sample elements randomly from a given list of idx, no replacement
    train_subsampler = SubsetRandomSampler(train_idx)
    val_subsampler = SubsetRandomSampler(val_idx)

    # Define data loaders for training and testing data in this fold
    train_loader = DataLoader(
        dataset=frame_dataset,
        batch_size=batch_size,
        sampler=train_subsampler,
        num_workers=2,
        pin_memory=True,
        worker_init_fn=seed_worker,
        generator=set_generator())
    val_loader = DataLoader(
        dataset=frame_dataset,
        batch_size=batch_size,
        sampler=val_subsampler,
        num_workers=2,
        pin_memory=True,
        worker_init_fn=seed_worker,
        generator=set_generator())

    # Instantiate network, re-initialize weights so no state leaks between
    # folds, then load the pretrained VAE-GAN encoder weights
    convVAE = ConvVarAutoEncoder(data_shape=data_shape, K=K_VAE)
    convVAE.apply(reset_weights)
    pretrained = load_vaegan_weights(convVAE, pretrained_vaegan)
    convVAE.load_my_state_dict(pretrained)

    # Freeze the pretrained encoder conv layers (parameter indices 2-5)
    freeze_idx = [2, 3, 4, 5]
    for idx, param in enumerate(convVAE.parameters()):
        if idx in freeze_idx:
            param.requires_grad = False

    # Initialize optimizer over the trainable parameters only
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, convVAE.parameters()),
                                 lr=3e-4, weight_decay=0)

    convVAE.to(device)
    convVAE.train()

    # Run the training loop for defined number of epochs
    for epoch in trange(num_epochs, desc='Epochs'):
        # Set current loss value
        current_loss = 0.0
        # Iterate over the DataLoader for training data.
        # len(train_loader) is already the number of batches, so it is the
        # correct tqdm total (the original divided it by batch_size again).
        for i, (frame, label) in enumerate(tqdm(train_loader,
                                                total=len(train_loader),
                                                desc='Batches', leave=False)):
            frame = frame.to(device)
            # Zero the gradients
            optimizer.zero_grad()
            # Loss is the negative ELBO (we minimize -ELBO to maximize ELBO)
            loss = -convVAE.elbo(frame)
            # Perform backward pass
            loss.backward()
            # Perform optimization
            optimizer.step()
            # Saving the ELBO value for this batch
            loss_train[f'Fold_{fold+1}'].append(-loss.item())
            # Print statistics every 100 mini-batches
            current_loss += loss.item()
            if i % 100 == 99:
                print('Loss after mini-batch %5d: %.3f' %
                      (i + 1, current_loss / 100))
                current_loss = 0.0

    # Evaluation for this fold
    convVAE.eval()
    with torch.no_grad():
        # Iterate over the DataLoader for validation data
        for i, (frame, label) in enumerate(tqdm(val_loader,
                                                total=len(val_loader),
                                                desc='Batches', leave=False)):
            # Move the batch to the model's device (the original skipped
            # this, which fails when training on GPU)
            frame = frame.to(device)
            # Compute loss
            loss = -convVAE.elbo(frame)
            # Saving the ELBO value for this batch
            loss_val[f'Fold_{fold+1}'].append(-loss.item())

    # Print overall fold loss (the original referenced an undefined
    # `results` dict here, and the loss is not a percentage)
    loss_val_overall[f'Fold_{fold+1}'] = sum(loss_val[f'Fold_{fold+1}'])
    print('Total loss for fold %d: %.3f' % (fold + 1, loss_val_overall[f'Fold_{fold+1}']))
    print('--------------------------------')

# Print fold results
print(f'K-FOLD CROSS VALIDATION RESULTS FOR {k_folds} FOLDS')
print('----------------------------------------------------')
overall_sum = 0.0
for key, value in loss_val_overall.items():
    # `key` is already the string 'Fold_<n>' (the original computed key+1,
    # which raises TypeError on a str key)
    print(f'{key}: {value}')
    overall_sum += value
print(f'Average: {overall_sum / len(loss_val_overall)}')
```
| github_jupyter |
```
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
import matplotlib.pyplot as plt
from matplotlib import style
import seaborn as sns
import csv
# Load the MovieLens users table (pipe-separated, no header row)
df_user = pd.read_csv("u.user", engine='python', sep='|', names=["userID", "Age", "Gender", "occupation", "Zip-Code"])
df_user.head()

# Load the movies table, including one-hot encoded genre columns
df_movie = pd.read_csv("u.item", engine='python', sep='|', names=["movieID", "Movie title", "release date", "video release date",
                                                                 "IMDb URL", "unknown", "Action", "Adventure", "Animation",
                                                                 "Children's", "Comedy", "Crime", "Documentary", "Drama", "Fantasy",
                                                                 "Film-Noir", "Horror", "Musical", "Mystery", "Romance", "Sci-Fi",
                                                                 "Thriller", "War", "Western"])
df_movie.head()

# Load the ratings table (tab-separated)
df_ratings = pd.read_csv("u.data", engine='python', sep='\t', names=["userID", "movieID", "Rating", "Timestamp"])
df_ratings.head()

# Merging Movies and Ratings
df_merged1 = df_movie.merge(df_ratings, how='outer')
df_merged1.head()
# Merging Users and Ratings
df_merged2 = df_user.merge(df_ratings, how='inner')
df_merged2.head()
# Merging Users/Ratings/Movies
df_merged3 = df_merged1.merge(df_merged2, how='inner')
df_merged3.head()

df_merged3 = df_merged3.fillna(0)
# Cast the ID and rating columns to int. FIX: the original assigned
# `df_merged3.UserID = ...`, which silently creates a new *attribute*
# called 'UserID' instead of updating the 'userID' column (the column name
# differs in case); always use bracket indexing when setting columns.
df_merged3['userID'] = df_merged3['userID'].astype(int)
df_merged3['Rating'] = df_merged3['Rating'].astype(int)
df_merged3.shape
df_merged3.sort_values(by=['userID'], ascending=True)

# Rearranging merged3 columns into suitable format
master_data = df_merged3[['userID', 'movieID', 'Movie title', 'Rating', 'unknown', 'Action', 'Adventure', 'Animation', "Children's", 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western', 'Zip-Code', 'Gender', 'Age', 'occupation', 'Timestamp']]
master_data.head()
master_data.sort_values(by=['userID'], ascending=True)

# PERFORMING EDAs
# Age distribution, binned into common demographic groups
bins_list = [1, 18, 25, 35, 45, 50, 56]
master_data.hist(column='Age', bins=bins_list)
# Checking ratings on Jurassic Park
master_data[master_data['movieID'] == 82].hist(column='Rating')
# Avg rating of Jurassic park
master_data[master_data['movieID'] == 82].Rating.mean()

# Predictive analysis
# Analysis factors affecting movie rating
master_data.head()
# Encode gender as numeric (F -> 0, M -> 1)
master_data['Gender'].replace(['F', 'M'], [0, 1], inplace=True)
# Keep movieID, Movie title, Rating and the user demographics columns
md_small = master_data.iloc[:, [1, 2, 3, 23, 24, 25, 26]]
md_small.head()
# Convert as many dtypes into int to get better coef insights
md_small.dtypes
# Finding correlation coefficients against Rating
md_small[md_small.columns[1:]].corr()['Rating'][:]

# Genre indicator columns
temp_genre = master_data.iloc[:, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]]
temp_genre.head()
master_features = pd.merge(md_small, temp_genre, left_index=True, right_index=True)
master_features.head()
master_features.dtypes

# Preparing data for logistic regression
# Drop all object-dtype columns
X_feature = md_small.drop(['Zip-Code', 'Movie title', 'occupation'], axis=1)
X_feature.head()
# Preparing to train on first 40 movies
X_feature_small = X_feature[X_feature['movieID'] < 40]
X_feature_small_trimmed = X_feature_small.drop(['movieID', 'Rating'], axis=1)
X_feature_small_trimmed.head()
Y_target = master_features['Rating'][master_features['movieID'] < 40]

x_train, x_test, y_train, y_test = train_test_split(X_feature_small_trimmed, Y_target, random_state=1)
logreg = LogisticRegression(max_iter=100000)
logreg.fit(x_train, y_train)
y_pred = logreg.predict(x_test)
# Accuracy on the held-out test split
metrics.accuracy_score(y_test, y_pred)
print('actual: ', y_test.values[0:30])
print('predicted: ', y_pred[0:30])
```
| github_jupyter |
# Computer vision data
```
%matplotlib inline
from fastai.gen_doc.nbdoc import *
from fastai import *
from fastai.vision import *
```
This module contains the classes that define datasets handling [`Image`](/vision.image.html#Image) objects and their transformations. As usual, we'll start with a quick overview, before we get into the detailed API docs.
## Quickly get your data ready for training
To get you started as easily as possible, the fastai library provides two helper functions to create a [`DataBunch`](/basic_data.html#DataBunch) object that you can directly use for training a classifier. To demonstrate them you'll first need to download and untar the file by executing the following cell. This will create a data folder containing an MNIST subset in `data/mnist_sample`.
```
path = untar_data(URLs.MNIST_SAMPLE); path
```
There are a number of ways to create an [`ImageDataBunch`](/vision.data.html#ImageDataBunch). One common approach is to use *Imagenet-style folders* (see a ways down the page below for details) with [`ImageDataBunch.from_folder`](/vision.data.html#ImageDataBunch.from_folder):
```
tfms = get_transforms(do_flip=False)
data = ImageDataBunch.from_folder(path, ds_tfms=tfms, size=24)
```
Here the datasets will be automatically created in the structure of *Imagenet-style folders*. The parameters specified:
- the transforms to apply to the images in `ds_tfms` (here with `do_flip`=False because we don't want to flip numbers),
- the target `size` of our pictures (here 24).
As with all [`DataBunch`](/basic_data.html#DataBunch) usage, a `train_dl` and a `valid_dl` are created that are of the type PyTorch [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader).
If you want to have a look at a few images inside a batch, you can use [`ImageDataBunch.show_batch`](/vision.data.html#ImageDataBunch.show_batch). The `rows` argument is the number of rows and columns to display.
```
data.show_batch(rows=3, figsize=(5,5))
```
The second way to define the data for a classifier requires a structure like this:
```
path\
train\
test\
labels.csv
```
where the labels.csv file defines the label(s) of each image in the training set. This is the format you will need to use when each image can have multiple labels. It also works with single labels:
```
pd.read_csv(path/'labels.csv').head()
```
You can then use [`ImageDataBunch.from_csv`](/vision.data.html#ImageDataBunch.from_csv):
```
data = ImageDataBunch.from_csv(path, ds_tfms=tfms, size=28)
data.show_batch(rows=3, figsize=(5,5))
```
An example of multiclassification can be downloaded with the following cell. It's a sample of the [planet dataset](https://www.google.com/search?q=kaggle+planet&rlz=1C1CHBF_enFR786FR786&oq=kaggle+planet&aqs=chrome..69i57j0.1563j0j7&sourceid=chrome&ie=UTF-8).
```
planet = untar_data(URLs.PLANET_SAMPLE)
```
If we open the labels file, we see that each image has one or more tags, separated by a space.
```
df =pd.read_csv(planet/'labels.csv')
df.head()
data = ImageDataBunch.from_csv(planet, folder='train', size=128, suffix='.jpg', sep=' ',
ds_tfms=get_transforms(flip_vert=True, max_lighting=0.1, max_zoom=1.05, max_warp=0.))
```
The `show_batch`method will then print all the labels that correspond to each image.
```
data.show_batch(rows=3, figsize=(10,8), ds_type=DatasetType.Valid)
```
You can find more ways to build an [`ImageDataBunch`](/vision.data.html#ImageDataBunch) without the factory methods in [`data_block`](/data_block.html#data_block).
```
show_doc(ImageDataBunch, doc_string=False)
```
### Factory methods
Normally we'll use one of the convenience wrappers below. However, these wrappers all accept a `kwargs` that is passed to the general [`DataBunch.create`](/basic_data.html#DataBunch.create) method (like `bs`, `num_workers`...)
If you quickly want to get a [`ImageDataBunch`](/vision.data.html#ImageDataBunch) and train a model, you should process your data to have it in one of the formats the following functions handle.
```
show_doc(ImageDataBunch.from_folder)
```
"*Imagenet-style*" datasets look something like this (note that the test folder is optional):
```
path\
train\
clas1\
clas2\
...
valid\
clas1\
clas2\
...
test\
```
For example:
```
data = ImageDataBunch.from_folder(path, ds_tfms=tfms, size=24)
```
Note that this (and all factory methods in this section) pass any `kwargs` to [`ImageDataBunch.create`](/vision.data.html#ImageDataBunch.create).
```
show_doc(ImageDataBunch.from_csv)
```
Create [`ImageDataBunch`](/vision.data.html#ImageDataBunch) from `path` by splitting the data in `folder` and labelled in a file `csv_labels` between a training and validation set. Use `valid_pct` to indicate the percentage of the total images for the validation set. An optional `test` folder contains unlabelled data and `suffix` contains an optional suffix to add to the filenames in `csv_labels` (such as '.jpg').
For example:
```
data = ImageDataBunch.from_csv(path, ds_tfms=tfms, size=24);
show_doc(ImageDataBunch.from_df)
```
Same as [`ImageDataBunch.from_csv`](/vision.data.html#ImageDataBunch.from_csv), but passing in a `DataFrame` instead of a csv file. E.g.:
```
df = pd.read_csv(path/'labels.csv', header='infer')
df.head()
data = ImageDataBunch.from_df(path, df, ds_tfms=tfms, size=24)
```
Different datasets are labeled in many different ways. The following methods can help extract the labels from the dataset in a wide variety of situations. The way they are built in fastai is constructive: there are methods which do a lot for you but apply in specific circumstances and there are methods which do less for you but give you more flexibility.
In this case the hierarchy is:
1. [`ImageDataBunch.from_name_re`](/vision.data.html#ImageDataBunch.from_name_re): Gets the labels from the filenames using a regular expression
2. [`ImageDataBunch.from_name_func`](/vision.data.html#ImageDataBunch.from_name_func): Gets the labels from the filenames using any function
3. [`ImageDataBunch.from_lists`](/vision.data.html#ImageDataBunch.from_lists): Labels need to be provided as an input in a list
```
show_doc(ImageDataBunch.from_name_re)
```
Creates an [`ImageDataBunch`](/vision.data.html#ImageDataBunch) from `fnames`, calling a regular expression (containing one *re group*) on the file names to get the labels, putting aside `valid_pct` for the validation. In the same way as [`ImageDataBunch.from_csv`](/vision.data.html#ImageDataBunch.from_csv), an optional `test` folder contains unlabelled data.
Our previously created dataframe contains the labels in the filenames so we can leverage it to test this new method. [`ImageDataBunch.from_name_re`](/vision.data.html#ImageDataBunch.from_name_re) needs the exact path of each file so we will append the data path to each filename before creating our [`ImageDataBunch`](/vision.data.html#ImageDataBunch) object.
```
fn_paths = [path/name for name in df['name']]; fn_paths[:2]
pat = r"/(\d)/\d+\.png$"
data = ImageDataBunch.from_name_re(path, fn_paths, pat=pat, ds_tfms=tfms, size=24)
data.classes
show_doc(ImageDataBunch.from_name_func)
```
Works in the same way as [`ImageDataBunch.from_name_re`](/vision.data.html#ImageDataBunch.from_name_re), but instead of a regular expression it expects a function that will determine how to extract the labels from the filenames. (Note that `from_name_re` uses this function in its implementation).
To test it we could build a function with our previous regex. Let's try another, similar approach to show that the labels can be obtained in a different way.
```
def get_labels(file_path):
    "Label a file as '3' when its path contains a '/3/' directory component, else '7'."
    path_text = str(file_path)
    if '/3/' in path_text:
        return '3'
    return '7'
data = ImageDataBunch.from_name_func(path, fn_paths, label_func=get_labels, ds_tfms=tfms, size=24)
data.classes
show_doc(ImageDataBunch.from_lists)
```
The most flexible factory function; pass in a list of `labels` that correspond to each of the filenames in `fnames`.
To show an example we have to build the labels list outside our [`ImageDataBunch`](/vision.data.html#ImageDataBunch) object and give it as an argument when we call `from_lists`. Let's use our previously created function to create our labels list.
```
labels_ls = list(map(get_labels, fn_paths))
data = ImageDataBunch.from_lists(path, fn_paths, labels=labels_ls, ds_tfms=tfms, size=24)
data.classes
show_doc(ImageDataBunch.create_from_ll)
```
Create an [`ImageDataBunch`](/vision.data.html#ImageDataBunch) from `dss` with `bs`, `num_workers`, `collate_fn` and a potential `test` folder. `ds_tfms` is a tuple of two lists of transforms to be applied to the training and the validation (plus test optionally) set. `tfms` are the transforms to apply to the [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader). The `size` and the `kwargs` are passed to the transforms for data augmentation.
### Methods
```
show_doc(ImageDataBunch.show_batch)
```
Create a `rows` by `rows` grid of images from dataset `ds_type` for a `figsize` figure. This function works for all type of computer vision data (see [`data_block`](/data_block.html#data_block) for more examples).
Once you have your [`ImageDataBunch`](/vision.data.html#ImageDataBunch), you can have a quick look at your data by using this:
```
data.show_batch(rows=3, figsize=(6,6))
```
In the next two methods we will use a new dataset, CIFAR. This is because the second method will get the statistics for our dataset and we want to be able to show different statistics per channel. If we were to use MNIST, these statistics would be the same for every channel. White pixels are [255,255,255] and black pixels are [0,0,0] (or in normalized form [1,1,1] and [0,0,0]) so there is no variance between channels.
```
path = untar_data(URLs.CIFAR); path
show_doc(channel_view)
data = ImageDataBunch.from_folder(path, ds_tfms=tfms, valid='test', size=24)
def channel_view(x:Tensor)->Tensor:
    "Make channel the first axis of `x` and flatten remaining axes"
    # Swap batch and channel, then collapse everything after the channel axis
    # into one dimension (contiguous() is required before view on the result
    # of transpose).
    by_channel = x.transpose(0, 1).contiguous()
    return by_channel.view(by_channel.shape[0], -1)
```
This function takes a tensor and flattens all dimensions except the channels, which it keeps as the first axis. This function is used to feed [`ImageDataBunch.batch_stats`](/vision.data.html#ImageDataBunch.batch_stats) so that it can get the pixel statistics of a whole batch.
Let's take as an example the dimensions of our CIFAR batches: 128, 3, 24, 24.
```
t = torch.Tensor(128, 3, 24, 24)
t.size()
tensor = channel_view(t)
tensor.size()
show_doc(ImageDataBunch.batch_stats)
```
Gets the statistics of each channel of a batch of data. If no functions are specified, default statistics are mean and standard deviation.
```
data.batch_stats()
show_doc(ImageDataBunch.normalize)
```
Adds the normalize transform to the set of transforms associated with the data. In the fast.ai library we have `imagenet_stats`, `cifar_stats` and `mnist_stats` so we can add normalization easily with any of these datasets. Let's see an example with our dataset of choice: MNIST.
```
data.normalize(cifar_stats)
data.batch_stats()
```
## Data normalization
You may also want to normalize your data, which can be done by using the following functions.
```
show_doc(normalize)
show_doc(denormalize)
show_doc(normalize_funcs, doc_string=False)
```
Create [`normalize`](/vision.data.html#normalize) and [`denormalize`](/vision.data.html#denormalize) functions using `mean` and `std`. `device` will store them on the device specified. `do_y` determines if the target should also be normalized or not.
On MNIST the mean and std are 0.1307 and 0.3081 respectively (looked on Google). If you're using a pretrained model, you'll need to use the normalization that was used to train the model. The imagenet norm and denorm functions are stored as constants inside the library named <code>imagenet_norm</code> and <code>imagenet_denorm</code>. If you're training a model on CIFAR-10, you can also use <code>cifar_norm</code> and <code>cifar_denorm</code>.
You may sometimes see warnings about *clipping input data* when plotting normalized data. That's because even though the data is automatically denormalized when plotting, floating point errors may push some values slightly out of the correct range. You can safely ignore these warnings in this case.
```
data = ImageDataBunch.from_folder(untar_data(URLs.MNIST_SAMPLE),
ds_tfms=tfms, size=24)
data.normalize()
data.show_batch(rows=3, figsize=(6,6))
show_doc(get_annotations)
```
To use this dataset and collate samples into batches, you'll need the following function:
```
show_doc(bb_pad_collate)
```
Finally, to apply transformations to [`Image`](/vision.image.html#Image) in a [`Dataset`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset), we use this last class.
## ItemList specific to vision
The vision application adds a few subclasses of [`ItemList`](/data_block.html#ItemList) specific to images.
```
show_doc(ImageItemList, title_level=3)
```
Create a [`ItemList`](/data_block.html#ItemList) in `path` from filenames in `items`. `create_func` will default to [`open_image`](/vision.image.html#open_image). `label_cls` can be specified for the labels, `xtra` contains any extra information (usually in the form of a dataframe) and `processor` is applied to the [`ItemList`](/data_block.html#ItemList) after splitting and labelling.
```
show_doc(ImageItemList.from_folder)
show_doc(ImageItemList.from_df)
show_doc(get_image_files)
show_doc(ImageItemList.open)
```
Open the image in `fn`. Subclass and overwrite this function if you want to use a custom opening function.
```
show_doc(ImageItemList.show_xys)
show_doc(ImageItemList.show_xyzs)
show_doc(ObjectCategoryList, title_level=3)
show_doc(ObjectItemList, title_level=3)
show_doc(SegmentationItemList, title_level=3)
show_doc(SegmentationLabelList, title_level=3)
show_doc(PointsItemList, title_level=3)
show_doc(ImageImageList, title_level=3)
```
## Building your own dataset
This module also contains a few helper functions to allow you to build you own dataset for image classification.
```
show_doc(download_images)
show_doc(verify_images)
```
It will check whether every image in this folder can be opened and has `n_channels`. If `n_channels` is 3 – it'll try to convert the image to RGB. If `delete=True`, the image will be removed if this fails. If `resume` – it will skip already existent images in `dest`. If `max_size` is specified, the image is resized keeping the same ratio so that both sizes are less than `max_size`, using `interp`. The result is stored in `dest`, `ext` forces an extension type, `img_format` and `kwargs` are passed to PIL.Image.save. Use `max_workers` CPUs.
## Undocumented Methods - Methods moved below this line will intentionally be hidden
```
show_doc(PointsItemList.get)
show_doc(SegmentationLabelList.new)
show_doc(ImageItemList.from_csv)
show_doc(ObjectCategoryList.get)
show_doc(ImageItemList.get)
show_doc(SegmentationLabelList.reconstruct)
show_doc(ImageImageList.show_xys)
show_doc(ImageImageList.show_xyzs)
show_doc(ImageItemList.open)
show_doc(PointsItemList.analyze_pred)
show_doc(SegmentationLabelList.analyze_pred)
show_doc(PointsItemList.reconstruct)
show_doc(SegmentationLabelList.open)
show_doc(ImageItemList.reconstruct)
show_doc(resize_to)
show_doc(ObjectCategoryList.reconstruct)
```
## New Methods - Please document or move to the undocumented section
| github_jupyter |
# ClusterAI 2020
# Ciencia de Datos - Ingenieria Industrial UTN BA
# Curso I5521
# Clase 01: analisis exploratorio de datos con datos de Google Play
#### Elaborado por: Nicolás Aguirre
```
from google.colab import drive
drive.mount('/gdrive')
DRIVE_FOLDER = 'ClusterAI2020/'
CLASS_FOLDER = 'clase_01/'
DATA_PATH = "../data/clase_01/"
%cd {'/gdrive/My Drive/'+DRIVE_FOLDER+CLASS_FOLDER}
```
## Librerias
```
#Importar paquetes de herramientas:
#Datos
import pandas as pd
import numpy as np
#Graficos
import matplotlib.pyplot as plt
import seaborn as sns
#Otros
import warnings
warnings.filterwarnings('ignore')
```
# Dataset
El dataset que usaremos se encuentra en:
https://www.kaggle.com/lava18/google-play-store-apps
Una vez descargado, indicamos la direccion del archivo descargado a la funcion "pd.read_csv()" para importarlo como un objeto Pandas DataFrame. Si el archivo se encuentra en la misma carpeta que la notebook, con indicarle el nombre es suficiente.
Ademas, usaremos la funcion "np.shape()" y y el metodo ".head()" para:
* Verificar que se haya cargado bien el dataset: En algunos casos, debido a un error en el formato del archivo ".csv", las columnas y/o registros se cargan incorrectamente. En estos casos "pd.read_csv()" no devuelve error pero lo notaremos cuando usemos la funcion ".head()".
* Tener la dimension del dataset: Cantidad de registros y cantidad de columnas.
* Tener una base de la cantidad original de registros. Para que a medida que vayamos aplicando distintos filtros que limpien nuestros datos tengamos una numero de referencia. Si aplicamos un filtro, y de repente perdemos el 90% de los datos, lo mas probable es que en algo nos hayamos equivocado.
```
google_df = pd.read_csv('clusterai_2020_clase01_dset_googleplaystore.csv')
filas = np.shape(google_df)[0] # [0] para la primera dimension
print(f'Filas: {filas}')
columnas = np.shape(google_df)[1] # [1] para la segunda dimension
print(f'Columnas: {columnas}\r\n')
print('Output de ".head(5)": ')
google_df.head(5)
```
### Columnas
Si queremos saber el nombre de las columas en una variable utilizamos el metodo *.columns()*.
Para guardarlo, simplemente lo asignamos a una variable.
```
nombre_columnas = google_df.columns.values
nombre_columnas
```
# Limpieza de Datos
En esta parte nos vamos a encargar de limpiar:
* Duplicados --> [.drop_duplicates()](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop_duplicates.html)
* Simbolos --> [str.replace](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.replace.html) and [str.extract](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.extract.html)
* 'NaN' --> [.dropna()](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.dropna.html)
#### Duplicados
```
# Vamos a eliminar las de la columna 'App' los valores repetidos, conservando unicamente la primera ocurrencia.
google_df.drop_duplicates(subset='App', inplace=True, keep='first')
# Las columnas para quitar los duplicados se indican en el argumento 'subset'
# mientras que la primera ocurrencia se indica con el argumento 'keep'
# La opcion 'inplace' nos evitar tener asignar la salida a una variable.
# Directamente se guarda sobre 'google_df'. Equivale a:
#google_df = google_df.drop_duplicates(subset='App', keep='first')
```
#### Simbolos
Las columnas 'Installs', 'Size', 'Price' y 'Reviews' tienen informacion en la que estamos interesados.
El problema es que estan en formato texto (o *string*) y hay simbolos y valores que nos impiden manipularlos en formato numerico ( *int* o *float*)
```
#Installs
q_installs = len(google_df['Installs'].unique()) # Cantidad de valores unicos en 'Installs'
print(f'En total hay {q_installs} tipos de valores en "Install"\r\n')
print("\n", google_df['Installs'].unique())
```
Podemos notar 2 simbolos de la cell anterior que debemos eliminar para poder usar el dato como numero y no como cadena de texto, y un valor "Free" en algun registro que esta mal cargado.
Primero vamos a quitar el "Free" y luego vamos a reemplazar los simbolos "+" y ","
```
# Manually drop values the column should never contain: one malformed record
# holds the literal string 'Free' in 'Installs'.
google_df = google_df[google_df['Installs'] != 'Free']
# Here we kept only the rows whose value in 'Installs' differs (!=) from 'Free'.
filas = np.shape(google_df)[0]  # [0] -> first dimension (row count)
print(f'Filas: {filas}')
# Strip the ',' thousands separator and the trailing '+' so the column can be
# cast to int. regex=False treats both patterns as literal text: '+' is a
# regex metacharacter, and relying on pandas' version-dependent default
# (regex=True before 2.0, regex=False afterwards) is fragile.
google_df['Installs'] = google_df['Installs'].str.replace(',', '', regex=False)
google_df['Installs'] = google_df['Installs'].str.replace('+', '', regex=False)
google_df = google_df.astype({"Installs": int})
q_installs = len(google_df['Installs'].unique())
print(f'En total hay {q_installs} labels de Install\r\n')
print("\n", google_df['Installs'].unique())
# Size: inspect the distinct raw values before cleaning them.
q_size = len(google_df['Size'].unique())  # number of unique 'Size' values
print(f'En total hay {q_size} tipos de valores en "Size"\r\n')
print("\n", google_df['Size'].unique())
filas = np.shape(google_df)[0]
print(f'Filas: {filas}')
```
En la columna "Size", tenemos problemas de unidades, en algunos casos tenemos el tamaño en Megabytes (M) y en otras en Kilobytes (k), y algunos valores con el texto "Varies with device".
Primero, vamos a reemplazar los valores 'Varies with device' por NaN's.
Luego, vamos a separar los numeros que esten en formato texto y los guardaremos en formato numerico.
Ademas, vamos a homogeneizar el tamaño a 'M'. Para eso, extraeremos y reemplazaremos los caracteres 'k' y 'M'.
Finalmente, los NaN's correspondientes a los valores "Varies with device" los reemplazaremos por la media de cada categoria, para eliminar la menor cantidad de registros. Eliminar esos registros tambien es una solucion, asi que proponemos que tambien la prueben.
```
# Keep app sizes in Mb and convert the ones expressed in Kb.
google_df['Size'].replace('Varies with device', np.nan, inplace = True )
# (1) Strip the trailing 'k'/'M' letters, keeping only the numeric part.
output = google_df.Size.replace(r'[kM]', '', regex=True).astype(float)  # numbers without the k/M suffix
print(output,'\r\n')
print(output.value_counts())
# (2) Extract the unit letter, 'k' or 'M', from each value.
output = google_df.Size.str.extract(r'([kM])', expand=False)
print(output,'\r\n')
print(output.value_counts())
# (3) Fill the NaN units with 1 so no record is lost.
output = google_df.Size.str.extract(r'([kM])', expand=False).fillna(1)
print(output,'\r\n')
print(output.value_counts())
# (4) Homogenize the units: k = 10**-3 Mb, M = 1 Mb.
output = google_df.Size.str.extract(r'([kM])', expand=False).fillna(1)\
.replace(['k','M'], [10**-3, 1]).astype(float)
#print(output,'\r\n')
print(output.value_counts())
# Put it all together: numeric part times the unit factor.
# BUGFIX: the extract pattern here used '[KM]' (capital K). The data uses a
# lowercase 'k' suffix (see steps (1)-(4) above), so kilobyte sizes never
# matched, were filled with 1 and ended up treated as megabytes.
google_df.Size = (google_df.Size.replace(r'[kM]', '', regex=True).astype(float) * \
google_df.Size.str.extract(r'([kM])', expand=False)
.fillna(1)
.replace(['k','M'], [10**-3, 1]).astype(float))
google_df.Size
# Replace the records that had 'Varies with device' (now NaN) with the mean size of their genre.
google_df['Size'].fillna(google_df.groupby('Genres')['Size'].transform('mean'), inplace = True)
# Cast to float.
google_df['Size'] = google_df['Size'].astype(float)
google_df['Installs'] = google_df['Installs'].astype(float)
# Drop the '$' symbol and cast prices to float. regex=False treats '$'
# literally; as a regex, '$' is an end-of-string anchor and would remove
# nothing, making the astype(float) below fail on values like '$4.99'.
google_df['Price'] = google_df['Price'].str.replace('$', '', regex=False)
google_df['Price'] = google_df['Price'].astype(float)
google_df['Reviews'] = google_df['Reviews'].astype(int)
```
**Links para curiosos**
Regular Expressions:
- [Documentacion](https://docs.python.org/3/howto/regex.html)
- [YouTube](https://www.youtube.com/watch?v=8DvywoWv6fI&list=WL&index=2&t=21317s)
- [Ejemplos](https://www.geeksforgeeks.org/pattern-matching-python-regex/)
## NaN
Ahora verificamos que las columnas no tengan NaN.
En caso de haberlos, tendremos que decidir:
- si son suficientes como para eliminar TODA la columa y perder esa informacion, o
- decidir eliminar unicamente los registros.
```
# Which columns contain at least one NaN?
col_NaN = google_df.isnull().any()
print(col_NaN,'\r\n')
# Null counts per column, sorted in descending order.
total = google_df.isnull().sum().sort_values(ascending=False)
total
# Null counts per column, sorted in descending order (recomputed for the summary below).
total = google_df.isnull().sum().sort_values(ascending=False)
# Share of nulls relative to each column's total row count.
percent = (google_df.isnull().sum()/google_df.isnull().count()).sort_values(ascending=False)
# Show both results side by side.
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing_data.head(6)
# Drop every row that still has any NaN ('.dropna').
google_df.dropna(how ='any', inplace = True)
# Check what the dataset looks like after cleaning.
print(f'Nos quedamos con un dataframe de {google_df.shape[0]} filas x {google_df.shape[1]} columnas')
```
### Ahora que terminamos con la limpieza general del dataset, pasemos a analizar:
- Rating
- Categorias
- Categorias vs Rating
- Reviews
- Precio
# Rating
A continuacion veamos la distribucion estimada del 'Rating' y su histograma
```
plt.figure(figsize=(10,3))
sns.kdeplot(google_df['Rating'], color= "Blue", shade = True)
plt.xlabel("Rating",size = 20)
plt.ylabel("Frecuency",size = 20)
plt.title('Distribution of Rating',size = 20)
#plt.imsave('KDE',format='png') # Guardar la imagen
plt.show()
plt.figure(figsize=(10,3))
sns.countplot(google_df.Rating, color="Blue")
plt.xlabel("Rating",size = 20)
plt.ylabel("Qty",size = 20)
plt.title('Histogram of Rating',size = 20)
plt.xticks([]) # por si deseamos eliminar los intervalos en el eje-x
#plt.imsave('Histogram',format='png') # Guardar la imagen
plt.show()
```
# Categorias
A modo de ejemplo, podriamos querer saber como es el comportamiento del Rating por Categoria:
* Vamos a visualizar el top 10 de categorias con mayor cantidad de apps.
* Boxplot de Categoria vs Rating
```
# Gardamos en una variable la cantidad de categorias
q_categorias = len(google_df['Category'].unique())
print(f'En total hay {q_categorias} categorias\r\n')
# Mostramos en la cell los tipos de categorias
print("\n", google_df['Category'].unique())
# top
top_n = 10
# Guardamos los indices de los top_n categorias
idx_top = google_df['Category'].value_counts(ascending=False).index[0:top_n]
plt.figure(figsize=(15,3))
# Indicamos la columna 'Category', de los datos 'google_df', en el orden 'idx_top'
g = sns.countplot(x="Category", data=google_df,
order=idx_top,
palette = "muted")
g.set_xticklabels(g.get_xticklabels(), rotation=90, ha="right")
plt.title('Cantidad de App por Categorias', size = 20)
plt.xlabel("Categoria", size = 20)
plt.ylabel("Cantidades", size = 20)
plt.show()
```
# Categoria vs Rating
```
# Todas las categorias
g = sns.catplot(x="Category",y="Rating",
data=google_df,
kind="box",
order=idx_top,
palette = "muted",
height = 5 ,aspect=3)
g.despine(left=True) # Para quitar linea del Y del plot
g.set_xticklabels(rotation=90)
plt.xlabel("Categoria",size = 20)
plt.ylabel("Rating",size = 20)
plt.title('Boxplot de Rating VS Categorias',size = 20)
plt.show()
```
## Mediana, Q1-Q3, whiskers y outliers
En muchas ocasiones, luego de ver los plots vamos a querer guardar en variables valores como la mediana, los valores atipicos y los "whiskers" para cada categoria.
A modo de ejemplo, veamos como obtenerlos para una unica categoria.
```
# Extract median, quartiles, whiskers and outliers for one category's boxplot.
cat_select = 'LIFESTYLE'
df_pivot = google_df[google_df['Category']==cat_select]
plt.figure()
bxplot = plt.boxplot(df_pivot['Rating'])
#bxplot = plt.boxplot(df_pivot['Rating'],whis=[15, 82])
plt.show()
# Median line of the (single) box.
medians = bxplot["medians"][0].get_ydata()
# Boxplot whisker lines: with one box, whiskers[0] is the lower line
# (ydata = [Q1, lower whisker end]) and whiskers[1] the upper line
# (ydata = [Q3, upper whisker end]).
low_limits = bxplot["whiskers"][0].get_ydata()
up_limits = bxplot["whiskers"][1].get_ydata()
# Q1 - Q3 values (first point of each whisker line).
Q1 = low_limits[0]
Q3 = up_limits[0]
# Whiskers: extreme ends of the lines extending beyond the [Q1-Q3] box.
low_whiskers = low_limits[1]
up_whiskers = up_limits[1]
# Outliers = fliers: points beyond the whiskers.
outliers = bxplot["fliers"][0].get_ydata()
print(f'Valor Mediana:\r\n {medians[0]}\r\n')
print(f'[Q1 - Q3] : [{Q1} - {Q3}]\r\n')
print(f'[Inferior - Superior]: [{low_whiskers} - {up_whiskers}] \r\n')
print(f'Valores outliers:\r\n{outliers}\r\n')
```
# Reviews
```
# En este primer plot vamos a ver una estimacion de la distribucion de de los reviews de la cantidad de Reviews
plt.figure(figsize=(10,3))
g = sns.kdeplot(google_df.Reviews, color="Green", shade = True)
plt.xlabel("Reviews",size = 20)
plt.ylabel("Frecuency",size = 20)
plt.title('Distribution of Reveiw',size = 20)
plt.show()
# En este segundo plot vamos a ver el histograma de la cantidad de reviews.
# El parametro bins define la cantidad de sub-intervalos en los que vamos a dividir el eje-x
plt.figure(figsize=(10,3))
plt.hist(google_df['Reviews'], bins=100,color='g' ,alpha=0.5)
plt.xlabel("Reviews",size = 20)
plt.ylabel("Cantidades",size = 20)
plt.show()
```
Este ultimo grafico nos da alguna informacion?
Muchas veces los graficos parecen que no nos muestran nada.
En los casos donde tenemos muchas informacion concentrada, una buena practica es hacer un cambio de escala.
```
plt.figure(figsize=(10,3))
plt.hist(np.log(1+google_df.Reviews),bins=100, color='g' ,alpha=0.5)
plt.xlabel("Log(Reviews)",size = 20)
plt.ylabel("Cantidades",size = 20)
plt.show()
```
Ahora podemos ver mejor como se distribuyen las cantidades de reviews ...
Alla en el fondo, donde antes no veiamos nada, ahora podemos ver que hay un par de apps con muchisimos reviews...
veamos cuales son ...
```
google_df[google_df.Reviews > 5000000].head()
```
**Habra alguna relacion entre los "Reviews" y alguna otra variable?**
```
corrmat = google_df.corr()
f, ax = plt.subplots(figsize=(9, 7))
ax = sns.heatmap(corrmat,
annot=True,
cmap=sns.diverging_palette(240, 10, as_cmap=True))
```
**Tiene sentido?**
# Precio
Tenemos a nuestra disposicion tambien los precios de las Apps, asi que vamos a usarlos!
* Estadistica descriptiva que nos da [.describe()](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.describe.html) y si algo nos llama la atencion, explorarlo y si hace falta corregirlo.
* Vamos a armar a criterio nuestro categorias de precio de las apps.
* Crucemos informacion entre las categorias de precio Precio y Rating, y saquemos conclusiones.
```
google_df['Price'].describe()
```
#### Hay una app que cuesta U$D 400 ! !
```
google_df[google_df['Price'] == 400]
```
### Ahora armemos a nuestro gusto brands segun los precios y veamos como se distribuyen
```
#Primero defininmos los limites de cada categoria y creemos la columna 'PriceBand'
google_df.loc[ google_df['Price'] == 0, 'PriceBand'] = '0 Free'
google_df.loc[(google_df['Price'] > 0) & (google_df['Price'] <= 0.99), 'PriceBand'] = '1 Muy Barato'
google_df.loc[(google_df['Price'] > 0.99) & (google_df['Price'] <= 2.99), 'PriceBand'] = '2 Barato'
google_df.loc[(google_df['Price'] > 2.99) & (google_df['Price'] <= 4.99), 'PriceBand'] = '3 Normal'
google_df.loc[(google_df['Price'] > 4.99) & (google_df['Price'] <= 14.99), 'PriceBand'] = '4 Caro'
google_df.loc[(google_df['Price'] > 14.99) & (google_df['Price'] <= 29.99), 'PriceBand'] = '5 Muy Caro'
google_df.loc[(google_df['Price'] > 29.99), 'PriceBand'] = '6 #VamoACalmarnos'
#Veamos como es el mean value para las bandas que definimos
google_df[['PriceBand', 'Rating']].groupby(['PriceBand'], as_index=False).mean()
# Ahora en vez de utilizar 'box'(boxplot)
# ingresaremos 'boxen' al argumento 'kind' de la funcion 'catplot'
g = sns.catplot(x="PriceBand", y="Rating",
data=google_df,
kind="boxen", # box , violin
height = 5,aspect=3 ,palette = "muted")
g.despine(left=True)
g.set_xticklabels(rotation=90)
g = g.set_ylabels("Rating")
plt.title('Boxen plot Rating VS PriceBand',size = 20)
plt.show()
```
Para bases de datos muy grandes, muchas veces los limites de confiabilidad del boxplot por defecto consideran erroneamente "outliers" a muestras con muy baja probabilidad, que si deberian considerarse como parte del espacio de muestra.
Ademas, el "boxplot" no deja visualizar como se distribuyen de muestras en los extremos.
Si creemos que alguno de estos factores, entre otros, nos puede estar sucediendo y nos esconde informacion que creemos relevante mostrar, lo mejor es probar con algun otro tipo de ploteo ([boxen](https://vita.had.co.nz/papers/letter-value-plot.pdf) o "violin").
# Propuestas:
- Apps Pagas vs Apps Free
- Content Rating (Everyone, Teen, +18, etc)
Hint: google_df['Genres'] = google_df['Genres'].str.split(';').str[0]
- Genres vs Rating
- Genres (Estadistica descriptiva w.r.t , i.e, "Rating")
- Mismo analisis, pero en vez de reemplazar Varies with device por la media de w.r.t. categoria, eliminando los registros y ver si el supuesto que hicimos impacta en los resultados.
```
```
| github_jupyter |
<img src="images/dask_horizontal.svg"
width="45%"
alt="Dask logo\">
# Scaling your data work with Dask
## PyData Global 2020
### Materials & setup
- Tutorial materials available at https://github.com/coiled/pydata-global-dask
- Two ways to go through the tutorial:
1. Run locally on your laptop
2. Run using Binder (no setup required)
### About the speakers
- **[James Bourbeau](https://www.jamesbourbeau.com/)**: Dask maintainer and Software Engineer at [Coiled](https://coiled.io/).
- **[Hugo Bowne-Anderson](http://hugobowne.github.io/)**: Head of Data Science Evangelism and Marketing at [Coiled](https://coiled.io/).
# Overview
Dask is a flexible, open source library for parallel computing in Python
- Documentation: https://docs.dask.org
- GitHub: https://github.com/dask/dask
From a high-level Dask:
- Enables parallel and larger-than-memory computations
- Scales the existing Python ecosystem
- Uses familiar APIs you're used to from projects like NumPy, Pandas, and scikit-learn
- Allows you to scale existing workflows with minimal code changes
- Dask works on your laptop, but also scales out to large clusters
- Offers great built-in diagnosic tools
<img src="images/dask-components.svg"
width="85%"
alt="Dask components\">
# Dask in action!
```
# Sets up Dask's distributed scheduler
# Client() with no arguments launches a local cluster of workers on this machine
from dask.distributed import Client
client = Client()
client
# Run this cell to download NYC flight dataset
%run prep.py -d flights
# Perform Pandas-like operations
import os
import dask.dataframe as dd
# read_csv is lazy: it builds a task graph over all matching CSV files
# rather than loading them into memory immediately
df = dd.read_csv(os.path.join("data", "nycflights", "*.csv"),
parse_dates={"Date": [0, 1, 2]},
dtype={"TailNum": str,
"CRSElapsedTime": float,
"Cancelled": bool})
# .compute() triggers actual (parallel) execution of the task graph
df.groupby("Origin").DepDelay.mean().compute()
```
## Tutorial goals
The goal for this tutorial is to cover the basics of Dask. Attendees should walk away with an understanding of what
Dask offers, how it works, and ideas of how Dask can help them effectively scale their own data intensive workloads.
The tutorial consists of several Jupyter notebooks which contain explanatory material on how Dask works. Specifically, the notebooks presented cover the following topics:
- [Dask Delayed](1-delayed.ipynb)
- [Dask DataFrame](2-dataframe.ipynb)
- [Schedulers](3-schedulers.ipynb)
Each notebook also contains hands-on exercises to illustrate the concepts being presented. Let's look at our first example to get a sense for how they work.
### Exercise: Print `"Hello world!"`
Use Python to print the string "Hello world!" to the screen.
```
# Your solution here
# Run this cell to see a solution
%load solutions/overview.py
```
Note that several of the examples here have been adapted from the Dask tutorial at https://tutorial.dask.org.
## Next step
Let's start by covering our first Dask collection, the `dask.delayed` interface, in the [Dask delayed notebook](1-delayed.ipynb).
| github_jupyter |
# Plotting programs
This notebook illustrates a secondary feature of the plotting library - plotting program related quantities such as
- Program spending
- Coverage
The functionality is built on the standard plotting library - as described in the plotting documentation, the general workflow is to
1. Create a `PlotData` instance containing the values to be rendered on a plot
2. Pass the `PlotData` object to `plot_series` or `plot_bars` to render the figure
The strategy is to construct a `PlotData` object that contains program-related data. Then, `plot_series` and `plot_bars` can be used as normal, together with all of the other functionality for assigning colours, bar plot stacking, legend management, and figure saving.
First, we will perform a simulation using programs
```
# IMPORTS
%load_ext autoreload
%autoreload 2
%matplotlib inline
import sys
sys.path.append('..')
import atomica as au
import matplotlib.pyplot as plt
import numpy as np
import sciris as sc
from IPython.display import display, HTML
# CSS tweak so multiple figure outputs are laid out side-by-side in the notebook
CSS = """
.output {
flex-flow: row wrap;
}
"""
HTML('<style>{}</style>'.format(CSS))
# Make demo project and default budget run
P = au.demo(which='tb')
instructions = au.ProgramInstructions(2018)  # programs become active from 2018
result1 = P.run_sim(P.parsets[0],P.progsets[0],progset_instructions=instructions,result_name='Default budget')
# Do a simple budget scenario so that we have different spending
# (constant spending overwrite per program)
alloc = {'BCG': 365000, 'PCF': 22568000, 'ACF': 22282133, 'ACF-p': 793333, 'HospDS': 108461100, 'HospMDR': 7205000, 'HospXDR': 1346000, 'AmbDS': 0, 'AmbMDR': 0, 'XDRnew': 951200, 'PrisDS': 1414500, 'PrisDR': 220000}
instructions = au.ProgramInstructions(alloc=alloc,start_year=2018)
result2 = P.run_sim(P.parsets[0],P.progsets[0],progset_instructions=instructions,result_name='Modified budget')
# Do a budget scenario with time-varying spending
# (TimeSeries specifies spending at the given years)
alloc = {'ACF': au.TimeSeries([2018,2030],[2e8,1.5e8]),'BCG': au.TimeSeries([2018,2025],[2e7,3e7])}
instructions = au.ProgramInstructions(alloc=alloc,start_year=2018)
result3 = P.run_sim(P.parsets[0],P.progsets[0],progset_instructions=instructions,result_name='Time-varying budget')
```
### Creating `PlotData` from programs
To make a standard plot of model outputs, you pass a `Result` object to the `PlotData` constructor:
```
d = au.PlotData(result3,outputs='alive',pops='all',project=P)
au.plot_series(d,axis='pops');
```
This constructor is specific to plotting model outputs i.e. the values associated with the integration objects in a `Model` such as compartments, characteristics, parameters, and links. Therefore, the `outputs` argument should correspond to the code name of one of these quantities.
To plot programs, you instead construct a `PlotData` instance using the `au.PlotData.programs()` static method. For example:
```
d = au.PlotData.programs(result3,outputs='BCG')
au.plot_series(d);
```
For this method, the `outputs` argument should correspond to the code name of _programs_, and the `pop` argument is not supported because the program quantities for spending and coverage are not population specific.
## Plotting spending and coverage
`au.PlotData.programs()` takes an optional argument, `quantity`, that selects whether to extract values associated with
- `spending` which are budget amounts from the `alloc`
- `coverage_eligible` which is the number of people that are eligible to be reached by the program. This is equal to the sum of the compartment sizes for all compartments and populations the program is marked as reaching in the progbook
- `coverage_fraction` - this is the fraction of the available people covered by the program, and is equal to `coverage_number/coverage_eligible` with a maximum value of `1.0`
- `coverage_number` which is the number of people actually covered by the program - this is returned as a number of people covered per year
<div class='alert alert-warning'>
Note that program coverages plotted here are always on an individual program basis, prior to any modality interactions
</div>
```
d = au.PlotData.programs(result3,outputs='BCG',quantity='spending')
au.plot_series(d);
d = au.PlotData.programs(result3,outputs='BCG',quantity='coverage_number')
au.plot_series(d);
d = au.PlotData.programs(result3,outputs='BCG',quantity='coverage_eligible')
au.plot_series(d);
d = au.PlotData.programs(result3,outputs='BCG',quantity='coverage_fraction')
au.plot_series(d);
```
<div class='alert alert-danger'>
Spending values are interpolated onto every time-step but are only used after the program start year. Currently this is not visually indicated on the plot.
</div>
As with plotting normal results, you can pass in a `list` of `Result` objects to compare budget quantities in two different simulations. Here, our different result objects correspond to different budget scenarios:
```
d = au.PlotData.programs([result1,result2,result3],outputs='BCG',quantity='spending')
au.plot_series(d,axis='results');
d = au.PlotData.programs([result1,result2,result3],outputs='BCG',quantity='coverage_number')
au.plot_series(d,axis='results');
d = au.PlotData.programs([result1,result2,result3],outputs='BCG',quantity='coverage_eligible')
au.plot_series(d,axis='results');
d = au.PlotData.programs([result1,result2,result3],outputs='BCG',quantity='coverage_fraction')
au.plot_series(d,axis='results');
```
## Bar plots and selecting times
A common task is making a bar plot for allocations in specific year(s). The simulation is fundamentally run with a spending value at each timestep, and the `PlotData` object has values for every simulation time, as shown above. To select a single year, simply interpolate the `PlotData` object onto the year that you want to plot. This parsimoniously handles time-varying budgets.
```
d = au.PlotData.programs(result3,quantity='spending')
d.interpolate(2018)
au.plot_bars(d,stack_outputs='all');
d = au.PlotData.programs(result3,outputs='BCG',quantity='spending')
d.interpolate(2018)
au.plot_bars(d);
```
As with normal `PlotData` objects, if you specify multiple outputs, they will be rendered as separate bar elements, and can optionally be stacked
```
d = au.PlotData.programs(result3,outputs=['HospDS','HospMDR','HospXDR'],quantity='spending')
d.interpolate(2018)
au.plot_bars(d);
au.plot_bars(d,stack_outputs='all');
```
Notice how as usual, changing the stacking for the bar plot does not require assembling a new `PlotData` object, it is simply rendering the same data in a different style. If you interpolate onto multiple years, these will be rendered as normal by `plot_bars`:
```
d = au.PlotData.programs(result3,outputs=['HospDS','HospMDR','HospXDR'],quantity='spending')
d.interpolate([2018,2025])
au.plot_bars(d);
au.plot_bars(d,stack_outputs='all');
```
Similarly, if you pass in multiple results, these will also be handled as normal by `plot_bars` even when combined with multiple years:
```
d = au.PlotData.programs([result2,result3],outputs=['HospDS','HospMDR','HospXDR'],quantity='spending')
d.interpolate([2018,2025])
au.plot_bars(d,stack_outputs='all');
```
## Time aggregation
To aggregate values over time, you can pass in an argument `t_bins` which behaves the same as for standard `PlotData` objects. However, unlike normal `PlotData` objects, the time aggregation type is fixed because only certain aggregations make sense in the context of programs:
- `spending` which is in units of '\$/year' will be integrated over time
- `coverage_eligible` (in units of 'people') will be averaged over time
- `coverage_fraction` (which is dimensionless) will be averaged over time
- `coverage_number` (in units of 'people/year') will be integrated over time
Note that `coverage_number` being integrated over time typically only makes sense for treatment-style programs rather than continuous ART-style programs.
You can specify a scalar to aggregate over fixed bin sizes, or bin edges to aggregate over a specific time period:
```
# Single time bin
d = au.PlotData.programs(result3,outputs=['BCG'],quantity='spending',t_bins=[2018,2025])
au.plot_bars(d,stack_outputs='all');
# Unequal bins
d = au.PlotData.programs(result3,outputs=['BCG'],quantity='spending',t_bins=[2018,2025,2028])
au.plot_bars(d,stack_outputs='all');
# 5-year bins, showing scale-up in spending
d = au.PlotData.programs(result3,outputs=['BCG'],quantity='spending',t_bins=5)
au.plot_bars(d,stack_outputs='all');
```
These behave in the usual way when there are multiple outputs, results, and time bins:
```
d = au.PlotData.programs([result2,result3],outputs=['HospDS','HospMDR','HospXDR'],quantity='spending',t_bins=np.arange(2020,2040,5))
au.plot_bars(d,stack_outputs='all',outer='times');
```
Here is a demonstration of the automatic selection of addition vs averaging for time aggregation:
```
# SPENDING
# Raw values
d = au.PlotData.programs([result3],outputs=['BCG'],quantity='spending')
au.plot_series(d);
plt.xlim(2020,2026)
# Time aggregation over 2 years
# Spending values are summed (e.g. 22m+25m~=49m in the first 2 years)
# Notice how the axis label for the line plot is `$/year` but for the bar plot it is `$`
d = au.PlotData.programs([result3],outputs=['BCG'],quantity='spending',t_bins=np.arange(2020,2026,2))
au.plot_bars(d);
# Number covered
# Raw values
d = au.PlotData.programs([result3],outputs=['BCG'],quantity='coverage_number')
au.plot_series(d);
plt.xlim(2020,2026)
# Time aggregation over 2 years
# People covered per year are summed
d = au.PlotData.programs([result3],outputs=['BCG'],quantity='coverage_number',t_bins=np.arange(2020,2026,2))
au.plot_bars(d);
# Coverage eligible
# Raw values
d = au.PlotData.programs([result3],outputs=['BCG'],quantity='coverage_eligible')
au.plot_series(d);
plt.xlim(2020,2026)
# Time aggregation over 2 years
# Compartment sizes for compartments reached by a program are averaged
d = au.PlotData.programs([result3],outputs=['BCG'],quantity='coverage_eligible',t_bins=np.arange(2020,2026,2))
au.plot_bars(d);
# Fraction covered
# Raw values
d = au.PlotData.programs([result3],outputs=['BCG'],quantity='coverage_fraction')
au.plot_series(d);
plt.xlim(2020,2026)
# Time aggregation over 2 years
# Fraction covered per year are averaged
d = au.PlotData.programs([result3],outputs=['BCG'],quantity='coverage_fraction',t_bins=np.arange(2020,2026,2))
au.plot_bars(d);
```
## Output aggregation
When plotting spending values, it is possible to aggregate programs in the same way that outputs can be aggregated for standard `PlotData` objects. This can only be done for programs - coverages are more complex due to modality interactions, and a system for plotting such aggregations is not yet available.
As with aggregating outputs and pops normally, to aggregate programs, pass them in within a `dict` where the key is the name of the aggregated output, and the value is a list of the program names to include:
```
# Select a subset of programs
prog_list = ['HospDS','HospMDR','HospXDR']
d = au.PlotData.programs([result1],outputs=prog_list)
d.interpolate(2018)
au.plot_bars(d,stack_outputs='all')
plt.title('Unaggregated');
# Aggregate programs
outputs = {'Hosp':['HospDS','HospMDR','HospXDR']}
d = au.PlotData.programs([result1],outputs=outputs)
d.interpolate(2018)
au.plot_bars(d,stack_outputs='all')
plt.title('Aggregated');
```
| github_jupyter |
# Random Forests
```
# Pin scikit-learn 0.23.2: plot_confusion_matrix used below exists in this release
!pip install scikit-learn==0.23.2
import pandas as pd
import numpy as np
from sklearn.datasets import load_digits
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, plot_confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import plot_tree
# Load the 8x8 handwritten digits dataset as pandas objects (features/target)
data = load_digits(as_frame=True)
X, y, images = data.data, data.target, data.images
# Quick exploration of the feature matrix and class distribution
X.head()
X.describe()
y.value_counts().sort_index()
data.target_names
# 60/40 train/test split; `images` is split alongside for visualization
X_train, X_test, y_train, y_test, images_train, images_test = train_test_split(X, y, images, train_size=0.6, random_state=0)
# Show the first four training digits with their labels
fig, axes = plt.subplots(1, 4, figsize=(10, 4))
fig.suptitle("Dados de treino")
for ax, image, label in zip(axes, images_train, y_train):
ax.set_axis_off()
ax.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
ax.set_title('Label: %i' % label)
# Fit a random forest of 200 shallow (depth-3) entropy-based trees
model = RandomForestClassifier(criterion="entropy", n_estimators=200, max_depth=3, random_state=0)
model.fit(X_train, y_train)
# Evaluate on the held-out set
y_pred = model.predict(X_test)
accuracy_score(y_test, y_pred)
# Confusion matrix over the test set
fig, ax = plt.subplots(figsize=(11, 10))
disp = plot_confusion_matrix(model, X_test, y_test, ax=ax)
disp.figure_.suptitle("Matriz de Confusão");
# Examples the model got right (boolean mask y_pred == y_test)
fig, axes = plt.subplots(1, 4, figsize=(10, 4))
fig.suptitle("Predições corretas")
for ax, image, pred, label in zip(axes, images_test[y_pred == y_test], y_pred[y_pred == y_test], y_test[y_pred == y_test]):
ax.set_axis_off()
ax.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
ax.set_title(f'Pred {pred}/Label {label}')
# Examples the model got wrong (boolean mask y_pred != y_test)
fig, axes = plt.subplots(1, 4, figsize=(10, 4))
fig.suptitle("Predições erradas")
for ax, image, pred, label in zip(axes, images_test[y_pred != y_test], y_pred[y_pred != y_test], y_test[y_pred != y_test]):
ax.set_axis_off()
ax.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
ax.set_title(f'Pred {pred}/Label {label}')
# This may not be the best way to view each estimator as it is small
# Render the first few individual trees of the forest
num_trees = 4
fn = data.feature_names
cn = [str(t) for t in data.target_names]
fig, axes = plt.subplots(num_trees, 1, figsize=(16,25))
for index in range(0, num_trees):
plot_tree(model.estimators_[index],
feature_names=fn,
class_names=cn,
filled=True,
ax=axes[index],
fontsize=9)
axes[index].set_title('Estimator: ' + str(index), fontsize=15)
# Per-pixel feature importances aggregated over the forest
fig, ax = plt.subplots(figsize=(9, 15))
ax.barh(data.feature_names, model.feature_importances_)
```
| github_jupyter |
# Deep Learning & Art: Neural Style Transfer
In this assignment, you will learn about Neural Style Transfer. This algorithm was created by [Gatys et al. (2015)](https://arxiv.org/abs/1508.06576). Download the data in [coursera-deep-learning-specialization](https://github.com/amanchadha/coursera-deep-learning-specialization).
**In this assignment, you will:**
- Implement the neural style transfer algorithm
- Generate novel artistic images using your algorithm
Most of the algorithms you've studied optimize a cost function to get a set of parameter values. In Neural Style Transfer, you'll optimize a cost function to get pixel values!
```
import os
import sys
import scipy.io
import scipy.misc
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from PIL import Image
from nst_utils import *
import numpy as np
import tensorflow as tf
import pprint
%matplotlib inline
```
## 1 - Problem Statement
Neural Style Transfer (NST) is one of the most fun techniques in deep learning. As seen below, it merges two images, namely: a **"content" image (C) and a "style" image (S), to create a "generated" image (G**).
The generated image G combines the "content" of the image C with the "style" of image S.
In this example, you are going to generate an image of the Louvre museum in Paris (content image C), mixed with a painting by Claude Monet, a leader of the impressionist movement (style image S).
<img src="images/louvre_generated.png" style="width:750px;height:200px;">
Let's see how you can do this.
## 2 - Transfer Learning
Neural Style Transfer (NST) uses a previously trained convolutional network, and builds on top of that. The idea of using a network trained on a different task and applying it to a new task is called transfer learning.
Following the [original NST paper](https://arxiv.org/abs/1508.06576), we will use the VGG network. Specifically, we'll use VGG-19, a 19-layer version of the VGG network. This model has already been trained on the very large ImageNet database, and thus has learned to recognize a variety of low level features (at the shallower layers) and high level features (at the deeper layers).
Run the following code to load parameters from the VGG model. This may take a few seconds.
```
pp = pprint.PrettyPrinter(indent=4)
model = load_vgg_model("pretrained_model/imagenet-vgg-verydeep-19.mat")
pp.pprint(model)
```
* The model is stored in a python dictionary.
* The python dictionary contains key-value pairs for each layer.
* The 'key' is the variable name and the 'value' is a tensor for that layer.
#### Assign input image to the model's input layer
To run an image through this network, you just have to feed the image to the model. In TensorFlow, you can do so using the [tf.assign](https://www.tensorflow.org/api_docs/python/tf/assign) function. In particular, you will use the assign function like this:
```python
model["input"].assign(image)
```
This assigns the image as an input to the model.
#### Activate a layer
After this, if you want to access the activations of a particular layer, say layer `4_2` when the network is run on this image, you would run a TensorFlow session on the correct tensor `conv4_2`, as follows:
```python
sess.run(model["conv4_2"])
```
## 3 - Neural Style Transfer (NST)
We will build the Neural Style Transfer (NST) algorithm in three steps:
- Build the content cost function $J_{content}(C,G)$
- Build the style cost function $J_{style}(S,G)$
- Put it together to get $J(G) = \alpha J_{content}(C,G) + \beta J_{style}(S,G)$.
### 3.1 - Computing the content cost
In our running example, the content image C will be the picture of the Louvre Museum in Paris. Run the code below to see a picture of the Louvre.
```
content_image = scipy.misc.imread("images/louvre.jpg")
imshow(content_image);
```
The content image (C) shows the Louvre museum's pyramid surrounded by old Paris buildings, against a sunny sky with a few clouds.
**3.1.1 - Make generated image G match the content of image C**
#### Shallower versus deeper layers
* The shallower layers of a ConvNet tend to detect lower-level features such as edges and simple textures.
* The deeper layers tend to detect higher-level features such as more complex textures as well as object classes.
#### Choose a "middle" activation layer $a^{[l]}$
We would like the "generated" image G to have similar content as the input image C. Suppose you have chosen some layer's activations to represent the content of an image.
* In practice, you'll get the most visually pleasing results if you choose a layer in the **middle** of the network--neither too shallow nor too deep.
* (After you have finished this exercise, feel free to come back and experiment with using different layers, to see how the results vary.)
#### Forward propagate image "C"
* Set the image C as the input to the pretrained VGG network, and run forward propagation.
* Let $a^{(C)}$ be the hidden layer activations in the layer you had chosen. (In lecture, we had written this as $a^{[l](C)}$, but here we'll drop the superscript $[l]$ to simplify the notation.) This will be an $n_H \times n_W \times n_C$ tensor.
#### Forward propagate image "G"
* Repeat this process with the image G: Set G as the input, and run forward propagation.
* Let $a^{(G)}$ be the corresponding hidden layer activation.
#### Content Cost Function $J_{content}(C,G)$
We will define the content cost function as:
$$J_{content}(C,G) = \frac{1}{4 \times n_H \times n_W \times n_C}\sum _{ \text{all entries}} (a^{(C)} - a^{(G)})^2\tag{1} $$
* Here, $n_H, n_W$ and $n_C$ are the height, width and number of channels of the hidden layer you have chosen, and appear in a normalization term in the cost.
* For clarity, note that $a^{(C)}$ and $a^{(G)}$ are the 3D volumes corresponding to a hidden layer's activations.
* In order to compute the cost $J_{content}(C,G)$, it might also be convenient to unroll these 3D volumes into a 2D matrix, as shown below.
* Technically this unrolling step isn't needed to compute $J_{content}$, but it will be good practice for when you do need to carry out a similar operation later for computing the style cost $J_{style}$.
<img src="images/NST_LOSS.png" style="width:800px;height:400px;">
**Exercise:** Compute the "content cost" using TensorFlow.
**Instructions**: The 3 steps to implement this function are:
1. Retrieve dimensions from `a_G`:
- To retrieve dimensions from a tensor `X`, use: `X.get_shape().as_list()`
2. Unroll `a_C` and `a_G` as explained in the picture above
- You'll likely want to use these functions: [tf.transpose](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/transpose) and [tf.reshape](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/reshape).
3. Compute the content cost:
- You'll likely want to use these functions: [tf.reduce_sum](https://www.tensorflow.org/api_docs/python/tf/reduce_sum), [tf.square](https://www.tensorflow.org/api_docs/python/tf/square) and [tf.subtract](https://www.tensorflow.org/api_docs/python/tf/subtract).
#### Additional Hints for "Unrolling"
* To unroll the tensor, we want the shape to change from $(m,n_H,n_W,n_C)$ to $(m, n_H \times n_W, n_C)$.
* `tf.reshape(tensor, shape)` takes a list of integers that represent the desired output shape.
* For the `shape` parameter, a `-1` tells the function to choose the correct dimension size so that the output tensor still contains all the values of the original tensor.
* So tf.reshape(a_C, shape=[m, n_H * n_W, n_C]) gives the same result as tf.reshape(a_C, shape=[m, -1, n_C]).
* If you prefer to re-order the dimensions, you can use `tf.transpose(tensor, perm)`, where `perm` is a list of integers containing the original index of the dimensions.
* For example, `tf.transpose(a_C, perm=[0,3,1,2])` changes the dimensions from $(m, n_H, n_W, n_C)$ to $(m, n_C, n_H, n_W)$.
* There is more than one way to unroll the tensors.
* Notice that it's not necessary to use tf.transpose to 'unroll' the tensors in this case but this is a useful function to practice and understand for other situations that you'll encounter.
```
# GRADED FUNCTION: compute_content_cost

def compute_content_cost(a_C, a_G):
    """
    Computes the content cost (equation 1).

    Arguments:
    a_C -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing content of the image C
    a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing content of the image G

    Returns:
    J_content -- scalar tensor computed using equation 1 above.
    """
    ### START CODE HERE ###
    # Retrieve dimensions from a_G (≈1 line)
    m, n_H, n_W, n_C = a_G.get_shape().as_list()
    new_shape = [m, n_H * n_W, n_C]

    # Reshape a_C and a_G (≈2 lines)
    # Unrolling is not strictly required for the content cost, but mirrors the
    # operation needed later for the style cost.
    a_C_unrolled = tf.reshape(a_C, new_shape)
    a_G_unrolled = tf.reshape(a_G, new_shape)

    # compute the cost with tensorflow (≈1 line)
    # Use pure TF ops (tf.subtract / tf.square) instead of np.power on graph
    # tensors, so the whole cost lives inside the TensorFlow graph.
    J_content = tf.reduce_sum(tf.square(tf.subtract(a_C_unrolled, a_G_unrolled))) / (4 * n_H * n_W * n_C)
    ### END CODE HERE ###

    return J_content
# Sanity-check compute_content_cost on small random activations
tf.reset_default_graph()
with tf.Session() as test:
tf.set_random_seed(1)  # fixed seed so the printed value is reproducible
a_C = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
J_content = compute_content_cost(a_C, a_G)
print("J_content = " + str(J_content.eval()))
#### What you should remember
- The content cost takes a hidden layer activation of the neural network, and measures how different $a^{(C)}$ and $a^{(G)}$ are.
- When we minimize the content cost later, this will help make sure $G$ has similar content as $C$.
### 3.2 - Computing the style cost
For our running example, we will use the following style image:
```
style_image = scipy.misc.imread("images/monet_800600.jpg")
imshow(style_image);
```
This was painted in the style of *[impressionism](https://en.wikipedia.org/wiki/Impressionism)*.
Let's see how you can now define a "style" cost function $J_{style}(S,G)$.
### 3.2.1 - Style matrix
#### Gram matrix
* The style matrix is also called a "Gram matrix."
* In linear algebra, the Gram matrix G of a set of vectors $(v_{1},\dots ,v_{n})$ is the matrix of dot products, whose entries are ${\displaystyle G_{ij} = v_{i}^T v_{j} = np.dot(v_{i}, v_{j}) }$.
* In other words, $G_{ij}$ compares how similar $v_i$ is to $v_j$: If they are highly similar, you would expect them to have a large dot product, and thus for $G_{ij}$ to be large.
#### Two meanings of the variable $G$
* Note that there is an unfortunate collision in the variable names used here. We are following common terminology used in the literature.
* $G$ is used to denote the Style matrix (or Gram matrix)
* $G$ also denotes the generated image.
* For this assignment, we will use $G_{gram}$ to refer to the Gram matrix, and $G$ to denote the generated image.
#### Compute $G_{gram}$
In Neural Style Transfer (NST), you can compute the Style matrix by multiplying the "unrolled" filter matrix with its transpose:
<img src="images/NST_GM.png" style="width:900px;height:300px;">
$$\mathbf{G}_{gram} = \mathbf{A}_{unrolled} \mathbf{A}_{unrolled}^T$$
#### $G_{(gram)i,j}$: correlation
The result is a matrix of dimension $(n_C,n_C)$ where $n_C$ is the number of filters (channels). The value $G_{(gram)i,j}$ measures how similar the activations of filter $i$ are to the activations of filter $j$.
#### $G_{(gram),i,i}$: prevalence of patterns or textures
* The diagonal elements $G_{(gram)ii}$ measure how "active" a filter $i$ is.
* For example, suppose filter $i$ is detecting vertical textures in the image. Then $G_{(gram)ii}$ measures how common vertical textures are in the image as a whole.
* If $G_{(gram)ii}$ is large, this means that the image has a lot of vertical texture.
By capturing the prevalence of different types of features ($G_{(gram)ii}$), as well as how much different features occur together ($G_{(gram)ij}$), the Style matrix $G_{gram}$ measures the style of an image.
**Exercise**:
* Using TensorFlow, implement a function that computes the Gram matrix of a matrix A.
* The formula is: The gram matrix of A is $G_A = AA^T$.
* You may use these functions: [matmul](https://www.tensorflow.org/api_docs/python/tf/matmul) and [transpose](https://www.tensorflow.org/api_docs/python/tf/transpose).
```
# GRADED FUNCTION: gram_matrix

def gram_matrix(A):
    """
    Compute the Gram (style) matrix G_A = A A^T.

    Argument:
    A -- matrix of shape (n_C, n_H*n_W)

    Returns:
    GA -- Gram matrix of A, of shape (n_C, n_C)
    """
    ### START CODE HERE ### (≈1 line)
    # transpose_b=True lets tf.matmul apply the transpose internally,
    # equivalent to tf.matmul(A, tf.transpose(A)).
    GA = tf.matmul(A, A, transpose_b=True)
    ### END CODE HERE ###

    return GA
# Sanity-check gram_matrix on a small random matrix
tf.reset_default_graph()
with tf.Session() as test:
tf.set_random_seed(1)  # fixed seed so the printed value is reproducible
A = tf.random_normal([3, 2*1], mean=1, stddev=4)
GA = gram_matrix(A)
print("GA = \n" + str(GA.eval()))
```
### 3.2.2 - Style cost
Your goal will be to minimize the distance between the Gram matrix of the "style" image S and the gram matrix of the "generated" image G.
* For now, we are using only a single hidden layer $a^{[l]}$.
* The corresponding style cost for this layer is defined as:
$$J_{style}^{[l]}(S,G) = \frac{1}{4 \times {n_C}^2 \times (n_H \times n_W)^2} \sum _{i=1}^{n_C}\sum_{j=1}^{n_C}(G^{(S)}_{(gram)i,j} - G^{(G)}_{(gram)i,j})^2\tag{2} $$
* $G_{gram}^{(S)}$ Gram matrix of the "style" image.
* $G_{gram}^{(G)}$ Gram matrix of the "generated" image.
* Remember, this cost is computed using the hidden layer activations for a particular hidden layer in the network $a^{[l]}$
**Exercise**: Compute the style cost for a single layer.
**Instructions**: The 3 steps to implement this function are:
1. Retrieve dimensions from the hidden layer activations a_G:
- To retrieve dimensions from a tensor X, use: `X.get_shape().as_list()`
2. Unroll the hidden layer activations a_S and a_G into 2D matrices, as explained in the picture above (see the images in the sections "computing the content cost" and "style matrix").
- You may use [tf.transpose](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/transpose) and [tf.reshape](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/reshape).
3. Compute the Style matrix of the images S and G. (Use the function you had previously written.)
4. Compute the Style cost:
- You may find [tf.reduce_sum](https://www.tensorflow.org/api_docs/python/tf/reduce_sum), [tf.square](https://www.tensorflow.org/api_docs/python/tf/square) and [tf.subtract](https://www.tensorflow.org/api_docs/python/tf/subtract) useful.
#### Additional Hints
* Since the activation dimensions are $(m, n_H, n_W, n_C)$ whereas the desired unrolled matrix shape is $(n_C, n_H*n_W)$, the order of the filter dimension $n_C$ is changed. So `tf.transpose` can be used to change the order of the filter dimension.
* for the product $\mathbf{G}_{gram} = \mathbf{A}_{} \mathbf{A}_{}^T$, you will also need to specify the `perm` parameter for the `tf.transpose` function.
```
# GRADED FUNCTION: compute_layer_style_cost

def compute_layer_style_cost(a_S, a_G):
    """
    Computes the style cost for a single layer (equation 2).

    Arguments:
    a_S -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing style of the image S
    a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing style of the image G

    Returns:
    J_style_layer -- tensor representing a scalar value, style cost defined above by equation (2)
    """
    ### START CODE HERE ###
    # Retrieve dimensions from a_G (≈1 line)
    m, n_H, n_W, n_C = a_G.get_shape().as_list()

    # Reshape the images to have them of shape (n_C, n_H*n_W) (≈2 lines)
    a_S = tf.transpose(tf.reshape(a_S, [n_H * n_W, n_C]))
    a_G = tf.transpose(tf.reshape(a_G, [n_H * n_W, n_C]))

    # Computing gram_matrices for both images S and G (≈2 lines)
    GS = gram_matrix(a_S)
    GG = gram_matrix(a_G)

    # Computing the loss (≈1 line)
    # Normalization 1 / (4 * n_C^2 * (n_H*n_W)^2), identical in value to
    # (.5 / (n_H*n_W*n_C))**2. Use tf.subtract/tf.square instead of np.power
    # so the computation stays inside the TensorFlow graph.
    factor = 1.0 / (4 * (n_C ** 2) * ((n_H * n_W) ** 2))
    J_style_layer = factor * tf.reduce_sum(tf.square(tf.subtract(GS, GG)))
    ### END CODE HERE ###

    return J_style_layer
# Sanity-check compute_layer_style_cost on small random activations
tf.reset_default_graph()
with tf.Session() as test:
tf.set_random_seed(1)  # fixed seed so the printed value is reproducible
a_S = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
J_style_layer = compute_layer_style_cost(a_S, a_G)
print("J_style_layer = " + str(J_style_layer.eval()))
```
### 3.2.3 Style Weights
* So far you have captured the style from only one layer.
* We'll get better results if we "merge" style costs from several different layers.
* Each layer will be given weights ($\lambda^{[l]}$) that reflect how much each layer will contribute to the style.
* After completing this exercise, feel free to come back and experiment with different weights to see how it changes the generated image $G$.
* By default, we'll give each layer equal weight, and the weights add up to 1. ($\sum_{l}^L\lambda^{[l]} = 1$)
```
# (layer name, weight) pairs: style is merged from five conv layers,
# each weighted equally so the weights sum to 1
STYLE_LAYERS = [
('conv1_1', 0.2),
('conv2_1', 0.2),
('conv3_1', 0.2),
('conv4_1', 0.2),
('conv5_1', 0.2)]
```
You can combine the style costs for different layers as follows:
$$J_{style}(S,G) = \sum_{l} \lambda^{[l]} J^{[l]}_{style}(S,G)$$
where the values for $\lambda^{[l]}$ are given in `STYLE_LAYERS`.
### Exercise: compute style cost
* We've implemented a compute_style_cost(...) function.
* It calls your `compute_layer_style_cost(...)` several times, and weights their results using the values in `STYLE_LAYERS`.
* Please read over it to make sure you understand what it's doing.
#### Description of `compute_style_cost`
For each layer:
* Select the activation (the output tensor) of the current layer.
* Get the style of the style image "S" from the current layer.
* Get the style of the generated image "G" from the current layer.
* Compute the "style cost" for the current layer
* Add the weighted style cost to the overall style cost (J_style)
Once you're done with the loop:
* Return the overall style cost.
```
def compute_style_cost(model, STYLE_LAYERS):
    """Accumulate the weighted style cost over several chosen layers.

    Arguments:
    model -- the loaded VGG tensorflow model (dict: layer name -> output tensor)
    STYLE_LAYERS -- list of (layer_name, coefficient) pairs; the coefficients
                    weight each layer's contribution to the overall style cost

    Returns:
    J_style -- tensor representing a scalar value, the overall style cost
    """
    total = 0
    for name, weight in STYLE_LAYERS:
        # Output tensor of the currently selected layer
        activation = model[name]
        # Style-image activations: evaluated right now, so they are frozen
        # to the image currently assigned as the model input (the style image S).
        style_act = sess.run(activation)
        # Generated-image activations: kept symbolic (not evaluated yet).
        # When G is later assigned as the model input and the graph is run,
        # this tensor yields the activations drawn from G.
        generated_act = activation
        # Per-layer style cost, weighted into the running total
        layer_cost = compute_layer_style_cost(style_act, generated_act)
        total += weight * layer_cost
    return total
```
**Note**: In the inner-loop of the for-loop above, `a_G` is a tensor and hasn't been evaluated yet. It will be evaluated and updated at each iteration when we run the TensorFlow graph in model_nn() below.
<!--
How do you choose the coefficients for each layer? The deeper layers capture higher-level concepts, and the features in the deeper layers are less localized in the image relative to each other. So if you want the generated image to softly follow the style image, try choosing larger weights for deeper layers and smaller weights for the first layers. In contrast, if you want the generated image to strongly follow the style image, try choosing smaller weights for deeper layers and larger weights for the first layers
-->
## What you should remember
- The style of an image can be represented using the Gram matrix of a hidden layer's activations.
- We get even better results by combining this representation from multiple different layers.
- This is in contrast to the content representation, where usually using just a single hidden layer is sufficient.
- Minimizing the style cost will cause the image $G$ to follow the style of the image $S$.
### 3.3 - Defining the total cost to optimize
Finally, let's create a cost function that minimizes both the style and the content cost. The formula is:
$$J(G) = \alpha J_{content}(C,G) + \beta J_{style}(S,G)$$
**Exercise**: Implement the total cost function which includes both the content cost and the style cost.
```
# GRADED FUNCTION: total_cost
# GRADED FUNCTION: total_cost
def total_cost(J_content, J_style, alpha = 10, beta = 40):
    """Combine the content and style costs into the total cost.

    Arguments:
    J_content -- content cost coded above
    J_style -- style cost coded above
    alpha -- hyperparameter weighting the importance of the content cost
    beta -- hyperparameter weighting the importance of the style cost

    Returns:
    J -- total cost: alpha * J_content + beta * J_style
    """
    weighted_content = alpha * J_content
    weighted_style = beta * J_style
    return weighted_content + weighted_style
tf.reset_default_graph()
with tf.Session() as test:
np.random.seed(3)
J_content = np.random.randn()
J_style = np.random.randn()
J = total_cost(J_content, J_style)
print("J = " + str(J))
```
## What you should remember
- The total cost is a linear combination of the content cost $J_{content}(C,G)$ and the style cost $J_{style}(S,G)$.
- $\alpha$ and $\beta$ are hyperparameters that control the relative weighting between content and style.
## 4 - Solving the optimization problem
Finally, let's put everything together to implement Neural Style Transfer!
Here's what the program will have to do:
1. Create an Interactive Session
2. Load the content image
3. Load the style image
4. Randomly initialize the image to be generated
5. Load the VGG19 model
6. Build the TensorFlow graph:
- Run the content image through the VGG19 model and compute the content cost
- Run the style image through the VGG19 model and compute the style cost
- Compute the total cost
- Define the optimizer and the learning rate
7. Initialize the TensorFlow graph and run it for a large number of iterations, updating the generated image at every step.
Let's go through the individual steps in detail.
#### Interactive Sessions
You've previously implemented the overall cost $J(G)$. We'll now set up TensorFlow to optimize this with respect to $G$.
* To do so, your program has to reset the graph and use an "[Interactive Session](https://www.tensorflow.org/api_docs/python/tf/InteractiveSession)".
* Unlike a regular session, the "Interactive Session" installs itself as the default session to build a graph.
* This allows you to run variables without constantly needing to refer to the session object (calling "sess.run()"), which simplifies the code.
#### Start the interactive session.
```
# Reset the graph
tf.reset_default_graph()
# Start interactive session
sess = tf.InteractiveSession()
```
#### Content image
Let's load, reshape, and normalize our "content" image (the Louvre museum picture):
```
content_image = scipy.misc.imread("images/louvre_small.jpg")
content_image = reshape_and_normalize_image(content_image)
```
#### Style image
Let's load, reshape and normalize our "style" image (Claude Monet's painting):
```
style_image = scipy.misc.imread("images/monet.jpg")
style_image = reshape_and_normalize_image(style_image)
```
#### Generated image correlated with content image
Now, we initialize the "generated" image as a noisy image created from the content_image.
* The generated image is slightly correlated with the content image.
* By initializing the pixels of the generated image to be mostly noise but slightly correlated with the content image, this will help the content of the "generated" image more rapidly match the content of the "content" image.
* Feel free to look in `nst_utils.py` to see the details of `generate_noise_image(...)`; to do so, click "File-->Open..." at the upper-left corner of this Jupyter notebook.
```
generated_image = generate_noise_image(content_image)
imshow(generated_image[0]);
```
#### Load pre-trained VGG19 model
Next, as explained in part (2), let's load the VGG19 model.
```
model = load_vgg_model("pretrained-model/imagenet-vgg-verydeep-19.mat")
```
#### Content Cost
To get the program to compute the content cost, we will now assign `a_C` and `a_G` to be the appropriate hidden layer activations. We will use layer `conv4_2` to compute the content cost. The code below does the following:
1. Assign the content image to be the input to the VGG model.
2. Set a_C to be the tensor giving the hidden layer activation for layer "conv4_2".
3. Set a_G to be the tensor giving the hidden layer activation for the same layer.
4. Compute the content cost using a_C and a_G.
**Note**: At this point, a_G is a tensor and hasn't been evaluated. It will be evaluated and updated at each iteration when we run the Tensorflow graph in model_nn() below.
```
# Assign the content image to be the input of the VGG model.
sess.run(model['input'].assign(content_image))
# Select the output tensor of layer conv4_2
out = model['conv4_2']
# Set a_C to be the hidden layer activation from the layer we have selected
a_C = sess.run(out)
# Set a_G to be the hidden layer activation from same layer. Here, a_G references model['conv4_2']
# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
a_G = out
# Compute the content cost
J_content = compute_content_cost(a_C, a_G)
```
#### Style cost
```
# Assign the input of the model to be the "style" image
sess.run(model['input'].assign(style_image))
# Compute the style cost
J_style = compute_style_cost(model, STYLE_LAYERS)
```
### Exercise: total cost
* Now that you have J_content and J_style, compute the total cost J by calling `total_cost()`.
* Use `alpha = 10` and `beta = 40`.
```
### START CODE HERE ### (1 line)
J = total_cost(J_content, J_style, alpha = 10, beta = 40)
### END CODE HERE ###
```
### Optimizer
* Use the Adam optimizer to minimize the total cost `J`.
* Use a learning rate of 2.0.
* [Adam Optimizer documentation](https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer)
```
# define optimizer (1 line)
optimizer = tf.train.AdamOptimizer(2.0)
# define train_step (1 line)
train_step = optimizer.minimize(J)
```
### Exercise: implement the model
* Implement the model_nn() function.
* The function **initializes** the variables of the tensorflow graph,
* **assigns** the input image (initial generated image) as the input of the VGG19 model
* and **runs** the `train_step` tensor (it was created in the code above this function) for a large number of steps.
#### Hints
* To initialize global variables, use this:
```Python
sess.run(tf.global_variables_initializer())
```
* Run `sess.run()` to evaluate a variable.
* [assign](https://www.tensorflow.org/versions/r1.14/api_docs/python/tf/assign) can be used like this:
```python
model["input"].assign(image)
```
```
def model_nn(sess, input_image, num_iterations = 200):
    """Run the optimization loop that synthesizes the generated image.

    Arguments:
    sess -- the active tensorflow InteractiveSession
    input_image -- initial (noisy) generated image, shape (1, H, W, 3)
    num_iterations -- number of optimizer steps to run

    Returns:
    generated_image -- the final generated image

    NOTE(review): relies on the module-level graph nodes built above
    (model, train_step, J, J_content, J_style) and on save_image from
    nst_utils; they must exist before calling.
    """
    # Initialize global variables (the initializer op must be run in the session)
    sess.run(tf.global_variables_initializer())
    # Feed the noisy input image (initial generated image) into the model
    sess.run(model['input'].assign(input_image))
    for i in range(num_iterations):
        # One optimizer step on the total cost; this updates the pixels of G.
        # (Fixed: the result was previously bound to an unused, misspelled
        # variable "somthing" -- the return value is not needed.)
        sess.run(train_step)
        # Read back the current generated image from the model's input variable
        generated_image = sess.run(model['input'])
        # Report progress and snapshot the image every 20 iterations
        if i % 20 == 0:
            Jt, Jc, Js = sess.run([J, J_content, J_style])
            print("Iteration " + str(i) + " :")
            print("total cost = " + str(Jt))
            print("content cost = " + str(Jc))
            print("style cost = " + str(Js))
            # save current generated image in the "/output" directory
            save_image("output/" + str(i) + ".png", generated_image)
    # save last generated image
    save_image('output/generated_image.jpg', generated_image)
    return generated_image
```
Run the following cell to generate an artistic image. It should take about 3min on CPU for every 20 iterations but you start observing attractive results after ≈140 iterations. Neural Style Transfer is generally trained using GPUs.
```
model_nn(sess, generated_image)
```
You're done! After running this, in the upper bar of the notebook click on "File" and then "Open". Go to the "/output" directory to see all the saved images. Open "generated_image" to see the generated image! :)
You should see something like the image presented below on the right:
<img src="images/louvre_generated.png" style="width:800px;height:300px;">
We didn't want you to wait too long to see an initial result, and so had set the hyperparameters accordingly. To get the best looking results, running the optimization algorithm longer (and perhaps with a smaller learning rate) might work better. After completing and submitting this assignment, we encourage you to come back and play more with this notebook, and see if you can generate even better looking images.
Here are few other examples:
- The beautiful ruins of the ancient city of Persepolis (Iran) with the style of Van Gogh (The Starry Night)
<img src="images/perspolis_vangogh.png" style="width:750px;height:300px;">
- The tomb of Cyrus the great in Pasargadae with the style of a Ceramic Kashi from Ispahan.
<img src="images/pasargad_kashi.png" style="width:750px;height:300px;">
- A scientific study of a turbulent fluid with the style of an abstract blue fluid painting.
<img src="images/circle_abstract.png" style="width:750px;height:300px;">
## 5 - Test with your own image (Optional/Ungraded)
Finally, you can also rerun the algorithm on your own images!
To do so, go back to part 4 and change the content image and style image with your own pictures. In detail, here's what you should do:
1. Click on "File -> Open" in the upper tab of the notebook
2. Go to "/images" and upload your images (requirement: (WIDTH = 300, HEIGHT = 225)), rename them "my_content.png" and "my_style.png" for example.
3. Change the code in part (3.4) from :
```python
content_image = scipy.misc.imread("images/louvre.jpg")
style_image = scipy.misc.imread("images/claude-monet.jpg")
```
to:
```python
content_image = scipy.misc.imread("images/my_content.jpg")
style_image = scipy.misc.imread("images/my_style.jpg")
```
4. Rerun the cells (you may need to restart the Kernel in the upper tab of the notebook).
You can share your generated images with us on social media with the hashtag #deeplearniNgAI or by direct tagging!
You can also tune your hyperparameters:
- Which layers are responsible for representing the style? STYLE_LAYERS
- How many iterations do you want to run the algorithm? num_iterations
- What is the relative weighting between content and style? alpha/beta
## 6 - Conclusion
Great job on completing this assignment! You are now able to use Neural Style Transfer to generate artistic images. This is also your first time building a model in which the optimization algorithm updates the pixel values rather than the neural network's parameters. Deep learning has many different types of models and this is only one of them!
## What you should remember
- Neural Style Transfer is an algorithm that given a content image C and a style image S can generate an artistic image
- It uses representations (hidden layer activations) based on a pretrained ConvNet.
- The content cost function is computed using one hidden layer's activations.
- The style cost function for one layer is computed using the Gram matrix of that layer's activations. The overall style cost function is obtained using several hidden layers.
- Optimizing the total cost function results in synthesizing new images.
# Congratulations on finishing the course!
This was the final programming exercise of this course. Congratulations--you've finished all the programming exercises of this course on Convolutional Networks! We hope to also see you in Course 5, on Sequence models!
### References:
The Neural Style Transfer algorithm was due to Gatys et al. (2015). Harish Narayanan and Github user "log0" also have highly readable write-ups from which we drew inspiration. The pre-trained network used in this implementation is a VGG network, which is due to Simonyan and Zisserman (2015). Pre-trained weights were from the work of the MatConvNet team.
- Leon A. Gatys, Alexander S. Ecker, Matthias Bethge, (2015). [A Neural Algorithm of Artistic Style](https://arxiv.org/abs/1508.06576)
- Harish Narayanan, [Convolutional neural networks for artistic style transfer.](https://harishnarayanan.org/writing/artistic-style-transfer/)
- Log0, [TensorFlow Implementation of "A Neural Algorithm of Artistic Style".](http://www.chioka.in/tensorflow-implementation-neural-algorithm-of-artistic-style)
- Karen Simonyan and Andrew Zisserman (2015). [Very deep convolutional networks for large-scale image recognition](https://arxiv.org/pdf/1409.1556.pdf)
- [MatConvNet.](http://www.vlfeat.org/matconvnet/pretrained/)
| github_jupyter |
# DataFrames
DataFrames are the workhorse of pandas and are directly inspired by the R programming language. We can think of a DataFrame as a bunch of Series objects put together to share the same index. Let's use pandas to explore this topic!
```
import pandas as pd
import numpy as np
from numpy.random import randn
np.random.seed(101)
df = pd.DataFrame(randn(5,4),index='A B C D E'.split(),columns='W X Y Z'.split())
df
True in df.X.isnull()
df.X.isnull().any()
```
## Selection and Indexing
Let's learn the various methods to grab data from a DataFrame
```
df['W']
# Pass a list of column names
df[['W','Z']]
# SQL Syntax (NOT RECOMMENDED!)
df.W
```
``DataFrame Columns are just Series``
```
type(df['W'])
```
**Creating a new column:**
```
df['new'] = df['W'] + df['Y']
df
```
**Removing Columns**
```
df.drop('new',axis=1)
# Not inplace unless specified!
df
df.drop('new',axis=1,inplace=True)
df
```
Can also drop rows this way:
```
df.drop('E',axis=0)
```
**Selecting Rows**
```
df.loc['A']
```
Or select based off of position instead of label
```
df.iloc[2]
```
**Selecting subset of rows and columns**
```
df.loc['B','Y']
df.loc[['A','B'],['W','Y']]
```
### Conditional Selection
An important feature of pandas is conditional selection using bracket notation, very similar to numpy:
```
df
df>0
df[df>0]
df[df['W']>0]
df[df['W']>0]['Y']
df[df['W']>0][['Y','X']]
```
For two conditions you can use | and & with parenthesis:
```
df[(df['W']>0) & (df['Y'] > 1)]
```
## More Index Details
Let's discuss some more features of indexing, including resetting the index or setting it something else. We'll also talk about index hierarchy!
```
df
# Reset to default 0,1...n index
df.reset_index()
newind = 'CA NY WY OR CO'.split()
df['States'] = newind
df
df.set_index('States')
df
df.set_index('States',inplace=True)
df
```
## Multi-Index and Index Hierarchy
Let us go over how to work with Multi-Index, first we'll create a quick example of what a Multi-Indexed DataFrame would look like:
```
# Index Levels
outside = ['G1','G1','G1','G2','G2','G2']
inside = [1,2,3,1,2,3]
hier_index = list(zip(outside,inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
hier_index
df = pd.DataFrame(np.random.randn(6,2),index=hier_index,columns=['A','B'])
df
```
Now let's show how to index this! For index hierarchy we use df.loc[], if this was on the columns axis, you would just use normal bracket notation df[]. Calling one level of the index returns the sub-dataframe:
```
df.loc['G1']
df.loc['G1'].loc[1]
df.index.names
df.index.names = ['Group','Num']
df
df.xs('G1')
df.xs(['G1',1])
df.xs(1,level='Num')
```
| github_jupyter |
<h1><center>Предобработка текста</center></h1>
## Основные техники
* Уровень символов:
* Токенизация: разбиение текста на слова
* Разбиение текста на предложения
* Уровень слов – морфология:
* Разметка частей речи
* Снятие морфологической неоднозначности
* Нормализация (стемминг или лемматизация)
* Уровень предложений – синтаксис:
* Выделенние именных или глагольных групп
* Выделенние семантических ролей
* Деревья составляющих и зависимостей
* Уровень смысла – семантика и дискурс:
* Разрешение кореферентных связей
* Выделение синонимов
* Анализ аргументативных связей
## Основные проблемы
* Неоднозначность
* Лексическая неоднозначность: *орган, парить, рожки, атлас*
* Морфологическая неоднозначность: *Хранение денег в банке. Что делают белки в клетке?*
* Синтаксическая неоднозначность: *Мужу изменять нельзя. Его удивил простой солдат. Эти типы стали есть в цехе.*
* Неологизмы: *печеньки, заинстаграммить, репостнуть, расшарить, биткоины*
* Разные варианты написания: *Россия, Российская Федерация, РФ*
* Нестандартное написание (в т.ч. орфографические ошибки и опечатки): *каг дила? куптиь телфон*
<img src=pipeline.png alt="pipeline.png" style="width: 400px;"/>
### NLP-библиотеки
NLP-библиотеки для питона:
* Natural Language Toolkit (NLTK)
* Apache OpenNLP
* Stanford NLP suite
* Gate NLP library
* Spacy
* Yargy
* DeepPavlov
* CLTK (для древних языков)
* и т.д.
Самая старая и известная — NLTK. В NLTK есть не только различные инструменты для обработки текста, но и данные — текстовые корпуса, предобученные модели для анализа тональности и морфологической разметки, списки стоп-слов для разных языков и т.п.
* [Учебник по NLTK](https://www.nltk.org/book/) от авторов библиотеки и [тьюториалы](https://github.com/hb20007/hands-on-nltk-tutorial) по решению разных задач NLP с помощью NLTK.
* [Документация Spacy](https://spacy.io/)
* [Документация Yargy](https://yargy.readthedocs.io/)
* [Документация DeepPavlop](http://docs.deeppavlov.ai/)
## Предобработка текста
1. **Токенизация** — самый первый шаг при обработке текста.
2. **Нормализация** — приведение к одному регистру, удаляются пунктуации, исправление опечаток и т.д.
3.
* **Стемминг** — выделение псевдоосновы слова.
* **Лемматизация** — приведение слов к словарной ("начальной") форме.
4. **Удаление стоп-слов** — слов, которые не несут никакой смысловой нагрузки (предлоги, союзы и т.п.) Список зависит от задачи!
**Важно!** Не всегда нужны все этапы, все зависит от задачи!
## Токенизация
#### Сколько слов в этом предложении?
*На дворе трава, на траве дрова, не руби дрова на траве двора.*
* 12 токенов: На, дворе, трава, на, траве, дрова, не, руби, дрова, на, траве, двора
* 8 - 9 словоформ: Н/на, дворе, трава, траве, дрова, не, руби, двора.
* 6 лексем: на, не, двор, трава, дрова, рубить
### Токен и словоформа
**Словоформа** – уникальное слово из текста
**Токен** – словоформа и её позиция в тексте
Объем корпуса измеряется в токенах, объем словаря — в словоформах или лексемах.
### Обозначения
$N$ = число токенов
$V$ = словарь (все словоформы)
$|V|$ = количество словоформ в словаре
### Токен ≠ слово
__Рассмотрим пример:__
Продаётся LADA 4x4. ПТС 01.12.2018, куплена 20 января 19 года, 10 000 км пробега. Комплектация полная. Новая в салоне 750 000, отдам за 650 000. Возможен обмен на ВАЗ-2110 или ВАЗ 2109 с вашей доплатой.
* Модификация: 1.6 MT (89 л.с.)
* Владельцев по ПТС: 4+
* VIN или номер кузова: XTA21104*50****47
* Мультимедиа и навигация: CD/DVD/Blu-ray
* Шины и диски: 14"
Краснодар, ул. Миклухо-Маклая, д. 4/5, подъезд 1
Тел. 8(999)1234567, 8 903 987-65-43, +7 (351) 111 22 33
e-mail: ivanov.ivan-61@mail.ru
И.И. Иванов (Иван Иванович)
```
# самая банальная токенизация: разбиение по пробелам
text = '''
Продаётся LADA 4x4. ПТС 01.12.2018, куплена 20 января 19 года, 10 000 км пробега.
Комплектация полная. Новая в салоне 750 000, отдам за 650 000.
Возможен обмен на ВАЗ-2110 или ВАЗ 2109 с вашей доплатой.
Краснодар, ул. Миклухо-Маклая, д. 4/5, подьезд 1
Тел. 8(999)1234567, 8 903 987-65-43, +7 (351) 111 22 33
И.И. Иванов (Иван Иванович)
'''
tokens = text.split()
print(tokens)
len(tokens)
!pip install yargy natasha
!pip install yargy
# максимально разбивает
from yargy.tokenizer import MorphTokenizer
tknzr = MorphTokenizer()
tokens = [_.value for _ in tknzr(text)]
print(tokens)
len(tokens)
# !pip install --user nltk
import nltk
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('snowball_data')
nltk.download('perluniprops')
nltk.download('universal_tagset')
nltk.download('stopwords')
nltk.download('nonbreaking_prefixes')
nltk.download('wordnet')
from nltk.tokenize import word_tokenize, ToktokTokenizer
tokens = word_tokenize(text)
print(tokens)
len(tokens)
tknzr = ToktokTokenizer()
tokens = tknzr.tokenize(text)
print(tokens)
len(tokens)
# специальный токенизатор для твитов
from nltk.tokenize import TweetTokenizer
tknzr = TweetTokenizer()
tweet = "@remy This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--"
tknzr.tokenize(tweet)
# токенизатор на регулярных выражениях
from nltk.tokenize import RegexpTokenizer
s = "Good muffins cost $3.88 in New York. Please buy me two of them. \n\nThanks."
tknzr = RegexpTokenizer('\w+|\$[\d\.]+|\S+')
tknzr.tokenize(s)
```
В nltk вообще есть довольно много токенизаторов:
```
from nltk import tokenize
dir(tokenize)[:16]
```
Они умеют выдавать индексы начала и конца каждого токена:
```
wh_tok = tokenize.WhitespaceTokenizer()
list(wh_tok.span_tokenize("don't stop me"))
```
Некторые токенизаторы ведут себя специфично:
```
tokenize.TreebankWordTokenizer().tokenize("don't stop me")
```
## Сегментация предложений
Сегментацию предложений иногда называют **сплиттингом**.
Основные признаки — знаки препинания. "?", "!" как правило однозначны, проблемы возникают с "." Возможное решение: бинарный классификатор для сегментации предложений. Для каждой точки "." определить, является ли она концом предложения или нет.
```
from nltk.tokenize import sent_tokenize
sents = sent_tokenize(text)
print(len(sents))
sents
!pip install rusenttokenize
from rusenttokenize import ru_sent_tokenize
sents = ru_sent_tokenize(text)
print(len(sents))
sents
```
## Нормализация
### Удаление пунктуации
```
# Способ №1: remove punctuation with a regular expression
import re
# набор пунктуационных символов зависит от задачи и текста
punct = '!"#$%&()*\+,-\./:;<=>?@\[\]^_`{|}~„“«»†*\—/\-‘’'
# Bug fix: re.sub(punct, ...) treated the whole string as a single regex
# pattern (one literal sequence), so nothing was removed. Wrapping the
# escaped symbols in a character class deletes each of them individually.
clean_text = re.sub('[' + re.escape(punct) + ']', r'', text)
print(clean_text.split())
# Способ №2: strip punctuation from the edges of each token
clean_words = [w.strip(punct) for w in word_tokenize(text)]
print(clean_words)
# Bug fix: the original compared a list to a string (always False);
# compare token lists instead. The methods still differ: strip() only
# removes punctuation at token edges, not inside tokens.
clean_words == clean_text.split()
```
### Преобразование регистра
```
clean_words = [w.lower() for w in clean_words if w != '']
print(clean_words)
```
### Стоп-слова
**Стоп-слова** — высокочастотные слова, которые не дают нам никакой информации о конкретном тексте. Они составляют верхушку частотного списка в любом языке. Набор стоп-слов не универсален, он будет зависеть от вашей задачи!
В NLTK есть готовые списки стоп-слов для многих языков.
```
from nltk.corpus import stopwords
# смотрим, какие языки есть
stopwords.fileids()
sw = stopwords.words('russian')
print(sw)
print([w if w not in sw else print(w) for w in clean_words])
```
## Стемминг
**Стемминг** — отсечение от слова окончаний и суффиксов, чтобы оставшаяся часть, называемая stem, была одинаковой для всех грамматических форм слова. Стем необязательно совпадает с морфлогической основой слова. Одинаковый стем может получиться и не у однокоренных слов и наоборот — в этом проблема стемминга.
* 1-ый вид ошибки: белый, белка, белье $\implies$ бел
* 2-ой вид ошибки: трудность, трудный $\implies$ трудност, труд
* 3-ий вид ошибки: быстрый, быстрее $\implies$ быст, побыстрее $\implies$ побыст
Самый простой алгоритм, алгоритм Портера, состоит из 5 циклов команд, на каждом цикле – операция удаления / замены суффикса. Возможны вероятностные расширения алгоритма.
### Snowball stemmer
Улучшенный вариант стеммера Портера; в отличие от него умеет работать не только с английским текстом.
```
from nltk.stem.snowball import SnowballStemmer
SnowballStemmer.languages
poem = '''
По морям, играя, носится
с миноносцем миноносица.
Льнет, как будто к меду осочка,
к миноносцу миноносочка.
И конца б не довелось ему,
благодушью миноносьему.
Вдруг прожектор, вздев на нос очки,
впился в спину миноносочки.
Как взревет медноголосина:
Р-р-р-астакая миноносина!
'''
words = [w.strip(punct).lower() for w in word_tokenize(poem)]
words = [w for w in words if w not in sw and w != '']
snowball = SnowballStemmer("russian")
for w in words:
print("%s: %s" % (w, snowball.stem(w)))
```
## Морфологический анализ
Задачи морфологического анализа:
* Разбор слова — определение нормальной формы (леммы), основы (стема) и грамматических характеристик слова
* Синтез словоформы — генерация словоформы по заданным грамматическим характеристикам из леммы
Морфологический анализ — не самая сильная сторона NLTK. Для этих задач лучше использовать `pymorphy2` и `pymystem3` для русского языка и, например, `Spacy` для европейских.
## Лемматизация
**Лемматизация** — процесс приведения словоформы к лемме, т.е. нормальной (словарной) форме. Это более сложная задача, чем стемминг, но и результаты дает гораздо более осмысленные, особенно для языков с богатой морфологией.
* кошке, кошку, кошкам, кошкой $\implies$ кошка
* бежал, бежит, бегу $\implies$ бежать
* белому, белым, белыми $\implies$ белый
## POS-tagging
**Частеречная разметка**, или **POS-tagging** _(part of speech tagging)_ — определение части речи и грамматических характеристик слов в тексте (корпусе) с приписыванием им соответствующих тегов.
Для большинства слов возможно несколько разборов (т.е. несколько разных лемм, несколько разных частей речи и т.п.). Теггер генерирует все варианты, ранжирует их по вероятности и по умолчанию выдает наиболее вероятный. Выбор одного разбора из нескольких называется **снятием омонимии**, или **дизамбигуацией**.
### Наборы тегов
Существует множество наборов грамматических тегов, или тегсетов:
* НКРЯ
* Mystem
* UPenn
* OpenCorpora (его использует pymorphy2)
* Universal Dependencies
* ...
Есть даже [библиотека](https://github.com/kmike/russian-tagsets) для преобразования тегов из одной системы в другую для русского языка, `russian-tagsets`. Но важно помнить, что любое такое преобразование будет с потерями!
На данный момент стандартом является **Universal Dependencies**. Подробнее про проект можно почитать [вот тут](http://universaldependencies.org/), а про теги — [вот тут](http://universaldependencies.org/u/pos/). Вот список основных (частереных) тегов UD:
* ADJ: adjective
* ADP: adposition
* ADV: adverb
* AUX: auxiliary
* CCONJ: coordinating conjunction
* DET: determiner
* INTJ: interjection
* NOUN: noun
* NUM: numeral
* PART: particle
* PRON: pronoun
* PROPN: proper noun
* PUNCT: punctuation
* SCONJ: subordinating conjunction
* SYM: symbol
* VERB: verb
* X: other
### pymystem3
**pymystem3** — это питоновская обертка для морфологичекого анализатора Mystem, сделанного в Яндексе. Его можно скачать отдельно и использовать из консоли. Отдельный плюс Mystem - он умеет разрешать омонимию (выбирает более релевантный вариант разбора слова для данного контекста).
* [Документация Mystem](https://tech.yandex.ru/mystem/doc/index-docpage/)
* [Документация pymystem3](http://pythonhosted.org/pymystem3/)
Инициализируем Mystem c дефолтными параметрами. А вообще параметры есть такие:
* mystem_bin - путь к `mystem`, если их несколько
* grammar_info - нужна ли грамматическая информация или только леммы (по дефолту нужна)
* disambiguation - нужно ли снятие омонимии - дизамбигуация (по дефолту нужна)
* entire_input - нужно ли сохранять в выводе все (пробелы всякие, например), или можно выкинуть (по дефолту оставляется все)
Методы Mystem принимают строку, токенизатор вшит внутри. Можно, конечно, и пословно анализировать, но тогда он не сможет учитывать контекст.
mystem3 - очень быстрый - если его использовать из консоли
```
!pip3 install pymystem3
from pymystem3 import Mystem
m = Mystem()
lemmas = m.lemmatize(' '.join(words))
print(lemmas)
parsed = m.analyze(poem)
parsed[:10]
# как достать части речи
for word in parsed[:20]:
if 'analysis' in word:
gr = word['analysis'][0]['gr']
pos = gr.split('=')[0].split(',')[0]
print(word['text'], pos)
```
### pymorphy2
**pymorphy2** — это полноценный морфологический анализатор, целиком написанный на Python. В отличие от Mystem, он не учитывает контекст, а значит, вопрос разрешения омонимии надо будет решать нам самим (об этом ниже). Он также умеет ставить слова в нужную форму (спрягать и склонять).
[Документация pymorphy2](https://pymorphy2.readthedocs.io/en/latest/)
```
# ! pip install --user pymorphy2
from pymorphy2 import MorphAnalyzer
morph = MorphAnalyzer()
p = morph.parse('стали')
p
first = p[0] # первый разбор
print('Слово:', first.word)
print('Тэг:', first.tag)
print('Лемма:', first.normal_form)
print('Вероятность:', first.score)
```
Из каждого тега можно достать более дробную информацию. Если граммема есть в разборе, то вернется ее значение, если ее нет, то вернется None. [Список граммем](https://pymorphy2.readthedocs.io/en/latest/user/grammemes.html)
```
print(first.normalized) # лемма
print(first.tag.POS) # Part of Speech, часть речи
print(first.tag.animacy) # одушевленность
print(first.tag.aspect) # вид: совершенный или несовершенный
print(first.tag.case) # падеж
print(first.tag.gender) # род (мужской, женский, средний)
print(first.tag.involvement) # включенность говорящего в действие
print(first.tag.mood) # наклонение (повелительное, изъявительное)
print(first.tag.number) # число (единственное, множественное)
print(first.tag.person) # лицо (1, 2, 3)
print(first.tag.tense) # время (настоящее, прошедшее, будущее)
print(first.tag.transitivity) # переходность (переходный, непереходный)
print(first.tag.voice) # залог (действительный, страдательный)
print(first.normalized)
print(first.tag.POS)
print(first.tag.aspect)
print(first.tag.case)
```
### mystem vs. pymorphy
1) Оба они могут работать с незнакомыми словами (out-of-vocabulary words, OOV).
2) *Скорость*. Mystem работает невероятно медленно под windows на больших текстах, но очень быстро, если запускать из консоли в linux / mac os.
3) *Снятие омонимии*. Mystem умеет снимать омонимию по контексту (хотя не всегда преуспевает), pymorphy2 берет на вход одно слово и соответственно вообще не умеет дизамбигуировать по контексту:
```
homonym1 = 'За время обучения я прослушал больше сорока курсов.'
homonym2 = 'Сорока своровала блестящее украшение со стола.'
mystem_analyzer = Mystem() # инициализирую объект с дефолтными параметрами
print(mystem_analyzer.analyze(homonym1)[-5])
print(mystem_analyzer.analyze(homonym2)[0])
p = morph.parse('сорока')
p
```
### Собираем все вместе:
Сделаем стандартную предобработку данных с сайта Lenta.ru
```
# если у вас линукс / мак / collab или ещё какая-то среда, в которой работает wget, можно так:
#!wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1-6Fl4aiag30z2BKqGlOgBVsBuexTi2_y' -O lenta_ru_sample.csv
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.expand_frame_repr', False)
pd.set_option('max_colwidth', 800)
data = pd.read_csv('lenta_ru_sample.csv', usecols=['text'])
data.head()
m = pymorphy2.MorphAnalyzer()
# strip everything that is not a letter (убираем все небуквенные символы)
# NOTE: the original pattern "[А-Яа-яA-z]+" was buggy: the ASCII range A-z
# also matches the characters [ \ ] ^ _ ` , and the Cyrillic ranges miss ё/Ё.
regex = re.compile("[А-Яа-яЁёA-Za-z]+")
def words_only(text, regex=regex):
    """Lowercase *text* and return the list of alphabetic tokens.

    Non-string input (e.g. a float NaN in a pandas column) yields an
    empty list instead of raising.
    """
    try:
        return regex.findall(text.lower())
    except AttributeError:  # not a string: no .lower()
        return []
print(data.text[0])
print(*words_only(data.text[0]))
```
Декоратор @lru_cache создает для функции lemmatize кэш указанного размера, что позволяет в целом ускорить лемматизацию текста (что очень полезно, так как лемматизация - ресурсоемкий процесс).
```
from functools import lru_cache
@lru_cache(maxsize=128)
def lemmatize_word(token, pymorphy=m):
    """Return the most likely lemma (normal form) of *token*.

    Results are memoized via lru_cache, so repeated tokens are parsed only
    once; the analyzer `m` is captured as the default argument at
    definition time and becomes part of the cache key.
    """
    return pymorphy.parse(token)[0].normal_form
def lemmatize_text(text):
    """Lemmatize every token in *text* (an iterable of tokens)."""
    return list(map(lemmatize_word, text))
tokens = words_only(data.text[0])
print(lemmatize_text(tokens))
mystopwords = stopwords.words('russian')
def remove_stopwords(lemmas, stopwords=mystopwords):
    """Drop every lemma that occurs in *stopwords*."""
    stop = set(stopwords)  # O(1) membership instead of O(len(stopwords)) per lemma
    return [w for w in lemmas if w not in stop]
lemmas = lemmatize_text(tokens)
print(*remove_stopwords(lemmas))
def remove_stopwords(lemmas, stopwords=mystopwords):
    """Drop stop words and short tokens (3 characters or fewer)."""
    stop = set(stopwords)  # O(1) membership instead of O(len(stopwords)) per lemma
    return [w for w in lemmas if w not in stop and len(w) > 3]
print(*remove_stopwords(lemmas))
```
Если собрать все в одну функцию:
```
def clean_text(text):
    """Full preprocessing pipeline: tokenize -> lemmatize -> drop stop words."""
    return remove_stopwords(lemmatize_text(words_only(text)))
print(*clean_text(data.text[3]))
```
Если нужно предобработать большой объем текста, помимо кэширования может помочь распараллеливание, например, методом Pool библиотеки multiprocessing:
```
N = 200
with Pool(8) as p:
lemmas = list(tqdm(p.imap(clean_text, data['text'][:N]), total=N))
# map толькко для мультипроцессора
# берем Н элементов
data = data.head(200)
data['lemmas'] = lemmas
data.sample(3)
```
### Итого:
- посмотрели, как делать все стандартные этапы предобработки текста
- научились работать с морфологическими парсерами
| github_jupyter |
In his blog post [Embedding Matplotlib Animations in IPython Notebooks](http://jakevdp.github.io/blog/2013/05/12/embedding-matplotlib-animations/), Jake VanderPlas presents a slick hack for embedding Matplotlib Animations in IPython Notebooks, which involves writing it as a video to a [tempfile](https://docs.python.org/2/library/tempfile.html), and then re-encoding it in Base64 as a HTML5 Video.
Unfortunately (or rather fortunately), this hack has been largely rendered obsolete by the heavy development efforts dedicated to both Matplotlib and IPython Notebook ([since renamed to Jupyter Notebook](https://blog.jupyter.org/2015/08/12/first-release-of-jupyter/)) in recent years. In particular, [Matplotlib 1.5.1](http://matplotlib.org/users/whats_new.html#new-in-matplotlib-1-5) now [supports inline display of animations in the notebook](http://matplotlib.org/users/whats_new.html#display-hook-for-animations-in-the-ipython-notebook) with the `to_html5_video` method, which converts the animation to an h264 encoded video and embeds it directly in the notebook.
In this notebook, we reproduce Jake VanderPlas' blog post with this new feature.
<!-- TEASER_END -->
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation, rc
from IPython.display import HTML
# First set up the figure, the axis, and the plot element we want to animate
fig, ax = plt.subplots()
ax.set_xlim(( 0, 2))
ax.set_ylim((-2, 2))
line, = ax.plot([], [], lw=2)
# initialization function: plot the background of each frame
def init():
    """Reset the line to empty data; used as FuncAnimation's init_func."""
    line.set_data([], [])
    return (line,)  # blit=True requires returning an iterable of artists
# animation function. This is called sequentially
def animate(i):
    """Advance the sine wave by one frame (FuncAnimation callback)."""
    xs = np.linspace(0, 2, 1000)
    line.set_data(xs, np.sin(2 * np.pi * (xs - 0.01 * i)))
    return (line,)  # iterable of artists, as required when blit=True
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=100, interval=20, blit=True)
HTML(anim.to_html5_video())
```
Note that [Animation](http://matplotlib.org/api/animation_api.html#matplotlib.animation.Animation) instances now have a `_repr_html_` method. However, it returns `None` by default.
```
anim._repr_html_() is None
```
This means we won't get any sort of animation from the inline display.
```
anim
```
The method used to display is controlled by the `animation.html` rc parameter, which currently supports values of `none` and `html5`. `none` is the default, performing no display. We simply need to set it to `html5`:
```
# equivalent to rcParams['animation.html'] = 'html5'
rc('animation', html='html5')
anim
```
And that's all there is to it!
| github_jupyter |
<img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>
# CCXT - Calculate Support and Resistance
<a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/CCXT/CCXT_Calculate_Support_and_Resistance.ipynb" target="_parent"><img src="https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg"/></a>
**Tags:** #ccxt #bitcoin #trading #investors #analytics #plotly
**Author:** [Jeremy Ravenel](https://www.linkedin.com/in/ACoAAAJHE7sB5OxuKHuzguZ9L6lfDHqw--cdnJg/)
## Input
```
!pip install trendln matplotlib==3.1.3 --user
import naas
import ccxt
import pandas as pd
from datetime import datetime
import naas_drivers
import trendln
import plotly.tools as tls
import plotly.graph_objects as go
```
### Setup Binance
👉 <a href='https://www.binance.com/en/support/faq/360002502072'>How to create API ?</a>
```
binance_api = ""
binance_secret = ""
```
### Variables
```
symbol = 'BTC/USDT'
limit = 180
timeframe = '4h'
```
## Model
### Get data
```
binance = ccxt.binance({
'apiKey': binance_api,
'secret': binance_secret
})
data = binance.fetch_ohlcv(symbol=symbol,
limit=limit,
timeframe=timeframe)
```
### Data cleaning
```
df = pd.DataFrame(data, columns=["Date", "Open", "High", "Low", "Close", "Volume"])
df['Date'] = [datetime.fromtimestamp(float(time)/1000) for time in df['Date']]
df
```
## Output
### Plotting figure
```
fig = trendln.plot_support_resistance(
df[-1000:].Close, #as per h for calc_support_resistance
xformatter = None, #x-axis data formatter turning numeric indexes to display output
# e.g. ticker.FuncFormatter(func) otherwise just display numeric indexes
numbest = 1, #number of best support and best resistance lines to display
fromwindows = True, #draw numbest best from each window, otherwise draw numbest across whole range
pctbound = 0.1, # bound trend line based on this maximum percentage of the data range above the high or below the low
extmethod = trendln.METHOD_NUMDIFF,
method=trendln.METHOD_PROBHOUGH,
window=125,
errpct = 0.005,
hough_prob_iter=50,
sortError=False,
accuracy=1)
plotly_fig = tls.mpl_to_plotly(fig)
layout = dict(
dragmode="pan",
xaxis_rangeslider_visible=False,
showlegend=True,
)
new_data = list(plotly_fig.data)
new_data.pop(2)
new_data.pop(2)
new_data.pop(1)
new_data.pop(1)
fig = go.Figure(data=new_data, layout=layout)
fig
```
| github_jupyter |
```
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import re
sns.set()
pd.set_option('max_columns', 999)
df = pd.read_csv('/Users/daviderickson/projects/datasf/data/Assessor_Historical_Secured_Property_Tax_Rolls.csv')
df_flw = df[df['Property Location'].str.contains('140 MAIDEN')] # Frank Lloyd Wright VC Morris Gift Shop
display(df_flw)
# plt.bar(df_flw['Closed Roll Year'], df_flw['Assessed Improvement Value'] + df_flw['Assessed Land Value'],
# alpha=1, color='grey')
plt.bar(df_flw['Closed Roll Year'], df_flw['Assessed Land Value'], color='red')
plt.bar(df_flw['Closed Roll Year'], df_flw['Assessed Improvement Value'],
bottom=df_flw['Assessed Land Value'], color='blue')
plt.legend()
plt.ylim(0,);
plt.ylabel('VC Morris Gift Shop:\nAss_Land_Val + Ass_Impr_Val')
plt.xlabel('Year');
df.dtypes
df.shape
df.head(5)
df['Closed Roll Year'].unique()
df[df['Closed Roll Year'].isna()]
def search_st_address(string, df):
    """Return the rows of *df* whose 'Property Location' contains *string*.

    The query is upper-cased before matching, since addresses in the data
    set are stored in upper case.
    """
    pattern = string.upper()
    return df[df['Property Location'].str.contains(pattern)]
df_temp = search_st_address('127 vernon', df)
print('Missing year row is 2011: ')
sorted(df_temp['Closed Roll Year'].unique())
print('Replacing single NAN in Closed_Roll_Year')
df.loc[df['Closed Roll Year'].isna(), 'Closed Roll Year'] = 2011
df_temp = search_st_address('127 vernon', df)
print('Missing year row is 2011: ')
sorted(df_temp['Closed Roll Year'].unique())
def plot_address_valVSyear(string, df):
    """Stacked bar chart of assessed land + improvement value per roll year.

    *string* is a street-address fragment; matching rows are found with
    search_st_address. Land value (red) is the base, improvement value
    (blue) is stacked on top.
    """
    subset = search_st_address(string, df)
    years = subset['Closed Roll Year']
    land = subset['Assessed Land Value']
    plt.bar(years, land, color='red')
    plt.bar(years, subset['Assessed Improvement Value'],
            bottom=land, color='blue')
    plt.title(string)
    plt.ylim(0,)
    plt.ylabel('Ass_Land_Val + Ass_Impr_Val')
    plt.xlabel('Year')
plot_address_valVSyear('127 vernon', df)
df.dtypes
'''
NEED TO NOW SAVE THE CLEANED DATA,
THEN WRITE SCRIPTS TO IMPORT DATA PROPERLY
-Closed roll year should be dateteime or int
-Check all dtypes for clues for data cleaning
-Write scripts to automate some of the standard aspsects of cleaning
--Or get other packages (e.g. pandas-profiling) working
'''
print('Converting Closed_Roll_Year to datetime from Float ...')
# pd.to_datetime on raw floats interprets 2011.0 as *nanoseconds since the
# epoch* (-> 1970-01-01 00:00:00.000002011); parse as 4-digit years instead.
# NaNs were filled earlier, so the int cast is safe.
df['Closed Roll Year'] = pd.to_datetime(df['Closed Roll Year'].astype(int), format='%Y')
print('Done.')
df.head(3)
for col in df.columns:
uniq_dummy = df[col].unique()
print(col, uniq_dummy.shape)
df['Use Definition'].unique()
df.describe()
# Create two lists for the loop results to be placed
lat = []
lon = []
# For each row in a varible,
for row in df['the_geom']:
# Try to,
try:
# Split the row by comma, convert to float, and append
# everything before the comma to lat
lon.append(float(row.split(',')[0].split('(')[1]))
# Split the row by comma, convert to float, and append
# everything after the comma to lon
lat.append(float(row.split(',')[1].split(')')[0]))
# But if you get an error
except:
# append a missing value to lat
lat.append(np.NaN)
# append a missing value to lon
lon.append(np.NaN)
# Create two new columns from lat and lon
df['latitude'] = lat
df['longitude'] = lon
for col in df.columns:
match = re.search('Value', col)
if match:
print(col)
for col in df.columns:
match = re.search('Value', col)
if match:
fig, ax = plt.subplots(figsize=(6, 4))
df[col].hist(bins=100, log=True)
ax.set_title(col)
plt.show()
# sns.distplot(df[col].dropna(), hist_kws={'log': True})
plt.show()
df['Assessed Land Value'].describe()
df['Closed Roll Year'].describe()
df_groupby_nhood = df.groupby(by='Analysis Neighborhood').mean()
df_groupby_nhood.head()
df_yr_nhood_appval = df.pivot_table(index='Closed Roll Year', columns='Analysis Neighborhood', values='Assessed Personal Property Value', aggfunc=np.mean)
plt.subplots(figsize=(12,6))
sns.heatmap(df_yr_nhood_appval)
# Plot above
df['Property Class Code Definition'].unique()
# Boolean mask over *columns*: True where the column dtype is object.
# (The original shadowed the builtin `list` and then used df.loc[mask],
# which applies a per-column mask to ROWS and fails with a length mismatch;
# select columns with df.loc[:, mask] instead.)
obj_mask = (df.dtypes == object).to_list()
print(np.sum(obj_mask), ' with dtype=object \n')
print(df.columns[obj_mask].to_list())
df.loc[:, obj_mask]
df_exCode_nhood_appval = df.pivot_table(index='Exemption Code Definition', columns='Analysis Neighborhood', values='Assessed Personal Property Value', aggfunc=np.mean)
df_exCode_nhood_appval
plt.subplots(figsize=(13,6))
sns.heatmap(df_exCode_nhood_appval, cmap='viridis')
df_exCode_appval = df.groupby(by='Exemption Code Definition')['Assessed Personal Property Value'].mean()
print(df_exCode_appval)
fig, ax = plt.subplots(figsize=(6,4))
plt.bar(height=df_exCode_appval, x=df_exCode_appval.index)
plt.xticks(rotation=45)
ax.set_yscale('log')
df_exCode_exVal = df.groupby(by='Exemption Code Definition')['Misc Exemption Value'].mean()
print(df_exCode_exVal)
fig, ax = plt.subplots(figsize=(6,4))
plt.bar(height=df_exCode_exVal, x=df_exCode_exVal.index)
plt.xticks(rotation=45)
ax.set_yscale('log')
df_exCode_nhood_exVal = df.pivot_table(index='Exemption Code Definition', columns='Analysis Neighborhood', values='Misc Exemption Value', aggfunc=np.mean)
df_exCode_nhood_exVal
plt.subplots(figsize=(13,6))
sns.heatmap(df_exCode_nhood_exVal, cmap='viridis')
mask = (df['Exemption Code Definition'] == 'Lessor') & (df['Misc Exemption Value'] > 10**7)
# mask2 = df['Misc Exemption Value'] > 10**7
# print(mask, mask2)
# mask = mask and mask2
pd.set_option('max_columns', 999)
display(df[mask])
```
| github_jupyter |
# Explain binary classification model predictions on GPU
_**This notebook showcases how to use the interpret-community repo to help interpret and visualize predictions from a binary classification model on GPU.**_
Adapted from `explain-binary-classification-local.ipynb` notebook in the repository
## Table of Contents
1. [Introduction](#Introduction)
1. [Setup](#Setup)
1. [Project](#Project)
1. [Run model explainer locally at training time](#Explain)
1. Train a binary classification model
1. Explain the model
1. Generate global explanations
1. Generate local explanations
1. [Visualize results](#Visualize)
1. [Next steps](#Next)
<a id='Introduction'></a>
## 1. Introduction
This notebook illustrates how to locally use interpret-community to help interpret binary classification model predictions at training time. It demonstrates the API calls needed to obtain the global and local interpretations along with an interactive visualization dashboard for discovering patterns in data and explanations.
Two options using the TabularExplainer on CPU and GPU (with the `use_gpu` flag) are demonstrated:
- KernelExplainer - uses [SHAP KernelExplainer](https://shap-lrjball.readthedocs.io/en/latest/generated/shap.KernelExplainer.html#shap-kernelexplainer) on CPU
- GPUKernelExplainer - uses [cuML KernelExplainer](https://docs.rapids.ai/api/cuml/stable/api.html#cuml.explainer.KernelExplainer) for GPU Acceleration
To run the GPUKernelExplainer:
- Ensure local machine has GPU and CUDA & NVIDIA Drivers installed. For minimum version requirements visit [RAPIDS getting started](https://rapids.ai/start.html)
- Install [RAPIDS libraries](https://rapids.ai/start.html#get-rapids)
<a id='Project'></a>
## 2. Project
The goal of this project is to classify breast cancer diagnosis with scikit-learn and cuML then locally running the model explainer:
1. Train a SVM classification model using Scikit-learn
2. Run 'explain_model' globally and locally with full dataset in local mode, which doesn't contact any Azure services.
3. Visualize the global and local explanations with the visualization dashboard.
<a id='Setup'></a>
## 3. Setup
If you are using Jupyter notebooks, the extensions should be installed automatically with the package.
If you are using Jupyter Labs run the following command:
```
(myenv) $ jupyter labextension install @jupyter-widgets/jupyterlab-manager
```
<a id='Explain'></a>
## 4. Run model explainer locally at training time
```
from sklearn.datasets import load_breast_cancer
from sklearn import svm
# Explainers:
# 1. SHAP Kernel Explainer
from interpret.ext.blackbox import TabularExplainer
# cuML is a Machine Learning library within RAPDIS similar to scikit-learn
import cuml
```
### Load the breast cancer diagnosis data
```
breast_cancer_data = load_breast_cancer()
classes = breast_cancer_data.target_names.tolist()
# Split data into train and test
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(breast_cancer_data.data, breast_cancer_data.target, test_size=0.2, random_state=0)
```
### Train a SVM classification model, which you want to explain
```
# Train a cuML model
cu_clf = cuml.svm.SVC(gamma=0.001, C=100., probability=True)
model = cu_clf.fit(x_train, y_train)
# Train sklearn model
# clf = svm.SVC(gamma=0.001, C=100., probability=True)
# sk_model = clf.fit(x_train, y_train)
```
### Explain predictions on your local machine
```
# 1. Using GPU SHAP TabularExplainer with model
# To use this, the machine should have GPUs present and RAPIDS libraries installed.
# Visit https://rapids.ai for more information. This option uses cuML's SHAP implementation on GPU.
# cuML is a GPU-accelerated Machine Learning library within RAPDIS and mirrors scikit-learn's API
# cuML model with GPU SHAP proved to be the most optimal combination for speed. We noticed it yielded
# a 3.5x speed-up over sklearn model with CPU SHAP on RTX 8000. The speed-ups with larger dataset
# will be more significant.
explainer = TabularExplainer(model,
x_train,
features=breast_cancer_data.feature_names,
classes=classes,
use_gpu=True)
# 2. Using GPU SHAP TabularExplainer with sklearn model
# We can use a model from scikit-learn model for training as well. Train the sklearn model
# by uncommenting appropriate lines in the previous cell to run GPU SHAP + sklearn.
# explainer = TabularExplainer(sk_model,
# x_train,
# features=breast_cancer_data.feature_names,
# classes=classes,
# use_gpu=True)
# 3. Using CPU SHAP TabularExplainer with sklearn model
# explainer = TabularExplainer(sk_model,
# x_train,
# features=breast_cancer_data.feature_names,
# classes=classes)
```
### Generate global explanations
Explain overall model predictions (global explanation)
```
# Passing in test dataset for evaluation examples - note it must be a representative sample of the original data
# x_train can be passed as well, but with more examples explanations will take longer although they may be more accurate
global_explanation = explainer.explain_global(x_test)
# Sorted SHAP values
print('ranked global importance values: {}'.format(global_explanation.get_ranked_global_values()))
# Corresponding feature names
print('ranked global importance names: {}'.format(global_explanation.get_ranked_global_names()))
# Feature ranks (based on original order of features)
print('global importance rank: {}'.format(global_explanation.global_importance_rank))
# Per class feature names
print('ranked per class feature names: {}'.format(global_explanation.get_ranked_per_class_names()))
# Per class feature importance values
print('ranked per class feature values: {}'.format(global_explanation.get_ranked_per_class_values()))
# Print out a dictionary that holds the sorted feature importance names and values
print('global importance rank: {}'.format(global_explanation.get_feature_importance_dict()))
```
### Explain overall model predictions as a collection of local (instance-level) explanations
```
# feature shap values for all features and all data points in the training data
print('local importance values: {}'.format(global_explanation.local_importance_values))
```
### Generate local explanations
Explain local data points (individual instances)
```
# You can pass a specific data point or a group of data points to the explain_local function
# E.g., Explain the first data point in the test set
instance_num = 0
local_explanation = explainer.explain_local(x_test[instance_num,:])
# Get the prediction for the first member of the test set and explain why model made that prediction.
# NOTE: the notebook trains the cuML model as `model`; `clf` only exists if the
# commented-out sklearn cell was run, so using it here raised a NameError.
prediction_value = model.predict(x_test)[instance_num]
sorted_local_importance_values = local_explanation.get_ranked_local_values()[prediction_value]
sorted_local_importance_names = local_explanation.get_ranked_local_names()[prediction_value]
print('local importance values: {}'.format(sorted_local_importance_values))
print('local importance names: {}'.format(sorted_local_importance_names))
```
<a id='Visualize'></a>
## 5. Visualize
Load the visualization dashboard
```
from raiwidgets import ExplanationDashboard
try:
ExplanationDashboard(global_explanation, model, dataset=x_test, true_y=y_test)
except NameError as e:
# If we used sklearn model instead - show the dashboard with sk_model
ExplanationDashboard(global_explanation, sk_model, dataset=x_test, true_y=y_test)
```
## 6. Next Steps
Learn more
- [RAPIDS](https://rapids.ai/)
- [RAPIDS on Medium](https://medium.com/rapids-ai)
- [cuML on GitHub](https://github.com/rapidsai/cuml.git)
- [cuML API Reference](https://docs.rapids.ai/api/cuml/stable/api.html)
| github_jupyter |
# Active Inference: Simple Generative Model
This notebook simulates an active inference agent behaving in a random environment described by a single hidden state variable and a single observation modality. The agent uses variational inference to infer the most likely hidden states, and optimizes its policies with respect to those that minimize the expected free energy of their attendant observations.
## Import basic paths
```
import os
import sys
from pathlib import Path
path = Path(os.getcwd())
module_path = str(path.parent) + '/'
sys.path.append(module_path)
```
## Import `pymdp` module
```
import itertools
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import special
from pymdp.distributions import Categorical, Dirichlet
from pymdp import functions as F
```
## Define an auxiliary function for creating the transition likelihood
```
def create_B(Ns, Nf, controllableActionIdx):
    """
    Build the transition likelihood for each hidden state factor.

    Uncontrolled factors get a plain identity matrix (the state never
    changes). Factors listed in controllableActionIdx get a 3-D array
    B[s_next, s_prev, action] in which every action deterministically
    moves any previous state to the state with that action's index.
    """
    B = np.empty(Nf, dtype=object)
    for factor, dim in enumerate(Ns):
        B[factor] = np.eye(dim)
    for factor in controllableActionIdx:
        dim = Ns[factor]
        # B[s', s, a] = 1 iff s' == a, independent of s
        B[factor] = np.broadcast_to(np.eye(dim)[:, np.newaxis, :], (dim, dim, dim)).copy()
    return B
```
## The generative process
Here, we setup the mechanics of the environment, or the 'generative process.' To make this analogous to the generative _model_ learned by the agent, we describe these mechanics using likelihood distribution $P(o_t|s_t)$, denoted `A_GP`, and a transition distribution $P(s_t|s_{t-1},a_{t-1})$, denoted `B_GP`. The generative process will be used to generate observations `obs` via the likelihood $P(o_t|s_t)$ and is changed by actions via the likelihood $P(s_t|s_{t-1},a_{t-1})$.
```
# set up state-space and outcome-space dimensionalities of the generative process
No = [4] # dimensionality of the different outcome modalities
Ng = len(No) # total number of outcome modalities
Ns = [3] # dimensionality of the hidden state factors
Nf = len(Ns) # toatl number of hidden state factors
# Create the likelihoods and priors relevant to the generative model
A_GP = Categorical(values = np.random.rand(*(No+Ns))) # observation likelihood
A_GP.normalize()
B_GP = Categorical(values = create_B(Ns, Nf, [0])[0] ) # transition likelihood
initState_idx = np.random.randint(*Ns) # sample a random initial state
initState = np.eye(*Ns)[initState_idx] # one-hot encode it
T = 100 # number of timesteps
```
## The generative model
Here, we setup the belief structure of the active inference agent, or the 'generative model.' For this simple case, we make the generative model identical to the generative process. Namely, the agent's beliefs about the observation and likelihood distributions (respectively, the _observation model_ and _transition model_ ) are identical to the true parameters describing the environment.
```
# Generative model likelihoods
A_GM = Categorical(values = A_GP.values) # in this case, the generative model and the generative process are identical
B_GM = Categorical(values = B_GP.values) # in this case, the generative model and the generative process are identical
# Prior Dirichlet parameters (these parameterize the generative model likelihoods)
pA = Dirichlet(values = A_GM.values * 1e20) # fix prior beliefs about observation likelihood to be really high (and thus impervious to learning)
pB = Dirichlet(values = B_GP.values * 1e20) # fix prior beliefs about transition likelihood to be really high (and thus impervious to learning)
# create some arbitrary preference about observations
C = np.zeros(*No)
C[0] = -2 # prefers not to observe the outcome with index == 0
C[-1] = 2 # prefers to observe the outcome with highest index
# initialize a flat prior
prior = Categorical(values = np.ones(Ns[0])/Ns[0])
# policy related parameters
policy_horizon = 1
cntrl_fac_idx = [0] # which indices of the hidden states are controllable
Nu, possiblePolicies = F.constructNu(Ns,Nf,cntrl_fac_idx,policy_horizon)
```
# Action-Perception Loop
## Initialize history of beliefs, hidden states, and observations
```
# Set current hidden state to be the initial state sampled above
s = initState
# set up some variables to store history of actions, etc.
actions_hist = np.zeros( (Nu[0],T) )
states_hist = np.zeros( (Ns[0],T) )
obs_hist = np.zeros( (No[0],T) )
Qs_hist = np.zeros( (Ns[0],T) )
```
## Main loop over time
```
for t in range(T):
#### SAMPLE AN OBSERVATION FROM THE GENERATIVE PROCESS ######
ps = A_GP.dot(s)
obs = ps.sample()
#### INVERT GENERATIVE MODEL TO INFER MOST LIKELY HIDDEN STATE ######
Qs = F.update_posterior_states(A_GM, obs, prior, return_numpy = False)
#### INFER THE MOST LIKELY POLICIES (USING EXPECTED FREE ENERGY ASSUMPTION) #####
Q_pi,EFE = F.update_posterior_policies(Qs, A_GM, pA, B_GM, pB, C, possiblePolicies, gamma = 16.0, return_numpy=True)
#### SAMPLE AN ACTION FROM THE POSTERIOR OVER CONTROLS, AND PERTURB THE GENERATIVE PROCESS USING THE SAMPLED ACTION #####
action = F.sample_action(Q_pi, possiblePolicies, Nu, sampling_type = 'marginal_action')
s = B_GP[:,:,action[0]].dot(s)
#### STORE VARIABLES IN HISTORY ####
actions_hist[action[0],t] = 1.0
states_hist[np.where(s)[0],t] = 1.0
obs_hist[obs,t] = 1.0
Qs_hist[:,t] = Qs.values[:,0].copy()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/adasegroup/ML2021_seminars/blob/master/seminar3/seminar03-solution.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Measure quality of a classification model
This notebook explains how to measure quality of a classification machine learning model.
We provide definitions for various quality measures and try to find out if they are suitable or not for a particular machine learning classification problem.
The data is a subsample from the kaggle comptetion "Give me some credit"
https://www.kaggle.com/c/GiveMeSomeCredit#description
```
# Imports
# data processing tools: pandas and numpy
import numpy as np
import pandas as pd
# visualization tools: matplotlib, seaborn
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
%matplotlib inline
# machine learning tools: various methods from scikit-learn
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, roc_curve, precision_recall_curve, auc
from sklearn.metrics import f1_score, accuracy_score, average_precision_score
```
# Load data
```
# load the data
training_data = pd.read_csv('https://raw.githubusercontent.com/adasegroup/ML2021_seminars/master/seminar3/credit/training_data.csv')
test_data = pd.read_csv('https://raw.githubusercontent.com/adasegroup/ML2021_seminars/master/seminar3/credit/test_data.csv')
```
See some technical info about data
```
# print information about the data
training_data.info(verbose=True)
```
Let's look at some general statistics of data:
* **count** -- number of not `NaN` values;
* **mean**, **std** -- mean and standard deviation;
* other -- minimal, maximal values, quantiles.
```
training_data.describe().T
```
Choose randomly ten objects from dataset:
```
training_data.sample(10, random_state=123)
```
We see that there are `NaN`s in data. Let's calculate mean values of features on **training data** and fill them in instead of the missing values. We will do that both for **train** and **test**.
There are several ways to fill in skipped data:
* mean, median;
* regression predictions;
* in case of time series -- last known value,
* linear interpolation, etc.
If the number of skipped values is small, you can throw the corresponding objects away.
```
training_data["SeriousDlqin2yrs"].value_counts()
train_mean = training_data.mean()
train_mean
# fill NA values with mean training values
training_data.fillna(train_mean, inplace=True)
test_data.fillna(train_mean, inplace=True)
print(training_data.isnull().sum())
print(test_data.isnull().sum())
```
Compare train and test distributions
```
axes = training_data.hist(figsize=(16, 9), bins=25, alpha=0.75) # that will plot training data histograms
for plot in axes.flat: # that will draw test data on top of training histograms
column = plot.title.get_text()
if column:
test_data[column].hist(ax=plot, bins=25, alpha=0.55)
```
Pay attention to **SeriousDlqin2yrs** -- 90 days past due delinquency or worse in the last 2 years. We see that most of the borrowers pay in time.
```
# The data set is imbalanced: typically people return credits
training_data["SeriousDlqin2yrs"].value_counts()
```
# Classification algorithms
First of all, load data for learning as pairs $(X, y)$, where $X = (x_i)_{i=1}^n$ -- input features,
and $y=(y_i)_{i=1}^n$ corresponding labels.
```
training_X = training_data.drop("SeriousDlqin2yrs", axis=1)
training_y = training_data["SeriousDlqin2yrs"]
test_X = test_data.drop("SeriousDlqin2yrs", axis=1)
test_y = test_data["SeriousDlqin2yrs"]
```
Construct classification algorithms and train them.
```
# Construct Decision Tree model
decision_tree = DecisionTreeClassifier(max_depth = 5)
decision_tree.fit(training_X, training_y)
!pip install graphviz
from graphviz import Source
from sklearn import tree
Source(tree.export_graphviz(decision_tree, out_file=None, feature_names=training_X.columns))
# Construct k Nearest Neighbors model
knn = KNeighborsClassifier(n_neighbors = 5)
knn.fit(training_X, training_y)
print("Training accuracy:")
print("\tDT accuracy:\t%.2f%%" % (100 * decision_tree.score(training_X, training_y)))
print("\tkNN accuracy:\t%.2f%%" % (100 * knn.score(training_X, training_y)))
print("\tNumber of '0' labels:\t%.2f%%" % (100 - 100 * np.mean(training_y)))
print()
print("Test accuracy:")
print("\tDT accuarcy:\t%.2f%%" % (100 * decision_tree.score(test_X, test_y)))
print("\tkNN accuarcy:\t%.2f%%" % (100 * knn.score(test_X, test_y)))
print("\tNumber of '0' labels:\t%.2f%%" % (100 - 100 * np.mean(test_y)))
test_predictions_dt = decision_tree.predict(test_X)
test_probabilities_dt = decision_tree.predict_proba(test_X)[:, 1]
training_predictions_dt = decision_tree.predict(training_X)
training_probabilities_dt = decision_tree.predict_proba(training_X)[:, 1]
test_predictions_knn = knn.predict(test_X)
test_probabilities_knn = knn.predict_proba(test_X)[:, 1]
training_predictions_knn = knn.predict(training_X)
training_probabilities_knn = knn.predict_proba(training_X)[:, 1]
np.unique(test_probabilities_dt)
np.unique(training_probabilities_knn)
```
# Classification quality measures
## Confusion matrix
A confusion matrix is a table layout that allows visualization of the performance of an algorithm. Rows of this matrix correspond to actual classes of the test set, columns correspond to predicted labels. There are 4 types of elements if predictions are given:
* True Positive
* False Negative
* False Positive
* True Negative
| Variable | Predicted True | Predicted False |
| ------------- |-------------|-----|
| Actual True | TP | FN |
| Actual False | FP | TN |
```
# Confusion matrices on the test sample (rows = actual, columns = predicted),
# wrapped in DataFrames for nicer display.
confusion_dt = pd.DataFrame(confusion_matrix(test_y, test_predictions_dt))
print('Confusion for Decision Tree:')
print(confusion_dt)
confusion_knn = pd.DataFrame(confusion_matrix(test_y, test_predictions_knn))
print('Confusion for kNN:')
print(confusion_knn)
```
If we want to compare metrics on different data, we can use instead True Positive Rate and False Positive Rate:
* False Positive Rate is $\frac{FP}{FP + TN}$
* True Positive Rate is $\frac{TP}{TP + FN}$
## ROC curve
ROC stands for *Receiver Operating Characteristic*. This curve shows the True Positive Rate (**TPR**) against the False Positive Rate (**FPR**) as the classifier's discrimination threshold is varied.
Remember that classifiers are usually constructed based on some function
$f(x) \in [0, 1]$ and threshold $\tau$:
$$ \text{Classifier}\bigl(\text{object}\bigr)
= \begin{cases}
1 & \text{if}\, f(\text{object}) \geq \tau\,,\\
0 & \text{else}\,.
\end{cases}
$$
**roc_curve** function from *scikit-learn* allows to easily obtain ROC curve points and **threshold** values.
Detailed description of ROC-AUC by Alexander Dyakonov (in Russian)
https://dyakonov.org/2017/07/28/auc-roc-площадь-под-кривой-ошибок/
```
# ROC points for each classifier: parallel arrays of FPR/TPR values, one
# entry per decision threshold returned in `threshold_*`.
false_positive_rates_dt, true_positive_rates_dt, threshold_dt = roc_curve(test_y, test_probabilities_dt)
false_positive_rates_knn, true_positive_rates_knn, threshold_knn = roc_curve(test_y, test_probabilities_knn)
# create plot
fig = plt.figure(figsize=(14, 7))
# specify parameters for the first curve
plot_1 = fig.add_subplot(121,
xlabel="FPR", xlim=(-.01, 1.01),
ylabel="TPR", ylim=(-.01, 1.01), title = 'Decision Tree')
# draw the first curve
plot_1.plot(false_positive_rates_dt, true_positive_rates_dt,
color='darkorange', lw=2, label = 'ROC-curve on test')
# dotted diagonal = a random classifier (AUC = 0.5)
plot_1.plot([0, 1], [0, 1], color='navy', lw=2, linestyle=':')
# plt.legend attaches to the most recently created axes (plot_1 here)
plt.legend(loc="lower right")
# specify parameters for the second curve
plot_2 = fig.add_subplot(122,
xlabel="FPR", xlim=(-.01, 1.01),
ylabel="TPR", ylim=(-.01, 1.01), title = 'k Nearest Neighbors')
# draw the second curve
plot_2.plot(false_positive_rates_knn, true_positive_rates_knn,
color='darkorange', lw=2, label = 'ROC-curve on test')
plot_2.plot([0, 1], [0, 1], color='navy', lw=2, linestyle=':')
plt.legend(loc="lower right")
plt.show()
```
The closer **ROC** curve to the **upper left** corner, the better classification is.
Despite being a good visual representation, we usually need a number to make conclusions about classification quality. In the case of the ROC curve this number is the Area Under the Curve (**ROC-AUC**).
*scikit-learn* has a special function **auc(...)**:
```
# Area under each ROC curve via trapezoidal integration of (FPR, TPR) points.
roc_auc_dt = auc(false_positive_rates_dt, true_positive_rates_dt)
roc_auc_knn = auc(false_positive_rates_knn, true_positive_rates_knn)
for model_name, auc_value in (("DT", roc_auc_dt), ("kNN", roc_auc_knn)):
    print("%s ROC-AUC on test data:" % model_name, auc_value)
```
For the training set ROC curve and ROC-AUC look much better.
```
# ROC points and AUC on the TRAINING sample, to compare against the test
# curves and reveal overfitting (train curves sit well above test curves).
training_false_positive_rates_dt, training_true_positive_rates_dt, _ = roc_curve(training_y, training_probabilities_dt)
training_false_positive_rates_knn, training_true_positive_rates_knn, _ = roc_curve(training_y, training_probabilities_knn)
training_roc_auc_dt = auc(training_false_positive_rates_dt, training_true_positive_rates_dt)
training_roc_auc_knn = auc(training_false_positive_rates_knn, training_true_positive_rates_knn)
print("DT ROC-AUC on training data:", training_roc_auc_dt)
print("kNN ROC-AUC on training data:", training_roc_auc_knn)
# Side-by-side panels: train curve (green) over test curve (orange).
fig = plt.figure(figsize=(14, 7))
plot_1 = fig.add_subplot(121,
xlabel="FPR", xlim=(-.01, 1.01),
ylabel="TPR", ylim=(-.01, 1.01), title = 'Decision Tree')
# draw the first curve
plot_1.plot(training_false_positive_rates_dt, training_true_positive_rates_dt,
color='darkgreen', lw=2, label = 'ROC-curve on train (AUC = %0.2f)' % training_roc_auc_dt)
plot_1.plot(false_positive_rates_dt, true_positive_rates_dt,
color='darkorange', lw=2, label = 'ROC-curve on test (AUC = %0.2f)' % roc_auc_dt)
# dotted diagonal = a random classifier (AUC = 0.5)
plot_1.plot([0, 1], [0, 1], color='navy', lw=2, linestyle=':')
plt.legend(loc="lower right")
# specify parameters for the second curve
plot_2 = fig.add_subplot(122,
xlabel="FPR", xlim=(-.01, 1.01),
ylabel="TPR", ylim=(-.01, 1.01), title = 'k Nearest Neighbors')
# draw the second curve
plot_2.plot(training_false_positive_rates_knn, training_true_positive_rates_knn,
color='darkgreen', lw=2, label = 'ROC-curve on train (AUC = %0.2f)' % training_roc_auc_knn)
plot_2.plot(false_positive_rates_knn, true_positive_rates_knn,
color='darkorange', lw=2, label = 'ROC-curve on test (AUC = %0.2f)' % roc_auc_knn)
plot_2.plot([0, 1], [0, 1], color='navy', lw=2, linestyle=':')
plt.legend(loc="lower right")
plt.show()
```
Another ROC-AUC visualization http://www.navan.name/roc/
Area under ROC-curve = probability of pairs of objects from different classes being classified correctly.

$a_i$ -- prediction at the $i$-th object, $y_i$ -- target (class), $q$ -- number of objects in the test set
## Precision and Recall
Precision and Recall are two other measures for evaluation of classification quality. Both of the metrics are calculated based on **confusion matrix**.
<img src="https://github.com/adasegroup/ML2021_seminars/blob/master/seminar3/figures/precision_recall.png?raw=1">
Note that Recall equals to True Positive Rate.
Although "accuracy" and "precision" have very similar meanings, they are completely different metrics. Look how Precision and Recall are evaluated for k Nearest Neighbors classifier:
```
# Unpack the kNN confusion matrix: row index = actual class,
# column index = predicted class, so row 0 is (TN, FP), row 1 is (FN, TP).
confusion = confusion_matrix(test_y, test_predictions_knn)
(TN, FP), (FN, TP) = confusion
```
**Recall** of a classifier is equal to True Positive Rate **TPR** ($\frac{TP}{TP + FN}$). This value may be interpreted as a sensitivity of a classifier to the objects with label `1`. If it is close to $100\%$, then a classifier rarely "miss" the object of class `1`.
```
# Recall (= TPR): fraction of actual positives that the classifier catches.
recall = TP / (TP + FN)
print("Recall: %.2f%%" % (recall * 100))
```
**Precision** -- is a fraction $\frac{TP}{TP + FP}$. If this value is large, then a classifier assigns label `1` to objects with actual label `0` rarely.
See how it is different to Accuracy = $\frac{TP + TN}{TP + TN + FP + FN}$
```
# Precision: fraction of predicted positives that are actually positive.
precision = TP / (TP + FP)
print("Precision: %.2f%%" % (precision * 100))
```
A classifier with large Recall but small Precision produces many false positive predictions and tends to assign many `1` labels.
Vice versa, if a classifier has small Recall but large Precision, then it detects class `1` accurately, but misses many objects (many false negative predictions).
### Precision-Recall curve
In **precision-recall** space we may construct a curve similar to the **ROC** curve in **FPR-TPR** space. The PR curve also depicts the dependency of Precision and Recall on the threshold. *scikit* has the corresponding function: **precision_recall_curve(...)**.
Let's calculate PR curve points.
Note that unlike ROC curve, we cannot use interpolation for calculation of area under the curve. This may lead to larger values of the metric, which is not good. In this case we need to use **average_precision_score()** function instead of **auc()** function.
```
# generate values for the test Precision-Recall curves
precision_dt, recall_dt, _ = precision_recall_curve(test_y, test_probabilities_dt)
precision_knn, recall_knn, _ = precision_recall_curve(test_y, test_probabilities_knn)
# area under the PR curve via average precision (no interpolation)
pr_auc_dt = average_precision_score(test_y, test_probabilities_dt)
pr_auc_knn = average_precision_score(test_y, test_probabilities_knn)
print("DT PR-AUC on test data:", pr_auc_dt)
print("kNN PR-AUC on test data:", pr_auc_knn)
# generate values for the training Precision-Recall curves
training_precision_dt, training_recall_dt, _ = precision_recall_curve(training_y, training_probabilities_dt)
training_precision_knn, training_recall_knn, _ = precision_recall_curve(training_y, training_probabilities_knn)
# area under the training PR curves, again via average precision
training_pr_auc_dt = average_precision_score(training_y, training_probabilities_dt)
training_pr_auc_knn = average_precision_score(training_y, training_probabilities_knn)
print("DT PR-AUC on training data:", training_pr_auc_dt)
print("kNN PR-AUC on training data:", training_pr_auc_knn)
# Side-by-side panels: train curve (green) over test curve (orange).
fig = plt.figure(figsize=(14, 7))
plot_1 = fig.add_subplot(121,
xlabel="Recall", xlim=(-.01, 1.01),
ylabel="Precision", ylim=(-.01, 1.01), title = 'Decision Tree')
plot_1.plot(training_recall_dt, training_precision_dt,
color='darkgreen', lw=2, label = 'PR-curve on train (AUC = %0.2f)' % training_pr_auc_dt)
plot_1.plot(recall_dt, precision_dt,
color='darkorange', lw=2, label = 'PR-curve on test (AUC = %0.2f)' % pr_auc_dt)
# plt.legend attaches to the most recently created axes
plt.legend(loc="upper right")
plot_2 = fig.add_subplot(122,
xlabel="Recall", xlim=(-.01, 1.01),
ylabel="Precision", ylim=(-.01, 1.01), title = 'k Nearest Neighbors')
plot_2.plot(training_recall_knn, training_precision_knn,
color='darkgreen', lw=2, label = 'PR-curve on train (AUC = %0.2f)' % training_pr_auc_knn)
plot_2.plot(recall_knn, precision_knn,
color='darkorange', lw=2, label = 'PR-curve on test (AUC = %0.2f)' % pr_auc_knn)
plt.legend(loc="upper right")
plt.show()
```
The closer **PR** curve to the **upper right** corner, the better classification is.
Large AUC value means that Precision and Recall are also large. That means that classifier makes small number of both False Positives and False Negatives.
## F1 score
This metric allows to take into account a different cost for False Positive Errors and False Negative Errors.
General $F_\beta$ score is defined as follows:
$$
F_\beta = (1 + \beta^2) \frac{Precision \cdot Recall}{\beta^2 Precision + Recall} = \frac{1 + \beta^2}{\frac{\beta^2}{Recall} + \frac{1}{Precision}}= \frac{\beta + \beta^{-1}}{\beta\frac{1}{\text{Recall}} + \beta^{-1}\frac{1}{\text{Precision}}}
\,.
$$
Most commonly used is $F_1$ score:
$$
F_1 = 2 \frac{Precision \cdot Recall}{Precision + Recall}
$$
Harmonic mean is used in order to make metric value very small when Precision or Recall is close to zero. Note that $F_1$ score doesn't describe how classifier works for True Negative results (**TN**).
```
# F1 for both models on both samples (train first, then test).
for sample_name, y_true, dt_pred, knn_pred in (
        ("training", training_y, training_predictions_dt, training_predictions_knn),
        ("test", test_y, test_predictions_dt, test_predictions_knn)):
    print("DT F1 score on %s data" % sample_name, f1_score(y_true, dt_pred))
    print("kNN F1 score on %s data" % sample_name, f1_score(y_true, knn_pred))
```
$F_1$ score is good for imbalanced classification, when the number of class `1` objects is **much** smaller than the number of class `0` objects.
Let's compare **accuracy** and $F_1$ score of our classifiers with *random* classifier, which works as follows:
* estimate probability $\hat{p}$ of class `1` on training data (frequency of class `1` objects);
* for every test object predict randomly:
* label `1` with probability $\hat{p}$,
* label `0` with probability $1 - \hat{p}$.
```
# Empirical probability of class 1 on the training sample.
training_prob = sum(training_y) / len(training_y)
# Random baseline: predict label 1 with that probability, independently per object.
random_predictions = np.random.binomial(1, training_prob, len(test_y))
print("Decision Tree accuracy\t\t", accuracy_score(test_y, test_predictions_dt))
print("kNN accuracy\t\t\t", accuracy_score(test_y, test_predictions_knn))
print("Random classifier accuracy\t", accuracy_score(test_y, random_predictions))
print('---')
print("Decision Tree F1 score\t\t", f1_score(test_y, test_predictions_dt))
print("kNN F1 score\t\t\t", f1_score(test_y, test_predictions_knn))
# fixed label typo: "FRandom" -> "Random"
print("Random classifier F1 score\t", f1_score(test_y, random_predictions))
```
# Exercise 1
We have seen how some of classifiers work for this dataset. Now, try it yourself with Logistic Regression.
* First, **import** the **LogisticRegression()** class and train it on training data.
* Then, calculate **ROC AUC**, **PR AUC** and **F1 score** on test data.
* Try to change parameters to improve results.
http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
```
from sklearn.linear_model import LogisticRegression

# Fit an L2-regularized logistic regression (C = 100 -> weak regularization).
logistic_regression = LogisticRegression(penalty='l2', C=100.0, max_iter=1000)
logistic_regression.fit(training_X, training_y)

# Hard predictions and class-1 probabilities on the test sample.
test_predictions = logistic_regression.predict(test_X)
test_probabilities = logistic_regression.predict_proba(test_X)[:, 1]

# ROC AUC
false_positive_rates, true_positive_rates, threshold = roc_curve(test_y, test_probabilities)
roc_auc = auc(false_positive_rates, true_positive_rates)
print(roc_auc)

# PR AUC (average precision)
precision, recall, _ = precision_recall_curve(test_y, test_probabilities)
pr_auc = average_precision_score(test_y, test_probabilities)
print(pr_auc)

# F1 at the default 0.5 threshold
print(f1_score(test_y, test_predictions))
```
# Cross-validation technique
In many cases test sample is not available or we have a small dataset, and we have only one sample: a training one. The most popular approach in this case is **cross-validation**.
The most common way is $k$-fold cross-validation. The idea is to divide training sample into $k$ blocks, one of them is treated as an artificial test sample and other $k-1$ are used for training.
*scikit* has several functions for dividing data into folds and for performing automated cross-validation. One of those functions is **GridSearchCV()**.
<img src="https://github.com/adasegroup/ML2021_seminars/blob/master/seminar3/figures/5-fold-cv.png?raw=1">
```
from sklearn.model_selection import GridSearchCV

# Grid-search the neighbor count (default CV splitting and scoring).
parameters_knn = {'n_neighbors': [5, 10, 15, 20]}
knn_cv = GridSearchCV(knn, param_grid=parameters_knn)
knn_cv.fit(training_X, training_y)
knn_cv.best_params_

# Score the refit best estimator on the held-out test sample.
predictions_knn_cv = knn_cv.predict(test_X)
probabilities_knn_cv = knn_cv.predict_proba(test_X)[:, 1]

false_positive_rates_knn_cv, true_positive_rates_knn_cv, _ = roc_curve(test_y, probabilities_knn_cv)
roc_auc_knn_cv = auc(false_positive_rates_knn_cv, true_positive_rates_knn_cv)
precision_knn_cv, recall_knn_cv, _ = precision_recall_curve(test_y, probabilities_knn_cv)
pr_auc_knn_cv = average_precision_score(test_y, probabilities_knn_cv)
f1_knn_cv = f1_score(test_y, predictions_knn_cv)

print('ROC AUC: ', roc_auc_knn_cv)
print('PR AUC: ', pr_auc_knn_cv)
print('F1_score: ', f1_knn_cv)
pd.DataFrame(confusion_matrix(test_y, predictions_knn_cv))
```
# Exercise 2
Now we know how to perform cross-validation. Try it yourself with Decision Tree.
* Using **GridSearchCV** choose parameter **min_samples_leaf**. Try several values from 1 to 100.
* Use **five**-fold cross-validation and **roc_auc** scoring. See the chosen parameters.
* Evaluate quality metrics and look how they changed. Try to make some plots.
http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html
http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html
HINT https://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html
```
from sklearn.metrics import roc_auc_score, make_scorer

# Search min_samples_leaf over powers of two, 5-fold CV, ROC-AUC scoring.
# NOTE(review): make_scorer(roc_auc_score) scores the *hard* predictions;
# passing scoring='roc_auc' would use probabilities instead -- confirm
# which is intended before comparing AUC values across cells.
parameters_dt = {'min_samples_leaf': [1, 2, 4, 8, 16, 32, 64, 128]}
clf = DecisionTreeClassifier()
dt_cv = GridSearchCV(clf, param_grid=parameters_dt, scoring=make_scorer(roc_auc_score), cv=5)
dt_cv.fit(training_X, training_y)
dt_cv.best_params_
dt_cv.best_score_
dt_cv.cv_results_

# Plot mean CV score against the searched parameter value.
fig, ax = plt.subplots(1, 1)
# BUG FIX: draw on the axes directly; `plt.plot(..., axes=ax)` does not
# select the target axes -- the keyword is just forwarded to the Line2D
# artist, so the line could end up on the wrong (current) axes.
ax.plot(dt_cv.cv_results_['param_min_samples_leaf'].data, dt_cv.cv_results_['mean_test_score'])
ax.set_xlabel('min_samples_leaf')
ax.set_ylabel('ROC AUC')
```
# Multiclass classification
Let's have a look how multiclass tasks are treated.
```
# import some modules
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.base import clone
from sklearn.linear_model import LogisticRegression
import zipfile
```
## Load data
We will use data from Kaggle contest *"Otto Group
Product Classification Challenge"*, which was created to predict class of an item by several features.
https://www.kaggle.com/c/otto-group-product-classification-challenge
Data are in ZIP, but we can load them easily
```
# Pull the Otto train/test CSVs straight from the course repository.
otto_base_url = 'https://raw.githubusercontent.com/adasegroup/ML2021_seminars/master/seminar3/otto/'
train_dataset = pd.read_csv(otto_base_url + 'train.csv', index_col='id')
test_dataset = pd.read_csv(otto_base_url + 'test.cutted.csv', index_col='id')
```
Data consist of the following:
* **id** -- anonymized identifier;
* **feat_1, ..., feat_93** -- features;
* **target** -- actual class of an item.
Number of objects for every class in **target**
```
# Class frequencies of the target, then split into labels and features.
train_dataset['target'].value_counts()
y = train_dataset["target"]
X = train_dataset.drop("target", axis=1).to_numpy()
```
Let's see data description
```
train_dataset.describe().T
```
Divide data into input and output, transform labels from strings to numbers. **LabelEncoder** allows us to perform that transformation and obtain numbers from $0$ to $K-1$, where $K$ is the number of classes.
```
# XGBoost multiclass model: 'multi:softprob' outputs one probability per class.
import xgboost
xgb = xgboost.XGBClassifier(objective='multi:softprob')
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
```
Split data into training sample and test sample
```
# Stratified 50/50 split keeps the class proportions equal in both halves.
train_X, test_X, train_y, test_y = train_test_split(
    X, y, test_size=0.5, random_state=42, stratify=y)
# Fit and evaluate on the held-out half.
xgb.fit(train_X, train_y)
test_preds = xgb.predict(test_X)
accuracy_score(test_y, test_preds)
confusion_matrix(test_y, test_preds)
print(classification_report(test_y, test_preds))
```
| github_jupyter |
## Modeling with SWAT in R
SWAT GitHub repository: https://github.com/sassoftware/R-swat
Action sets: https://go.documentation.sas.com/?docsetId=allprodsactions&docsetTarget=actionSetsByName.htm&docsetVersion=3.5&locale=en
Documentation: https://developer.sas.com/apis/swat/r/v1.3.0/R-swat.pdf
```
#install.packages('https://github.com/sassoftware/R-swat/releases/download/v1.5.0/R-swat-1.5.0-linux64.tar.gz',
# repos=NULL, type='file')
#install.packages('https://github.com/sassoftware/R-swat/releases/download/v1.5.0/R-swat-1.5.0-win64.tar.gz',
# repos=NULL, type='file')
# Load necessary packages
library('swat')
library('ggplot2')
library('reshape2')
#options(cas.print.messages = FALSE)
#.authinfo file
#default user <USER> password <PASSWORD>
getwd()
conn <- CAS('sasserver.com',
port=8777, protocol = "http",
caslib = 'casuser',
authinfo = './.authinfo')
#conn <- CAS('sasserver.com',
# port=8777, protocol = "http",
# caslib = 'casuser',
# username = 'username', password = 'password')
test <- cas.table.tableInfo(conn, caslib = 'casuser')
test$TableInfo
cas.table.caslibInfo(conn)
## Load the CAS action sets used below (sampling, trees, neural nets, assessment).
actionsets <- c('sampling', 'decisionTree', 'neuralNet', 'percentile')
for(i in actionsets){
loadActionSet(conn, i)
}
# Upload the HMEQ data set into CAS as an in-memory table.
castbl <- cas.read.csv(conn, './data/hmeq.csv')
cas.table.tableInfo(conn)
head(castbl)
class(castbl)
## no va a funcionar
summary(castbl)
table <- cas.simple.summary(castbl)
table
table$Summary[c(1:3),c('Min', 'Max')]
# Trayendo los datos para maquina local
df <- to.casDataFrame(castbl, obs = nrow(castbl))
head(df)
# Formatacion de datos
d <- melt(df[sapply(df, is.numeric)], id.vars=NULL)
head(d)
tail(d)
options(repr.plot.width=12, repr.plot.height=7)
ggplot(d, aes(x = value)) +
geom_histogram(fill = 'blue', bins = 25) +
facet_wrap(~variable,
scales = 'free_x')
cas.table.promote(conn, name = 'hmeq')
tableInfo <- cas.table.tableInfo(conn, caslib = "casuser")
# Mirando los datos faltantes de todas las variables
tbl <- cas.simple.distinct(castbl)
tbl$Distinct
tbl <- tbl$Distinct
tbl
# cogiendo los datos faltantes/missing
cas.nmiss(castbl)
# Visualizacion de missing
tbl$PctMiss <- tbl$NMiss/nrow(castbl)
ggplot(tbl, aes(Column, PctMiss)) +
geom_col(fill = 'blue') +
ggtitle('Pct Missing Values') +
theme(plot.title = element_text(hjust = 0.5))
colnames(castbl)[-1]
# Impute missing values server-side: median for numeric columns, mode for
# nominal ones; imputed IMP_* copies are written to CAS table 'hmeq'.
# (Column 1 -- the target -- is excluded from the inputs.)
cas.dataPreprocess.impute(castbl,
methodContinuous = 'MEDIAN',
methodNominal = 'MODE',
inputs = colnames(castbl)[-1],
copyAllVars = TRUE,
casOut = list(name = 'hmeq',
replace = TRUE)
)
# Particionamento de daros
cas.sampling.srs(conn,
table = 'hmeq',
samppct = 30,
partind = TRUE,
output = list(casOut = list(name = 'hmeq', replace = TRUE),
copyVars = 'ALL')
)
hmeq1 <- defCasTable(conn, 'hmeq')
head(hmeq1)
indata <- 'hmeq'
# Cogiendo informacion de las variables
colinfo <- head(cas.table.columnInfo(conn, table = indata)$ColumnInfo, -1)
head(colinfo)
# Variable target
target <- colinfo$Column[1]
target
# Separacion para modelos que saben utilizar missing
inputs <- colinfo$Column[-1]
nominals <- c(target, subset(colinfo, Type == 'varchar')$Column)
# Separacaion para modelos que no saben utilizar missing
imp.inputs <- grep('IMP_', inputs, value = T)
imp.nominals <- c(target, grep('IMP_', nominals, value = T))
# Train the models (all on the training partition, _PartInd_ = 0)
## Decision tree, with variable-importance output
cas.decisionTree.dtreeTrain(conn,
table = list(name = indata, where = '_PartInd_ = 0'),
target = target,
inputs = inputs,
nominals = nominals,
varImp = TRUE,
casOut = list(name = 'dt_model', replace = TRUE)
)
### Score a single model on the whole table
cas.decisionTree.dtreeScore(
object = hmeq1,
modelTable = list(name = 'dt_model'),
copyVars = list(target, '_PartInd_'),
assessonerow = TRUE,
casOut = list(name = 'dt_scored', replace = T)
)
dt_scores <- defCasTable(conn, 'dt_scored')
head(dt_scores)
## Random forest
cas.decisionTree.forestTrain(conn,
table = list(name = indata, where = '_PartInd_ = 0'),
target = target,
inputs = inputs,
nominals = nominals,
casOut = list(name = 'rf_model', replace = TRUE)
)
## Gradient boosting
cas.decisionTree.gbtreeTrain(conn,
table = list(name = indata, where = '_PartInd_ = 0'),
target = target,
inputs = inputs,
nominals = nominals,
casOut = list(name = 'gbt_model', replace = TRUE)
)
## Neural network (uses the imputed inputs; one hidden layer of 7 units)
cas.neuralNet.annTrain(conn,
table = list(name = indata, where = '_PartInd_ = 0'),
target = target,
inputs = imp.inputs,
hidden = 7,
nominals = imp.nominals,
casOut = list(name = 'nn_model', replace = TRUE)
)
models <- c('dt','rf','gbt','nn')
scores <- c(cas.decisionTree.dtreeScore, cas.decisionTree.forestScore,
cas.decisionTree.gbtreeScore, cas.neuralNet.annScore)
names(scores) <- models
# Helper that builds the argument list for scoring one trained model on the
# full table; predictions are written to the CAS table '<model>_scored'.
score.params <- function(model){return(
list(
object = defCasTable(conn, indata),
modelTable = list(name = paste0(model, '_model')),
copyVars = list(target, '_PartInd_'),
assessonerow = TRUE,
casOut = list(name = paste0(model, '_scored'), replace = T)
)
)
}
all_scores <- lapply(models,
function(x) {do.call(scores[[x]],
score.params(x))}
)
all_scores
# Carga el actionset para assessment
# loadActionSet(conn, 'percentile')
## assesment de um único modelo
asses <- cas.percentile.assess(conn,
table = list(name = paste0('dt_scored'),
where = '_PartInd_ = 1'),
inputs = paste0('_dt_P_ 1'),
response = target,
event = '1')
head(asses$LIFTInfo)
head(asses$ROCInfo)
# Compute lift/ROC assessment for one model's scored table, restricted to
# the validation partition (_PartInd_ = 1).
# NOTE(review): the probability column name '_<model>_P_ 1' contains a
# space -- presumably matching the column generated by the score actions;
# verify against the scored table if assessment fails.
assess.model <- function(model){
cas.percentile.assess(conn,
table = list(name = paste0(model,'_scored'),
where = '_PartInd_ = 1'),
inputs = paste0('_', model, '_P_ 1'),
response = target,
event = '1')
}
model.names <- c('Decision Tree', 'Random Forest',
'Gradient Boosting', 'Neural Network')
# Collect the ROC information of every model into one long data frame,
# tagging each chunk with a human-readable model name.
roc.df <- data.frame()
for (i in seq_along(models)){
roc.info <- assess.model(models[i])$ROCInfo
roc.info$Model <- model.names[i]
roc.df <- rbind(roc.df, roc.info)
}
# Manipulacion del data.frame
compare <- subset(roc.df, round(roc.df$CutOff, 2) == 0.5)
rownames(compare) <- NULL
compare[,c('Model','TP','FP','FN','TN')]
# Creando dataframe para comparar missclassification
compare$Misclassification <- 1 - compare$ACC
miss <- compare[order(compare$Misclassification), c('Model','Misclassification')]
rownames(miss) <- NULL
miss
# Add nueva coluna para ser utilizada como label da curva Roc
roc.df$Models <- paste(roc.df$Model, round(roc.df$C, 3), sep = ' - ')
# Cria curva ROC
options(repr.plot.width=14, repr.plot.height=6)
plot <- ggplot(data = roc.df[c('FPR', 'Sensitivity', 'Models', "CutOff")],
aes(x = FPR, y = Sensitivity, colour = Models, cutoff = CutOff)) +
geom_line(size =1.2) +
labs(x = 'False Positive Rate', y = 'True Positive Rate')
plot
library('magrittr')
p <- plotly::ggplotly(plot) %>% plotly::layout(plot, hovermode = "x")
plotly::embed_notebook(p)
# Finalizar session
cas.session.endSession(conn)
```
| github_jupyter |
## Predicting synthesizability of arbitrary crystal structures and compositions
This notebook shows how to:
* Access material structures from the Materials Project (MP) using the Materials API (MAPI) or figshare.
* Use pre-trained models to predict synthesizability of materials from either 1) Materials Project ID; 2) crystal composition; or 3) crystal structure.
You will need a [Materials Project API key](https://materialsproject.org/open) to use the features shown in this notebook.
```
%load_ext autoreload
%autoreload 2
import pandas as pd
import numpy as np
from datetime import datetime
from monty.serialization import loadfn, dumpfn
from pymatgen.ext.matproj import MPRester
from pymatgen.core import Structure, Lattice
from pumml.pupredict import PUPredict
```
### Accessing MP data
You can access all MP structures (as of 04-24-2020) directly from figshare: https://figshare.com/account/home#/collections/4952793.
However, the MP is constantly being updated and new structures are added. It is highly recommended that you use the MAPI to pull structure data that you are interested in. Get an API key for the Materials Project [here](https://materialsproject.org/open).
This code shows how to apply some criteria (e.g., ignore compounds with f-block elements), get MP IDs (which does not take much time), and then download structures in chunks (time-consuming).
```
# Treat materials with f-block electrons separately.
fblock = ['Ce', 'Pr', 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er',
          'Tm', 'Yb', 'Lu', 'Th', 'Pa', 'U', 'Np', 'Pu', 'Am', 'Cm', 'Bk',
          'Cf', 'Es', 'Fm', 'Md', 'No', 'Lr']
# Query criteria: exclude any compound containing an f-block element.
criteria = {"elements": {"$nin": fblock}}
# https://wiki.materialsproject.org/The_Materials_API
# (removed unused `mpids = []` -- the result list is `mp_ids` below)
with MPRester() as m:  # include api key as argument or configure with pmg command line
    mp_ids = m.query(criteria, ["material_id"], chunk_size=0)
# Tag with date collected
today = datetime.today().strftime('%Y-%m-%d')
mp_ids = [mpid['material_id'] for mpid in mp_ids]
dumpfn(mp_ids, "mp_ids_%s.json" % (today))
mp_ids = loadfn('mp_ids_%s.json' % (today))
```
The sublists contain MP IDs in chunks of 1000.
```
# Download structures in chunks of 1000 IDs to keep each MAPI request small.
chunk_size = 1000
sublists = [mp_ids[i:i + chunk_size] for i in range(0, len(mp_ids), chunk_size)]

# MPRester.supported_properties
properties = ['energy_per_atom', 'formation_energy_per_atom',
              'e_above_hull', 'icsd_ids',
              'material_id', 'structure']

data = []
# Query each chunk of material IDs and accumulate the results.
with MPRester() as m:  # use api_key arg or set up with pmg command line tool
    for sublist in sublists:
        data.extend(m.query({"material_id": {"$in": sublist}}, properties=properties))
dumpfn(data, "mp_fblock_%s.json" % (today))
```
### Access a small sample dataset
We want to be responsible users of the MAPI, so to test out pumml models we can work with small MP datasets that are already downloaded.
You can download a small example dataset of 500 structures [here](https://figshare.com/articles/500_example_structures_from_Materials_Project/12252962).
```
data = loadfn('mp_example_dataset_042420.json') # json file must be in same directory as this notebook
```
Materials Project data is a really useful source for training models, but what if we are interested in the synthesizability of a particular theoretical compound? We have pre-trained PUMML models on large subsets of the Materials Project to enable quick predictions.
### Predict synthesizability of theoretical compounds from MP IDs
We can use the `pumml.pupredict.PUPredict` class to generate synthesizability scores directly for compounds from the Materials Project. Information related to the predictions and Materials Project API access is logged to a file called `output.log`. When you create an instance of `PUPredict`, the pre-trained models and data will be downloaded to your local machine.
```
api_key = '<api_key>' # fill this in with your key
# Creating the predictor downloads the pre-trained models and data locally.
pup = PUPredict(api_key)
print(pup.synth_score_from_mpid('mp-1213718')) # theoretical Cs2TbO3
print(pup.synth_score_from_mpid('mp-771359')) # theoretical Cu2O3
```
The outputs represent the synthesizability scores of the theoretical compounds.
### Predict synthesizability by chemical formula
We can also predict synthesizability for a crystal composition. If there are multiple crystal structures with the same composition, synthesizability scores for each will be predicted.
```
# One synthesizability score per MP structure matching each composition.
pup.synth_score_from_formula('Ba2Yb2Al4Si2N10O4')
pup.synth_score_from_formula('Na1Mg1')
```
### Predict synthesizability by crystal structure
Finally, we can predict synthesizability for a crystal structure represented as a `pymatgen.structure` object.
```
# Build a BCC iron cell by hand: cubic lattice (a = 2.8), one Fe at the
# corner and one at the body center, then score its synthesizability.
lattice = Lattice.cubic(2.8)
site_coords = [[0, 0, 0], [0.5, 0.5, 0.5]]
bcc_fe = Structure(lattice, ["Fe", "Fe"], site_coords)
pup.synth_score_from_structure(bcc_fe)
```
No synthesizability prediction is returned! If we check the log, we see that all MP entries for Fe already exist, so there's no need to predict synthesizability.
```
!tail output.log -n2
```
| github_jupyter |
# TextBlob
TextBlob is a Python (2 and 3) library for processing textual data.
It provides a simple API for diving into common Natural Language Processing (NLP) tasks such as part-of-speech tagging, noun phrase extraction, sentiment analysis, classification, translation, and more.
### Import TextBlob
```
from textblob import TextBlob
```
TextBlob is a python library and offers a simple API to access its methods and perform basic NLP tasks.
<hr>
A good thing about TextBlob objects is that they behave just like Python strings, so you can transform and manipulate them just as we would a regular Python string.
```
string1 = TextBlob("Analytics")
string1[1:5] # Extracting 1 to 5 characters
string1.upper() # Convert text to upper case
string2 = TextBlob("Vidhya")
# concat
string1 + " " + string2
```
## Tokenization
Tokenization refers to dividing text or a sentence into a sequence of tokens, which roughly correspond to “words”.
This is one of the basic tasks of NLP.
To do this using TextBlob, follow the two steps:
1. Create a **textblob** object and pass a string with it.
2. Call **functions** of textblob in order to do a specific task.
```
blob = TextBlob("Analytics Vidhya is a great platform to learn data science. \n It helps community through blogs, hackathons, discussions,etc.")
# Tokenizing Sentences
blob.sentences
# Extracting only the first sentence
blob.sentences[0]
# Printing words of first sentence
for words in blob.sentences[0].words:
print(words)
# Printing all words
blob.words
```
## Noun Phrase Extraction
Since we extracted the words in the previous section, instead of that we can just extract out the noun phrases from the textblob.
Noun Phrase extraction is particularly important when you want to analyze the “who” in a sentence.
```
blob = TextBlob("Analytics Vidhya is a great platform to learn data science.")
for np in blob.noun_phrases:
print (np)
```
## Part-of-speech Tagging
Part-of-speech tagging or grammatical tagging is a method to mark words present in a text on the basis of its definition and context.
In simple words, it tells whether a word is a noun, or an adjective, or a verb, etc.
This is just a more complete version of noun phrase extraction, where we want to find all the parts of speech in a sentence.
```
for words, tag in blob.tags:
print(words, tag)
```
## Words Inflection and Lemmatization
*Inflection* is a process of word formation in which characters are added to the base form of a word to express grammatical meanings.
Word inflection in TextBlob is very simple, i.e., the words we tokenized from a textblob can be easily changed into singular or plural.
```
blob = TextBlob("Analytics Vidhya is a great platform to learn data science. \n It helps community through blogs, hackathons, discussions,etc.")
print(blob.sentences[1].words[1])
print(blob.sentences[1].words[1].singularize())
```
The TextBlob library also offers a built-in object known as *Word*.
We just need to create a word object and then apply a function directly to it.
```
from textblob import Word
w = Word("Platform")
w.pluralize()
```
We can also use the tags to inflect a particular type of words
```
## Using Tags
for word, pos in blob.tags:
if pos == 'NN':
print(word.pluralize())
```
Words can be lemmatized using the *lemmatize* function.
```
## Lemmatization
w = Word("running")
w.lemmatize("v") #Here 'v' represents verb
```
## N-grams
A combination of multiple words together is called an N-gram.
N grams (N > 1) are generally more informative as compared to words, and can be used as features for language modelling.
N-grams can be easily accessed in TextBlob using the **ngrams** function, which returns a tuple of n successive words.
```
for ngram in blob.ngrams(2):
print(ngram)
```
## Sentiment Analysis
Sentiment analysis is basically the process of determining the attitude or the emotion of the writer, i.e., whether it is positive or negative or neutral.
The sentiment function of textblob returns two properties, **polarity**, and **subjectivity**.
Polarity is a float which lies in the range [-1, 1], where 1 means a positive statement and -1 means a negative statement.
Subjective sentences generally refer to personal opinion, emotion or judgment whereas objective refers to factual information.
Subjectivity is also a float which lies in the range of [0,1].
```
print(blob)
blob.sentiment
```
We can see that polarity is **0.8**, which means that the statement is positive and **0.75** subjectivity refers that mostly it is a public opinion and not a factual information.
## Spelling Correction
Spelling correction is a cool feature which TextBlob offers; it can be accessed using the **correct** function.
```
blob = TextBlob('Analytics Vidhya is a gret platfrm to learn data scence')
blob.correct()
```
We can also check the list of suggested words and their confidence using the **spellcheck** function.
```
blob.words[4].spellcheck()
```
## Creating a short summary of a text
This is a simple trick in which we will be using the things we learned above.
```
import random
blob = TextBlob('Analytics Vidhya is a thriving community for data driven industry. This platform allows \
people to know more about analytics from its articles, Q&A forum, and learning paths. Also, we help \
professionals & amateurs to sharpen their skillsets by providing a platform to participate in Hackathons.')
nouns = list()
for word, tag in blob.tags:
if tag == 'NN':
nouns.append(word.lemmatize())
print('This text is about...')
for item in random.sample(nouns, 5):
word = Word(item)
print(word.pluralize())
```
What we did above is extract a list of nouns from the text to give the reader a general idea of the things the text is related to.
## Translation and Language Detection
```
blob = TextBlob('هذا بارد') # Arabic Text
blob.detect_language()
```
So, it is Arabic. Now, let’s translate it into English using TextBlob so that we can know what is written.
```
# Translate to English
blob.translate(from_lang='ar', to='en')
```
Even if you don’t explicitly define the source language, TextBlob will automatically detect the language and translate into the desired language
```
blob.translate(to='en')
```
## Text Classification
Let’s build a simple text classification model using TextBlob. For this, first, we need to prepare a training and testing data.
```
training = [
('Tom Holland is a terrible spiderman.','pos'),
('a terrible Javert (Russell Crowe) ruined Les Miserables for me...','pos'),
('The Dark Knight Rises is the greatest superhero movie ever!','neg'),
('Fantastic Four should have never been made.','pos'),
('Wes Anderson is my favorite director!','neg'),
('Captain America 2 is pretty awesome.','neg'),
('Let\s pretend "Batman and Robin" never happened..','pos'),
]
testing = [
('Superman was never an interesting character.','pos'),
('Fantastic Mr Fox is an awesome film!','neg'),
('Dragonball Evolution is simply terrible!!','pos')
]
```
TextBlob provides a built-in classifiers module to create a custom classifier. So, let’s quickly import it and create a basic classifier.
```
from textblob import classifiers
# Naive Bayes Classifier
nb_classifier = classifiers.NaiveBayesClassifier(training)
```
As you can see above, we have passed the training data into the classifier.
Note that here we have used Naive Bayes classifier, but TextBlob also offers Decision tree classifier which is as shown below.
```
# Decision Tree Classifier
dt_classifier = classifiers.DecisionTreeClassifier(training)
```
Now, let’s check the accuracy of this classifier on the testing dataset; TextBlob also lets us check the most informative features.
```
print(nb_classifier.accuracy(testing))
nb_classifier.show_informative_features(3)
```
As, we can see that if the text contains “is”, then there is a high probability that the statement will be negative.
In order to give a little more idea, let’s check our classifier on a random text.
```
blob = TextBlob('The weather is terrible!', classifier=nb_classifier)
print(blob.classify())
```
So, based on the training on the above dataset, our classifier has provided us the right result.
## Pros and Cons:
### Pros:
1. Since, it is built on the shoulders of NLTK and Pattern, therefore making it simple for beginners by providing an intuitive interface to NLTK.
2. It provides language translation and detection which is powered by Google Translate (not provided with spaCy)
### Cons:
1. It is a little slower in comparison to spaCy, but faster than NLTK. (spaCy > TextBlob > NLTK)
2. It does not provide features like dependency parsing, word vectors etc. which is provided by spacy.
| github_jupyter |
# Part 3: Create a model to serve the item embedding data
This notebook is the third of five notebooks that guide you through running the [Real-time Item-to-item Recommendation with BigQuery ML Matrix Factorization and ScaNN](https://github.com/GoogleCloudPlatform/analytics-componentized-patterns/tree/master/retail/recommendation-system/bqml-scann) solution.
Use this notebook to wrap the item embeddings data in a Keras model that can act as an item-embedding lookup, then export the model as a SavedModel.
Before starting this notebook, you must run the [02_export_bqml_mf_embeddings](02_export_bqml_mf_embeddings.ipynb) notebook to process the item embeddings data and export it to Cloud Storage.
After completing this notebook, run the [04_build_embeddings_scann](04_build_embeddings_scann.ipynb) notebook to create an approximate nearest neighbor index for the item embeddings.
## Setup
Import the required libraries, configure the environment variables, and authenticate your GCP account.
```
!pip install -q -U pip
!pip install -q tensorflow==2.2.0
!pip install -q -U google-auth google-api-python-client google-api-core
```
### Import libraries
```
import os
import tensorflow as tf
import numpy as np
print(f'Tensorflow version: {tf.__version__}')
```
### Configure GCP environment settings
Update the following variables to reflect the values for your GCP environment:
+ `PROJECT_ID`: The ID of the Google Cloud project you are using to implement this solution.
+ `BUCKET`: The name of the Cloud Storage bucket you created to use with this solution. The `BUCKET` value should be just the bucket name, so `myBucket` rather than `gs://myBucket`.
```
PROJECT_ID = 'yourProject' # Change to your project.
BUCKET = 'yourBucketName' # Change to the bucket you created.
EMBEDDING_FILES_PATH = f'gs://{BUCKET}/bqml/item_embeddings/embeddings-*'
MODEL_OUTPUT_DIR = f'gs://{BUCKET}/bqml/embedding_lookup_model'
!gcloud config set project $PROJECT_ID
```
### Authenticate your GCP account
This is required if you run the notebook in Colab. If you use an AI Platform notebook, you should already be authenticated.
```
try:
from google.colab import auth
auth.authenticate_user()
print("Colab user is authenticated.")
except: pass
```
## Create the embedding lookup model
You use the `EmbeddingLookup` class to create the item embedding lookup model. The `EmbeddingLookup` class inherits from [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model), and is implemented in the
[lookup_creator.py](embeddings_lookup/lookup_creator.py)
module.
The `EmbeddingLookup `class works as follows:
1. Accepts the `embedding_files_prefix` variable in the class constructor. This variable points to the Cloud Storage location of the CSV files containing the item embedding data.
1. Reads and parses the item embedding CSV files.
1. Populates the `vocabulary` and `embeddings` class variables. `vocabulary` is an array of item IDs, while `embeddings` is a Numpy array with the shape (*number of embeddings*, *embedding dimensions*).
1. Appends the `oov_embedding` variable to the `embeddings` variable. The `oov_embedding` variable value is all zeros, and it represents the out of vocabulary (OOV) embedding vector. The `oov_embedding` variable is used when an invalid ("out of vocabulary", or OOV) item ID is submitted, in which case an embedding vector of zeros is returned.
1. Writes the `vocabulary` value to a file, one array element per line, so it can be used as a model asset by the SavedModel.
1. Uses `token_to_idx`, a `tf.lookup.StaticHashTable` object, to map the
item ID to the index of the embedding vector in the `embeddings` Numpy array.
1. Accepts a list of strings with the `__call__` method of the model. Each string represents the item ID(s) for which the embeddings are to be retrieved. If the input list contains _N_ strings, then _N_ embedding vectors are returned.
Note that each string in the input list may contain one or more space-separated item IDs. If multiple item IDs are present, the embedding vectors of these item IDs are retrieved and _combined_ (by averaging) into a single embedding vector. This makes it possible to fetch an embedding vector representing a set of items (like a playlist) rather than just a single item.
### Clear the model export directory
```
if tf.io.gfile.exists(MODEL_OUTPUT_DIR):
print("Removing {} contents...".format(MODEL_OUTPUT_DIR))
tf.io.gfile.rmtree(MODEL_OUTPUT_DIR)
```
### Create the model and export the SavedModel file
Call the `export_saved_model` method, which uses the `EmbeddingLookup` class to create the model and then exports the resulting SavedModel file:
```
from embeddings_lookup import lookup_creator
lookup_creator.export_saved_model(EMBEDDING_FILES_PATH, MODEL_OUTPUT_DIR)
```
Inspect the exported SavedModel using the `saved_model_cli` command line tool:
```
!saved_model_cli show --dir {MODEL_OUTPUT_DIR} --tag_set serve --signature_def serving_default
```
### Test the SavedModel file
Test the SavedModel by loading it and then calling it with input item IDs:
```
loaded_model = tf.saved_model.load(MODEL_OUTPUT_DIR)
input_items = ['2114406', '2114402 2120788', 'abc123']
output = loaded_model(input_items)
print(f'Embeddings retrieved: {output.shape}')
for idx, embedding in enumerate(output):
print(f'{input_items[idx]}: {embedding[:5]}')
```
The output shows the output embedding vector (the first five elements of each vector) for each input item. Note the following:
+ The second entry in the input list contains two item IDs, `2114402` and `2120788`. The returned vector is the average of the embeddings of these two items.
+ The third entry in the input list, `abc123`, is an invalid item ID, so the returned embedding vector contains zeros.
## License
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
**This is not an official Google product but sample code provided for an educational purpose**
| github_jupyter |
```
import pandas as pd
import sys
import os
import holoviews as hv
from IPython.core.display import display, HTML
from holoviews import opts, dim, Palette
################################ CONFIGURATION FILE HANDLING #################################
import configparser
config = configparser.ConfigParser()
try:
    config.read('config_a.ini')
    # Get values from configuration file
    upper_acceptable_ping_bound = float(config['DEFAULT']['upper_acceptable_ping_bound'])
    upper_ping_issue_bound = float(config['DEFAULT']['upper_ping_issue_bound'])
    acceptable_network_speed = float(config['DEFAULT']['acceptable_network_speed'])
except (KeyError, ValueError, configparser.Error):
    # In case no config file is found or another reading error occurred.
    # Narrowed from a bare `except:` so unrelated bugs and KeyboardInterrupt
    # are not silently swallowed.
    print("Configuration file not found/readable.")
    print("Creating a new configuration file.")
    # Creating new file with standard values
    config['DEFAULT'] = {'upper_acceptable_ping_bound': '10',
                         'upper_ping_issue_bound': '99999',
                         'acceptable_network_speed': '16'}
    with open('config_a.ini', 'w') as configfile:
        config.write(configfile)
    # BUGFIX: also bind the thresholds for the current run. Previously the
    # notebook continued with undefined names (NameError in later cells)
    # even though it announced "Running on default parameters".
    upper_acceptable_ping_bound = 10.0
    upper_ping_issue_bound = 99999.0
    acceptable_network_speed = 16.0
    print("New configuration file was created. Running on default parameters, please restart for changes.")
##############################################################################################
# Use the Bokeh backend for holoviews and define shared default plot options.
hv.extension('bokeh')
opts.defaults(
    opts.Bars(xrotation=45, tools=['hover']),
    opts.BoxWhisker(width=700, xrotation=30, box_fill_color=Palette('Category20')),
    opts.Curve(width=700, tools=['hover']),
    opts.GridSpace(shared_yaxis=True),
    opts.Scatter(width=700, height=500, color=Palette('Category20'), size=dim('growth')+5, tools=['hover'], alpha=0.5, cmap='Set1'),
    opts.NdOverlay(legend_position='left'))
# Initializes the figures path in webpage for the diagram output
if not os.path.isdir("./webpage/figures"):
    os.mkdir("./webpage/figures")
    print("Path 'figures' created successfully")
else:
    print("Path 'figures' initialized")
def numOutlierCount(attribute):
    """Count IQR-based outliers in a pandas Series and report them.

    A value is an outlier when it lies outside
    [Q1 - 1.5*IQR, Q3 + 1.5*IQR]. Prints the absolute count and the
    fraction of outliers (relative to non-NaN values) and returns the
    absolute count.
    """
    lower_quartile = attribute.quantile(0.25)
    upper_quartile = attribute.quantile(0.75)
    spread = upper_quartile - lower_quartile
    high_fence = upper_quartile + 1.5 * spread
    low_fence = lower_quartile - 1.5 * spread
    # A single combined mask replaces the two separate counts; the two
    # ranges cannot overlap, so the total is identical.
    outlier_total = attribute[(attribute > high_fence) | (attribute < low_fence)].count()
    outlier_share = outlier_total / attribute.count()
    print("Number Of Outliers Absolute: " + str(outlier_total))
    print("Number of outliers percentage: " + str(outlier_share))
    return outlier_total
def inspect_outliers(df, attribute):
    """Render the rows of `df` whose `attribute` value is an IQR outlier.

    Rows below Q1 - 1.5*IQR and rows above Q3 + 1.5*IQR are displayed as
    two separate HTML tables in the notebook output.
    """
    first_quartile = attribute.quantile(0.25)
    third_quartile = attribute.quantile(0.75)
    inter_quartile_range = third_quartile - first_quartile
    low_cutoff = first_quartile - inter_quartile_range * 1.5
    high_cutoff = third_quartile + inter_quartile_range * 1.5
    rows_below = df[attribute < low_cutoff]
    rows_above = df[attribute > high_cutoff]
    display(HTML("</br><h2>Min-Outliers</h2>"))
    display(HTML(rows_below.to_html()))
    display(HTML("<hr></br><h2>Max-Outliers</h2>"))
    display(HTML(rows_above.to_html()))
# Data Import
try:
    df_ping = pd.read_csv("Data/ping_test.csv", index_col=0)
    df_speed_test = pd.read_csv("Data/speed_test.csv", index_col=0)
    # Timestamps are stored as e.g. "31.12.2020 23:59:59".
    df_ping["date"] = pd.to_datetime(df_ping["date"], format="%d.%m.%Y %H:%M:%S")
    df_speed_test["date"] = pd.to_datetime(df_speed_test["date"], format="%d.%m.%Y %H:%M:%S")
except (FileNotFoundError, KeyError, ValueError):
    # Narrowed from a bare `except:` so genuine bugs (and KeyboardInterrupt)
    # are not silently swallowed. FileNotFoundError: missing CSV;
    # KeyError: missing "date" column; ValueError: unparseable CSV/timestamp.
    print("Error while searching for files. Please perform network-test first.")
    # BUGFIX: exit with a non-zero status so callers can detect the failure
    # (was sys.exit(0), which signals success).
    sys.exit(1)
```
# Basic Data Wrangling
```
print(df_ping.shape)
df_ping.head()
print(df_speed_test.shape)
df_speed_test.head()
df_ping_issues = df_ping[df_ping["max"]==upper_ping_issue_bound]
print("There are {} issues in the analysis of the ping.".format(df_ping_issues.shape[0]))
df_speed_test_issues = df_speed_test[df_speed_test["ping"]==upper_ping_issue_bound]
print("There are {} issues in the analysis of the network speed.".format(df_speed_test_issues.shape[0]))
# Filter issues from eg. sockets
df_ping = df_ping[df_ping["max"]!=upper_ping_issue_bound]
df_speed_test = df_speed_test[df_speed_test["ping"]!=upper_ping_issue_bound]
df_speed_test["ping"].max()
print(
"The maximal Ping time has been {} ms.\nThe minimal ping time has been {} ms. \nThe mean ping time has been {} ms. "
.format(df_ping["max"].max(), df_ping["min"].min(), df_ping["avg"].mean()))
```
# Ping Times in ms with extreme outliers
```
fig_all_max_ping = hv.Curve((df_ping["date"], df_ping["max"]),
"Date",
"Ping in ms",
label="All messured pings")
fig_dot_over_upper_bound = hv.Scatter(
(df_ping["date"][df_ping["max"] > upper_acceptable_ping_bound],
df_ping["max"][df_ping["max"] > upper_acceptable_ping_bound]),
"Date",
"Max_Ping_Time",
label="Highlight pings over {} ms".format(
str(upper_acceptable_ping_bound))).opts(opts.Scatter(color="red", size=10))
fig_ping_times_with_extreme_outliers = (fig_all_max_ping *
fig_dot_over_upper_bound).opts(
legend_position="top_left",
title="All Max. Ping Times in ms",padding=0.05)
#Safe newly generated plot
hv.save(fig_ping_times_with_extreme_outliers,
os.path.join("webpage", "figures",
"fig_ping_times_with_extreme_outliers.html"),
backend='bokeh')
fig_ping_times_with_extreme_outliers
inspect_outliers(df_ping,df_ping["max"])
```
# Ping Times in ms without extreme outliers
```
fig_ping_without_extreme_outliers = hv.Curve(
(df_ping["date"][df_ping["max"]<1000],
df_ping["max"][df_ping["max"]<1000]),"Date","Ping in ms",label="All ping times less then 1000 ms")
fig_ping_highlight_max = hv.Scatter(
(df_ping["date"][df_ping["max"] > upper_acceptable_ping_bound][df_ping["max"]<1000],
df_ping["max"][df_ping["max"] > upper_acceptable_ping_bound][df_ping["max"]<1000]),
"Date",
"Max_Ping_Time",
label = "Highlight pings over {} ms".format(str(upper_acceptable_ping_bound))
).opts(color="red", size=10)
fig_ping_times_without_extreme_outliers = (fig_ping_without_extreme_outliers*fig_ping_highlight_max).opts(title="All Max. Ping Times in ms without extreme outlieres",
legend_position="top_left",
padding = 0.05)
#Safe newly generated plot
hv.save(fig_ping_times_without_extreme_outliers, os.path.join("webpage","figures","fig_ping_times_without_extreme_outliers.html") , backend='bokeh')
fig_ping_times_without_extreme_outliers
# The latency bound under which network speedtest is performing is defined in the network_test.py
pingbound_network_test = df_speed_test["ping"].min()
fig_network_speed_below_pingbound = hv.Curve(
(df_speed_test["date"], df_speed_test["downstream"]/1000),
"Date",
"Network Speed",
label="Messured downlink speed when ping below {} ms".format(
str(pingbound_network_test)))
fig_highlight_below_acceptable_network_speed = hv.Scatter(
(df_speed_test["date"][
df_speed_test["downstream"]/1000 < acceptable_network_speed],
df_speed_test["downstream"][
df_speed_test["downstream"]/1000 < acceptable_network_speed]/1000),
"Date",
"Network Speed",
label="Highlight downstream speed below {} mbit/s".format(
str(acceptable_network_speed))).opts(color="red", size=10)
fig_horizontal_marker = hv.HLine(
acceptable_network_speed,
label="Acceptable network speed at {} mbit/s".format(
str(acceptable_network_speed))).opts(color="black")
fig_upstream_below_ping_bound = hv.Curve(
(df_speed_test["date"], df_speed_test["upstream"]/1000),
"Date",
"Network Speed",
label="Messured uplink when ping below {} ms".format(
str(pingbound_network_test))).opts(color="purple")
fig_network_speeds_under_upper_bound = (
fig_network_speed_below_pingbound *
fig_highlight_below_acceptable_network_speed * fig_upstream_below_ping_bound* fig_horizontal_marker
).opts(
title="Network Speed when Ping below {} ms".format(pingbound_network_test),
legend_position="top_left",
padding=0.05)
#Safe newly generated plot
hv.save(fig_network_speeds_under_upper_bound,
os.path.join("webpage", "figures",
"fig_network_speeds_under_upper_bound.html"),
backend='bokeh')
fig_network_speeds_under_upper_bound
df_ping["qcut"] = pd.qcut(df_ping["avg"],10)
df_ping.groupby("qcut").agg({"avg":["count"]}).reset_index()
max_groups = df_ping[df_ping["max"]>10].groupby([df_ping["date"].dt.hour,df_ping["date"].dt.minute]).agg({"max":["count"]})
max_groups[max_groups["max"]["count"]>2]
error_groups = df_ping_issues.groupby([df_ping_issues["date"].dt.hour, df_ping_issues["date"].dt.minute]).agg({"max":["count"]})
error_groups[error_groups["max"]["count"]>1]
```
| github_jupyter |
<i>Copyright (c) Microsoft Corporation. All rights reserved.</i>
<i>Licensed under the MIT License.</i>
# Estimating Baseline Performance
<br>
Estimating baseline performance is as important as choosing right metrics for model evaluation. In this notebook, we briefly discuss about why do we care about baseline performance and how to measure it.
The notebook covers two example scenarios under the context of movie recommendation: 1) rating prediction and 2) top-k recommendation.
### Why does baseline performance matter?
<br>
Before we go deep dive into baseline performance estimation, it is worth to think about why we need that.
As we can simply see from the definition of the word 'baseline', <b>baseline performance</b> is a minimum performance we expect to achieve by a model or starting point used for model comparisons.
Once we train a model and get results from the evaluation metrics we choose, we will wonder how we should interpret the metrics, or even whether the trained model is better than a simple rule-based model. Baseline results help us to understand those.
Let's say we are building a food recommender. We evaluated the model on the test set and got nDCG (at 10) = 0.3. At that moment, we would not know if the model is good or bad. But once we find out that a simple rule of <i>'recommending top-10 most popular foods to all users'</i> can achieve nDCG = 0.4, we see that our model is not good enough. Maybe the model is not trained well, or maybe we should think about if nDCG is the right metric for prediction of user behaviors in the given problem.
### How can we estimate the baseline performance?
<br>
To estimate the baseline performance, we first pick a baseline model and evaluate it by using the same evaluation metrics we will use for our main model. In general, a very simple rule or even a <b>zero rule</b>--<i>predicts the mean for regression or the mode for classification</i>--will be enough as a baseline model (random prediction might be okay for certain problems, but it usually performs worse than the zero rule). If we already have a running model in hand and are now trying to improve it, we can of course use the previous results as a baseline performance.
Most importantly, <b>different baseline approaches should be taken for different problems and business goals</b>. For example, recommending the previously purchased items could be used as a baseline model for food or restaurant recommendation since people tend to eat the same foods repeatedly. For TV show and/or movie recommendation, on the other hand, recommending previously watched items does not make sense. Probably recommending the most popular (most watched or highly rated) items is more likely useful as a baseline.
In this notebook, we demonstrate how to estimate the baseline performance for the movie recommendation with MovieLens dataset. We use the mean for rating prediction, i.e. our baseline model will predict a user's rating of a movie by averaging the ratings the user previously submitted for other movies. For the top-k recommendation problem, we use top-k most-rated movies as the baseline model. We choose the number of ratings here because we regard the binary signal of 'rated vs. not-rated' as user's implicit preference when evaluating ranking metrics.
Now, let's jump into the implementation!
```
import sys
sys.path.append("../../")
import itertools
import pandas as pd
from reco_utils.common.notebook_utils import is_jupyter
from reco_utils.dataset import movielens
from reco_utils.dataset.python_splitters import python_random_split
from reco_utils.dataset.pandas_df_utils import filter_by
from reco_utils.evaluation.python_evaluation import (
rmse, mae, rsquared, exp_var,
map_at_k, ndcg_at_k, precision_at_k, recall_at_k
)
print("System version: {}".format(sys.version))
print("Pandas version: {}".format(pd.__version__))
```
First, let's prepare training and test data sets.
```
MOVIELENS_DATA_SIZE = '100k'
data = movielens.load_pandas_df(
size=MOVIELENS_DATA_SIZE,
header=['UserId', 'MovieId', 'Rating', 'Timestamp']
)
data.head()
train, test = python_random_split(data, ratio=0.75, seed=42)
```
### 1. Rating prediction baseline
As we discussed earlier, we use each user's **mean rating** as the baseline prediction.
```
# Calculate avg ratings from the training set
users_ratings = train.groupby(['UserId'])['Rating'].mean()
users_ratings = users_ratings.to_frame().reset_index()
users_ratings.rename(columns = {'Rating': 'AvgRating'}, inplace = True)
users_ratings.head()
# Generate prediction for the test set
baseline_predictions = pd.merge(test, users_ratings, on=['UserId'], how='inner')
baseline_predictions.loc[baseline_predictions['UserId'] == 1].head()
```
Now, let's evaluate how our baseline model will perform on regression metrics
```
baseline_predictions = baseline_predictions[['UserId', 'MovieId', 'AvgRating']]
cols = {
'col_user': 'UserId',
'col_item': 'MovieId',
'col_rating': 'Rating',
'col_prediction': 'AvgRating',
}
eval_rmse = rmse(test, baseline_predictions, **cols)
eval_mae = mae(test, baseline_predictions, **cols)
eval_rsquared = rsquared(test, baseline_predictions, **cols)
eval_exp_var = exp_var(test, baseline_predictions, **cols)
print("RMSE:\t\t%f" % eval_rmse,
"MAE:\t\t%f" % eval_mae,
"rsquared:\t%f" % eval_rsquared,
"exp var:\t%f" % eval_exp_var, sep='\n')
```
As you can see, our baseline model actually performed quite well on the metrics. E.g. MAE (Mean Absolute Error) was around 0.84 on MovieLens 100k data, saying that users' actual ratings would be within ±0.84 of their mean ratings. This also gives us an insight that users' ratings could be biased, where some users tend to give high ratings for all movies while others give low ratings.
Now, next time we build our machine-learning model, we will want to make the model performs better than this baseline.
### 2. Top-k recommendation baseline
Recommending the **most popular items** is intuitive and simple approach that works for many of recommendation scenarios. Here, we use top-k most-rated movies as the baseline model as we discussed earlier.
```
item_counts = train['MovieId'].value_counts().to_frame().reset_index()
item_counts.columns = ['MovieId', 'Count']
item_counts.head()
user_item_col = ['UserId', 'MovieId']
# Cross join users and items
test_users = test['UserId'].unique()
user_item_list = list(itertools.product(test_users, item_counts['MovieId']))
users_items = pd.DataFrame(user_item_list, columns=user_item_col)
print("Number of user-item pairs:", len(users_items))
# Remove seen items (items in the train set) as we will not recommend those again to the users
users_items_remove_seen = filter_by(users_items, train, user_item_col)
print("After remove seen items:", len(users_items_remove_seen))
# Generate recommendations
baseline_recommendations = pd.merge(item_counts, users_items_remove_seen, on=['MovieId'], how='inner')
baseline_recommendations.head()
k = 10
cols['col_prediction'] = 'Count'
eval_map = map_at_k(test, baseline_recommendations, k=k, **cols)
eval_ndcg = ndcg_at_k(test, baseline_recommendations, k=k, **cols)
eval_precision = precision_at_k(test, baseline_recommendations, k=k, **cols)
eval_recall = recall_at_k(test, baseline_recommendations, k=k, **cols)
print("MAP:\t%f" % eval_map,
"NDCG@K:\t%f" % eval_ndcg,
"Precision@K:\t%f" % eval_precision,
"Recall@K:\t%f" % eval_recall, sep='\n')
```
Again, the baseline is quite high, nDCG = 0.25 and Precision = 0.22.
<br>
### Concluding remarks
In this notebook, we discussed how to measure baseline performance for the movie recommendation example.
We covered very naive approaches as baselines, but still they are useful in a sense that they can provide reference numbers to estimate the complexity of the given problem as well as the relative performance of the recommender models we are building.
```
if is_jupyter():
# Record results with papermill for unit-tests
import papermill as pm
import scrapbook as sb
sb.glue("map", eval_map)
sb.glue("ndcg", eval_ndcg)
sb.glue("precision", eval_precision)
sb.glue("recall", eval_recall)
sb.glue("rmse", eval_rmse)
sb.glue("mae", eval_mae)
sb.glue("exp_var", eval_exp_var)
sb.glue("rsquared", eval_rsquared)
```
### References
[[1](https://dl.acm.org/citation.cfm?id=1401944)] Yehuda Koren, Factorization meets the neighborhood: a multifaceted collaborative filtering model, KDD '08 pp. 426-434 2008.
[[2](https://surprise.readthedocs.io/en/stable/basic_algorithms.html)] Surprise lib, Basic algorithms
| github_jupyter |
SOP062 - Install ipython-sql and pyodbc modules
===============================================
Steps
-----
### Common functions
Define helper functions used in this notebook.
```
# Define `run` function for transient fault handling, hyperlinked suggestions, and scrolling updates on Windows
import sys
import os
import re
import json
import platform
import shlex
import shutil
import datetime
from subprocess import Popen, PIPE
from IPython.display import Markdown
retry_hints = {}
error_hints = {}
install_hint = {}
first_run = True
rules = None
def run(cmd, return_output=False, no_output=False, retry_count=0):
    """
    Run shell command, stream stdout, print stderr and optionally return output

    Parameters:
        cmd           -- command line to execute (split with shlex.split)
        return_output -- when True, accumulate stdout and return it instead of printing
        no_output     -- when True, let the child inherit stdout/stderr (no capture);
                         used for tools that draw scrolling progress bars
        retry_count   -- internal recursion counter for transient-fault retries

    Raises FileNotFoundError when the executable cannot be located, and
    SystemExit when the command finishes with a non-zero exit code.
    """
    MAX_RETRIES = 5
    output = ""
    retry = False  # NOTE(review): assigned but never read in this version of the helper
    global first_run
    global rules
    if first_run:
        first_run = False
        rules = load_rules()
    # shlex.split is required on bash and for Windows paths with spaces
    #
    cmd_actual = shlex.split(cmd)
    # Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
    #
    user_provided_exe_name = cmd_actual[0].lower()
    # When running python, use the python in the ADS sandbox ({sys.executable})
    #
    if cmd.startswith("python "):
        cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)
        # On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
        # with:
        #
        # UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
        #
        # Setting it to a default value of "en_US.UTF-8" enables pip install to complete
        #
        if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
            os.environ["LC_ALL"] = "en_US.UTF-8"
    # To aid supportability, determine which binary file will actually be executed on the machine
    #
    which_binary = None
    # Special case for CURL on Windows. The version of CURL in Windows System32 does not work to
    # get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance
    # of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost
    # always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
    # look for the 2nd installation of CURL in the path)
    if platform.system() == "Windows" and cmd.startswith("curl "):
        path = os.getenv('PATH')
        for p in path.split(os.path.pathsep):
            p = os.path.join(p, "curl.exe")
            if os.path.exists(p) and os.access(p, os.X_OK):
                if p.lower().find("system32") == -1:
                    cmd_actual[0] = p
                    which_binary = p
                    break
    # Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
    # seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound)
    #
    # NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
    #
    if which_binary == None:
        which_binary = shutil.which(cmd_actual[0])
    if which_binary == None:
        if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
            display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))
        raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
    else:
        cmd_actual[0] = which_binary
    start_time = datetime.datetime.now().replace(microsecond=0)
    print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
    print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
    print(f" cwd: {os.getcwd()}")
    # Command-line tools such as CURL and AZDATA HDFS commands output
    # scrolling progress bars, which causes Jupyter to hang forever, to
    # workaround this, use no_output=True
    #
    # Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait
    #
    wait = True
    try:
        if no_output:
            p = Popen(cmd_actual)
        else:
            p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
            with p.stdout:
                for line in iter(p.stdout.readline, b''):
                    line = line.decode()
                    if return_output:
                        output = output + line
                    else:
                        if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
                            regex = re.compile(' "(.*)"\: "(.*)"')
                            match = regex.match(line)
                            if match:
                                if match.group(1).find("HTML") != -1:
                                    display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
                                else:
                                    display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))
                                wait = False
                                break # otherwise infinite hang, have not worked out why yet.
                        else:
                            print(line, end='')
                            if rules is not None:
                                apply_expert_rules(line)
        if wait:
            p.wait()
    except FileNotFoundError as e:
        # NOTE(review): this prints the whole install_hint dict; the per-binary
        # lookup used above (install_hint[user_provided_exe_name]) was likely intended — confirm.
        if install_hint is not None:
            display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))
        raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e
    exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()
    if not no_output:
        for line in iter(p.stderr.readline, b''):
            line_decoded = line.decode()
            # azdata emits a single empty line to stderr when doing an hdfs cp, don't
            # print this empty "ERR:" as it confuses.
            #
            if line_decoded == "":
                continue
            print(f"STDERR: {line_decoded}", end='')
            if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
                exit_code_workaround = 1
            # Show context-aware hints for known error strings for this binary.
            if user_provided_exe_name in error_hints:
                for error_hint in error_hints[user_provided_exe_name]:
                    if line_decoded.find(error_hint[0]) != -1:
                        display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))
            if rules is not None:
                apply_expert_rules(line_decoded)
            # Transient-fault handling: re-run the whole command up to MAX_RETRIES times.
            if user_provided_exe_name in retry_hints:
                for retry_hint in retry_hints[user_provided_exe_name]:
                    if line_decoded.find(retry_hint) != -1:
                        if retry_count < MAX_RETRIES:
                            print(f"RETRY: {retry_count} (due to: {retry_hint})")
                            retry_count = retry_count + 1
                            output = run(cmd, return_output=return_output, retry_count=retry_count)
                            if return_output:
                                return output
                            else:
                                return
    elapsed = datetime.datetime.now().replace(microsecond=0) - start_time
    # WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
    # don't wait here, if success known above
    #
    if wait:
        if p.returncode != 0:
            raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
    else:
        if exit_code_workaround !=0 :
            raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')
    print(f'\nSUCCESS: {elapsed}s elapsed.\n')
    if return_output:
        return output
def load_json(filename):
    """Read *filename* as UTF-8 text and return its parsed JSON content."""
    with open(filename, encoding="utf8") as fp:
        text = fp.read()
    return json.loads(text)
def load_rules():
    """Load the expert rules stored in this notebook's own metadata.

    Returns the sorted list of rules, or None when the notebook file cannot be
    read (e.g. it was renamed) or carries no expert-rule metadata.
    """
    # Initialise explicitly so every path returns a well-defined value
    # (the original relied on fall-through after a bare `except:`).
    rules = None
    try:
        # Load this notebook as json to get access to the expert rules in the notebook metadata.
        #
        j = load_json("sop062-install-ipython-sql-module.ipynb")
    except (OSError, ValueError):
        # If the user has renamed the book, we can't load ourself.
        # NOTE: Is there a way in Jupyter, to know your own filename?
        # Narrowed from a bare `except:` so Ctrl-C and real bugs still propagate.
        pass
    else:
        if "metadata" in j and \
           "azdata" in j["metadata"] and \
           "expert" in j["metadata"]["azdata"] and \
           "rules" in j["metadata"]["azdata"]["expert"]:
            rules = j["metadata"]["azdata"]["expert"]["rules"]
            # Sort rules, so they run in priority order (the [0] element). Lowest value first.
            rules.sort()
    return rules
def apply_expert_rules(line):
    """Match *line* of command output against every loaded expert rule and
    display a HINT link to the troubleshooting notebook of each rule whose
    regular expression matches."""
    global rules
    for rule in rules:
        # rules that have 9 elements are the injected (output) rules (the ones we want). Rules
        # with only 8 elements are the source (input) rules, which are not expanded (i.e. TSG029,
        # not ../repair/tsg029-nb-name.ipynb)
        if len(rule) == 9:
            notebook = rule[1]
            cell_type = rule[2]
            output_type = rule[3] # i.e. stream or error
            output_type_name = rule[4] # i.e. ename or name
            output_type_value = rule[5] # i.e. SystemExit or stdout
            details_name = rule[6] # i.e. evalue or text
            expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it!
            # print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.")
            # DOTALL lets `.` in the rule expression span newlines in multi-line output.
            if re.match(expression, line, re.DOTALL):
                # print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook))
                match_found = True  # NOTE(review): local flag, never read afterwards
                display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.'))
print('Common functions defined successfully.')
# Hints for binary (transient fault) retry, (known) error and install guide
#
# Keyed by executable name; each error hint is [match-text, notebook title, notebook path].
retry_hints = {'python': []}
error_hints = {'python': [['Library not loaded: /usr/local/opt/unixodbc', 'SOP008 - Backup HDFS files to Azure Data Lake Store Gen2 with distcp', '../common/sop008-distcp-backup-to-adl-gen2.ipynb'], ['WARNING: You are using pip version', 'SOP040 - Upgrade pip in ADS Python sandbox', '../install/sop040-upgrade-pip.ipynb']]}
install_hint = {'python': []}
```
### Pip install the ipython-sql module
```
# Install ipython-sql into the ADS Python sandbox.
# (The f-string prefix was removed: the literal has no placeholders, ruff F541.)
run('python -m pip install ipython-sql')
```
### Pip install the pyodbc module
```
# Install pyodbc into the ADS Python sandbox.
# (The f-string prefix was removed: the literal has no placeholders, ruff F541.)
run('python -m pip install pyodbc')
```
### Pip list installed modules
```
# List the installed modules to confirm the installs above succeeded.
# (The f-string prefix was removed: the literal has no placeholders, ruff F541.)
run('python -m pip list')
print('Notebook execution complete.')
```
| github_jupyter |
```
from __future__ import print_function
import sisl
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
First TranSiesta bias example.
In this example we will take the system from [TS 1](../TS_01/run.ipynb) and perform bias calculations on it. Note, however, that applying a bias to a *pristine* bulk system is non-physical and should thus **NEVER** be done. TranSiesta will *not* warn you about this and will happily calculate the non-equilibrium density for any bulk system with an applied bias. This is an ***extremely*** important point and once complete with this example you should carefully think through why this is the case.
Bias calculations are very heavy because of a full DFT+NEGF calculation *per bias-point*.
We will begin with creating the structures.
```
# Build a rectangular graphene cell (bond length 1.44 Ang) and tile it along the
# first lattice vector (the transport direction) to create the electrode geometry.
graphene = sisl.geom.graphene(1.44, orthogonal=True)
elec = graphene.tile(2, axis=0)
elec.write('STRUCT_ELEC.fdf')
# The device region is simply three repetitions of the electrode (pristine bulk).
device = elec.tile(3, axis=0)
device.write('STRUCT_DEVICE.fdf')
```
## Exercises
In this exercise you will be familiarized with the input options that define a bias calculation. The input options are *extremely elaborate*, yet they require little intervention when using default parameters.
As this is your first example of running TranSiesta with applied bias there are a few things you should know:
1. Do not start by performing any $V\neq0$ calculations until you are perfectly sure that your $V=0$ calculation is well converged and well behaved, i.e. a small `dQ` (see [TS 1](../TS_01/run.ipynb)).
2. When performing bias calculations you are advised to create a new directory for each bias: `TS_<V>`.
3. *Any* bias calculation should be a restart that uses the TranSiesta density matrix of the ***closest*** previous bias calculation. This can be ensured by copying the `siesta.TSDE` file from the ***closest*** bias calculation to the current simulation directory. I.e.
- First run $V=0$ in `TS_0`, ensure convergence etc.
- Second run $V=0.25\,\mathrm{eV}$ in `TS_0.25`, copy `TS_0/siesta.TSDE` to `TS_0.25/` and start run.
- Third run $V=0.5\,\mathrm{eV}$ in `TS_0.5`, copy `TS_0.25/siesta.TSDE` to `TS_0.5/` and start run.
- etc.
- $N$th run $V=-0.25\,\mathrm{eV}$ in `TS_-0.25`, copy `TS_0/siesta.TSDE` to `TS_-0.25/` and start run (note negative bias' can be performed in parallel to positive bias)
4. All the commands required for this example can be executed like this:
```
siesta --electrode RUN_ELEC.fdf > ELEC.out
cd TS_0
cp ../C.psf .
siesta ../RUN.fdf > TS.out
# Check that the charge is converged etc.
cp siesta.TSDE ../TS_0.5/
cd ../TS_0.5
cp ../C.psf .
siesta -V 0.5:eV ../RUN.fdf > TS.out
# Check that it has converged...
cp siesta.TSDE ../TS_1.0/
cd ../TS_1.0
cp ../C.psf .
siesta -V 1:eV ../RUN.fdf > TS.out
# Check that it has converged...
```
After every calculation go through the output to ensure everything is well behaved. Note that the output of a bias calculation is different from a non-bias calculation, they are more detailed.
5. An additional analysis (before going to the transport calculations) is to calculate the potential drop in the junction. In sisl this is easy:
```
# Electrostatic potential grids from the zero-bias and 0.5 eV calculations;
# their difference is the bias-induced potential drop across the junction.
v0 = sisl.Grid.read('TS_0/ElectrostaticPotential.grid.nc')
vd = (sisl.Grid.read('TS_0.5/ElectrostaticPotential.grid.nc') - v0)
```
`vd` then contains the potential profile (in eV). To save it as a linear average bias file (remember transport is along first lattice vector) you can execute the following:
```
# Average over the two transverse directions, leaving a 1D profile along the transport axis.
vd = vd.average(1).average(2)
# Voxel length along the first lattice vector (Euclidean norm of the dcell row).
dv = (vd.dcell[0, :] ** 2).sum() ** .5
sisl.io.tableSile('potential_0.5.dat', 'w').write_data(dv * np.arange(vd.shape[0]), vd.grid[:, 0, 0])
```
This completes all non-equilibrium calculations for this example. However, we have only calculated the non-equilibrium density and thus, the non-equilibrium Hamiltonian. We still need to calculate the transport properties for all bias'. Basically we can only calculate the transport properties at the calculated bias values, but generally we are interested in a full $I(V)$ curve.
As a user, one has three options:
1. Calculate $I(V)$ for the calculated biases $V$ and perform an interpolation of $I(V)$, or
2. Interpolate the Hamiltonian to calculate $I(V)$ for all the required biases, or
3. Calculate all non-equilibrium Hamiltonians.
The first option is by far the fastest and easiest, with a sometimes poor accuracy; the second option is relatively fast, and drastically improves the accuracy; while the last option is the most accurate but may sometimes be non-feasible due to insufficient computational resources.
In the following we will calculate all transmissions using option 2. Look in the manual for the options regarding the interpolation (there are two interpolation methods).
Go through `RUN.fdf` and find the respective block that tells TBtrans to interpolate the Hamiltonian, also notice how the energy-grid is defined in TBtrans. You will notice that this is the fastest way to calculate the $I(V)$ curve for *any* bias, it however, will not calculate any physical quantities outside the bias window.
Now complete the exercise by running TBtrans for $V\in\{0, 0.1, \dots, 1\}$ eV. Note that instead of changing the applied bias in the fdf-file, one can do:
tbtrans -V 0.4:eV RUN.fdf
to apply $V=0.4\,\mathrm{eV}$, *any* fdf-flag specified on the command line has precedence! The `:` is to denote an effective space, otherwise you will have to encapsulate in quotation marks `tbtrans -V "0.4 eV" RUN.fdf`.
If you do not want to run the commands manually, you may use this loop command:
```
for V in $(seq 0 0.1 1) ; do
d=TBT_${V//,/.}
mkdir $d
cd $d
tbtrans -V "${V//,/.}:eV" ../RUN.fdf > TBT.out
cd ../
done
```
**TIME**: A remark on this exercise. Think why applying a bias to a bulk system is wrong. If you can't immediately figure this out, try and create a longer system by replacing `device = elec.tile(3, axis=0)` with, say: `device = elec.tile(6, axis=0)` and redo the calculation for a given bias. Then compare the potential profiles.
### Plot the transmissions
Calculate the current for all $V$, then plot it.
```
# Bias values matching the TBT_<V> directories created above (0.0, 0.1, ..., 1.0).
V = np.arange(0, 1.05, 0.1)
I = np.empty([len(V)])
for i, v in enumerate(V):
    # NOTE(review): current() presumably integrates the transmission over the
    # bias window — confirm against the sisl documentation.
    I[i] = sisl.get_sile('TBT_{:.1f}/siesta.TBT.nc'.format(v)).current()
plt.plot(V, I * 1e6);  # scale to micro-amperes to match the axis label
plt.xlabel('Bias [V]'); plt.ylabel(r'Current [$\mu\mathrm{A}$]');
```
Why is the current $0$ for $V<0.8$?
*Hint*: see [TB 1](../TB_01/run.ipynb).
| github_jupyter |
```
import pandas as pd
import numpy as np
# Read in feature sets and corresponding outputs (solver runtimes)
X = pd.read_csv("all_features_QF_SLIA.csv")
y = pd.read_csv("times_QF_SLIA_ALL.csv")
from scipy.stats import zscore
# Normalize features to zero mean and unit variance
X = X.apply(zscore)
# Convert output values to class labels: 0 for 30-60s, 1 for 1-12m, 2 for timeout (900s).
# Anything below 30s maps to -1 and is caught by the assertion below.
y = y.values
y = pd.DataFrame(np.where(y == 900, 2, np.where(y > 60, 1, np.where(y >= 30, 0, -1))))
# Verifies that there were no values in the data that were outside the expected range
assert(not -1 in y.values)
from sklearn.model_selection import train_test_split
# Split datasets into
# training (60%)
# validation (20%)
# testing (20%)
# (0.25 of the remaining 80% equals 20% of the whole set.)
X_tv, X_test, y_tv, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
X_train, X_val, y_train, y_val = train_test_split(X_tv, y_tv, test_size=0.25, random_state=1)
# Combine output and features (label first, as SageMaker's XGBoost CSV format expects)
train = pd.concat([y_train, X_train], axis=1)
val = pd.concat([y_val, X_val], axis=1)
test = pd.concat([y_test, X_test], axis=1)
train.to_csv('train.csv', index=False, header=False)
val.to_csv('validation.csv', index=False, header=False)
test.to_csv('test.csv', index=False, header=False)
import sagemaker, boto3, os
bucket = sagemaker.Session().default_bucket()
prefix = "smt-qfslia-cvc4-runtime"
# Upload datasets to S3
boto3.Session().resource('s3').Bucket(bucket).Object(
os.path.join(prefix, 'data/train.csv')).upload_file('train.csv')
boto3.Session().resource('s3').Bucket(bucket).Object(
os.path.join(prefix, 'data/validation.csv')).upload_file('validation.csv')
boto3.Session().resource('s3').Bucket(bucket).Object(
os.path.join(prefix, 'data/test.csv')).upload_file('test.csv')
region = sagemaker.Session().boto_region_name
role = sagemaker.get_execution_role()
from sagemaker.debugger import Rule, rule_configs
from sagemaker.session import TrainingInput
# Configure model
s3_output_location='s3://{}/{}/{}'.format(bucket, prefix, 'xgboost_model')
container=sagemaker.image_uris.retrieve("xgboost", region, "1.2-1")
print(container)
xgb_model=sagemaker.estimator.Estimator(
image_uri=container,
role=role,
instance_count=1,
instance_type='ml.m4.xlarge',
volume_size=5,
output_path=s3_output_location,
sagemaker_session=sagemaker.Session(),
rules=[Rule.sagemaker(rule_configs.create_xgboost_report())]
)
xgb_model.set_hyperparameters(
objective = 'multi:softprob',
num_class = 3,
max_depth = 6,
eta = 0.3,
num_round = 100,
alpha = 0.8,
subsample = 0.5,
colsample_bytree = 0.5
)
from sagemaker.session import TrainingInput
train_input = TrainingInput(
"s3://{}/{}/{}".format(bucket, prefix, "data/train.csv"), content_type="csv"
)
validation_input = TrainingInput(
"s3://{}/{}/{}".format(bucket, prefix, "data/validation.csv"), content_type="csv"
)
# Run the training job to fit the model
xgb_model.fit({"train": train_input, "validation": validation_input}, wait=True)
# Get the auto-generated analytics
rule_output_path = xgb_model.output_path + "/" + xgb_model.latest_training_job.name + "/rule-output"
! aws s3 ls {rule_output_path} --recursive
# Download the auto-generated analytics
! aws s3 cp {rule_output_path} ./ --recursive
# When done training/tuning the model, deploy an endpoint to SageMaker
import sagemaker
from sagemaker.serializers import CSVSerializer
xgb_predictor=xgb_model.deploy(
initial_instance_count=1,
instance_type='ml.t2.medium',
serializer=CSVSerializer()
)
import numpy as np
# This function calls the endpoint to get predictions
# from the model and processes the returned data
# This function calls the endpoint to get predictions
# from the model and processes the returned data
def predict_multi_class(data, num_class, rows=1000):
    """Call the deployed endpoint in batches of *rows* examples and parse the response.

    Parameters:
        data      -- 2D array of feature rows (no label column)
        num_class -- number of classes the model was trained with (>= 2)
        rows      -- batch size per endpoint request (keeps payloads small)

    Returns a 1-D array of positive-class probabilities when num_class == 2,
    otherwise an (num_examples, num_class) array of class probabilities.
    Raises ValueError on invalid num_class or a mismatched response size.
    """
    # Validate with an exception rather than assert/exit: assert is stripped
    # under `python -O`, and exit() kills the whole interpreter.
    if num_class < 2:
        raise ValueError("num_class must be >= 2")
    num_examples = data.shape[0]
    split_array = np.array_split(data, int(num_examples / float(rows) + 1))
    predictions = ''
    for array in split_array:
        predictions = ','.join([predictions, xgb_predictor.predict(array).decode('utf-8')])
    # For binary classifiers, predict() returns a single float:
    # the probability of a positive outcome
    # formally, this means the model returns 1
    if num_class == 2:
        # Text-mode np.fromstring is deprecated; parse the CSV text explicitly.
        return np.array(predictions[1:].split(','), dtype=float)
    # Convert string version of 2D array to Python list of strings
    pred_list = predictions[1:].replace('[','').replace(']','').strip().split(',')
    if len(pred_list) != num_examples * num_class:
        raise ValueError(
            "Unexpected number of predictions. Verify that the value of num_class is correct.")
    # Convert Python list to Numpy array of floats, and reshape to 2D
    return np.array(pred_list, dtype=float).reshape([num_examples,num_class])
import sklearn
# Output the accuracy of the model on the test set
log_predictions = predict_multi_class(test.to_numpy()[:,1:], 3)
predictions = np.argmax(log_predictions, axis=1)
sklearn.metrics.accuracy_score(test.iloc[:,0], predictions)
# Output the confusion matrix for the test set
cm = sklearn.metrics.confusion_matrix(test.iloc[:,0], predictions)
cm
# Computing feature means and stdevs for Inference Script
X_orig = pd.read_csv("all_features_QF_SLIA.csv")
np.set_printoptions(suppress=True) # Suppresses scientific notation
with open("feature_means.dat", 'w') as fp:
fp.write(str(X_orig.apply(np.mean).to_numpy()))
np.set_printoptions(suppress=True) # Suppresses scientific notation
with open("feature_stdevs.dat", 'w') as fp:
fp.write(str(X_orig.apply(np.std).to_numpy()))
```
| github_jupyter |
**Authors:** Peter Štrauch, Jozef Hanč, Martina Hančová <br>
**R consultant:** Andrej Gajdoš <br>
[Faculty of Science](https://www.upjs.sk/en/faculty-of-science/?prefferedLang=EN) *P. J. Šafárik University in Košice, Slovakia* <br>
email: [jozef.hanc@upjs.sk](mailto:jozef.hanc@upjs.sk)
***
**<font size=6 color=brown> Research study III: In-service teachers</font>**
**<font size=4> R Shiny $-$ UEQ (User Experience Questionary) evaluation and benchmark plot</font>**
<font size=4> Computational tool: </font> **<font size=4> R, CRAN libraries, own R functions </font>**
# Data and tools
## R libraries and functions
```
# use the following commands to install libraries, if it is needed
# packages = c('readxl', psychometric', 'repr', 'scale', 'Hmisc')
# install.packages(packages)
## load CRAN libraries
library(readxl) # read excel
library(psychometric) # measures - cronbach alpha
library(repr) # set up figures
require(scales) # transparent color
library(Hmisc) # weighted sd, var
# own UEQ functions
source('UEQ_functions.R')
```
## UEQ characteristics
```
## UEQ items
print(item_names())
## dimensions with items
dimensions()
## borders for levels in each dimension
benchmark_tab_borders()
```
## Data preprocesing
```
# load full results as dataframe
# Load the full questionnaire results as a data frame.
data_shiny <- as.data.frame(read_excel('../data/03_In-service_teachers_UEQ-weighted.xlsx'))
# Inspect the column types (listing truncated to the first 5 variables).
str(data_shiny, list.len = 5)
# Keep only the 26 UEQ item columns.
data <- data_shiny[,4:29] # 1st column is timestamp, 2nd column is ID of teacher, 3rd column are weights
weights <- data_shiny$weight
## view the first few weights and responses
head(weights,5)
head(data,5)
```
## Data wrangling for UEQ benchmark
```
## rescale data
DT <- rescale_data(data = data)
DT
```
# Analysis
## Consistency, inconsistency
```
## reliability
reliability(DT, spec = "whole")
reliability(DT, coef = "lambda")
## check data for inconstencies
inconsistencies(rescaled_data = DT, spec = "text")
## which responces are suggested to be deleted
remove_rows <- inconsistencies(rescaled_data = DT, spec = "remove")
remove_rows
## if we want we can remove suspicious responces - just delete "#" sign in the row below
#DT <- DT[-remove_rows,]; DT
#weights <- weights[-remove_rows]; weights
```
## Analysis of items
```
## mean values per item
item_mean(DT)
## plot of item means
options(repr.plot.width=8, repr.plot.height=6)
plot_items(rescaled_data = DT)
```
## Analysis of responses
```
## means per person
tab <- means_person(rescaled_data = DT)
tab
## mean, standard deviaton and variance for each dimension
dim_means <- dimensions_mean(data = tab, weights = weights)
dim_means
dimensions_deviation(data = tab, weights = weights)
dimensions_sderror(data=tab, weights = weights)
dimensions_variance(data = tab, weights = weights)
## means for grouped dimensions
grouped_dimensions_mean(tab, weights = weights)
```
## Vizualization and interpretation
```
## plot by dimensions
options(repr.plot.width=8, repr.plot.height=5)
plot_dimensions(data = tab, weights = weights)
## plot by grouped dimensions
options(repr.plot.width=7, repr.plot.height=5)
plot_grouped_dimensions(tab, weights = weights)
## plot with benchmarks
options(repr.plot.width=10, repr.plot.height=6)
plot_benchmarks(tab, weights = weights)
## interpretation of results
interpretation(dim_means)
```
## Weighted vs non-weighted
```
## duplicate data - with and without weights
data_merged <- merge_data(data_1 = data[,1:26], data_2 = data[,1:26], label_1 = "weighted", label_2 = "non-weighted")
weights_merged <- c(weights, rep(1, nrow(data)))
weights_merged
data_merged
## rescale data
DT_merged <- rescale_data(data = data_merged)
## calculate means for each dimension
tab_merged <- means_person(rescaled_data = DT_merged, grouping = TRUE)
dimensions_mean(data = tab_merged, grouping = TRUE, weights = weights_merged)
dimensions_deviation(data = tab_merged, grouping = TRUE, weights = weights_merged)
dimensions_sderror(data = tab_merged, grouping = TRUE, weights = weights_merged)
## plot with benchmarks
options(repr.plot.width=10, repr.plot.height=6)
plot_benchmarks(tab_merged, grouping = TRUE, weights = weights_merged)
## plot with benchmarks
options(repr.plot.width=10, repr.plot.height=6)
plot_benchmarks(tab_merged, grouping = TRUE, weights = weights_merged, ylim = c(1,1.6) )
```
| github_jupyter |
# Rotor Estimation using the Tensor Representation of Geometric Algebra
```
from __future__ import print_function
import sys
sys.path.append('../build/')
%pylab inline
np.set_printoptions(precision=2, suppress=True,threshold=np.inf)
import versor as vsr
```
## Dataset generation
```
# Build a rotor from a bivector scaled by pi/6, then extract the equivalent
# 3x3 rotation matrix by sandwiching each basis vector: R[i, j] = <e_i, r e_j r~>.
r = vsr.Rot(vsr.Biv(0,1,0) * np.pi/6.0)
R = np.zeros((3,3))
for i in range(3):
    for j in range(3):
        a = vsr.Vec(0,0,0)
        b = vsr.Vec(0,0,0)
        a[j] = 1
        b[i] = 1
        # NOTE(review): `<=` appears to be versor's inner-product operator and
        # spin() the rotor sandwich product — confirm against the versor docs.
        R[i,j] = b <= a.spin(r)
R
vsr.Vec(1,0,0).spin(r)
vsr.Vec(0,1,0).spin(r)
vsr.Vec(0,0,1).spin(r)
# NOTE(review): `rotor` is only assigned two statements below — as written,
# computing points_b raises NameError unless an earlier cell already defined
# rotor. Move the rotor assignment above points_b when running top-to-bottom.
n_points = 10
sigma = 0.09
points_a = [vsr.Vec(*np.random.normal(0.0, 0.8, 3))
            for i in range(n_points)]
points_b = [point.spin(rotor) for point in points_a]
# Add isotropic Gaussian noise (std = sigma) to the rotated points.
points_b_noisy = [vsr.Vec(*(np.array(point)[:3]
                            + sigma * np.random.randn(3)))
                  for point in points_b]
rotor = vsr.Biv(0,-pi/8,0).exp()
print(rotor)
n_points = 3
sigma = 0.09
points_a = [vsr.Vec(*np.random.normal(0.0, 0.8, 3))
for i in range(n_points)]
points_b = [point.spin(rotor) for point in points_a]
points_b_noisy = [vsr.Vec(*(np.array(point)[:3]
+ sigma * np.random.randn(3)))
for point in points_b]
ega_a = [vsr.EGA(p) for p in points_a]
ega_b = [vsr.EGA(p) for p in points_b]
M = np.array([[1,0,0,0,0,0,0,0],
[0,0,0,0,1,0,0,0],
[0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,1,0]])
print(M)
def matrix(a, b):
    """Return the 8x8 matrix of the linear map x -> x*a - b*x in the blade basis.

    Row i holds the multivector coefficients of e_i * a - b * e_i.
    """
    m = np.zeros((8,8))
    for i in range(8):
        ei = vsr.EGA(0,0,0,0,0,0,0,0)
        # BUG FIX: the original set ei[i] = i, which zeroes the first basis row
        # and scales the others by their index; the basis-blade coefficient must
        # be 1 (the later duplicate of this helper correctly uses 1.0).
        ei[i] = 1.0
        m[i,:] = np.array(ei * a - b * ei)
    return m
# m = np.row_stack([
# np.delete(np.delete(matrix(a,b),[0,4,5,6],0), [1,2,3,7],1)
# for a, b in zip(ega_a, ega_b)]).copy()
m = np.row_stack(np.dot(matrix(a,b),M.T) for a, b in zip(ega_a, ega_b))
U,s,Vt = np.linalg.svd(m)
# print(Vt.T)
print(vsr.Rot(*Vt[-1]))
# print(s)
```
## Solver
```
class Multivector(object):
    """Column-vector (8x1) representation of a 3D Euclidean GA multivector,
    with a rank-3 geometric-product tensor derived numerically from versor."""

    def __init__(self, data=None):
        # Coefficients stored as an 8x1 column: [scalar, e1, e2, e3, e12?, ..., e123].
        # NOTE(review): the exact blade ordering follows versor's EGA layout — confirm.
        if data is not None:
            self._data = np.array(data).reshape(8,1)
        else:
            self._data = np.zeros((8,1))
        # Grade of each of the 8 blade slots.
        self._grades = [0, 1, 1, 1, 2, 2, 2, 3]
        self._gp_tensor = self._create_gp_tensor()
    def __repr__(self):
        return self._data.ravel().__repr__()
    # @property
    # def scalar(self):
    #     return self._data[0]
    # @scalar.setter
    def scalar(self, scalar):
        # Set the scalar part in place and return a fresh copy (fluent style).
        self._data[0] = scalar
        return Multivector(self._data)
    # @property
    # Signed Cayley table of the geometric product: entry (i, j) is the 1-based
    # index of the blade produced by e_i * e_j, with sign carrying the product sign.
    gp_table = np.array([1, 2, 3, 4, 5, 6, 7, 8,
                         2, 1, 7, -6, 8, -4, 3, 5,
                         3, -7, 1, 5, 4, 8, -2, 6,
                         4, 6, -5, 1, -3, 2, 8, 7,
                         5, 8, -4, 3, -1, -7, 6, -2,
                         6, 4, 8, -2, 7, -1, -5, -3,
                         7, -3, 2, 8, -6, 5, -1, -4,
                         8, 5, 6, 7, -2, -3, -4, -1]).T.reshape(8,8)
    # def vector(self):
    #     return self._data[1:4]
    def vector(self, vector):
        # Set the grade-1 (vector) part; mutates self and returns a copy.
        self._data[1:4] = np.array(vector).copy().reshape(-1,1)
        return Multivector(self._data)
    # @property
    # def bivector(self):
    #     return self._data[4:7]
    # @bivector.setter
    def bivector(self, bivector):
        # Set the grade-2 (bivector) part; mutates self and returns a copy.
        self._data[4:7] = np.array(bivector).copy().reshape(-1,1)
        return Multivector(self._data)
    # @property
    # def pseudoscalar(self):
    #     return self._data[7]
    # @pseudoscalar.setter
    def pseudoscalar(self, pseudoscalar):
        # Set the grade-3 (pseudoscalar) part; mutates self and returns a copy.
        self._data[7] = pseudoscalar
        return Multivector(self._data)
    def _create_gp_tensor(self):
        """Derive the rank-3 product tensor G[k, i, j] numerically from versor:
        G[k, i, j] is the coefficient of blade e_k in e_i * e_j."""
        M = np.zeros((8,8))
        # Dotting with [1..8] recovers the signed 1-based blade index, because
        # each basis product e_i * e_j has exactly one nonzero (+/-1) coefficient.
        mask = np.array([1,2,3,4,5,6,7,8])
        for i in range(8):
            W = np.zeros((8,8))  # NOTE(review): W is never used — dead local
            for j in range(8):
                a = vsr.EGA(0,0,0,0,0,0,0,0)
                b = vsr.EGA(0,0,0,0,0,0,0,0)
                a[i] = 1.
                b[j] = 1.
                M[i,j] = np.dot(mask, np.array(a * b))
        gp_table = M.copy()
        # Expand the signed index table into a 0/+1/-1 rank-3 tensor.
        tensor = np.zeros((8,8,8))
        for k in range(8):
            for i in range(8):
                for j in range(8):
                    val = gp_table[i,j]
                    if abs(val) == k + 1:
                        tensor[k,i,j] = np.sign(val)
        return tensor
    def gp_right_matrix(self):
        # 8x8 matrix of right-multiplication by this multivector: x -> x * self.
        return np.tensordot(self._gp_tensor.T,self._data,1).reshape(8,8)
    def gp_left_matrix(self):
        # 8x8 matrix of left-multiplication by this multivector: x -> self * x.
        return np.tensordot(self._data.T, self._gp_tensor,1).reshape(8,8)
Multivector(vsr.EGA(vsr.Vec(1,2,3))).gp_left_matrix()
matrix(vsr.EGA(vsr.Vec(1,2,3)))
np.dot(Multivector(vsr.EGA(vsr.Vec(1,2,3))).gp_left_matrix(), vsr.EGA(vsr.Vec(-5,-5,-7)))
vsr.Vec(1,2,3) * vsr.Vec(-5,-5,-7)
vsr.Vec(-5,-5,-7) * vsr.Vec(1,2,3)
def matrix(a, left=True):
    """Return the 8x8 matrix of the geometric product with *a*.

    Row i holds the coefficients of a * e_i (left=True) or e_i * a (left=False),
    where e_i is the i-th basis blade.
    """
    rows = []
    for idx in range(8):
        basis = vsr.EGA(0, 0, 0, 0, 0, 0, 0, 0)
        basis[idx] = 1.0
        product = (a * basis) if left else (basis * a)
        rows.append(np.array(product))
    return np.array(rows, dtype=float)
mask = [1,0,0,0,1,1,1,0]
mask= np.outer(mask,mask)
m = matrix(vsr.EGA(vsr.Vec(1,2,3))) - matrix(vsr.EGA(vsr.Vec(3,-1,5)),True)
print(m)
np.delete(np.delete(m,[0,4,5,6],0), [1,2,3,7],1)
motor
points_a = [vsr.EGA(vsr.Vec(1,0,0)),
vsr.EGA(vsr.Vec(0,0,1)),
vsr.EGA(vsr.Vec(1,2,3))]
points_b = [a.spin(vsr.EGA(rotor)) for a in points_a]
# n_points = 10
# sigma = 0.09
# points_a = [vsr.EGA(vsr.Vec(*np.random.normal(0.0, 0.8, 3)))
# for i in range(n_points)]
# points_b = [point.spin(vsr.EGA(rotor)) for point in points_a]
m = np.array([gp_a - gp_b for gp_a, gp_b in zip([Multivector(np.array(point)).gp_right_matrix()
for point in points_a],
[Multivector(np.array(point)).gp_left_matrix()
for point in points_b])]).reshape(-1,8)
U,s,Vt = np.linalg.svd(m)
print(s)
print(Vt.T)
print(rotor)
Multivector().vector(points_a[0]).gp_left_matrix()
class TensorRotorSolver(object):
    """Holds the rank-3 geometric-product tensor for 3D Euclidean GA.

    The tensor G[k, i, j] is the coefficient of basis blade e_k in the product
    e_i * e_j. It is expanded from a signed 8x8 Cayley table whose entries are
    1-based blade indices with the product's sign attached.
    """

    # Signed Cayley table of the geometric product (1-based blade indices).
    _GP_TABLE = np.array([1, 2, 3, 4, 5, 6, 7, 8,
                          2, 1, 7, -6, 8, -4, 3, 5,
                          3, -7, 1, 5, 4, 8, -2, 6,
                          4, 6, -5, 1, -3, 2, 8, 7,
                          5, 8, -4, 3, -1, -7, 6, -2,
                          6, 4, 8, -2, 7, -1, -5, -3,
                          7, -3, 2, 8, -6, 5, -1, -4,
                          8, 5, 6, 7, -2, -3, -4, -1]).T.reshape(8, 8)

    def __init__(self):
        self._gp_tensor = self._create_gp_tensor()

    @property
    def gp_tensor(self):
        """The 8x8x8 geometric-product tensor G[k, i, j]."""
        return self._gp_tensor

    def _create_gp_tensor(self):
        # Every table entry has magnitude 1..8, so each (i, j) pair contributes
        # exactly one +/-1 to the tensor at blade index |entry| - 1.
        tensor = np.zeros((8, 8, 8))
        for i in range(8):
            for j in range(8):
                entry = self._GP_TABLE[i, j]
                tensor[int(abs(entry)) - 1, i, j] = np.sign(entry)
        return tensor
Gkij = TensorRotorSolver().gp_tensor
ai = np.array([0,1,2,3,0,0,0,0])
bj = np.array([0,0,0,0,1,2,3,0])
print(np.einsum('i,j,kij->k', ai, bj, Gkij))
print(np.einsum('j,kij->ki',bj, Ikij))
print(np.einsum('i,kij->kj', ai, Gkij))
vsr.EGA(0,1,0,0,0,0,0,0) * vsr.EGA(0,1,2,3,4,5,6,7)
B = vsr.EGA(0,0,0,0,5,6,7,0)
J = np.zeros((8,8))
for i in range(8):
ei = vsr.EGA(*np.zeros(8))
ei[i] = 1.
J[:,i] = ei <= B
print(J)
print(np.einsum('i,j,kij->k', ai, bj, Ikij))
print(np.einsum('i,j,kij->k', ai, bj, Okij))
vsr.EGA(1,2,3,4,5,6,7,8).rev()
Rji = np.array([1,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,
0,0,0,1,0,0,0,0,
0,0,0,0,-1,0,0,0,
0,0,0,0,0,-1,0,0,
0,0,0,0,0,0,-1,0,
0,0,0,0,0,0,0,-1
]).reshape(8,8)
rot = np.array([cos(pi/6),0,0,0,0,0,-sin(pi/6),0])
rotrev = np.einsum('i,ji->j', rot, Rji)
print(rot, rotrev)
print(np.einsum('i,j,m,kij,ml,pkl->p', rot, ai, rot,Gkij,Rji, Gkij))
print(np.einsum('i,j,m,lm,kij,pkl->p', rot, ai, rot,Rji,Gkij,Gkij))
print(np.einsum('j,m,ml,pkl->p', ai, rot,Rji,Gkij,Gkij))
print(np.einsum('j,m,ml,kij,pkl->pi', ai, rot,Rji,Gkij,Gkij) +
np.einsum('i,j,kij,pkl->pl', rot, ai, Gkij,Gkij))
print(np.einsum('i,j,lm,kij,pkl->pm', rot, ai,Rji,Gkij,Gkij) +
np.einsum('j,m,lm,kij,pkl->pi', ai, rot,Rji,Gkij,Gkij))
print(np.einsum('j,m,lm,kij,pkl->ip', ai, rot,Rji,Gkij,Gkij))
np.einsum('r,j,kij->')
Jac = np.zeros((3,4))
Jac[:,0] = np.array(vsr.EGA(1,0,0,0,0,0,0,0) * ae * Re.rev() + Re * ae * vsr.EGA(1,0,0,0,0,0,0,0))[1:4]
Jac[:,1] = np.array(vsr.EGA(0,0,0,0,1,0,0,0) * ae * Re.rev() + Re * ae * vsr.EGA(0,0,0,0,-1.,0,0,0))[1:4]
Jac[:,2] = np.array(vsr.EGA(0,0,0,0,0,1,0,0) * ae * Re.rev() + Re * ae * vsr.EGA(0,0,0,0,0,-1,0,0))[1:4]
Jac[:,3] = np.array(vsr.EGA(0,0,0,0,0,0,1.,0) * ae * Re.rev() + Re * ae * vsr.EGA(0,0,0,0,0,0,-1,0))[1:4]
print(Jac)
ae = vsr.EGA(0,1,0,0,0,0,0,0)
Re = vsr.EGA(cos(pi/6),0,0,0,-sin(pi/6),0,0,0)
Jac = np.zeros((8,8))
Jac[:,0] = np.array(vsr.EGA(1,0,0,0,0,0,0,0) * ae * Re.rev() + Re * ae * vsr.EGA(1,0,0,0,0,0,0,0))
Jac[:,1] = np.array(vsr.EGA(0,0,0,0,1,0,0,0) * ae * Re.rev() + Re * ae * vsr.EGA(0,0,0,0,-1.,0,0,0))
Jac[:,2] = np.array(vsr.EGA(0,0,0,0,0,1,0,0) * ae * Re.rev() + Re * ae * vsr.EGA(0,0,0,0,0,-1,0,0))
Jac[:,3] = np.array(vsr.EGA(0,0,0,0,0,0,1.,0) * ae * Re.rev() + Re * ae * vsr.EGA(0,0,0,0,0,0,-1,0))
print(Jac)
vsr.Vec(1,0,0).spin(vsr.Rot(cos(pi/6), -sin(pi/6),0,0))
def create_ip_tensor():
    """Build the 8x8x8 inner-product structure tensor for 3-D Euclidean GA.

    Entry [k, i, j] is the sign (+1/-1) with which basis blade k appears in
    the inner product of blades i and j. The signed table encodes, per
    (i, j) pair, the 1-based output blade index and its sign in one integer;
    a zero means the product has no component on any blade.
    """
    signed_table = np.array([
        [0, 0,  0,  0,  0,  0,  0,  0],
        [0, 1,  0,  0,  0, -4,  3,  5],
        [0, 0,  1,  0,  4,  0, -2,  6],
        [0, 0,  0,  1, -3,  2,  0,  7],
        [0, 0, -4,  3, -1,  0,  0, -2],
        [0, 4,  0, -2,  0, -1,  0, -3],
        [0, -3, 2,  0,  0,  0, -1, -4],
        [0, 5,  6,  7, -2, -3, -4, -1],
    ])
    tensor = np.zeros((8, 8, 8))
    for i in range(8):
        for j in range(8):
            entry = signed_table[i, j]
            if entry != 0:
                # abs(entry) is the 1-based blade index; sign carries over.
                tensor[abs(entry) - 1, i, j] = np.sign(entry)
    return tensor
def create_op_tensor():
    """Build the 8x8x8 outer-product structure tensor for 3-D Euclidean GA.

    Entry [k, i, j] is the sign (+1/-1) with which basis blade k appears in
    the outer product of blades i and j. Each table cell packs the 1-based
    output blade index and sign into one signed integer; zero means the
    outer product of that blade pair vanishes.
    """
    signed_table = np.array([
        [1,  2,  3,  4, 5, 6, 7, 8],
        [2,  0,  7, -6, 8, 0, 0, 0],
        [3, -7,  0,  5, 0, 8, 0, 0],
        [4,  6, -5,  0, 0, 0, 8, 0],
        [5,  8,  0,  0, 0, 0, 0, 0],
        [6,  0,  8,  0, 0, 0, 0, 0],
        [7,  0,  0,  8, 0, 0, 0, 0],
        [8,  0,  0,  0, 0, 0, 0, 0],
    ])
    tensor = np.zeros((8, 8, 8))
    for i in range(8):
        for j in range(8):
            entry = signed_table[i, j]
            if entry != 0:
                # abs(entry) is the 1-based blade index; sign carries over.
                tensor[abs(entry) - 1, i, j] = np.sign(entry)
    return tensor
Ikij = create_ip_tensor()
Okij = create_op_tensor()
BjIkij = np.einsum('j,kij->ki',B, Ikij)
print(BjIkij)
np.tensordot(a, BjIkij,1)
np.einsum('j,ijk->ki',B, Gkij)
Gkij = np.zeros((4,4,4))
Gkij[0] = np.array([1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,-1]).reshape(4,4)
Gkij[1] = np.array([0,1,0,0,1,0,0,0,0,0,0,-1,0,0,1,0]).reshape(4,4)
Gkij[2] = np.array([0,0,1,0,0,0,0,1,1,0,0,0,0,-1,0,0]).reshape(4,4)
Gkij[3] = np.array([0,0,0,1,0,0,1,0,0,-1,0,0,1,0,0,0]).reshape(4,4)
Gkij
ai = np.array([0,1,0,0])
bj = np.array([0,0,1,0])
np.einsum('i,j,kij->k',ai,bj,Gkij)
# Reduced tensor
Maji = Mbji = np.array([[0,1,0,0],[0,0,1,0]])
Mcji = np.array([[1,0,0,0],[0,0,0,1]])
Gwuv = np.einsum('wk,iu,jv,kij->wuv',Mcji,Maji.T, Mbji.T,Gkij)
aM = np.array([0,1]) # 0 e1 + 1 e2
bM = np.array([1,0]) # 1 e1 + 0 e2
cM = np.einsum('u,v,wuv->w',aM, bM, Gwuv)
np.einsum('w,wk',cM, Mcji)
np.tensordot(np.tensordot(a,B,0), Ikij,2)
np.tensordot(a, np.tensordot(B, Gkij,1),0)
np.einsum('i,j,kij->k',a, B, Gkij)
vsr.EGA(*a) * vsr.EGA(*B)
def rotor_estimation_ls_svd(points_a, points_b):
    """Estimate a 3-D rotor R with R*a ~= b*R for paired points via SVD.

    Builds, for each point pair (a, b), the matrix of the linear map
    R -> a*R - R*b restricted to rotor components (scalar + 3 bivectors),
    stacks them, and takes a right singular vector of the stack as the
    least-squares rotor estimate.

    Returns
    -------
    (rotor, m) : rotor is a length-4 array (sc, e12, e13, e23); m is the
        stacked constraint matrix used in the SVD.
    """
    # gp_table = np.array([1, 2, 3, 4, 5, 6, 7, 8,
    #                      2, 1, 7, -6, 8, -4, 3, 5,
    #                      3, -7, 1, 5, 4, 8, -2, 6,
    #                      4, 6, -5, 1, -3, 2, 8, 7,
    #                      5, 8, -4, 3, -1, -7, 6, -2,
    #                      6, 4, 8, -2, 7, -1, -5, -3,
    #                      7, -3, 2, 8, -6, 5, -1, -4,
    #                      8, 5, 6, 7, -2, -3, -4, -1]).reshape(8,8)
    # Derive the signed geometric-product table numerically from vsr by
    # multiplying every pair of basis blades; dotting with 1..8 recovers
    # the (signed, 1-based) output blade index of each product.
    M = np.zeros((8,8))
    mask = np.array([1,2,3,4,5,6,7,8])
    for i in range(8):
        W = np.zeros((8,8))  # NOTE(review): W is assigned but never used — dead code
        for j in range(8):
            a = vsr.EGA(0,0,0,0,0,0,0,0)
            b = vsr.EGA(0,0,0,0,0,0,0,0)
            a[i] = 1.
            b[j] = 1.
            M[i,j] = np.dot(mask, np.array(a * b))
    gp_table = M.copy()
    def gp_tensor():
        # Expand the signed table into the 8x8x8 structure tensor
        # tensor[k,i,j] = sign of blade k in e_i * e_j.
        dim = 8
        tensor = np.zeros((8,8,8))
        for k in range(dim):
            for i in range(dim):
                for j in range(dim):
                    val = gp_table[i,j]
                    if abs(val) == k + 1:
                        tensor[k,i,j] = np.sign(val)
        return tensor
    def gp_left_matrix(multivector):
        # Matrix of x -> multivector * x in blade coordinates.
        # NOTE(review): gp_tensor() is recomputed on every call — could be hoisted.
        tensor = gp_tensor()
        matrix = np.zeros((8,8))
        for i in range(8):
            t = tensor[i,:,:]
            matrix[i,:] = np.inner(t.T,np.array(multivector).T).reshape(-1)
        return matrix
    def gp_right_matrix(multivector):
        # Matrix of x -> x * multivector in blade coordinates.
        tensor = gp_tensor()
        matrix = np.zeros((8,8))
        for i in range(8):
            t = tensor[i,:,:]
            matrix[i,:] = np.inner(np.array(multivector).T,t).reshape(-1)
        return matrix
    # A = [np.array([0.0, p[0], p[1], p[2], 0.0, 0.0, 0.0, 0.0]).reshape(8,1) for p in points_a]
    # B = [np.array([0.0, p[0], p[1], p[2], 0.0, 0.0, 0.0, 0.0]).reshape(8,1) for p in points_b]
    # Delete rows [0,4,5,6] and columns [1,2,3,7] to restrict the maps to
    # rotor components — NOTE(review): verify the index sets match the blade ordering.
    gp_a = np.row_stack([
        np.delete(np.delete(gp_right_matrix(a),[0,4,5,6],0), [1,2,3,7],1)
        for a in points_a])
    b_gp = np.row_stack([
        np.delete(np.delete(gp_left_matrix(b),[0,4,5,6],0), [1,2,3,7],1) for b in points_b])
    # Null-space problem: (a*R - R*b) ~ 0, solved in the least-squares sense.
    m = gp_a - b_gp
    [U,s,Vt] = np.linalg.svd(m)
    print(Vt.T)
    print(s)
    names = ('sc', 'e1', 'e2', 'e3', 'e12', 'e13', 'e23', 'e123')
    # NOTE(review): takes the SECOND-smallest singular vector (column -2),
    # not the smallest — confirm this is intentional.
    res = np.recarray(1, formats = 8*['f8'], names=names, buf=Vt.T[:,-2])
    rotor = np.array([res['sc'], res['e12'], res['e13'], res['e23']])
    return rotor, m
print(points_a)
print(points_b)
r,m2 = rotor_estimation_ls_svd(points_a, points_b)
vsr.Rot(*r)
print(rotor)
gp_table = np.array([1, 2, 3, 4, 5, 6, 7, 8,
2, 1, 7, -6, 8, -4, 3, 5,
3, -7, 1, 5, 4, 8, -2, 6,
4, 6, -5, 1, -3, 2, 8, 7,
5, 8, -4, 3, -1, -7, 6, -2,
6, 4, 8, -2, 7, -1, -5, -3,
7, -3, 2, 8, -6, 5, -1, -4,
8, 5, 6, 7, -2, -3, -4, -1]).T.reshape(8,8)
print(gp_table.T)
M = np.zeros((8,8))
mask = np.array([1,2,3,4,5,6,7,8])
for i in range(8):
W = np.zeros((8,8))
for j in range(8):
a = vsr.EGA(0,0,0,0,0,0,0,0)
b = vsr.EGA(0,0,0,0,0,0,0,0)
a[i] = 1.
b[j] = 1.
M[i,j] = np.dot(mask, np.array(a * b))
gp_table = M.T.copy()
print(gp_table.T)
print(Multivector().vector(points_a[0]).gp_right_matrix())
print(Multivector().vector(points_b[0]).gp_left_matrix())
print(m2[:8])
r = rotor
vsr.EGA(0,1,0,0,0,0,0,0).spin(vsr.EGA(r[0],0,0,0,r[1],r[2],r[3],0))
rotor = vsr.Biv(0,-pi/8,0).exp()
print(rotor)
n_points = 3
sigma = 0.09
points_a = [vsr.Vec(*np.random.normal(0.0, 0.8, 3))
for i in range(n_points)]
points_b = [point.spin(rotor) for point in points_a]
points_b_noisy = [vsr.Vec(*(np.array(point)[:3]
+ sigma * np.random.randn(3)))
for point in points_b]
ega_a = [vsr.EGA(p) for p in points_a]
ega_b = [vsr.EGA(p) for p in points_b]
def matrix(a, b):
    """Return the 8x8 matrix whose row i holds the coefficients of
    e_i * a - b * e_i over the EGA basis blades.

    A rotor R satisfying R*a - b*R = 0 then lies in the null space of the
    row-stack of these matrices over several point pairs.
    """
    m = np.zeros((8, 8))
    for i in range(8):
        ei = vsr.EGA(0, 0, 0, 0, 0, 0, 0, 0)
        # BUG FIX: basis blades must have unit weight. The original
        # `ei[i] = i` scaled row i by its index and zeroed row 0 (the
        # scalar constraint) entirely, skewing the least-squares problem.
        ei[i] = 1.0
        m[i, :] = np.array(ei * a - b * ei)
    return m
m = np.row_stack([
np.delete(np.delete(matrix(a,b),[0,4,5,6],0), [1,2,3,7],1)
for a, b in zip(ega_a, ega_b)]).copy()
U,s,Vt = np.linalg.svd(m)
print(Vt.T)
print(s)
ega_a = [vsr.EGA(p) for p in points_a]
ega_b = [vsr.EGA(p) for p in points_b]
M = np.array([[1,0,0,0,0,0,0,0],
[0,0,0,0,1,0,0,0],
[0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,1,0]])
print(M)
def matrix(a, b):
    """Return the 8x8 matrix whose row i holds the coefficients of
    e_i * a - b * e_i over the EGA basis blades.

    Rows stacked over point pairs give the rotor-constraint system whose
    null space contains the rotor mapping a onto b.
    """
    m = np.zeros((8, 8))
    for i in range(8):
        ei = vsr.EGA(0, 0, 0, 0, 0, 0, 0, 0)
        # BUG FIX: basis blades must have unit weight. The original
        # `ei[i] = i` scaled row i by its index and zeroed row 0 (the
        # scalar constraint) entirely.
        ei[i] = 1.0
        m[i, :] = np.array(ei * a - b * ei)
    return m
# m = np.row_stack([
# np.delete(np.delete(matrix(a,b),[0,4,5,6],0), [1,2,3,7],1)
# for a, b in zip(ega_a, ega_b)]).copy()
m = np.row_stack(np.dot(matrix(a,b),M.T) for a, b in zip(ega_a, ega_b))
U,s,Vt = np.linalg.svd(m)
# print(Vt.T)
print(vsr.Rot(*Vt[-1]))
# print(s)
matrix(ega_a[0], ega_b[0])
np.delete(np.delete(matrix(ega_a[0],ega_b[0]),[0,4,5,6],0), [1,2,3,7],1)
r = np.array([1,2,3,4,5,6,7,8]).T
vsr.CGA(vsr.Mot(1,2,3,4,5,6,7,8))
np.delete(matrix(ega_a[0],ega_b[0]),[0,4,5,6],0)
motor
Mrotij = np.array([[1,0,0,0,0,0,0,0],
[0,0,0,0,1,0,0,0],
[0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,1,0]])
np.einsum('j,ij->i',r,Mrotij)
cga_a = [vsr.CGA(p.null()) for p in points_a]
cga_b = [vsr.CGA(p.null()) for p in points_b]
def matrix(a, b):
    """Return the 32x32 matrix whose row i holds the coefficients of
    e_i * a - b * e_i over the full CGA basis.

    A motor M with M*a - b*M = 0 lies in the null space of the row-stack
    of these matrices over several conformal point pairs.
    """
    m = np.zeros((32, 32))
    for i in range(32):
        ei = vsr.CGA(*np.zeros(32))
        # BUG FIX: basis blades must have unit weight. The original
        # `ei[i] = i` scaled row i by its index and zeroed row 0 entirely.
        ei[i] = 1.0
        m[i, :] = np.array(ei * a - b * ei)
    return m
k = matrix(cga_a[0], cga_b[0])
m = np.row_stack([matrix(a,b) for a,b in zip(cga_a, cga_b)])
U,s,Vt = np.linalg.svd(m)
print(Vt.T[-1])
import time
t1 = time.time()
vsr.CGA(vsr.Vec(1,2,3).null()).spin(vsr.CGA(motor))
t2 = time.time()
print(t2-t1)
t1 = time.time()
vsr.Vec(1,2,3).null().spin(motor)
t2 = time.time()
print(t2-t1)
np.set_printoptions(linewidth=200,precision=2)
motor = vsr.Vec(1,1,1).trs() * vsr.Rot(vsr.Biv(1,1,1).unit() * np.pi/6.0)
print(motor)
n_points = 10
sigma = 0.09
points_a = [vsr.Vec(*np.random.normal(0.0, 0.8, 3)).null()
for i in range(n_points)]
points_b = [point.spin(motor) for point in points_a]
points_b_noisy = [vsr.Vec(*(np.array(point)[:3]
+ sigma * np.random.randn(3))).null()
for point in points_b]
cga_a = [vsr.CGA(p) for p in points_a]
cga_b = [vsr.CGA(p) for p in points_b]
def set_idx(idx):
    """Return a length-32 one-hot float vector with 1.0 at position ``idx``.

    Used to assemble the selector matrix M that picks the motor blade
    coordinates out of the full 32-component CGA multivector.
    """
    one_hot = np.zeros(32)
    one_hot[idx] = 1.0
    return one_hot
M = np.array([set_idx(0),
set_idx(6), set_idx(7), set_idx(8),
set_idx(12), set_idx(13), set_idx(14),
set_idx(27)])
def matrix(a, b):
    """Return rows 1..5 of the motor-constraint map for one CGA point pair,
    projected onto the 8 motor blade coordinates selected by the
    module-level matrix M.

    Row i of the full 32x32 map holds the coefficients of e_i * a - b * e_i.
    """
    m = np.zeros((32, 32))
    for i in range(32):
        ei = vsr.CGA(*np.zeros(32))
        # BUG FIX: basis blades must have unit weight. The original
        # `ei[i] = i` scaled row i by its index (row 0 was zeroed, but rows
        # 1..5 — the ones returned — were weighted 1..5, skewing the SVD).
        ei[i] = 1.0
        m[i, :] = np.array(ei * a - b * ei)
    return np.dot(m, M.T)[1:6, :]
# print(matrix(cga_a[0],cga_b[0])[1:6,:] )
m = np.row_stack([matrix(a,b) for a, b in zip(cga_a, cga_b)]).copy()
# print(m)
U,s,Vt = np.linalg.svd(m)
print(Vt.T)
print(s)
set_idx(1)
ega_a = [vsr.EGA(p) for p in points_a]
ega_b = [vsr.EGA(p) for p in points_b]
M = np.array([[1,0,0,0,0,0,0,0],
[0,0,0,0,1,0,0,0],
[0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,1,0]])
print(M)
def matrix(a, b):
    """Return the 8x8 matrix whose row i holds the coefficients of
    e_i * a - b * e_i over the EGA basis blades (rotor-constraint map).
    """
    m = np.zeros((8, 8))
    for i in range(8):
        ei = vsr.EGA(0, 0, 0, 0, 0, 0, 0, 0)
        # BUG FIX: basis blades must have unit weight. The original
        # `ei[i] = i` scaled row i by its index and zeroed row 0 entirely.
        ei[i] = 1.0
        m[i, :] = np.array(ei * a - b * ei)
    return m
# m = np.row_stack([
# np.delete(np.delete(matrix(a,b),[0,4,5,6],0), [1,2,3,7],1)
# for a, b in zip(ega_a, ega_b)]).copy()
m = np.row_stack(np.dot(matrix(a,b),M.T) for a, b in zip(ega_a, ega_b))
U,s,Vt = np.linalg.svd(m)
# print(Vt.T)
print(vsr.Rot(*Vt[-1]))
# print(s)
e0 = vsr.CGA(vsr.Mot(1,0,0,0,0,0,0,0))
e12 = vsr.CGA(vsr.Mot(0,1,0,0,0,0,0,0))
e13 = vsr.CGA(vsr.Mot(0,0,1,0,0,0,0,0))
e23 = vsr.CGA(vsr.Mot(0,0,0,1,0,0,0,0))
e1i = vsr.CGA(vsr.Mot(0,0,0,0,1,0,0,0))
e2i = vsr.CGA(vsr.Mot(0,0,0,0,0,1,0,0))
e3i = vsr.CGA(vsr.Mot(0,0,0,0,0,0,1,0))
e123i = vsr.CGA(vsr.Mot(0,0,0,0,0,0,0,1))
a = cga_a[0]
b = cga_b[0]
e0 * a - b * e0
# The original line was syntactically invalid
# ("(lambda idx : [None for np.zeros(32); a[idx] = 1)(3)"); the evident
# intent — matching set_idx above — is an inline one-hot constructor:
# a 32-vector with a single 1.0 at the given index (here 3).
(lambda idx: np.eye(32)[idx])(3)
(e12 * a - b * e12)
np.delete((e12 * a - b * e12),[0,6,7,8,9,10,11,12,13,14,15,26,27,28,29,30])
e13 * a - b * e13
e123i * a - b * e123i
vsr.CGA(vsr.Mot(1,2,3,4,5,6,7,8))
vsr.CGA(vsr.Mot(1,2,3,4,5,6,7,8))
vsr.Mot(vsr.CGA(vsr.Rot(1,2,3,4)) * vsr.CGA(vsr.Vec(1,2,3)))
import scipy.linalg as linalg
U,s,Vh = linalg.svd(m)
import scipy.io as io
io.savemat("/home/lars/m.mat", {"m":m})
M = io.loadmat("/home/lars/Downloads/M.mat")["M"]
print(M)
U,s,Vt = np.linalg.svd(M)
print(s)
print(m[:8])
print(M[:8])
matrix(vsr.EGA(1,0,0,0,0,0,0,0), vsr.EGA(0,0,0,0,0,0,0,0)).T
print(vsr.EGA(0,1,0,0,0,0,0,0) * vsr.EGA(1,0,0,0,0,0,0,0))
print(vsr.EGA(0,1,0,0,0,0,0,0) * vsr.EGA(0,1,0,0,0,0,0,0))
print(vsr.EGA(0,1,0,0,0,0,0,0) * vsr.EGA(0,0,1,0,0,0,0,0))
print(vsr.EGA(0,1,0,0,0,0,0,0) * vsr.EGA(0,0,0,1,0,0,0,0))
print(vsr.EGA(0,1,0,0,0,0,0,0) * vsr.EGA(0,0,0,0,1,0,0,0))
print(vsr.EGA(0,1,0,0,0,0,0,0) * vsr.EGA(0,0,0,0,0,1,0,0))
print(vsr.EGA(0,1,0,0,0,0,0,0) * vsr.EGA(0,0,0,0,0,0,1,0))
print(vsr.EGA(0,1,0,0,0,0,0,0) * vsr.EGA(0,0,0,0,0,0,0,1))
np.array([np.array(vsr.EGA(0,0,1,0,0,0,0,0) * vsr.EGA(1,0,0,0,0,0,0,0)),
np.array(vsr.EGA(0,0,1,0,0,0,0,0) * vsr.EGA(0,1,0,0,0,0,0,0)),
np.array(vsr.EGA(0,0,1,0,0,0,0,0) * vsr.EGA(0,0,1,0,0,0,0,0)),
np.array(vsr.EGA(0,0,1,0,0,0,0,0) * vsr.EGA(0,0,0,1,0,0,0,0)),
np.array(vsr.EGA(0,0,1,0,0,0,0,0) * vsr.EGA(0,0,0,0,1,0,0,0)),
np.array(vsr.EGA(0,0,1,0,0,0,0,0) * vsr.EGA(0,0,0,0,0,1,0,0)),
np.array(vsr.EGA(0,0,1,0,0,0,0,0) * vsr.EGA(0,0,0,0,0,0,1,0)),
np.array(vsr.EGA(0,0,1,0,0,0,0,0) * vsr.EGA(0,0,0,0,0,0,0,1))]).T
Multivector()._gp_tensor[2,:,:]
vsr.EGA(0,0,0,0,0,0,0,0) * vsr.EGA(0,a[0],a[1],0,a[2],0,0,0)
np.inner(matrix(vsr.EGA(vsr.Vec(1,2,3)), vsr.EGA(0,0,0,0,0,0,0,0)), vsr.EGA(vsr.Vec(-12,9,-13)))
vsr.Vec(1,2,3) * vsr.Vec(-12,9,-13)
motor = vsr.Vec(1,1,1).trs() * vsr.Rot(vsr.Biv(0,1,0) * np.pi/6.0)
a = vsr.CGA(motor)
print(a)
a = vsr.EGA(1,0,0,0,0,0,0,0)
m2 = np.zeros((8,8))
for i in range(8):
ei = vsr.EGA(*np.zeros(8))
ei[i] = 1.0
m2[:,i] = ei * vsr.EGA(1,0,0,0,0,0,0,0)
print(m)
np.sum(m2,0)
M = np.zeros((8,8))
for i in range(8):
W = np.zeros((8,8))
for j in range(8):
a = vsr.EGA(0,0,0,0,0,0,0,0)
b = vsr.EGA(0,0,0,0,0,0,0,0)
a[i] = 1.
b[j] = j + 1
W[i,:] = np.array(a * b)
print(np.sum(W,0))
M[i,:] = np.sum(W,0)
print(M)
M = np.zeros((8,8))
mask = np.array([1,2,3,4,5,6,7,8])
for i in range(8):
W = np.zeros((8,8))
for j in range(8):
a = vsr.EGA(0,0,0,0,0,0,0,0)
b = vsr.EGA(0,0,0,0,0,0,0,0)
a[i] = 1.
b[j] = 1.
M[i,j] = np.dot(mask, np.array(a <= b))
print(M.T)
def row(a):
    """Sum a * ((i+1) * e_i) over all 8 EGA basis blades.

    Weighting blade i by its 1-based index makes each product's
    contribution identifiable in the accumulated 8-vector, so the result
    reads off the (signed, 1-based) geometric-product table row for ``a``.
    """
    M = np.zeros(8)
    for i in range(8):
        b = vsr.EGA(0,0,0,0,0,0,0,0)
        b[i] = i + 1  # distinguishable weight per blade (1..8)
        M += np.array(a * b)
    return M
for i in range(8):
ei = vsr.EGA(0,0,0,0,0,0,0,0)
ei[i] = 1.
print(row(ei))
np.dot([1,2,3,4,5,6,7,8], np.array(vsr.EGA(0,0,0,0,0,1,0,0) * vsr.EGA(0,0,0,0,0,0,0,1)))
```
| github_jupyter |
# Quick demonstration of R-notebooks using the r-oce library
Created: 2017-01-23
The IOOS notebook
[environment](https://github.com/ioos/notebooks_demos/blob/229dabe0e7dd207814b9cfb96e024d3138f19abf/environment.yml#L73-L76)
installs the `R` language and the `Jupyter` kernel needed to run `R` notebooks.
Conda can also install extra `R` packages,
and those packages that are unavailable in `conda` can be installed directly from CRAN with `install.packages(pkg_name)`.
You can start `jupyter` from any other environment and change the kernel later using the drop-down menu.
(Check the `R` logo at the top right to ensure you are in the `R` jupyter kernel.)
In this simple example we will use two libraries aimed at the oceanography community written in `R`: [`r-gsw`](https://cran.r-project.org/web/packages/gsw/index.html) and [`r-oce`](http://dankelley.github.io/oce/).
(The original post for the examples below can be found author's blog: [http://dankelley.github.io/blog/](http://dankelley.github.io/blog/))
```
library(gsw)
library(oce)
```
Example 1: calculating the day length.
```
# Day length in seconds at longitude `lon` / latitude `lat` for time `t`.
# Sunrise and sunset are found as zero crossings of the sun's altitude
# within half a day on either side of t.
# NOTE(review): assumes t is near local solar noon so that sunrise lies in
# [t - 12 h, t] and sunset in [t, t + 12 h] — confirm for other sites.
daylength <- function(t, lon=-38.5, lat=-13)
{
    t <- as.numeric(t)
    # Sun altitude (degrees) above the horizon; sign change marks rise/set.
    alt <- function(t)
        sunAngle(t, longitude=lon, latitude=lat)$altitude
    # Root-find the altitude zero before (rise) and after (set) the reference time.
    rise <- uniroot(alt, lower=t-86400/2, upper=t)$root
    set <- uniroot(alt, lower=t, upper=t+86400/2)$root
    set - rise
}
t0 <- as.POSIXct("2017-01-01 12:00:00", tz="UTC")
t <- seq.POSIXt(t0, by="1 day", length.out=1*356)
dayLength <- unlist(lapply(t, daylength))
par(mfrow=c(2,1), mar=c(3, 3, 1, 1), mgp=c(2, 0.7, 0))
plot(t, dayLength/3600, type='o', pch=20,
xlab="", ylab="Day length (hours)")
grid()
solstice <- as.POSIXct("2013-12-21", tz="UTC")
plot(t[-1], diff(dayLength), type='o', pch=20,
xlab="Day in 2017", ylab="Seconds gained per day")
grid()
```
Example 2: least-square fit.
```
x <- 1:100
y <- 1 + x/100 + sin(x/5)
yn <- y + rnorm(100, sd=0.1)
L <- 4
calc <- runlm(x, y, L=L, deriv=0)
plot(x, y, type='l', lwd=7, col='gray')
points(x, yn, pch=20, col='blue')
lines(x, calc, lwd=2, col='red')
data(ctd)
rho <- swRho(ctd)
z <- swZ(ctd)
drhodz <- runlm(z, rho, deriv = 1)
g <- 9.81
rho0 <- mean(rho, na.rm = TRUE)
N2 <- -g * drhodz/rho0
plot(ctd, which = "N2")
lines(N2, -z, col = "blue")
legend("bottomright", lwd = 2, col = c("brown", "blue"), legend = c("spline",
"runlm"), bg = "white")
```
Example 3: T-S diagram.
```
# Alter next three lines as desired; a and b are watermasses.
Sa <- 30
Ta <- 10
Sb <- 40
library(oce)
# Should not need to edit below this line
rho0 <- swRho(Sa, Ta, 0)
Tb <- uniroot(function(T) rho0-swRho(Sb,T,0), lower=0, upper=100)$root
Sc <- (Sa + Sb) /2
Tc <- (Ta + Tb) /2
## density change, and equiv temp change
drho <- swRho(Sc, Tc, 0) - rho0
dT <- drho / rho0 / swAlpha(Sc, Tc, 0)
plotTS(as.ctd(c(Sa, Sb, Sc), c(Ta, Tb, Tc), 0), pch=20, cex=2)
drawIsopycnals(levels=rho0, col="red", cex=0)
segments(Sa, Ta, Sb, Tb, col="blue")
text(Sb, Tb, "b", pos=4)
text(Sa, Ta, "a", pos=4)
text(Sc, Tc, "c", pos=4)
legend("topleft",
legend=sprintf("Sa=%.1f, Ta=%.1f, Sb=%.1f -> Tb=%.1f, drho=%.2f, dT=%.2f",
Sa, Ta, Sb, Tb, drho, dT),
bg="white")
```
Example 4: find the halocline depth.
```
# Estimate the halocline depth of a CTD cast as the pressure at which the
# salinity gradient dS/dp (from a smoothing spline) is largest.
#   ctd:    an oce CTD object with 'salinity' and 'pressure' columns.
#   deltap: target vertical smoothing scale, in the pressure units of the cast.
#   plot:   if TRUE, draw the salinity profile, spline fit, and halocline line.
# Returns the halocline pressure H.
findHalocline <- function(ctd, deltap=5, plot=TRUE)
{
    S <- ctd[['salinity']]
    p <- ctd[['pressure']]
    n <- length(p)
    ## trim df to be no larger than n/2 and no smaller than 3.
    N <- deltap / median(diff(p))  # samples per smoothing interval
    df <- min(n/2, max(3, n / N))
    spline <- smooth.spline(S~p, df=df)
    SS <- predict(spline, p)
    dSSdp <- predict(spline, p, deriv=1)
    # Halocline = pressure at the maximum salinity gradient.
    H <- p[which.max(dSSdp$y)]
    if (plot) {
        par(mar=c(3, 3, 1, 1), mgp=c(2, 0.7, 0))
        plotProfile(ctd, xtype="salinity")
        lines(SS$y, SS$x, col='red')
        abline(h=H, col='blue')
        mtext(sprintf("%.2f m", H), side=4, at=H, cex=3/4, col='blue')
        mtext(sprintf(" deltap: %.0f, N: %.0f, df: %.0f", deltap, N, df),
              side=1, line=-1, adj=0, cex=3/4)
    }
    return(H)
}
# Plot two panels to see influence of deltap.
par(mfrow=c(1, 2))
data(ctd)
findHalocline(ctd)
findHalocline(ctd, 1)
```
| github_jupyter |
## Script to load financial time-series (per-minute ETFs) data from CSV files into a Pandas DF and a Postgres table
### The ingestion for Pandas is also done in its own perf tests notebook
```
data_path = '/workspace/data/datasets/unianalytica/group/analytics-perf-tests/symbols/'
import sys
import os
import csv
import psycopg2
import pandas as pd
import numpy as np
from datetime import datetime
import pytz
import time
```
### 1.Load up all files to one Pandas DF
Takes about 2 minutes (63 files, 3.5 GB CSV format total size)
```
# Read every per-symbol CSV in data_path into one DataFrame.
symbols_files = sorted(os.listdir(data_path))
column_names = ['symbol_record_id', 'symbol', 'datetime', 'open', 'high', 'low',
                'close', 'volume', 'split_factor', 'earnings', 'dividends']
symbol_dfs_list = [
    pd.read_csv(data_path + fname, parse_dates=[2], infer_datetime_format=True,
                names=column_names)
    for fname in symbols_files
]
print('Now concatenating the DFs...')
# ignore_index=True produces the same fresh 0..n-1 integer index that the
# manual np.arange(records_count) assignment did, without the bookkeeping.
symbols_df = pd.concat(symbol_dfs_list, ignore_index=True)
records_count = len(symbols_df)  # kept for any later cells that reference it
del symbol_dfs_list  # release the per-file frames (~3.5 GB of CSVs) promptly
```
#### Adding `symbol_id` column
```
symbols_list = sorted(pd.unique(symbols_df.symbol))
keys = symbols_list
values = list(range(1, len(symbols_list)+1))
dictionary = dict(zip(keys, values))
symbols_df.insert(0, 'symbol_id', np.array([dictionary[x] for x in symbols_df.symbol.values]))
symbols_df.head()
```
### 2.Import all files to a single table in the tests DB on Postgres
```
symbols_files = sorted(os.listdir(data_path))
keys = symbols_files
values = list(range(1, len(symbols_files)+1))
dictionary = dict(zip(keys, values))
try:
    conn = psycopg2.connect(dbname='tests', user='hitchhiker', host='localhost',
                            password='freeride', port='9478')
except psycopg2.Error as e:
    # The bare "except:" previously swallowed the reason, and execution then
    # crashed on the undefined `conn` at the next cell anyway. Report the
    # actual cause and stop here instead.
    print('I am unable to connect to the database')
    print('Error: %s' % e)
    raise
cur = conn.cursor()
sqlQuery = '''
DROP TABLE IF EXISTS public.symbols_minute;
DROP TABLE IF EXISTS public.symbols_minute_staging;
'''
cur.execute(sqlQuery)
conn.commit()
sqlQuery = '''
CREATE TABLE public.symbols_minute
(
symbol_id int,
symbol_record_id int,
symbol character varying(4),
datetime timestamp without time zone NOT NULL,
open real,
high real,
low real,
close real,
volume real,
split_factor real,
earnings real,
dividends real
);
CREATE INDEX symbols_minute_datetime_idx ON public.symbols_minute (datetime);
'''
cur.execute(sqlQuery)
conn.commit()
sqlQuery = '''
CREATE TABLE public.symbols_minute_staging
(
symbol_record_id int,
symbol character varying(4),
datetime timestamp without time zone PRIMARY KEY NOT NULL,
open real,
high real,
low real,
close real,
volume real,
split_factor real,
earnings real,
dividends real
);
'''
cur.execute(sqlQuery)
conn.commit()
# Load each CSV through the staging table, then copy its rows into the main
# table with the symbol_id looked up from the filename mapping `dictionary`.
num_files = 0
for symbol_file in symbols_files:
    num_files += 1
    # if num_files > 2: break  # uncomment to smoke-test on the first files only
    try:
        cur.execute("TRUNCATE TABLE public.symbols_minute_staging;")
        conn.commit()
        # `with` closes the file even on failure (the original leaked the handle).
        with open(data_path + symbol_file, 'r') as f:
            cur.copy_from(f, 'symbols_minute_staging', sep=',')
        conn.commit()
        cur.execute("SELECT COUNT(*) FROM public.symbols_minute_staging;")
        current_count = cur.fetchall()[0][0]
        # symbol_id comes from our own filename->int mapping (not user input),
        # so interpolation here cannot inject SQL; a bound parameter would
        # still be tidier.
        sqlQuery = '''
        INSERT INTO symbols_minute
        SELECT '%(symbol_id)s', *
        FROM public.symbols_minute_staging;
        ''' % {'symbol_id': dictionary[symbol_file]}
        cur.execute(sqlQuery)
        conn.commit()
        print('{} records from {} are imported'.format(current_count, symbol_file))
    except Exception as e:
        # Narrowed from a bare "except:" (which also caught KeyboardInterrupt).
        # Roll back so the aborted transaction doesn't poison the next file's
        # TRUNCATE (the original cascaded failures after the first error).
        conn.rollback()
        print('Could not import ' + symbol_file)
        print("Error: %s" % e)
print('Data import finished.')
cur.execute("SELECT count(*) FROM public.symbols_minute;")
print(cur.fetchall()[0][0])
cur.execute("SELECT * FROM public.symbols_minute LIMIT 10;")
for row in cur.fetchall():
print(row)
```
## License
Copyright (c) 2019, PatternedScience Inc.
This code was originally run on the [UniAnalytica](https://www.unianalytica.com) platform, is published by PatternedScience Inc. on [GitHub](https://github.com/patternedscience/GPU-Analytics-Perf-Tests) and is licensed under the terms of Apache License 2.0; a copy of the license is available in the GitHub repository.
| github_jupyter |
```
import arviz as az
import xarray as xr
from generate_data import generate_data
from utils import StanModel_cache
n = 70
Years_indiv, Mean_RT_comp_Indiv, Mean_RT_incomp_Indiv = generate_data(8, n)
dims = {"y_obs_comp": ["subject"], "y_obs_incomp": ["subject"]}
# Map each observed variable to its pointwise log-likelihood variable from the
# Stan `generated quantities` block. BUG FIX: the incompatible condition must
# point at "log_lik_incomp"; it previously reused "log_lik_comp", so both
# conditions read the same log-likelihood.
log_lik_dict = {"y_obs_comp": "log_lik_comp", "y_obs_incomp": "log_lik_incomp"}
data = {
"n": n,
"y_obs_comp": Mean_RT_comp_Indiv,
"y_obs_incomp": Mean_RT_incomp_Indiv,
"age": Years_indiv,
"mean_rt_c": Mean_RT_comp_Indiv.mean(),
"mean_rt_i": Mean_RT_incomp_Indiv.mean(),
}
exp_code = """
data {
int<lower=0> n;
real y_obs_comp[n];
real y_obs_incomp[n];
int<lower=0> age[n];
real mean_rt_c;
real mean_rt_i;
}
parameters {
real b;
real<lower=0> sigma;
real<lower=0> a_c;
real<lower=0> a_i;
real g_c;
real g_i;
}
transformed parameters {
real mu_c[n];
real mu_i[n];
for (j in 1:n) {
mu_c[j] = a_c*exp(-b*age[j]) + g_c;
mu_i[j] = a_i*exp(-b*age[j]) + g_i;
}
}
model {
a_c ~ cauchy(0, 5);
a_i ~ cauchy(0, 5);
b ~ normal(1, 1);
g_c ~ normal(mean_rt_c, .5);
g_i ~ normal(mean_rt_i, .5);
sigma ~ normal(0, .2);
y_obs_comp ~ normal(mu_c, sigma);
y_obs_incomp ~ normal(mu_i, sigma);
}
generated quantities {
real log_lik_comp[n];
real log_lik_incomp[n];
for (j in 1:n) {
log_lik_comp[j] = normal_lpdf(y_obs_comp[j] | mu_c[j], sigma);
log_lik_incomp[j] = normal_lpdf(y_obs_incomp[j] | mu_i[j], sigma);
}
}
"""
stan_model = StanModel_cache(model_code=exp_code)
fit = stan_model.sampling(data=data, iter=4000, control={"adapt_delta" : 0.9})
idata_exp = az.from_pystan(fit, dims=dims, log_likelihood=log_lik_dict)
```
The pointwise log likelihood stored is the following (both models have the same variables and shape, only the exponential model is shown)
```
idata_exp.log_likelihood
```
IC calculation and model comparison starts here
```
log_lik_exp = idata_exp.log_likelihood
print("Leave one *observation* out cross validation (whole model)")
condition_dim = xr.DataArray(["compatible", "incompatible"], name="condition")
idata_exp.sample_stats["log_likelihood"] = xr.concat((log_lik_exp.y_obs_comp, log_lik_exp.y_obs_incomp), dim=condition_dim)
print(az.loo(idata_exp))
print("\n\nLeave one *subject* out cross validation (whole model)")
idata_exp.sample_stats["log_likelihood"] = log_lik_exp.y_obs_comp + log_lik_exp.y_obs_incomp
print(az.loo(idata_exp))
print("\n\nLeave one observation out cross validation (y_obs_comp only)")
idata_exp.sample_stats["log_likelihood"] = log_lik_exp.y_obs_comp
print(az.loo(idata_exp))
print("\n\nLeave one observation out cross validation (y_obs_incomp only)")
idata_exp.sample_stats["log_likelihood"] = log_lik_exp.y_obs_incomp
print(az.loo(idata_exp))
```
| github_jupyter |
A $\textbf {Binomial Distribution}$ can be thought of as simply the probability of a SUCCESS or FAILURE outcome in an experiment or survey that is repeated multiple times. The binomial is a type of distribution that has two possible outcomes (the prefix “bi” means two, or twice).
$\textbf {For example,}$ a coin toss has only two possible outcomes: heads or tails and taking a test could have two possible outcomes: pass or fail.<br>
The Probability Density Function (PDF) for $\textbf {Binomial Distribution}$ is defined as::<br>
\begin{equation}
P(X=K) = \binom{N}{K}\, p^{K}\, q^{N-K}
\end{equation}
<br>
$\textbf {Coin Tossing Example:}$
If we toss a coin 5 times and find the probability of getting a 1 head.
The following events can happen are:
<ul>
<li> H,T,T,T,T</li>
<li> T,H,T,T,T</li>
<li> T,T,H,T,T</li>
<li> T,T,T,H,T</li>
<li> T,T,T,T,H</li>
</ul>
$Probability(P) = P(H \cap T \cap T \cap T \cap T)$
<br>
We also know that coin tossing is a independent probability and it will not affect each other's probability.So, each probabilities will be multiplied.
<br>
$Probability(P) = P(H).P(T).P(T).P(T).P(T)$
<br>
Let's assume a coin is tossed for 25 crores times and Head (H) comes for 65% times. <br>
Probability of getting Head = P(H) = p (Success of getting Head) = 0.65 <br>
Probability of getting Tail = P(T) = q (Failure of getting Head or Success of getting Tail)
= 1-p = 0.35
<br>
<B>Conditions for Binomial Random Variable:</B><br>
<ol>
<li> The outcomes of the random variable should be 2. </li>
<li> They should an independent event </li>
</ol>
```
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as S

# Fair-coin binomial: success probability p over N independent trials.
p = 0.5
N = 10
q = 1-p  # failure probability (kept for exposition; not used below)
# All possible success counts k = 0..N.
RandomValues = np.arange(0,N+1)
RandomValues
# P(X = k) for each k — the probability MASS function (despite the "PDF" name).
BinomialValuesPDF = S.binom.pmf(RandomValues,N,p)
BinomialValuesPDF
# Stem plot of the distribution over k.
plt.stem(RandomValues,BinomialValuesPDF)
```
The skewness at the x-axis of the graph will be dependent on the values of the p (Success) and q(Failure).<br>
<ul>
<li> If <b>p is greater than q</b> the skewness towards <b>Right side </b></li>
<li> If <b>p is less than q</b> the skewness towards <b>Left side </b></li>
</ul>
Let's see this by this example.
```
p = 0.7
N = 10
q = 1-p
RandomValues = np.arange(0,N+1)
BinomialValuesPDF = S.binom.pmf(RandomValues,N,p)
plt.stem(RandomValues,BinomialValuesPDF)
p = 0.3
N = 10
q = 1-p
RandomValues = np.arange(0,N+1)
BinomialValuesPDF = S.binom.pmf(RandomValues,N,p)
plt.stem(RandomValues,BinomialValuesPDF)
```
The mean of the binomial distribution is:
$ E[X]=Np $<br>
The Variance of the binomial distribution is : $ E[(X-E[X])^2] = Npq $ <br>
The Standard deviation of the binomial distribution is : $\sqrt {Npq} $
| github_jupyter |
# Wide-field imaging demonstration
This script makes a fake data set, fills it with a number of point components, and then images it using a variety of algorithms. See imaging-fits for a similar notebook that checks for errors in the recovered properties of the images.
The measurement equation for a wide field of view interferometer is:
$$V(u,v,w) =\int \frac{I(l,m)}{\sqrt{1-l^2-m^2}} e^{-2 \pi j (ul+vm + w(\sqrt{1-l^2-m^2}-1))} dl dm$$
We will show various algorithms for computing approximations to this integral. Calculation of the visibility $V$ from the sky brightness $I$ is called {\bf predict}, and the inverse is called {\bf invert}.
```
%matplotlib inline
import os
import sys
sys.path.append(os.path.join('..', '..'))
results_dir = './results'
os.makedirs(results_dir, exist_ok=True)
from matplotlib import pylab
pylab.rcParams['figure.figsize'] = (18.0, 18.0)
pylab.rcParams['image.cmap'] = 'rainbow'
import numpy
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy import constants as const
from astropy.wcs.utils import pixel_to_skycoord
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from arl.data.polarisation import PolarisationFrame
# Visibility constructors (each name was accidentally listed twice).
from arl.visibility.base import create_visibility, create_visibility_from_rows
from arl.skycomponent.operations import create_skycomponent
from arl.image.operations import show_image, export_image_to_fits, create_w_term_like
from arl.image.iterators import image_raster_iter
from arl.visibility.iterators import vis_timeslice_iter, vis_wstack_iter
from arl.util.testing_support import create_named_configuration
from arl.imaging import invert_2d, create_image_from_visibility, \
predict_skycomponent_visibility, invert_facets, \
invert_timeslice, invert_wstack, invert_wprojection, advise_wide_field
from arl.visibility.iterators import vis_timeslice_iter
from arl.imaging.weighting import weight_visibility
import logging
log = logging.getLogger()
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler(sys.stdout))
doplot = True
```
Construct the SKA1-LOW core configuration
```
lowcore = create_named_configuration('LOWBD2-CORE')
```
We create the visibility.
This just makes the uvw, time, antenna1, antenna2, weight columns in a table
```
times = numpy.array([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]) * (numpy.pi / 12.0)
frequency = numpy.array([1e8])
channel_bandwidth = numpy.array([1e7])
reffrequency = numpy.max(frequency)
phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-45.0 * u.deg, frame='icrs', equinox='J2000')
vt = create_visibility(lowcore, times, frequency, channel_bandwidth=channel_bandwidth,
weight=1.0, phasecentre=phasecentre, polarisation_frame=PolarisationFrame("stokesI"))
```
Advise on wide field parameters. This returns a dictionary with all the input and calculated variables.
```
advice = advise_wide_field(vt, wprojection_planes=1)
```
Plot the synthesized UV coverage.
```
if doplot:
plt.clf()
plt.plot(vt.data['uvw'][:, 0], vt.data['uvw'][:, 1], '.', color='b')
plt.plot(-vt.data['uvw'][:, 0], -vt.data['uvw'][:, 1], '.', color='r')
plt.xlabel('U (wavelengths)')
plt.ylabel('V (wavelengths)')
plt.show()
plt.clf()
plt.plot(vt.data['uvw'][:, 0], vt.data['uvw'][:, 2], '.', color='b')
plt.xlabel('U (wavelengths)')
plt.ylabel('W (wavelengths)')
plt.show()
plt.clf()
plt.plot(vt.data['time'][vt.u>0.0], vt.data['uvw'][:, 2][vt.u>0.0], '.', color='b')
plt.plot(vt.data['time'][vt.u<=0.0], vt.data['uvw'][:, 2][vt.u<=0.0], '.', color='r')
plt.xlabel('U (wavelengths)')
plt.ylabel('W (wavelengths)')
plt.show()
plt.clf()
n, bins, patches = plt.hist(vt.w, 50, normed=1, facecolor='green', alpha=0.75)
plt.xlabel('W (wavelengths)')
plt.ylabel('Count')
plt.show()
```
Show the planar nature of the uvw sampling, rotating with hour angle
```
# Show that the uvw samples of each hour-angle slice lie on a plane
# w = a*u + b*v whose coefficients rotate with hour angle.
if doplot:
    rmax = numpy.sqrt(numpy.max(vt.u**2+vt.v**2+vt.w**2))
    u = numpy.linspace(-rmax, rmax, 1000)
    v = numpy.linspace(-rmax, rmax, 1000)
    u, v = numpy.meshgrid(u, v)
    skip=10
    from arl.imaging.timeslice import fit_uvwplane_only
    from matplotlib import cm, colors
    fig=plt.figure()
    for i, rows in enumerate(vis_timeslice_iter(vt)):
        vis_slice = create_visibility_from_rows(vt, rows)
        # Least-squares fit of the instantaneous plane coefficients (a, b).
        a, b = fit_uvwplane_only(vis_slice)
        wvalues = a*u + b*v
        r = numpy.sqrt(u**2 + v**2)
        # Mask the fitted surface outside the maximum baseline radius.
        wvalues=numpy.where(r<rmax, wvalues, numpy.nan)
        # 3x3 subplot grid — assumes at most 8 time slices (TODO confirm).
        ax = fig.add_subplot(331+1+i, projection='3d')
        norm = colors.Normalize(vmin=-rmax, vmax=+rmax)
        ax.plot_surface(u, v, wvalues, rstride=skip, cstride=skip, cmap=cm.coolwarm, norm=norm)
        ax.set_xlabel('U')
        ax.set_ylabel('V')
        ax.set_zlabel('W')
        # Converts slice time to hours — presumably time is in seconds; verify.
        ha = vis_slice.time[0]/3600.0
        ax.set_title('Hour angle %.1f h' % ha)
        ax.view_init(15, 90)
        ax.set_xlim(-1.1*rmax,1.1*rmax)
        ax.set_ylim(-1.1*rmax,1.1*rmax)
        ax.set_zlim(-1.1*rmax,1.1*rmax)
    plt.show()
```
Create a grid of components and predict each in turn, using the full phase term including w.
```
# NOTE(review): this params dict is not referenced anywhere in this cell —
# presumably leftover from an older ARL API; kept for reference.
params = {'npixel': 512,
          'cellsize': 0.001,
          'spectral_mode': 'channel',
          'channel_bandwidth': 5e7,
          'reffrequency': 1e8,
          'kernel':'calculated',
          'facets':4}
npixel = 512
cellsize=0.001
facets = 4
flux = numpy.array([[100.0]])
# Zero the visibilities so they will contain only the predicted components.
vt.data['vis'] *= 0.0
model = create_image_from_visibility(vt, npixel=512, cellsize=0.001, npol=1)
# Place a 4x4 grid of flux-100 point sources at the facet centres.
spacing_pixels = npixel // facets
log.info('Spacing in pixels = %s' % spacing_pixels)
# Pixel spacing converted to degrees (cellsize is in radians per pixel).
spacing = 180.0 * cellsize * spacing_pixels / numpy.pi
centers = -1.5, -0.5, +0.5, +1.5
comps=list()
for iy in centers:
    for ix in centers:
        pra = int(round(npixel // 2 + ix * spacing_pixels - 1))
        pdec = int(round(npixel // 2 + iy * spacing_pixels - 1))
        sc = pixel_to_skycoord(pra, pdec, model.wcs)
        log.info("Component at (%f, %f) %s" % (pra, pdec, str(sc)))
        comp = create_skycomponent(flux=flux, frequency=frequency, direction=sc,
                                   polarisation_frame=PolarisationFrame("stokesI"))
        comps.append(comp)
# Predict visibilities with the full phase term, including w.
predict_skycomponent_visibility(vt, comps)
```
Make the dirty image and point spread function using the two-dimensional approximation:
$$V(u,v,w) =\int I(l,m) e^{2 \pi j (ul+vm)} dl dm$$
Note that the shape of the sources vary with position in the image. This space-variant property of the PSF arises from the w-term neglected in the two-dimensional invert.
```
# 2-D (w-ignored) dirty image: sources away from the phase centre are
# visibly distorted by the neglected w term.
dirty = create_image_from_visibility(vt, npixel=512, cellsize=0.001,
                                     polarisation_frame=PolarisationFrame("stokesI"))
vt, _, _ = weight_visibility(vt, dirty)
dirty, sumwt = invert_2d(vt, dirty)
if doplot:
    show_image(dirty)
print("Max, min in dirty image = %.6f, %.6f, sumwt = %f" % (dirty.data.max(), dirty.data.min(), sumwt))
export_image_to_fits(dirty, '%s/imaging-wterm_dirty.fits' % (results_dir))
```
This occurs because the Fourier transform relationship between sky brightness and visibility is only accurate over small fields of view.
Hence we can make an accurate image by partitioning the image plane into small regions, treating each separately and then gluing the resulting partitions into one image. We call this image-plane partitioning approach image-plane faceting.
$$V(u,v,w) = \sum_{i,j} \frac{1}{\sqrt{1- l_{i,j}^2- m_{i,j}^2}} e^{-2 \pi j (ul_{i,j}+vm_{i,j} + w(\sqrt{1-l_{i,j}^2-m_{i,j}^2}-1))}
\int I(\Delta l, \Delta m) e^{-2 \pi j (u\Delta l_{i,j}+v \Delta m_{i,j})} dl dm$$
```
# Faceted invert with 4x4 facets — the facet centres coincide with the
# component positions above, so this is the best case.
dirtyFacet = create_image_from_visibility(vt, npixel=512, cellsize=0.001, npol=1)
dirtyFacet, sumwt = invert_facets(vt, dirtyFacet, facets=4)
if doplot:
    show_image(dirtyFacet)
print("Max, min in dirty image = %.6f, %.6f, sumwt = %f" % (dirtyFacet.data.max(), dirtyFacet.data.min(), sumwt))
export_image_to_fits(dirtyFacet, '%s/imaging-wterm_dirtyFacet.fits' % (results_dir))
```
That was the best case. This time, we will not arrange for the partitions to be centred on the sources.
```
# Faceted invert with 2x2 facets: the facet centres are no longer on the
# sources, so residual w-distortion remains within each facet.
dirtyFacet2 = create_image_from_visibility(vt, npixel=512, cellsize=0.001, npol=1)
dirtyFacet2, sumwt = invert_facets(vt, dirtyFacet2, facets=2)
if doplot:
    show_image(dirtyFacet2)
print("Max, min in dirty image = %.6f, %.6f, sumwt = %f" % (dirtyFacet2.data.max(), dirtyFacet2.data.min(), sumwt))
export_image_to_fits(dirtyFacet2, '%s/imaging-wterm_dirtyFacet2.fits' % (results_dir))
```
Another approach is to partition the visibility data by slices in w. The measurement equation is approximated as:
$$V(u,v,w) =\sum_i \int \frac{ I(l,m) e^{-2 \pi j (w_i(\sqrt{1-l^2-m^2}-1))}}{\sqrt{1-l^2-m^2}} e^{-2 \pi j (ul+vm)} dl dm$$
If images constructed from slices in w are added after applying a w-dependent image plane correction, the w term will be corrected.
The w-dependent w-beam is:
```
# Show the w-beam for the largest w, then image by w-stacking: grid the
# visibilities in 31 slices of w and apply the per-slice image-plane
# correction before summing.
if doplot:
    wterm = create_w_term_like(model, phasecentre=vt.phasecentre, w=numpy.max(vt.w))
    show_image(wterm)
    plt.show()
dirtywstack = create_image_from_visibility(vt, npixel=512, cellsize=0.001, npol=1)
dirtywstack, sumwt= invert_wstack(vt, dirtywstack, vis_slices=31,
                                  padding=2)
show_image(dirtywstack)
plt.show()
print("Max, min in dirty image = %.6f, %.6f, sumwt = %f" %
      (dirtywstack.data.max(), dirtywstack.data.min(), sumwt))
export_image_to_fits(dirtywstack, '%s/imaging-wterm_dirty_wstack.fits' % (results_dir))
```
The w-term can also be viewed as a time-variable distortion. Approximating the array as instantaneously co-planar, we have that w can be expressed in terms of $u,v$
$$w = a u + b v$$
Transforming to a new coordinate system:
$$ l' = l + a (\sqrt{1-l^2-m^2}-1)$$
$$ m' = m + b (\sqrt{1-l^2-m^2}-1)$$
Ignoring changes in the normalisation term, we have:
$$V(u,v,w) =\int \frac{I(l',m')}{\sqrt{1-l'^2-m'^2}} e^{-2 \pi j (ul'+vm')} dl' dm'$$
To illustrate this, we will construct images as a function of time. For comparison, we show difference of each time slice from the best facet image. Instantaneously the sources are un-distorted but do lie in the wrong location.
```
# One 2-D dirty image per time slice; the difference against the best
# (faceted) image shows sources undistorted but displaced per snapshot.
for rows in vis_timeslice_iter(vt):
    visslice = create_visibility_from_rows(vt, rows)
    dirtySnapshot = create_image_from_visibility(visslice, npixel=512, cellsize=0.001, npol=1, compress_factor=0.0)
    dirtySnapshot, sumwt = invert_2d(visslice, dirtySnapshot)
    print("Max, min in dirty image = %.6f, %.6f, sumwt = %f" %
          (dirtySnapshot.data.max(), dirtySnapshot.data.min(), sumwt))
    if doplot:
        # Difference against the best-case faceted image from above.
        dirtySnapshot.data -= dirtyFacet.data
        show_image(dirtySnapshot)
        # 43200 s = 12 h, so this converts mean slice time to hours of hour angle.
        plt.title("Hour angle %.2f hours" % (numpy.average(visslice.time) * 12.0 / 43200.0))
        plt.show()
```
This timeslice imaging leads to a straightforward algorithm in which we correct each time slice and then sum the resulting timeslices.
```
# Timeslice imaging: correct each snapshot for its fitted uvw plane and sum.
dirtyTimeslice = create_image_from_visibility(vt, npixel=512, cellsize=0.001, npol=1)
dirtyTimeslice, sumwt= invert_timeslice(vt, dirtyTimeslice, timeslice='auto', padding=2)
show_image(dirtyTimeslice)
plt.show()
print("Max, min in dirty image = %.6f, %.6f, sumwt = %f" %
      (dirtyTimeslice.data.max(), dirtyTimeslice.data.min(), sumwt))
export_image_to_fits(dirtyTimeslice, '%s/imaging-wterm_dirty_Timeslice.fits' % (results_dir))
```
Finally we try w-projection. For a fixed w, the measurement equation can be stated as a convolution in Fourier space.
$$V(u,v,w) =G_w(u,v) \ast \int \frac{I(l,m)}{\sqrt{1-l^2-m^2}} e^{-2 \pi j (ul+vm)} dl dm$$
where the convolution function is:
$$G_w(u,v) = \int \frac{1}{\sqrt{1-l^2-m^2}} e^{-2 \pi j (ul+vm + w(\sqrt{1-l^2-m^2}-1))} dl dm$$
Hence when gridding, we can use the transform of the w beam to correct this effect while gridding.
```
# W-projection: correct the w term during gridding by convolving with the
# transform of the w-beam, using the w step recommended by advise_wide_field.
dirtyWProjection = create_image_from_visibility(vt, npixel=512, cellsize=0.001, npol=1)
dirtyWProjection, sumwt = invert_wprojection(vt, dirtyWProjection, wstep=advice['w_sampling_primary_beam'], padding=1,
                                             oversampling=4)
if doplot:
    show_image(dirtyWProjection)
print("Max, min in dirty image = %.6f, %.6f, sumwt = %f" % (dirtyWProjection.data.max(),
                                                            dirtyWProjection.data.min(), sumwt))
export_image_to_fits(dirtyWProjection, '%s/imaging-wterm_dirty_WProjection.fits' % (results_dir))
```
| github_jupyter |
```
import sys
import os
import PIL
import numpy as np
from numpy.linalg import norm
from math import *
from scipy import ndimage
from scipy import misc
import skimage
from sympy import *
%matplotlib inline
import matplotlib.pyplot as plt
def estimateModelAttempt1():
    """Derive, with sympy, how a second blended channel value relates to the
    first when an RGBA color is alpha-composited over a white (255) background.

    Prints the intermediate solveset/substitution results; each derived
    closed form is then pasted back in as a plain expression.

    FIX: converted Python 2 print statements to print() calls (the later
    cells in this notebook are Python 3).
    """
    r, g = symbols('r g')
    a0, r0, g0 = symbols('a0 r0 g0')
    a1, r1, g1 = symbols('a1 r1 g1')
    # Solve the compositing equation r0 = a0*r + (1-a0)*255 for the true r.
    print(solveset(Eq(r0, a0*r + (1-a0)*255), r))
    # r as a function of a0, r0
    # Eq(r, rFromA0R0)
    rFromA0R0 = (255*a0 + r0 - 255)/a0
    print(Eq(r1, a1*r + (1-a1)*255).subs(r, rFromA0R0))
    # r1 as a function of a0, r0, a1
    # Eq(r1, r1FromA0R0A1)
    r1FromA0R0A1 = -255*a1 + 255 + a1*(255*a0 + r0 - 255)/a0
    # Similarly, g1 as a function of a0, g0, a1
    # Eq(g1, g1FromA0R0A1)
    g1FromA0G0A1 = -255*a1 + 255 + a1*(255*a0 + g0 - 255)/a0
    print(solveset(Eq(r1, r1FromA0R0A1), a1))
    a1FromA0R0R1 = a0*(r1 - 255)/(r0 - 255)
    g1FromA0G0R0R1 = simplify(g1FromA0G0A1.subs(a1, a1FromA0R0R1))
    print(g1FromA0G0R0R1)
    # a0 disappears!
    g1FromA0G0R0R1 = (g0*r1 - 255*g0 + 255*r0 - 255*r1)/(r0 - 255)
    print('===')
estimateModelAttempt1()
def testModel1():
    """Cross-check the derived channel relation on measured sample values.

    Each printed pair of predictions for the same channel should agree.
    FIX: converted Python 2 print statements to print() calls.
    """
    r0, g0, b0 = (157., 212., 244.)
    r1, g1, b1 = (218., 239., 251.)
    print('g1FromR = ', (g0*r1 - 255*g0 + 255*r0 - 255*r1)/(r0 - 255))
    print('g1FromB = ', (g0*b1 - 255*g0 + 255*b0 - 255*b1)/(b0 - 255))
    print('b1FromR = ', (b0*r1 - 255*b0 + 255*r0 - 255*r1)/(r0 - 255))
    print('b1FromG = ', (b0*g1 - 255*b0 + 255*g0 - 255*g1)/(g0 - 255))
    print('r1FromG = ', (r0*g1 - 255*r0 + 255*g0 - 255*g1)/(g0 - 255))
    print('r1FromB = ', (r0*b1 - 255*r0 + 255*b0 - 255*b1)/(b0 - 255))
testModel1()
def testModel1_2():
    """Same cross-check as testModel1 with a saturated sample (g0 = b0 = 255),
    which makes the green/blue-based predictions degenerate.

    FIX: converted Python 2 print statements to print() calls.
    """
    r0, g0, b0 = (127., 255., 255.)
    r1, g1, b1 = (255., 255., 255.)
    print('g1FromR = ', (g0*r1 - 255*g0 + 255*r0 - 255*r1)/(r0 - 255))
    print('b1FromR = ', (b0*r1 - 255*b0 + 255*r0 - 255*r1)/(r0 - 255))
    print('b1FromG = ', (b0*g1 - 255*b0 + 255*g0 - 255*g1)/(g0 - 255))
    print('r1FromG = ', (r0*g1 - 255*r0 + 255*g0 - 255*g1)/(g0 - 255))
testModel1_2()
def estimateSimplerModel():
    """Show with sympy that (r0-255)/(g0-255) does not depend on alpha for
    colors blended over a white background.

    FIX: converted Python 2 print statements to print() calls and removed
    an unused set of a1/r1/g1 symbols.
    """
    r, g = symbols('r g')
    a0, r0, g0 = symbols('a0 r0 g0')
    r0def = a0*r + (1-a0)*255
    g0def = a0*g + (1-a0)*255
    # Does not depend on a0, getting (r - 255)/(g - 255)
    print(simplify((r0def-255)/(g0def-255)))
    # Conclusion 1: (r0-255)/(g0-255) = (r1-255)/(g1-255) if they come from the same color
    # If the background was black, we'd get r0/g0 = r1/g1
    # Conclusion 2: we can't recover the true color and alpha values if the background
    # is grayscale.
estimateSimplerModel()
def testSimpleModel():
    """Numerically verify the alpha-independent channel ratios on sample data:
    each printed pair should be (approximately) equal.

    FIX: converted Python 2 print statements to print() calls.
    """
    r0, g0, b0 = (157., 212., 244.)
    r1, g1, b1 = (218., 239., 251.)
    # Should be the same
    print((r0-255.)/(g0-255.))
    print((r1-255.)/(g1-255.))
    # Should be the same
    print((r0-255.)/(b0-255.))
    print((r1-255.)/(b1-255.))
testSimpleModel()
def testColor0():
    """Estimate per-channel alphas for three observed blends of one reference
    color over white; each alphas vector should be roughly constant.

    FIX: converted Python 2 print statements to print() calls.
    """
    ref = np.array([81., 179., 235.])
    v0 = np.array([157., 212., 244.])
    v1 = np.array([218., 239., 251.])
    v2 = np.array([110., 191., 238.])
    alphas0 = (v0 - 255) / (ref - 255)
    alphas1 = (v1 - 255) / (ref - 255)
    alphas2 = (v2 - 255) / (ref - 255)
    print('alphas0', alphas0)
    print('alphas1', alphas1)
    print('alphas2', alphas2)
testColor0()
def linearToSrgb(L):
    """Convert one linear-light value in [0, 1] to an sRGB value in [0, 255].

    Uses the standard sRGB transfer curve: a linear segment below 0.0031308
    and a gamma segment (exponent 0.41667 ~ 1/2.4) above it.
    """
    if L <= 0.0031308:
        encoded = L * 12.92
    else:
        encoded = (1.055 * L**0.41667) - 0.055
    return encoded * 255.0
# Rebind as a NumPy ufunc-like wrapper so it maps over whole arrays.
linearToSrgb = np.vectorize(linearToSrgb)
def sRgbToLinearRgb(S):
    """Convert one sRGB value in [0, 255] to linear light in [0, 1].

    Inverse of linearToSrgb: linear segment below 0.04045 (after scaling to
    [0, 1]), gamma-2.4 segment above it.
    """
    S = S/255.0
    return S/12.92 if S <= 0.04045 else ((S+0.055)/1.055)**2.4
# Rebind as a NumPy ufunc-like wrapper so it maps over whole arrays.
sRgbToLinearRgb = np.vectorize(sRgbToLinearRgb)
def testSrgbToLinear():
    """Round-trip a color through sRGB -> linear -> sRGB and report the result.

    FIX: the original computed the round trip but never checked or printed
    anything (the result was bound to an unused local), so the cell silently
    verified nothing.
    """
    srgb0 = np.array([157., 212., 244.])
    linear0 = sRgbToLinearRgb(srgb0)
    srgb1 = linearToSrgb(linear0)
    print(srgb0, '->', srgb1, 'round-trip ok:', np.allclose(srgb0, srgb1))
testSrgbToLinear()
def testWithGamma1():
    """Check the channel-relation predictions on a yellow-ish measured sample,
    using v0 itself as the reference; the sRGB->linear conversions are kept
    commented out as in the original experiment.

    FIX: converted Python 2 print statements to print() calls.
    """
    ref = np.array([241., 230., 42.])
    v0 = np.array([243., 232., 66.])
    v1 = np.array([245., 239., 127.])
    ref = v0
    #ref = sRgbToLinearRgb(ref)
    #v0 = sRgbToLinearRgb(v0)
    #v1 = sRgbToLinearRgb(v1)
    alphas0 = (v0 - 255) / (ref - 255)
    alphas1 = (v1 - 255) / (ref - 255)
    print('alphas0', alphas0)
    print('alphas1', alphas1)
    r, g, b = ref
    r0, g0, b0 = v0
    r1, g1, b1 = v1
    print('g1FromR = ', (g*r1 - 255*g + 255*r - 255*r1)/(r - 255))
    print('g1FromB = ', (g*b1 - 255*g + 255*b - 255*b1)/(b - 255))
    print('b1FromR = ', (b*r1 - 255*b + 255*r - 255*r1)/(r - 255))
    print('b1FromG = ', (b*g1 - 255*b + 255*g - 255*g1)/(g - 255))
    print('r1FromG = ', (r*g1 - 255*r + 255*g - 255*g1)/(g - 255))
    print('r1FromB = ', (r*b1 - 255*r + 255*b - 255*b1)/(b - 255))
testWithGamma1()
def testWithGamma2():
    """Same checks as testWithGamma1, but with all values converted from sRGB
    to linear light first.

    FIX: converted Python 2 print statements to print() calls.
    """
    ref = np.array([81., 179., 235.])
    v0 = np.array([126., 198., 241.])
    v1 = np.array([84., 180., 235.])
    ref = sRgbToLinearRgb(ref)
    v0 = sRgbToLinearRgb(v0)
    v1 = sRgbToLinearRgb(v1)
    alphas0 = (v0 - 255) / (ref - 255)
    alphas1 = (v1 - 255) / (ref - 255)
    print('alphas0', alphas0)
    print('alphas1', alphas1)
    r, g, b = ref
    r0, g0, b0 = v0
    r1, g1, b1 = v1
    print('g1FromR = ', (g*r1 - 255*g + 255*r - 255*r1)/(r - 255))
    print('g1FromB = ', (g*b1 - 255*g + 255*b - 255*b1)/(b - 255))
    print('b1FromR = ', (b*r1 - 255*b + 255*r - 255*r1)/(r - 255))
    print('b1FromG = ', (b*g1 - 255*b + 255*g - 255*g1)/(g - 255))
    print('r1FromG = ', (r*g1 - 255*r + 255*g - 255*g1)/(g - 255))
    print('r1FromB = ', (r*b1 - 255*r + 255*b - 255*b1)/(b - 255))
testWithGamma2()
def testPlotImage():
    """Load a plot screenshot and scatter its per-pixel inverted channel pairs
    (255-R vs 255-G, etc.) to check the linear blending model on real data.

    FIX: converted a Python 2 print statement to a print() call.
    """
    # Alternative inputs kept from the original experiments:
    # image = ndimage.io.imread('../DaltonLensTests/gnuplotLt5CairoCropped.png')
    # image = ndimage.io.imread('../DaltonLensTests/gnuplotLt5Cropped.png')
    # image = ndimage.io.imread('../DaltonLensTests/gnuplotLt5ScreenCaptureCropped.png')
    # image = ndimage.io.imread('../DaltonLensTests/ComplexPlotCropped2.png')
    # NOTE(review): current scipy no longer ships scipy.ndimage.io;
    # skimage.io.imread is the drop-in replacement — confirm the scipy version.
    image = ndimage.io.imread('../DaltonLensTests/RandomPlotsCropped.png')
    # image = ndimage.io.imread('../DaltonLensTests/XcodeBackground.png')
    float_image = skimage.util.dtype.img_as_float(image[:,:,0:3])
    npix = float_image.shape[0]*float_image.shape[1]
    float_image = float_image.reshape((npix,3))
    print(float_image.shape)
    plt.plot (255.0 - float_image[:,0]*255., 255.0 - float_image[:,1]*255., '.')
    plt.axis([0, 255, 0, 255])
    plt.figure()
    plt.plot (255.0 - float_image[:,0]*255., 255.0 - float_image[:,2]*255., '.')
    plt.axis([0, 255, 0, 255])
    plt.figure()
    plt.plot (255.0 - float_image[:,1]*255., 255.0 - float_image[:,2]*255., '.')
    plt.axis([0, 255, 0, 255])
    plt.figure()
    # Algorithm IDEA:
    # compute points (R-255, G-255), (R-255, B-255), (G-255, B-255)
    # see if they are in the same line as the reference points (fit a line, distance to line < k)
    # count the number of discriminant points (max 3). Values very close to 255 for every channel
    # are not informative. Informative if min(R,G), min(R,B) or min(G,B) < e.g. 100
    # if compatible and informative, mark as definitely a match
    # if compatible and one neighbor is a match, accept it too
testPlotImage()
from mpl_toolkits.mplot3d import Axes3D
def plotUncertainty():
    """3-D surface of gamma(R, G) = (256-R)/(255-G) - (255-R)/(256-G),
    the spread caused by a one-level quantization error, over the R-G plane."""
    figure = plt.figure()
    axes = figure.add_subplot(111, projection='3d')
    red = np.arange(0.0, 254.0, 1.0)
    green = np.arange(0.0, 254.0, 1.0)
    red, green = np.meshgrid(red, green)
    gamma = ((256.0 - red) / (255.0 - green)) - ((255.0 - red) / (256.0 - green))
    axes.set_xlabel('R')
    axes.set_ylabel('G')
    axes.plot_surface(red, green, gamma, color='b')
plotUncertainty()
def gammaOf(R, G):
    """Return the uncertainty band (256-R)/(255-G) - (255-R)/(256-G) for an
    (R, G) pair, i.e. the ratio spread from a one-level quantization error."""
    upper = (256.0 - R) / (255.0 - G)
    lower = (255.0 - R) / (256.0 - G)
    return upper - lower
# Elementwise wrapper so the function can be broadcast with .outer().
gammaOf_ufunc = np.frompyfunc(gammaOf, 2, 1)
def distFromOrigin(R, G):
    """Return (255-R) / (255-G)**2 — note the exponent binds before the
    division, so the denominator is the *square* of (255-G)."""
    numerator = 255.0 - R
    return numerator / (255.0 - G) ** 2
def plotUncertaintyFromRatio():
    """Plot the quantization-uncertainty gamma against the distFromOrigin
    ratio for all (R, G) pairs in [1, 254).

    FIX: converted Python 2 print statements to print() calls.
    """
    R = np.arange(1.0, 254.0, 1.0)
    G = np.arange(1.0, 254.0, 1.0)
    gamma = gammaOf_ufunc.outer(R, G).flatten()
    print(np.shape(gamma))
    rOverGValues = np.frompyfunc(distFromOrigin, 2, 1).outer(R, G).flatten()
    print(rOverGValues)
    print(gamma)
    plt.axis('auto')
    plt.plot(rOverGValues.flatten(), gamma.flatten())
    plt.figure()
plotUncertaintyFromRatio()
```
| github_jupyter |
```
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import pickle
import re
import shutil
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pylab
import skimage.io as io
import torch
from mpl_toolkits.axes_grid1 import ImageGrid
from PIL import Image, ImageOps
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
def add_border(input_image, border, color):
    """Return *input_image* surrounded by a border of the given width/color.

    border may be an int or a tuple (as accepted by ImageOps.expand);
    anything else raises RuntimeError. Non-RGB images are first pasted onto
    a fresh RGB canvas so the border color applies cleanly.
    """
    if not (isinstance(border, int) or isinstance(border, tuple)):
        raise RuntimeError('Border is not an integer or tuple!')
    if not input_image.mode == 'RGB':
        canvas = Image.new("RGB", input_image.size)
        canvas.paste(input_image)
        input_image = canvas
    return ImageOps.expand(input_image, border=border, fill=color)
def combine(query, support):
    """Stack the support image above the query image on one RGB canvas.

    If the support is wider than the query it is first scaled down to the
    query's width (preserving aspect ratio).

    FIX: the original called support.resize(w1, h2*w1/w2), which (a) passes
    width/height as two positional arguments instead of a (w, h) tuple,
    (b) discards the return value — PIL's resize returns a new image — and
    (c) uses a float height. Also removed the unused y_offset local.
    """
    w1, h1 = query.size
    w2, h2 = support.size
    if w2 > w1:
        support = support.resize((w1, int(h2 * w1 / w2)))
        w2, h2 = support.size
    new_img = Image.new('RGB', (w1, h1 + h2))
    new_img.paste(support, (0, 0))
    new_img.paste(query, (0, h2))
    return new_img
def combine_images(images):
    """Concatenate a sequence of PIL images left-to-right on a single
    transparent RGBA canvas sized to fit them all."""
    sizes = [image.size for image in images]
    total_width = sum(width for width, _ in sizes)
    max_height = max(height for _, height in sizes)
    canvas = Image.new('RGBA', (total_width, max_height), (255, 0, 0, 0))
    x_cursor = 0
    for image in images:
        canvas.paste(image, (x_cursor, 0))
        x_cursor += image.size[0]
    return canvas
def get_one_croped_image(query_img_id, target_cat=None, cids=None, output_dir="", json_to_cid=None):
    """Look up the support annotations paired with a query image.

    The supports are optionally filtered by target_cat and/or cids
    (collections of category ids). output_dir and json_to_cid are accepted
    for backward compatibility but unused here — the cropping/saving logic
    lives in save_outputs.

    Returns (support_anns, support_ids).
    Relies on the module-level pair_df and all_support_anns.

    FIX: removed an unused `global cat_f` declaration and a large block of
    dead, commented-out cropping code that had migrated to save_outputs.
    """
    support_ids = pair_df[pair_df.img_id ==
                          query_img_id].support_ann_ids.values[0]
    support_anns = [
        ann for ann in all_support_anns if ann["id"] in support_ids]
    if target_cat:
        support_anns = [
            ann for ann in support_anns if ann["category_id"] in target_cat]
    if cids:
        support_anns = [
            ann for ann in support_anns if ann["category_id"] in cids]
    support_ids = [ann["id"] for ann in support_anns]
    return support_anns, support_ids
def get_image_and_th(result, target_cat):
    """Score each evaluated image and pick a per-image detection threshold.

    For every per-image entry in *result* (the list produced by
    COCOeval.evaluate_debug(), using IoU threshold index 0, i.e. 0.5), this
    computes precision/recall over score-sorted detections, takes the best
    F-beta (beta=0.5) as the image score, and sets the threshold just below
    the score of the detection attaining it (so that "> th" keeps it).

    Returns (img_score, img_thresholds), both dicts keyed by image id.

    Fixes vs. the original:
    * removed a leftover `import pdb; pdb.set_trace()` breakpoint that
      halted execution;
    * replaced the deprecated np.float alias (removed in NumPy 1.20);
    * removed dead code: a `mean_precision = np.mean(precision)` assignment
      that was immediately overwritten, and the q/ss arrays (plus their
      bare try/except loop) whose results were never used — this also drops
      the hidden dependency on the global R.
    """
    img_score = defaultdict()
    img_thresholds = defaultdict()
    for item in result:
        # Optionally restrict to the requested categories.
        if target_cat:
            E = [i for i in item if i["category_id"] in target_cat]
        else:
            E = item
        if not E:
            continue
        dtScores = np.concatenate([e['dtScores'] for e in E])
        # Sort detections by descending score (stable sort).
        inds = np.argsort(-dtScores, kind='mergesort')
        dtScoresSorted = dtScores[inds]
        dtm = np.concatenate([e['dtMatches'][0] for e in E])[inds]
        dtIg = np.concatenate([e['dtIgnore'][0] for e in E])[inds]
        tps = np.logical_and(dtm, np.logical_not(dtIg))
        fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg))
        tp = np.cumsum(tps).astype(dtype=float)
        fp = np.cumsum(fps).astype(dtype=float)
        # NOTE(review): recall is normalized by the detection count here,
        # not the ground-truth count — kept as in the original.
        npig = len(tp)
        rc = tp / npig
        pr = tp / (fp + tp + np.spacing(1))
        beta = 0.5
        f1 = ((1 + beta**2) * pr * rc) / (beta**2 * pr + rc)
        if not pr.any() or not rc.any() or np.all(np.isnan(f1)):
            continue
        mean_precision = np.max(f1)
        image_id = [i["image_id"] for i in E][0]
        img_score[image_id] = mean_precision
        # Back off slightly so the best-F detection itself passes "> th".
        th = dtScoresSorted[np.nanargmax(f1)] - 0.00001
        img_thresholds[image_id] = th
    return img_score, img_thresholds
def get_resize_from_size(box_size, max_size):
    """Scale a (width, height) pair so its longer side equals max_size.

    Returns an int tuple (truncated, as int() does), preserving aspect ratio.
    """
    factor = max_size / max(box_size)
    width, height = box_size
    return (int(width * factor), int(height * factor))
def save_outputs(imgIds, thresholds, root, cocoGT, cocoDt, target_cat=None,):
    """Render per-image prediction/ground-truth visualisations and their
    cropped support examples under root/<image_id>/.

    imgIds and thresholds are parallel sequences: each image keeps only the
    detections scoring above its own threshold. Relies on module-level
    coco, coco_s, class_names and get_one_croped_image/get_resize_from_size.

    Fixes vs. the original:
    * the support loop reused the names ``img``/``I``, clobbering the query
      image dict so the prediction file was named after the *support* image
      id; the loop now uses ``s_img``/``s_image`` and the output file gets
      the query image id as intended;
    * ground-truth lookups now use the ``cocoGT`` parameter — the original
      read the global ``cocoGt`` and silently ignored the parameter (the
      caller passes the same object, so behavior is unchanged in practice);
    * ``figsize`` is not a valid ``savefig`` argument (silently ignored by
      old matplotlib, an error on modern versions) and has been dropped,
      along with the unused scaling locals that fed it;
    * removed large blocks of dead, commented-out code.
    """
    json_category_id_to_contiguous_id = {
        v: i + 1 for i, v in enumerate(coco.getCatIds())
    }
    if not os.path.exists(root):
        os.makedirs(root)
    images = cocoDt.loadImgs(imgIds)
    counter = 0
    for img, th in zip(images, thresholds):
        output_dir = root + "/" + str(img['id']) + "/"
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        annIds = cocoDt.getAnnIds(img['id'], iscrowd=None)
        anns = cocoDt.loadAnns(annIds)
        if target_cat:
            anns = [a for a in anns if a["category_id"] in target_cat]
        # Keep only detections above this image's threshold.
        anns = [a for a in anns if a["score"] > th]
        if len(anns) == 0:
            print("no annotations")
            continue
        if target_cat:
            cids = [ann["category_id"]
                    for ann in anns if ann["category_id"] in target_cat]
        else:
            cids = [ann["category_id"] for ann in anns if ann["category_id"]]
        if not cids:
            print("no target categories")
            continue
        counter += 1
        if target_cat:
            anns = [ann for ann in anns if ann["category_id"] in target_cat]
        # Plain query image.
        plt.axis('off')
        I = Image.open('../datasets/coco/val2017/%s' % img['file_name'])
        plt.imshow(I)
        plt.savefig(output_dir + img['file_name'], dpi=300,
                    bbox_inches='tight', pad_inches=0)
        # Ground-truth annotations for the predicted categories.
        annIds_gt = cocoGT.getAnnIds(img['id'], iscrowd=None)
        anns_gt = cocoGT.loadAnns(annIds_gt)
        anns_gt = [ann for ann in anns_gt if ann["category_id"] in set(cids)]
        for i, ann in enumerate(anns_gt):
            anns_gt[i]["category_id"] = json_category_id_to_contiguous_id[ann["category_id"]]
        coco.showAnns(anns_gt, class_names=class_names,
                      show_mask=True,
                      show_bbox=True,
                      box_width=1,
                      draw_caption=True,
                      text_size=10,
                      )
        plt.savefig(output_dir + "gt_" + img['file_name'], dpi=300,
                    bbox_inches='tight', pad_inches=0)
        plt.clf()
        plt.cla()
        # Predicted annotations on a fresh copy of the image.
        plt.axis('off')
        plt.imshow(I)
        for i, ann in enumerate(anns):
            anns[i]["category_id"] = json_category_id_to_contiguous_id[ann["category_id"]]
        coco.showAnns(anns, class_names=class_names,
                      show_mask=True,
                      show_bbox=True,
                      box_width=1,
                      draw_caption=True,
                      text_size=10
                      )
        # Crop and save each paired support example, named by its class.
        support_anns, support_ids = get_one_croped_image(
            img['id'],
            target_cat,
            cids, output_dir,
        )
        support_cids = [ann["category_id"] for ann in support_anns]
        support_cids = [json_category_id_to_contiguous_id[c] for c in support_cids]
        classes = [class_names[i] for i in support_cids]
        for sid, support_ann, kls in zip(support_ids, support_anns, classes):
            # FIX: use distinct names so the outer img/I are not clobbered.
            s_img = coco_s.loadImgs(support_ann['image_id'])[0]
            # lvis uses the example from train
            s_file = '../datasets/coco/train2017/%s' % s_img['file_name']
            s_image = Image.open(s_file)
            [x, y, w, h] = support_ann["bbox"]
            box = [x, y, x + w, y + h]
            cropped_box = s_image.crop(box)
            new_size = get_resize_from_size(max_size=400, box_size=cropped_box.size)
            cropped_box = cropped_box.resize(new_size)
            cropped_box.save("{0}{1}.png".format(output_dir, kls))
        classes = ','.join(classes)
        # FIX: named after the query image id (the original accidentally used
        # the last support image's id here).
        output_file = '{0}pred_{1}_{2}.png'.format(output_dir, img['id'], classes)
        plt.savefig(output_file, dpi=300, bbox_inches='tight', pad_inches=0)
        plt.clf()
        plt.cla()
        plt.close()
# Render all matplotlib text with a serif (Times New Roman) font.
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = ['Times New Roman'] + plt.rcParams['font.serif']
# Dataset selection — earlier experiments used COCO/VOC; the current run
# evaluates on the LVIS v0.5 common+rare validation split.
# annFile = "../datasets/coco/annotations/instances_val2017.json"
# annFile = "../datasets/voc/VOC2012/Annotations/pascal_val2012_instance.json"
# lvis_v0.5_train.json_common_rare
annFile = "../datasets/lvis/annotations/lvis_v0.5_val.json_common_rare"
coco=COCO(annFile)
cats = coco.loadCats(coco.getCatIds())
all_cats=[cat['name'] for cat in cats]
# cat_r = [c["id"] for c in cats if c["frequency"] == "r"]
# cat_c = [c["id"] for c in cats if c["frequency"] == "c"]
# cat_f = [c["id"] for c in cats if c["frequency"] == "f"]
# Class names with a background slot at index 0; parenthesized qualifiers
# (e.g. "(automobile)") stripped for display.
class_names = ["BG"] + all_cats
class_names = [(re.sub(r" ?\([^)]+\)", "", c)) for c in class_names]
# Support annotations come from the *training* split of the same dataset.
annFile_s = annFile
annFile_s = "../datasets/lvis/annotations/lvis_v0.5_train.json_common_rare"
coco_s=COCO(annFile_s)
all_support_anns = coco_s.dataset['annotations']
# all_support_anns = [ann for ann in all_support_anns if ann["category_id"] not in cat_f]
# i = [a["id"] for i, a in enumerate(anns["categories"]) if i % 4 == (split-1)]
# i = [a["id"] for i, a in enumerate(anns["categories"]) if not i % 4 == (split-1)]
cids = coco.getCatIds()
# Main driver: evaluate (or load cached evaluation of) the model's
# segmentations, rank images by best-F score, and render the top 1000.
# Earlier experiments iterated over category splits; a single pass with no
# category filter is kept here (hence range(1) and target_cat = None).
for i in range(1):
    target_cat = None
    model_path = "../models/lvis_mil12_aff005_test"
    log_folder = "inference/lvis_val_cocostyle"
    pair_df_file = "{0}/features/all_pair_df.pickle".format(model_path)
    with open(pair_df_file, "rb") as f:
        pair_df = pickle.load(f)
    seg_file = "{0}/{1}/segm.json".format(model_path, log_folder)
    cocoGt = COCO(annFile)
    cocoDt = coco.loadRes(seg_file)
    # Cache the (slow) COCO evaluation between notebook runs.
    log_file = model_path + "/" + log_folder + "/coco_evaluate_result.pkl"
    if os.path.isfile(log_file):
        with open(log_file, 'rb') as handle:
            coco_evaluate_result = pickle.load(handle)
    else:
        ann_type_id = 0
        annType = ['segm', 'bbox', 'keypoints'][ann_type_id]  # specify type here
        resFile = '%s/%s/%s.json' % (model_path, log_folder, annType)
        cocoDt = cocoGt.loadRes(resFile)
        imgIds = sorted(cocoGt.getImgIds())
        cocoEval = COCOeval(cocoGt, cocoDt, annType)
        cocoEval.params.imgIds = imgIds
        coco_evaluate_result = cocoEval.evaluate_debug()
        with open(log_file, "wb") as handle:
            pickle.dump(coco_evaluate_result, handle)
    # Standard COCO recall thresholds 0.00, 0.01, ..., 1.00.
    # FIX: np.linspace requires an integer sample count — np.round returns a
    # float, which raises TypeError on modern NumPy; cast explicitly.
    recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
    R = len(recThrs)
    _img_score, _img_thresholds = get_image_and_th(coco_evaluate_result, target_cat)
    img_ids = np.array([k for k, v in _img_score.items()])
    img_scores = np.array([v for k, v in _img_score.items()])
    img_thresholds = np.array([v for k, v in _img_thresholds.items()])
    # Keep the 1000 best-scoring images.
    inds = np.argsort(-img_scores, kind='mergesort')
    inds = inds[0:1000]
    img_thresholds = img_thresholds[inds]
    img_scores = img_scores[inds]
    test_images = img_ids[inds]
    img_output_folder = "outputs_lvis_{0}".format(i)
    save_outputs(test_images, img_thresholds, img_output_folder, cocoGt, cocoDt, target_cat)
    print("done")
len(_img_score)
pair_df_file
```
| github_jupyter |
```
import pandas as pd
import mysql.connector as sql
from geopy.geocoders import Nominatim
import geopy.geocoders
pd.options.display.max_colwidth = 200
indices = ['hoou','oerinfo','hhu','openrub','digill','zoerr', 'tibav', 'oncampus']
col_names = [
"name",
"about",
"author",
"publisher",
"inLanguage",
"accessibilityAPI",
"accessibilityControl",
"accessibilityFeature",
"accessibilityHazard",
"license",
"timeRequired",
"educationalRole",
"alignmentType",
"educationalFramework",
"targetDescription",
"targetName",
"targetURL",
"educationalUse",
"typicalAgeRange",
"interactivityType",
"learningResourceType",
"date_published",
"url",
"thumbnail",
"tags",
"project",
"source",
"spider",
"date_scraped"
]
df = {}
def make_dataframes():
    """Load every '<index>_tb' MySQL table into the module-level df dict.

    FIX: the original opened a fresh database connection per table and never
    closed any of them (a connection leak); one connection is now opened,
    reused for all tables, and closed in a finally block.

    Table names come from the fixed `indices` list, not user input, so the
    string-built SELECT is safe here.
    """
    db_connection = sql.connect(
        host='localhost',
        database='oerhoernchen_db',
        user='oerhoernchen',
        password='oerhoernchenpw')
    try:
        db_cursor = db_connection.cursor()
        for index in indices:
            db_cursor.execute('SELECT * FROM ' + index + '_tb')
            table_rows = db_cursor.fetchall()
            df[index] = pd.DataFrame(table_rows, columns=col_names)
        db_cursor.close()
    finally:
        db_connection.close()
make_dataframes()
# Geo stuff
# Resolve each OER provider's host institution to a location once, up front
# (Nominatim is an online, rate-limited service, so these lookups must not
# be repeated per dataframe row).
geolocator = Nominatim(user_agent="oercrawler")
location_digill = geolocator.geocode("Ruhr-Universität Bochum")
location_hhu = geolocator.geocode("Heinrich-Heine-Universität Düsseldorf")
location_hoou = geolocator.geocode("Hamburg Online University")
location_oerinfo = geolocator.geocode("DIPF Leibniz-Institut für Bildungsforschung und Bildungsinformation")
location_oncampus = geolocator.geocode("Fachhochschule Lübeck")
location_openrub = geolocator.geocode("Ruhr Universität Bochum")
location_tibav = geolocator.geocode("TIB Hannover")
location_zoerr = geolocator.geocode("Universität Tübingen")
def add_location(row):
    """Map a dataframe row's 'source' label to (address, latitude, longitude)
    using the geocoded module-level location_* objects.

    Returns None for an unknown source, matching the original fall-through.
    """
    locations_by_source = {
        "HOOU": location_hoou,
        "digiLL": location_digill,
        "HHU-Mediathek": location_hhu,
        "OERinfo": location_oerinfo,
        "ONCAMPUS": location_oncampus,
        "OpenRub": location_openrub,
        "TIB-AV-Portal": location_tibav,
        "ZOERR": location_zoerr,
    }
    location = locations_by_source.get(row['source'])
    if location is None:
        return None
    return location.address, location.latitude, location.longitude
def write_location_to_df():
    """Attach a 'lat_lon' string column ("lat, lon") to every dataframe in df,
    derived from each row's source institution via add_location.

    FIX: '(' and ')' are regex metacharacters; pandas' Series.str.replace
    historically defaulted to regex=True, so stripping the tuple parentheses
    warned/failed. regex=False performs the intended literal replacement.
    """
    for index in indices:
        df[index]['location'] = df[index].apply(add_location, axis=1)
        # Keep only (lat, lon) from the (address, lat, lon) tuple.
        df[index]['lat_lon'] = df[index]['location'].apply(lambda x: (x[-2], x[-1]))
        df[index].drop(columns=['location'], inplace=True)
        df[index]['lat_lon'] = df[index]['lat_lon'].astype(str)
        df[index]['lat_lon'] = df[index]['lat_lon'].str.replace("(", "", regex=False).str.replace(")", "", regex=False)
write_location_to_df()
# Import dataframe into MySQL
import sqlalchemy
# NOTE(review): credentials are hard-coded; move them to environment
# variables or a config file before sharing this notebook.
database_username = 'root'
database_password = 'my-secret-pw'
database_ip = 'localhost:3306'
database_name = 'oerhoernchen_db'
engine = sqlalchemy.create_engine('mysql+mysqlconnector://{0}:{1}@{2}/{3}'.format(database_username, database_password, database_ip, database_name))
# Write each edited dataframe back as '<index>_tb_edited', replacing any
# previous version of that table.
for index in indices:
    df[index].to_sql(name=index + "_tb_edited", con=engine, index=False, if_exists='replace')
# How to read in with sqlalchemy
df2 = pd.read_sql_table("hhu_tb", engine)
df2
import pandas as pd
df = pd.read_sql_table("indices", engine)
df
# Example: build a dataframe from a list of sitemap dicts.
SITEMAPS = [{'name': 'HOOU', 'url': 'www.hoou.de/sitemap.xml', 'read': 1}, {'name': 'oer_oerinfo', 'url': 'www.oerinfo.de/sitemap.xml', 'read': 1}]
df_list = pd.DataFrame(SITEMAPS)
df_list
```
| github_jupyter |
```
import pandas as pd
pd.set_option("display.max_columns", None)
# Load the per-source category totals produced by upstream cleaning steps.
marine = pd.read_csv('data_processed/MarineLitterWatch_totals_by_category.csv')
tides = pd.read_csv('data_processed/TIDES_earth_totals_by_category.csv')
mdmap = pd.read_csv('data_processed/mdmap_totals_by_category.csv')
# Tag rows with their origin so they stay identifiable after concatenation.
mdmap['Dataset'] = 'MDMAP'
tides['Dataset'] = 'TIDES'
# NOTE(review): `marine` never receives a 'Dataset' tag, so its rows end up
# with NaN in that column after the later concat -- confirm this is intended.
print(mdmap.shape)
print(tides.shape)
print(marine.shape)
# Expected row count of the concatenated frame (sanity check).
total_row_count = mdmap.shape[0] + tides.shape[0] + marine.shape[0]
print(total_row_count)
```
### preparing the marine data
```
marine.head(5)
# add 60,000 to uniqueID to make sure all the values from different files are unique.
# (assumes the other sources' IDs stay below 60000 -- TODO confirm)
marine['UniqueID'] = marine['UniqueID'] + 60000
marine.head(5)
# Helper to normalise marine-dataset dates.
def marDate(s):
    """Reformat a YYYYMMDD date value as the string MM-DD-YYYY."""
    text = str(s)
    year, month, day = text[0:4], text[4:6], text[6:]
    return f"{month}-{day}-{year}"
# Apply the MM-DD-YYYY reformatting to every marine record.
marine.Date = marine.Date.apply(marDate)
marine.Date.head()
marine.head()
```
### preparing the tides data
```
tides.head(5)
# splitting the Date column to get month, year and Date in different column and then joining it back to the dataframe.
# 'Cleanup Date' is YYYY-MM-DD, so the split yields columns 0=year, 1=month, 2=day.
tidesTemp = tides['Cleanup Date'].str.split('-', expand = True)
tides = tides.join(tidesTemp)
tides.head(5)
# renaming the columns to make sure they are consistent in all the 3 datasets.
tides = tides.rename(columns = {'Cleanup ID': 'UniqueID',
                                'Cleanup Date': 'Date',
                                'People': 'Total Volunteers',
                                'Total Items Collected': 'Total Item Count',
                                'LAT': 'Latitude 1',
                                'LONG': 'Longitude 1',
                                'Fishing Gear': 'Fishing Gear Raw Count',
                                'Glass': 'Glass Raw Count',
                                'Metal': 'Metal Raw Count',
                                'Other': 'Other Raw Count',
                                'Plastic': 'Plastic Raw Count',
                                'Processed Lumber':'Processed Lumber Raw Count',
                                'Rubber': 'Rubber Raw Count',
                                'Meters': 'Total Area',
                                0: 'Year',
                                1: 'Month',
                                2: 'Day'})
# TIDES records area in linear meters (MDMAP below uses square meters).
tides['Total Area Unit'] = 'meters'
# Helper to normalise tides-dataset dates.
def tidDate(s):
    """Reformat a YYYY-MM-DD date string as MM-DD-YYYY."""
    text = str(s)
    pieces = [text[5:7], text[-2:], text[0:4]]
    return "-".join(pieces)
# Apply the MM-DD-YYYY reformatting to every tides record.
tides.Date = tides.Date.apply(tidDate)
tides.Date.head()
tides.head()
```
### prepping the data for mdmap
```
mdmap.head(5)
# splitting the Date column to get month, year and Date in different column and then joining it back to the dataframe.
# mdmap dates appear to be MM-DD-YYYY (split assigned 0=month, 1=day, 2=year) -- TODO confirm against the raw file.
mdmapTemp = mdmap['Date'].str.split('-', expand = True)
mdmap = mdmap.join(mdmapTemp)
# renaming the columns to make sure they are consistent in all the 3 datasets.
mdmap = mdmap.rename(columns= {'Latitude Start': 'Latitude 1',
                               'Latitude End': 'Latitude 2',
                               'Longitude Start': 'Longitude 1',
                               'Longitude End': 'Longitude 2',
                               'Total Debris': 'Total Item Count',
                               'Cloth': 'Cloth Raw Count',
                               'Fishing Gear': 'Fishing Gear Raw Count',
                               'Glass': 'Glass Raw Count',
                               'Metal': 'Metal Raw Count',
                               'Other': 'Other Raw Count',
                               'Plastic': 'Plastic Raw Count',
                               'Processed Lumber': 'Processed Lumber Raw Count',
                               'Rubber': 'Rubber Raw Count',
                               0: 'Month',
                               1: 'Day',
                               2: 'Year',
                               'UniqueId': 'UniqueID',
                               'TotalArea': 'Total Area'})
# MDMAP records area in square meters.
mdmap['Total Area Unit'] = 'square meters'
mdmap.head()
# combining the 3 dataframes into a single dataframe
frames = [marine, tides, mdmap]
result = pd.concat(frames)
result.shape
result.head(5)
# Columns (and their order) to keep in the combined output.
columns_to_keep = ['Dataset', 'UniqueID', 'Survey ID', 'Survey Year', 'Date', 'Day', 'Month', 'Year', 'Organization', 'Total Volunteers', 'Latitude 1', 'Longitude 1', 'Latitude 2', 'Longitude 2', 'Country', 'State', 'Total Area', 'Total Area Unit', 'Fishing Gear Raw Count', 'Glass Raw Count', 'Metal Raw Count', 'Other Raw Count', 'Plastic Raw Count', 'Processed Lumber Raw Count', 'Rubber Raw Count', 'Total Item Count', 'Pounds']
# .copy() so the rounding below mutates an independent frame rather than a
# view of `result` (avoids pandas SettingWithCopyWarning).
final_df = result[columns_to_keep].copy()
final_df['Total Area'] = round(final_df['Total Area'], 2)
final_df.head()
set(final_df.Dataset)
len(final_df['UniqueID'].unique())
final_df.to_csv('data_processed/concatenated_datasets_all.csv', index=False)
# Subset containing only rows with usable GPS coordinates.
final_df_gps_coords = final_df.dropna(subset=['Latitude 1', 'Longitude 1'])
final_df_gps_coords.head()
len(final_df_gps_coords['UniqueID'].unique())
# Bug fix: this previously wrote `final_df`, so the *_with_gps_coords.csv
# export was identical to the full dataset instead of the GPS-filtered subset.
final_df_gps_coords.to_csv('data_processed/concatenated_datasets_with_gps_coords.csv', index=False)
```
| github_jupyter |
* Hyperparameter tuning of the 5 classifiers for emotional state detection
* 5 fold cross validation with grid-search
* Multiclass classification
```
import pandas as pd
import datetime
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from pprint import pprint
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.feature_selection import SelectFromModel,RFECV
from sklearn.model_selection import cross_validate
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold, StratifiedKFold, cross_val_score, PredefinedSplit
from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold, StratifiedKFold, cross_val_score
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn import metrics
from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import SMOTENC
from imblearn.over_sampling import ADASYN
from imblearn.over_sampling import SVMSMOTE
from imblearn.combine import SMOTEENN
from imblearn.combine import SMOTETomek
pd.options.mode.chained_assignment = None
import re
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
#warnings.filterwarnings('always')
import pickle
from sklearn.compose import ColumnTransformer
from sklearn.decomposition import PCA
from imblearn.pipeline import Pipeline
from imblearn.over_sampling import SMOTE
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from imblearn.metrics import specificity_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import make_scorer, f1_score, roc_auc_score, precision_score, recall_score, confusion_matrix
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from xgboost import XGBClassifier
from catboost import CatBoostClassifier, Pool, cv
from sklearn.neural_network import MLPClassifier
#from pandas_ml import ConfusionMatrix
#import collections
def read_input(p):
    """Read participant *p*'s feature CSV and drop empty / all-zero columns."""
    path = f'data/NonOverlap_w5_emoChange_SelFeat_data_p{p}.csv'
    frame = pd.read_csv(path)
    # Discard variables that contain nothing but NaN.
    frame = frame.dropna(axis=1, how='all')
    frame = frame.reset_index(drop=True)
    # Keep only columns with at least one non-zero value.
    frame = frame.T[(frame != 0).any()].T
    return frame
# replace NaNs with a fixed sentinel
def prep_data(data):
    """Return *data* with every NaN replaced by the sentinel value -999."""
    sentinel = -999
    return data.fillna(sentinel)
# remove a list of columns from a frame
def drop_cols(data, col_list):
    """Return *data* without the columns named in *col_list*."""
    return data.drop(columns=col_list)
# standardize features (z-score via StandardScaler, not min-max)
def scale_data(trn_x, tst_x):
    """Standardize features: fit the scaler on the training split only.

    Bug fix: the test split was previously passed through ``fit_transform``,
    which re-fits the scaler on test data -- that leaks test-set statistics
    into evaluation. The test split must only be ``transform``-ed with the
    training-set mean/std.
    """
    sc = StandardScaler()
    scaled_trn_x = sc.fit_transform(trn_x)
    scaled_tst_x = sc.transform(tst_x)
    return scaled_trn_x, scaled_tst_x
# oversampling with SMOTE with 'minority' and 'not majority'
def over_sample_SMOTE(X_train, y_train):
    """Oversample every non-majority class of the training split with SMOTE.

    NOTE(review): ``fit_sample`` was removed in imbalanced-learn >= 0.8 in
    favour of ``fit_resample`` -- confirm the pinned imblearn version.
    """
    sm=SMOTE(sampling_strategy='not majority', random_state=10) # 'minority'
    X_train_ovr, y_train_ovr=sm.fit_sample(X_train, y_train)
    #print(X_train_ovr.shape, y_train_ovr.shape)
    return X_train_ovr, y_train_ovr
# oversampling with SVMSMOTE
def over_sample_SVMSMOTE(X_train, y_train):
    """Oversample the training split with SVM-SMOTE (default strategy).

    NOTE(review): ``fit_sample`` was removed in imbalanced-learn >= 0.8 in
    favour of ``fit_resample`` -- confirm the pinned imblearn version.
    """
    sm=SVMSMOTE(random_state=10)
    X_train_ovr, y_train_ovr=sm.fit_sample(X_train, y_train)
    #print(X_train_ovr.shape, y_train_ovr.shape)
    return X_train_ovr, y_train_ovr
def select_k_features(X_train_scaled,X_test_scaled,y_train,k):
    """Keep the top-*k* features by mutual information, fit on train only.

    The selector is fitted on the training split and then applied to the
    test split, so no test labels influence the selection.
    """
    # k passed as a keyword: it is keyword-only in recent scikit-learn,
    # so the old positional call raises TypeError there.
    selection = SelectKBest(mutual_info_classif, k=k)
    X_train = selection.fit_transform(X_train_scaled,y_train)
    X_test = selection.transform(X_test_scaled)
    return X_train, X_test
# define random state to re-generate the same result
random_state = 43
# total persons
# Participant IDs whose per-person CSV files exist under data/.
p_list=[8, 10,12,13,15,20,21,25, 27, 33,35,40,46,48,49,52,54,55]
#p_list=[8]
# #of folds
# NOTE(review): later cells hard-code n_splits=5 / nfolds=5 instead of
# reading this constant -- keep them in sync.
n_fold=5
def print_results(accu, bl_accu, prec, rec_, spec_, roc_, f1_):
    """Print the mean and standard deviation of each cross-validation metric list."""
    print('.....................')
    labelled = [('Accuracy', accu),
                ('Balanced_accuracy', bl_accu),
                ('Precision', prec),
                ('Recall', rec_),
                ('Specificity', spec_),
                ('ROC AUC', roc_),
                ('F1 score', f1_)]
    for label, values in labelled:
        print(f"Average {label}: {np.mean(values):.2f}% ({np.std(values):.2f})")
    print('..................................................')
    print('\n')
# Combined pipeline + search space covering all five classifier families.
# NOTE(review): the scorer below uses average='binary' and the XGB objective
# is 'binary:logistic', while the notebook header says "multiclass" --
# confirm which task this run targets.
pipe = Pipeline([('scaler', StandardScaler()), # MinMaxScaler()
                 ('selector', SelectKBest(mutual_info_classif, k=90)), #
                 ('classifier', LogisticRegression())])
search_space = [{'selector__k': [ 50, 70, 90]},
                {'classifier': [LogisticRegression(solver='lbfgs')],
                 'classifier__C': [0.01, 0.1, 1.0],
                 'classifier__penalty': ['l1', 'l2', None],
                 'classifier__solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'],
                 'classifier__max_iter':[100, 150, 200],
                 'classifier__class_weight':[None, 'balanced']},
                {'classifier': [RandomForestClassifier()],
                 'classifier__max_depth': [5, 10, 30, None],
                 'classifier__criterion':['gini','entropy'],
                 'classifier__bootstrap': [True],
                 'classifier__max_features':['log2', None],
                 'classifier__n_estimators': [50, 100, 200, 300, 400]},
                {'classifier': [MLPClassifier(random_state=1, early_stopping=True)],
                 'classifier__hidden_layer_sizes' : [(50, 50, 50), (50, 100, 50), (20, 20, 20), (30, ), (50,),(100,)],
                 'classifier__activation' : ['tanh', 'relu', 'logistic'],
                 'classifier__max_iter':[50, 100, 150, 200, 300],
                 'classifier__solver': ['sgd', 'adam', 'lbfgs'],
                 'classifier__alpha': [0.0001, 0.001, 0.05]},
                {'classifier': [CatBoostClassifier(random_seed=1)],
                 'classifier__learning_rate': [0.05, 0.1, 0.15, 0.2]},
                {'classifier': [XGBClassifier(objective='binary:logistic', random_state=1)],
                 'classifier__learning_rate': [0.05, 0.1, 0.15, 0.2],
                 'classifier__colsample_bytree':[.5, .75, 1],
                 'classifier__max_depth': np.arange(3, 6, 10),
                 'classifier__n_estimators': [50, 100, 200, 300, 400]}]
# F1 scorer intended for GridSearchCV refitting (currently unused downstream).
scorer = make_scorer(f1_score, average = 'binary')
# Per-family pipelines: same scale -> select-k -> classify skeleton, with the
# selector__k grid repeated so each family also tunes the feature count.
LR_pipe = Pipeline([('scaler', StandardScaler()), # MinMaxScaler()
                    ('selector', SelectKBest(mutual_info_classif, k=90)), #
                    ('classifier', LogisticRegression())])
LR_search_space = [{'selector__k': [ 50, 70, 90, 110]},
                   {'classifier': [LogisticRegression(solver='lbfgs')],
                    'classifier__C': [0.01, 0.1, 1.0],
                    'classifier__penalty': ['l1', 'l2', None],
                    'classifier__solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'],
                    'classifier__max_iter':[100, 150, 200],
                    'classifier__class_weight':[None, 'balanced']}]
################################################################################
RF_pipe = Pipeline([('scaler', StandardScaler()), # MinMaxScaler()
                    ('selector', SelectKBest(mutual_info_classif, k=90)), #
                    ('classifier', RandomForestClassifier())])
RF_search_space = [{'selector__k': [ 50, 70, 90, 110]},
                   {'classifier': [RandomForestClassifier()],
                    'classifier__max_depth': [5, 10, 30, None],
                    'classifier__criterion':['gini','entropy'],
                    'classifier__bootstrap': [True],
                    'classifier__max_features':['log2', None],
                    'classifier__n_estimators': [50, 100, 200, 300, 400]}]
################################################################################
MLP_pipe = Pipeline([('scaler', StandardScaler()), # MinMaxScaler()
                     ('selector', SelectKBest(mutual_info_classif, k=90)), #
                     ('classifier', MLPClassifier(random_state=1, early_stopping=True))])
MLP_search_space = [{'selector__k': [ 50, 70, 90, 110]},
                    {'classifier': [MLPClassifier(random_state=1, early_stopping=True)],
                     'classifier__hidden_layer_sizes' : [(50, 50, 50), (50, 100, 50), (20, 20, 20), (30, ), (50,),(100,)],
                     'classifier__activation' : ['tanh', 'relu', 'logistic'],
                     'classifier__max_iter':[50, 100, 150, 200, 300],
                     'classifier__solver': ['sgd', 'adam', 'lbfgs'],
                     'classifier__alpha': [0.0001, 0.001, 0.05]}]
################################################################################
CB_pipe = Pipeline([('scaler', StandardScaler()), # MinMaxScaler()
                    ('selector', SelectKBest(mutual_info_classif, k=90)), #
                    ('classifier', CatBoostClassifier(random_seed=1))])
CB_search_space = [{'selector__k': [ 50, 70, 90, 110]},
                   {'classifier': [CatBoostClassifier(random_seed=1, verbose=False)],
                    'classifier__learning_rate': [0.05, 0.1, 0.15, 0.2]}]
################################################################################
XGB_pipe = Pipeline([('scaler', StandardScaler()), # MinMaxScaler()
                     ('selector', SelectKBest(mutual_info_classif, k=90)), #
                     ('classifier', XGBClassifier(objective='binary:logistic', random_state=1))])
XGB_search_space = [{'selector__k': [ 50, 70, 90, 110]},
                    {'classifier': [XGBClassifier(objective='binary:logistic', random_state=1)],
                     'classifier__learning_rate': [0.05, 0.1, 0.15, 0.2],
                     'classifier__colsample_bytree':[.5, .75, 1],
                     'classifier__max_depth': np.arange(3, 6, 10),
                     'classifier__n_estimators': [50, 100, 200, 300, 400]}]
def grid_search_wrapper(pipe = pipe, search_space = search_space, verbose= False,refit_score=scorer):
    """
    Fit a GridSearchCV over *search_space* with stratified 5-fold CV.

    NOTE(review): despite the parameter name, *refit_score* is currently
    unused -- the scoring/refit arguments are commented out below, so the
    search ranks candidates by the estimator's default score (accuracy).
    Also reads the module-level X and y set by the calling loop rather than
    taking them as parameters.
    """
    cross_validation = StratifiedKFold(n_splits=5, shuffle=True, random_state=random_state)
    # n_jobs=-1: use every core for the (large) candidate grid.
    grid_search = GridSearchCV(pipe, search_space, cv=cross_validation, verbose=verbose, n_jobs = -1) #scoring=scorer, refit=scorer
    grid_search.fit(X, y)
    return grid_search
p_list=[8, 10,12,13,15,20,21,25, 27, 33,35,40,46,48,49,52,54,55]
# Best fitted pipeline per participant, per classifier family (same order as p_list).
best_LR = []
best_RF = []
best_MLP = []
best_XGB = []
best_CB = []
# for each person in the dataset, find the best hyperparamters for the model in given range
for p in p_list:
    df=read_input(p)
    #df.head()
    df=prep_data(df)
    # remove day_of_month variable if present in data
    if 'day_of_month' in df.columns:
        drop_col=['day_of_month']
        df=drop_cols(df, drop_col)
    #remove classes that have less then 5 samples
    min_c=df['emotion_change'].value_counts()
    if (min_c <= 5).any():
        r_label=min_c[min_c <= 5].index[0]
        # NOTE(review): counts come from 'emotion_change' but rows are dropped
        # by 'emotion' -- likely should be df.emotion_change; confirm. Also
        # only the first undersized class is removed, not all of them.
        df = df.drop(df.index[df.emotion == r_label])
    dataset = df
    # X/y are module-level on purpose: grid_search_wrapper reads them globally.
    y = dataset['emotion_change'].copy()
    X = dataset.loc[:, dataset.columns != 'emotion_change'].copy()
    #X = X.apply(pd.to_numeric)
    #perform grid search and find the best models
    print("Person "+str(p))
    print("-------------------------------------------------------")
    pipeline_grid_search_LR = grid_search_wrapper(pipe = LR_pipe, search_space = LR_search_space, verbose=2)
    print(pipeline_grid_search_LR.best_estimator_)
    print(pipeline_grid_search_LR.best_score_)
    best_LR.append(pipeline_grid_search_LR.best_estimator_)
    print("-------------------------------------------------------")
    pipeline_grid_search_RF = grid_search_wrapper(pipe = RF_pipe, search_space = RF_search_space, verbose=2)
    print(pipeline_grid_search_RF.best_estimator_)
    print(pipeline_grid_search_RF.best_score_)
    best_RF.append(pipeline_grid_search_RF.best_estimator_)
    print("-------------------------------------------------------")
    pipeline_grid_search_MLP = grid_search_wrapper(pipe = MLP_pipe, search_space = MLP_search_space, verbose=2)
    print(pipeline_grid_search_MLP.best_estimator_)
    print(pipeline_grid_search_MLP.best_score_)
    best_MLP.append(pipeline_grid_search_MLP.best_estimator_)
    print("-------------------------------------------------------")
    pipeline_grid_search_XGB = grid_search_wrapper(pipe = XGB_pipe, search_space = XGB_search_space, verbose=2)
    print(pipeline_grid_search_XGB.best_estimator_)
    print(pipeline_grid_search_XGB.best_score_)
    best_XGB.append(pipeline_grid_search_XGB.best_estimator_)
    print("-------------------------------------------------------")
    pipeline_grid_search_CB = grid_search_wrapper(pipe = CB_pipe, search_space = CB_search_space, verbose=False)
    print(pipeline_grid_search_CB.best_estimator_)
    print(pipeline_grid_search_CB.best_score_)
    best_CB.append(pipeline_grid_search_CB.best_estimator_)
    print("-------------------------------------------------------")
best_models = {} # dictionary of best models with best parameters
best_models['Logistic Regression'] = best_LR
best_models['RandomForest Classifier'] = best_RF
best_models['MLP Classifier'] = best_MLP
best_models['XGBoost Classifier'] = best_XGB
best_models['CatBoost Classifier'] = best_CB
#save the best models
# np.save pickles the dict of fitted estimator lists into one .npy file.
np.save('Indiv_best_models.npy', best_models)
# Load the best models
# NOTE(review): allow_pickle='TRUE' is a truthy *string*, not the bool True --
# it works, but allow_pickle=True is what is meant. Unpickling estimators is
# only safe for files you produced yourself.
best_models = np.load('Indiv_best_models.npy',allow_pickle='TRUE').item()
# this is to get all the detailed performance metrics after selecting the best model parameters
nfolds = 5
skf=StratifiedKFold(n_splits=nfolds, shuffle=True, random_state=random_state)
# Outer loop: one pass per classifier family; inner loop: one pass per person.
for model_name, model in best_models.items():
    # Per-person averages for each metric, collected across p_list.
    accu = []
    prec = []
    rec_ = []
    f1_ = []
    bl_accu = []
    roc_ = []
    spec_ = []
    model_index = -1
    for p in p_list:
        model_index = model_index + 1
        # model is the per-person list of fitted pipelines; pick p's entry.
        model__ = model[model_index]
        clf = model__['classifier']
        if model_name == 'CatBoost Classifier':
            # Fresh CatBoost with a fixed learning rate (the stored one is
            # refit below anyway, and verbose=False silences iteration logs).
            clf = CatBoostClassifier(random_seed=1, verbose=False,learning_rate= 0.1)
        df=read_input(p)
        #df.head()
        df=prep_data(df)
        # remove day_of_month variable if present in data
        if 'day_of_month' in df.columns:
            drop_col=['day_of_month']
            df=drop_cols(df, drop_col)
        #remove classes that have less then 5 samples
        min_c=df['emotion_change'].value_counts()
        if (min_c <= 5).any():
            r_label=min_c[min_c <= 5].index[0]
            # NOTE(review): counts come from 'emotion_change' but rows are
            # dropped by 'emotion' -- likely should be df.emotion_change;
            # confirm (same pattern as the tuning loop above).
            df = df.drop(df.index[df.emotion == r_label])
        dataset = df
        y = dataset['emotion_change'].copy()
        X = dataset.loc[:, dataset.columns != 'emotion_change'].copy()
        # Running sums over the folds; divided by nfolds after the loop.
        avg_ac=0.0
        avg_bl_ac=0.0
        avg_rc=0.0
        avg_pr=0.0
        avg_f1=0.0
        avg_spec=0.0
        avg_roc=0.0
        avg_kp=0.0
        i = 1
        for train_index, test_index in skf.split(X ,y):
            #print("fold", i)
            i+=1
            X_train, X_test = X.iloc[train_index], X.iloc[test_index]
            y_train, y_test = y.iloc[train_index], y.iloc[test_index]
            #scale features
            X_train_scaled, X_test_scaled= scale_data(X_train, X_test)
            #feature selection
            # NOTE(review): k is recovered by slicing characters [14:16] out of
            # the selector's repr -- fragile (breaks for k < 10 or k >= 100, or
            # if the repr format changes). Prefer model__['selector'].k.
            X_train, X_test = select_k_features(X_train_scaled,X_test_scaled,y_train,k=int(str(model__['selector'])[14:16]))
            #oversample training data
            #X_train_imb,y_train_imb=over_sample_SMOTE(X_train, y_train)
            #X_train_imb,y_train_imb=over_sample_SMOTENC(X_train, y_train, index1, index2)
            #X_train_imb,y_train_imb=over_sample_SVMSMOTE(X_train, y_train)
            # train model on imbalance-handled data
            #clf.fit(X_train_imb, y_train_imb)
            #train model on imbalanced data (oversampling deliberately disabled)
            clf.fit(X_train, y_train)
            # test model, measure class label and probability score
            y_pred = clf.predict(X_test)
            # [:,1]: probability of the positive class -- assumes a binary
            # target despite the "multiclass" notebook header; TODO confirm.
            y_scores = clf.predict_proba(X_test)[:,1]
            #calculate metrics
            accuracy = accuracy_score(y_test, y_pred)
            bl_accuracy = balanced_accuracy_score(y_test, y_pred)
            precision=precision_score(y_test, y_pred, labels=np.unique(y_pred))
            recall=recall_score(y_test, y_pred, labels=np.unique(y_pred))
            f1=f1_score(y_test, y_pred, labels=np.unique(y_pred))
            roc=roc_auc_score(y_test, y_scores, labels=np.unique(y_pred))
            spec=specificity_score(y_test, y_pred ,labels=np.unique(y_pred))
            # Convert everything to percentages before accumulating.
            ac=accuracy * 100.0
            pr=precision*100
            rc=recall*100
            f1_p=f1*100
            bl_ac=bl_accuracy*100
            roc=roc*100
            spec=spec*100
            #update running sums for this person's folds
            avg_ac+=ac
            avg_bl_ac+=bl_ac
            avg_rc+=rc
            avg_pr+=pr
            avg_f1+=f1_p
            avg_roc+=roc
            avg_spec+=spec
        # Fold sums -> per-person means.
        avg_ac = avg_ac/nfolds
        avg_bl_ac = avg_bl_ac/nfolds
        avg_rc = avg_rc/nfolds
        avg_pr = avg_pr/nfolds
        avg_f1 = avg_f1/nfolds
        avg_roc = avg_roc/nfolds
        avg_spec = avg_spec/nfolds
        accu.append(avg_ac)
        prec.append(avg_pr)
        rec_.append(avg_rc)
        f1_.append(avg_f1)
        bl_accu.append(avg_bl_ac)
        roc_.append(avg_roc)
        spec_.append(avg_spec)
    # NOTE(review): "Restuls" typo in the output string below -- fixing it
    # would change runtime output, so it is left as-is here.
    print('Restuls for: ', model_name)
    print_results(accu, bl_accu, prec, rec_, spec_, roc_, f1_)
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Keras
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/guide/keras"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />Читай на TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/ru/guide/keras.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Запусти в Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/ru/guide/keras.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />Изучай код на GitHub</a>
</td>
</table>
Note: Вся информация в этом разделе переведена с помощью русскоговорящего Tensorflow сообщества на общественных началах. Поскольку этот перевод не является официальным, мы не гарантируем что он на 100% аккуратен и соответствует [официальной документации на английском языке](https://www.tensorflow.org/?hl=en). Если у вас есть предложение как исправить этот перевод, мы будем очень рады увидеть pull request в [tensorflow/docs](https://github.com/tensorflow/docs) репозиторий GitHub. Если вы хотите помочь сделать документацию по Tensorflow лучше (сделать сам перевод или проверить перевод подготовленный кем-то другим), напишите нам на [docs@tensorflow.org list](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs).
Keras - это высокоуровневый API для создания моделей глубокого обучения.
Он используется для быстрого создания прототипов, сложных исследований,
а также для создания приложений. Три ключевых преимущества Keras API:
- *Простота в использовании*<br>
Keras имеет простой интерфейс, оптимизированный для большинства
распространенных задач глубокого обучения. Также он дает конкретные подсказки как быстро
исправить возможные ошибки
- *Модульность*<br>
Модели Keras строятся при помощи объединения нескольких простых модулей, каждый из которых
может быть настроен независимо, и не накладывает каких либо значительных ограничений
- *Легко расширить модель*<br> Ты можешь создавать свои собственные модули,
чтобы свободно выражать свои идеи для исследования. Создавай новые слои,
функции потерь и разрабатывай современные модели глубокого обучения
## Импортируем tf.keras
`tf.keras` - это реализация [спецификации Keras API](https://keras.io){:.external} в TensorFlow.
Это высокоуровневый API для построения
моделей глубокого обучения с первоклассной поддержкой
функционала TensorFlow, например [eager execution](#eager_execution),
конвейеры `tf.data` и алгоритмы оценки [Estimators](./estimators.md).
`tf.keras` делает TensorFlow простым в использовании, не теряя в гибкости и производительности.
Чтобы начать, просто импортируй `tf.keras` после TensorFlow в начале кода:
```
# Используй pyyaml если будешь сохранять в формате YAML
!pip install -q pyyaml
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow.keras import layers
print(tf.VERSION)
print(tf.keras.__version__)
```
`tf.keras` может запускать любой совместимый с Keras код, но тем не менее помни:
* Версия `tf.keras` в последнем релизе TensorFlow может быть не самой
последнией версией `keras`, доступной в PyPI. Всегда проверяй версию при помощи `tf.keras.__version__`
* Когда ты [сохраняешь только веса модели](#weights_only), `tf.keras` по умолчанию
сохраняет их в [формате контрольной точки](./checkpoints.md). Используй `save_format='h5'`
чтобы сохранять как HDF5
## Построим простую модель
### Последовательная модель
В Keras мы используем *слои* для построения *моделей*. Обычно модель - это граф, состоящий из нескольких слоев. Самый распространенный тип модели это стэк идущих друг за другом слоев - последовательная модель `tf.keras.Sequential`.
Для начала давай построим простую полносвязную сеть (или многослойный перцептрон):
```
model = tf.keras.Sequential()
# Добавим в нашу модель слой Dense из 64 блоков:
model.add(layers.Dense(64, activation='relu'))
# И еще один:
model.add(layers.Dense(64, activation='relu'))
# Также добавим слой softmax с 10 выводящими блоками:
model.add(layers.Dense(10, activation='softmax'))
```
### Конфигурация слоев
Существует много разных слоев `tf.keras.layers` с общими параметрами конфигурации:
* `activation`: Устанавливает функцию активации для данного слоя. Этот параметр должен указываться в имени встроенной функции или использоваться как вызываемый объект. По умолчанию активация не используется
* `kernel_initializer` и `bias_initializer`: Инициализация, которая создает веса данного слоя (ядро kernel и смещение bias). Этот параметр используется как имя или вызываемый объект. По умолчанию используется инициализатор `"Glorot uniform"`
* `kernel_regularizer` и `bias_regularizer`: Регуляризация, которая применяется к весам слоя (ядро kernel и смещение bias), например L1 или L2. По умолчанию не используется
Следующий код используется для построения `tf.keras.layers.Dense` с настройкой конфигурации каждого слоя:
```
# Создаем сигмоидный слой:
layers.Dense(64, activation='sigmoid')
# Или:
layers.Dense(64, activation=tf.sigmoid)
# Линейный слой с регуляризацией L1 с коэффицентом 0.01 примененная к ядру матрицы:
layers.Dense(64, kernel_regularizer=tf.keras.regularizers.l1(0.01))
# Линейный слой с регуляризацией L2 с коэффицентом 0.01 примененная к вектору смещения (bias):
layers.Dense(64, bias_regularizer=tf.keras.regularizers.l2(0.01))
# Линейный слой с ядром, инициализированным в случайной прямоугольный матрице:
layers.Dense(64, kernel_initializer='orthogonal')
# Линейный слой с вектором смещения, инициализированным с коэффицентом 2:
layers.Dense(64, bias_initializer=tf.keras.initializers.constant(2.0))
```
## Обучение и оценка
### Настроим конфигурацию обучения
После того как модель построена давай обозначим процесс обучения с помощью метода `compile`:
```
model = tf.keras.Sequential([
# Добавим в нашу модель слой `Dense` из 64 блоков:
layers.Dense(64, activation='relu', input_shape=(32,)),
# И еще один:
layers.Dense(64, activation='relu'),
# Также добавим слой softmax с 10 выводящими блоками:
layers.Dense(10, activation='softmax')])
model.compile(optimizer=tf.train.AdamOptimizer(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
```
У `tf.keras.Model.compile` есть три важных аргумента:
* `optimizer`: Этот объект определяет процедуру обучения. Мы будем использовать оптимизаторы из модуля `tf.train`, например такие как `tf.train.AdamOptimizer`, `tf.train.RMSPropOptimizer` и `tf.train.GradientDescentOptimizer`
* `loss`: Эта функция минимизации для решения задач оптимизации. Самыми распространенными выборами этого аргумента являются среднеквадратическое отклонение (`mse`), `categorical_crossentropy`, и `binary_crossentropy`. Функции потерь обозначены по имени или используются как вызываемые объекты из модуля `tf.keras.losses`
* `metrics`: Используются для наблюдения за процессом обучения. Используются как строки или вызываемые объекты модуля `tf.keras.metrics`
Ниже пример конфигурации модели для обучения:
```
# Настраиваем регрессию модели, используем среднеквадратическое отклонение
model.compile(optimizer=tf.train.AdamOptimizer(0.01),
loss='mse', # среднеквадратическое отклонение
metrics=['mae']) # средняя абсолютная ошибка
# Настраиваем модель для классификации по категориям
model.compile(optimizer=tf.train.RMSPropOptimizer(0.01),
loss=tf.keras.losses.categorical_crossentropy,
metrics=[tf.keras.metrics.categorical_accuracy])
```
### Используем данные NumPy
При работе с небольшими датасетами мы будем использовать загружаемые в память массивы данных [NumPy](https://www.numpy.org/){:.external} для обучения и оценки моделей. Модель будет "подстраиваться" под тренировочные данные при помощи метода `fit`:
```
import numpy as np
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
model.fit(data, labels, epochs=10, batch_size=32)
```
У `tf.keras.Model.fit` есть три важных аргумента:
* `epochs`: Процесс обучения структурирован по *эпохам*. Эпоха означает один проход по всему набору входных данных, который происходит небольшими порциями в батчах
* `batch_size`: Когда мы используем массивы NumPy модель делит данные на небольшие батчи и затем выполняет указанное количество проходов. Это число определяет размер батча. Помни, что последний батч может быть меньше если общее количество образцов данных не делится на размер батча
* `validation_data`: Когда мы строим прототип модели, мы хотим легко наблюдать за ее точностью на проверочных данных. При помощи данного аргумента - обычно кортеж или метка - модель будет отображать потери и статистику в режиме выводов inference для прошедших через модель данных в конце каждой эпохи
Вот пример использования `validation_data`:
```
import numpy as np
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
val_data = np.random.random((100, 32))
val_labels = np.random.random((100, 10))
model.fit(data, labels, epochs=10, batch_size=32,
validation_data=(val_data, val_labels))
```
### Используем датасеты tf.data
Чтобы обучать модель на больших датасетах или на нескольких устройствах мы можем воспользоваться [Datasets API](./datasets.md). Просто добавь `tf.data.Dataset` к методу `fit`:
```
# Инициализируем пробную инстанцию датасета:
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(32)
dataset = dataset.repeat()
# Не забудь указать количество шагов в каждой эпохе `steps_per_epoch` при использовании метода `fit`
model.fit(dataset, epochs=10, steps_per_epoch=30)
```
Таким образом, метод `fit` использует аргумент `steps_per_epoch` - это количество шагов обучения, которые модель должна сделать прежде, чем перейти на следующую эпоху. Поскольку `Dataset` принимает батчи данных, то в этом примере кода нам не требуется указывать размер батча в `batch_size`.
Также `dataset` можно использовать для проверки точности модели:
```
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(32).repeat()
val_dataset = tf.data.Dataset.from_tensor_slices((val_data, val_labels))
val_dataset = val_dataset.batch(32).repeat()
model.fit(dataset, epochs=10, steps_per_epoch=30,
validation_data=val_dataset,
validation_steps=3)
```
### Оценка и предсказание
Методы `tf.keras.Model.evaluate` и `tf.keras.Model.predict` могут использовать данные NumPy и `tf.data.Dataset`.
Используй следующий пример кода для оценки потерь и других показателей предсказаний на использованных данных:
```
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
model.evaluate(data, labels, batch_size=32)
model.evaluate(dataset, steps=30)
```
Для *предсказания* вывода последнего слоя как массив NumPy, используй следующий код:
```
result = model.predict(data, batch_size=32)
print(result.shape)
```
## Построение сложных моделей
### Функциональный API
Последовательная модель `tf.keras.Sequential` представляет из себя обычное наложение или стек слоев, которые не могут представлять произвольные модели. Используй
[функциональный API Keras ](https://keras.io/getting-started/functional-api-guide/){:.external} для построения комплексных топологий моделей, например таких как:
* Модели с множественными входами данных
* Модели с множественными выводами данных
* Модели с общими слоями, где один и тот же слой вызывается несколько раз
* Модели с непоследовательным потоком данных, например где есть остаточные связи
Построение модели при помощи функционального API выглядит следующим образом:
1. Слой вызывается и возвращает тензор
2. Для определения `tf.keras.Model` используем входные и выводящие тензоры
3. Модель обучается точно так же, как и `Sequential`
Следующий пример показывает как использовать функциональный API для построения простой полносвязной сети:
```
# Возвращает тензор-"заполнитель", который мы используем в качестве примера
inputs = tf.keras.Input(shape=(32,))
# Вызываемый на тензор слой, возвращает тензор
x = layers.Dense(64, activation='relu')(inputs)
x = layers.Dense(64, activation='relu')(x)
predictions = layers.Dense(10, activation='softmax')(x)
```
Обозначим вход и вывод данных нашей модели:
```
model = tf.keras.Model(inputs=inputs, outputs=predictions)
# При помощи метода `compile` настраиваем конфигурацию обучения
model.compile(optimizer=tf.train.RMSPropOptimizer(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# Тренируем модель в течение 5 эпох
model.fit(data, labels, batch_size=32, epochs=5)
```
### Создание подклассов моделей
Также ты можешь создать модель с нуля при помощи подклассов в `tf.keras.Model` и определения собственных прямых проходов. Создавай слои в методе `__init__` и установи их как атрибуты класса. Прямой проход должен быть указан в методе `call`.
Создание подклассов моделей особенно полезно, когда активирован [eager execution](./eager.md), так как прямой проход в этом случае всегда будет записан.
Ключевой момент: всегда используй правильный API для решения конкретной задачи. Поскольку создание подклассов модели предполагает определенную гибкость, эта гибкость осложняет определение структуры и может повлечь больше ошибок у пользователя. Если возможно, то всегда старайся использовать функциональный API.
Смотри следующий пример кода, в котором мы будем использовать подклассы `tf.keras.Model` и собственный прямой проход:
```
class MyModel(tf.keras.Model):
    """Subclassed two-layer classifier: a ReLU hidden layer and a sigmoid head."""

    def __init__(self, num_classes=10):
        super().__init__(name='my_model')
        self.num_classes = num_classes
        # Layers are created once here and reused on every forward pass.
        self.dense_1 = layers.Dense(32, activation='relu')
        self.dense_2 = layers.Dense(num_classes, activation='sigmoid')

    def call(self, inputs):
        """Forward pass: hidden layer followed by the classification head."""
        hidden = self.dense_1(inputs)
        return self.dense_2(hidden)

    def compute_output_shape(self, input_shape):
        """Return the input shape with its last dimension set to `num_classes`.

        Only needed when the subclassed model is used in a functional-style
        graph; otherwise this method is optional.
        """
        dims = tf.TensorShape(input_shape).as_list()
        dims[-1] = self.num_classes
        return tf.TensorShape(dims)
```
Укажем класс новой модели:
```
model = MyModel(num_classes=10)
# При помощи метода `compile` настраиваем конфигурацию обучения
model.compile(optimizer=tf.train.RMSPropOptimizer(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# Обучаем в течение 5 эпох
model.fit(data, labels, batch_size=32, epochs=5)
```
### Создание собственных слоев
Чтобы создать свой собственный слой при помощи подклассов `tf.keras.layers.Layer` нам потребуются следующие методы:
* `build`: Создает веса слоя. Чтобы добавить веса в модель используй метод `add_weight`
* `call`: Определяет прямой проход
* `compute_output_shape`: Определяет как вычислить выводящую форму слоя для данной входной формы
* Также слой можно сериализировать при помощи метода `get_config` и метода класса `from_config`
Вот пример использования нового слоя, в котором мы использовали `matmul` входа с матрицей ядра `kernel`:
```
class MyLayer(layers.Layer):
    """Custom layer computing `matmul(inputs, kernel)` with a trainable kernel."""

    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super().__init__(**kwargs)

    def build(self, input_shape):
        """Create the trainable kernel once the input feature size is known."""
        kernel_shape = tf.TensorShape((input_shape[1], self.output_dim))
        self.kernel = self.add_weight(
            name='kernel',
            shape=kernel_shape,
            initializer='uniform',
            trainable=True,
        )
        # The parent `build` must run last so the layer is marked as built.
        super().build(input_shape)

    def call(self, inputs):
        """Forward pass: plain matrix multiplication with the kernel."""
        return tf.matmul(inputs, self.kernel)

    def compute_output_shape(self, input_shape):
        """Return the input shape with its last dimension set to `output_dim`."""
        dims = tf.TensorShape(input_shape).as_list()
        dims[-1] = self.output_dim
        return tf.TensorShape(dims)

    def get_config(self):
        """Serialize the layer config, adding our extra constructor argument."""
        config = super().get_config()
        config['output_dim'] = self.output_dim
        return config

    @classmethod
    def from_config(cls, config):
        """Rebuild the layer from a config produced by `get_config`."""
        return cls(**config)
```
Теперь создадим модель, используя наш новый слой:
```
model = tf.keras.Sequential([
MyLayer(10),
layers.Activation('softmax')])
# При помощи метода `compile` настраиваем конфигурацию обучения
model.compile(optimizer=tf.train.RMSPropOptimizer(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# Обучаем в течение 5 эпох
model.fit(data, labels, batch_size=32, epochs=5)
```
## Функции обратного вызова
Функция обратного вызова `callback` - это объект, который передается модели для обработки и расширения ее поведения во время обучения. Ты можешь написать свою собственную функцию callback, или использовать готовые `tf.keras.callbacks` в которые входят:
* `tf.keras.callbacks.ModelCheckpoint`: Сохраняет контрольные точки твоей модели через заданный интервал
* `tf.keras.callbacks.LearningRateScheduler`: Замедляет темп обучения `learning rate` для получения лучших результатов точности
* `tf.keras.callbacks.EarlyStopping`: Останавливает обучение как только точность перестает увеличиваться
* `tf.keras.callbacks.TensorBoard`: Следит за поведением модели при помощи [TensorBoard](./summaries_and_tensorboard.md)
Чтобы использовать `tf.keras.callbacks.Callback` просто передай его в метод `fit` своей модели:
```
callbacks = [
# Прерывает обучение если потери при проверке `val_loss` перестают
# уменьшаться после 2 эпох
tf.keras.callbacks.EarlyStopping(patience=2, monitor='val_loss'),
# Записываем логи TensorBoard в папку `./logs`
tf.keras.callbacks.TensorBoard(log_dir='./logs')
]
model.fit(data, labels, batch_size=32, epochs=5, callbacks=callbacks,
validation_data=(val_data, val_labels))
```
<a id='weights_only'></a>
## Сохранение и загрузка
### Сохраняем веса
Ты можешь сохранять и загружать веса модели при помощи `tf.keras.Model.save_weights`:
```
model = tf.keras.Sequential([
layers.Dense(64, activation='relu'),
layers.Dense(10, activation='softmax')])
model.compile(optimizer=tf.train.AdamOptimizer(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# Сохраняем веса в контрольную точку формата TensorFlow
model.save_weights('./weights/my_model')
# Восстанавливаем состояние модели,
# требуется использование модели с точно такой же архитектурой
model.load_weights('./weights/my_model')
```
По умолчанию, веса модели сохраняются в формате [контрольной точки TensorFlow](./checkpoints.md). Веса также могут быть сохранены в формате Keras HDF5, который является стандартным в бэкенд структуре Keras:
```
# Сохраняем веса в файл HDF5
model.save_weights('my_model.h5', save_format='h5')
# Загружаем текущее состояние модели
model.load_weights('my_model.h5')
```
### Сохраняем конфигурацию
Конфигурация модели также может быть сохранена: такой метод сериализирует архитектуру модели без сохранения весов.
Сохраненная конфигурация может быть загружена и инициализирована как оригинальная модель, даже без кода изначальной модели.
Keras поддерживает форматы сериализации данных JSON и YAML:
```
# Сериализация модели в формат JSON
json_string = model.to_json()
json_string
import json
import pprint
pprint.pprint(json.loads(json_string))
```
Давай воссоздадим только что инициализированную модель из JSON:
```
fresh_model = tf.keras.models.model_from_json(json_string)
```
Сериализация модели в формат YAML требует установки `pyyaml` *до импорта TensorFlow*:
```
yaml_string = model.to_yaml()
print(yaml_string)
```
Восстановим модель из формата YAML:
```
fresh_model = tf.keras.models.model_from_yaml(yaml_string)
```
Важно: модели с подклассами не могут быть сериализированы, потому что их архитектура определяется кодом Python в методе `call`.
### Сохраняем модель полностью
Мы можем сохранить модель целиком в один файл, который будет содержать веса, конфигурацию модели и даже настройки оптимизатора. Это позволяет сохранить модель как контрольную точку и продолжить обучение позже - ровно с того же момента и без доступа к исходному коду.
```
# Создаем простую модель
model = tf.keras.Sequential([
layers.Dense(10, activation='softmax', input_shape=(32,)),
layers.Dense(10, activation='softmax')
])
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, batch_size=32, epochs=5)
# Сохраняем модель целиком в один файл формата HDF5
model.save('my_model.h5')
# Восстанавливаем ту же самую модель, включая веса и оптимизатор
model = tf.keras.models.load_model('my_model.h5')
```
## Eager execution
[Eager execution](./eager.md) - это окружение императивного программирования, в котором все операции вычисляются мгновенно. В нем не требуется Keras, однако `tf.keras` поддерживается и оказывается весьма полезен для проверки программы и отладки кода.
Все API `tf.keras` для построения моделей совместимы с eager execution. В то время как функциональный API и `Sequential` могут быть использованы и здесь, наибольшее преимущество получат *модели с подклассами* и *модели с собственными слоями* - это именно те API, в которых тебе необходимо указать прямой проход в виде кода (вместо тех API, которые создают модели посредством сборки существующих слоев).
Смотри [Руководство по eager execution](./eager.md#build_a_model) для ознакомления с примерами использования моделей Keras с уникальными циклами обучения `tf.GradientTape`.
## Распределенное обучение
### Алгоритмы оценки Estimators
API [Estimators](./estimators.md) используется для обучения моделей в распределенном окружении. Он необходим для решения задач распределенного обучения на больших датасетах например для экспорта модели в продакшен.
`tf.keras.Model` может обучаться с API `tf.estimator` посредством конвертации модели в объект `tf.estimator.Estimator` при помощи `tf.keras.estimator.model_to_estimator`. Читай больше в статье [Создание Estimators из моделей Keras](./estimators.md#creating_estimators_from_keras_models).
```
model = tf.keras.Sequential([layers.Dense(10,activation='softmax'),
layers.Dense(10,activation='softmax')])
model.compile(optimizer=tf.train.RMSPropOptimizer(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
estimator = tf.keras.estimator.model_to_estimator(model)
```
Совет: используй [eager execution](./eager.md) для отладки
[функций входа Estimator](./premade_estimators.md#create_input_functions) и проверки данных.
### Обучение на нескольких GPU
Модели `tf.keras` могут обучаться на нескольких GPU при помощи
`tf.contrib.distribute.DistributionStrategy`. Этот API обеспечивает распределенное обучение на нескольких GPU почти без изменений основного кода модели.
В настоящее время `tf.contrib.distribute.MirroredStrategy` является единственной поддерживаемой стратегией распределенного обучения. `MirroredStrategy` выполняет внутриграфную репликацию с синхронным обучением, используя функцию all-reduce на одном устройстве. Чтобы использовать `DistributionStrategy` вместе с Keras, конвертируй модель `tf.keras.Model` в `tf.estimator.Estimator` при помощи `tf.keras.estimator.model_to_estimator`, а затем обучи получившийся estimator.
В следующем примере мы посмотрим как распределить `tf.keras.Model` на нескольких GPU на одном устройстве.
Для начала определим простую модель:
```
model = tf.keras.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10,)))
model.add(layers.Dense(1, activation='sigmoid'))
optimizer = tf.train.GradientDescentOptimizer(0.2)
model.compile(loss='binary_crossentropy', optimizer=optimizer)
model.summary()
```
Затем определим функцию *загрузки и обработки данных*. Функция `input_fn` возвращает объект `tf.data.Dataset`, который используется для распределения данных на нескольких устройствах, где каждый GPU обрабатывает свой входящий батч.
```
def input_fn():
    """Build a `tf.data.Dataset` of random binary-classification examples.

    Returns 1024 random 10-feature rows with 0/1 labels, repeated 10 times
    and batched by 32, so each replica/GPU can consume its own batch.
    """
    features = np.random.random((1024, 10))
    labels = np.random.randint(2, size=(1024, 1))
    features = tf.cast(features, tf.float32)
    return (tf.data.Dataset
            .from_tensor_slices((features, labels))
            .repeat(10)
            .batch(32))
```
Далее, создадим `tf.estimator.RunConfig` и передадим аргумент `train_distribute`
к `tf.contrib.distribute.MirroredStrategy`. При создании
`MirroredStrategy` ты можешь определить список устройств или передать аргумент `num_gpus` с заданным количеством GPU для обучения. По умолчанию используются все доступные GPU как в следующем примере:
```
strategy = tf.contrib.distribute.MirroredStrategy()
config = tf.estimator.RunConfig(train_distribute=strategy)
```
Конвертируем модель Keras в `tf.estimator.Estimator`:
```
keras_estimator = tf.keras.estimator.model_to_estimator(
keras_model=model,
config=config,
model_dir='/tmp/model_dir')
```
Наконец, обучаем `Estimator`, передав аргументы `input_fn` и `steps`:
```
keras_estimator.train(input_fn=input_fn, steps=10)
```
| github_jupyter |
# Recommending movies: ranking
**Learning Objectives**
1. Get our data and split it into a training and test set.
2. Implement a ranking model.
3. Fit and evaluate it.
## Introduction
The retrieval stage is responsible for selecting an initial set of hundreds of candidates from all possible candidates. The main objective of this model is to efficiently weed out all candidates that the user is not interested in. Because the retrieval model may be dealing with millions of candidates, it has to be computationally efficient.
The ranking stage takes the outputs of the retrieval model and fine-tunes them to select the best possible handful of recommendations. Its task is to narrow down the set of items the user may be interested in to a shortlist of likely candidates.
Each learning objective will correspond to a _#TODO_ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive2/recommendation_systems/soulutions/basic_ranking.ipynb)
## Imports
Let's first get our imports out of the way.
```
!pip install -q tensorflow-recommenders
!pip install -q --upgrade tensorflow-datasets
```
**Note: Please ignore the incompatibility errors and re-run the above cell before proceeding for the lab.**
```
# You can use any Python source file as a module by executing an import statement in some other Python source file.
# The import statement combines two operations; it searches for the named module, then it binds the
# results of that search to a name in the local scope.
import os
import pprint
import tempfile
from typing import Dict, Text
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_recommenders as tfrs
```
This notebook uses TF2.x.
Please check your tensorflow version using the cell below.
```
# Show the currently installed version of TensorFlow
print("TensorFlow version: ",tf.version.VERSION)
```
## Lab Task 1: Preparing the dataset
We're going to use the same data as the [retrieval](https://www.tensorflow.org/recommenders/examples/basic_retrieval) tutorial. This time, we're also going to keep the ratings: these are the objectives we are trying to predict.
```
ratings = tfds.load("movielens/100k-ratings", split="train")
ratings = ratings.map(lambda x: {
"movie_title": x["movie_title"],
"user_id": x["user_id"],
"user_rating": x["user_rating"]
})
```
As before, we'll split the data by putting 80% of the ratings in the train set, and 20% in the test set.
```
tf.random.set_seed(42)
shuffled = ratings.shuffle(100_000, seed=42, reshuffle_each_iteration=False)
# TODO 1a -- your code goes here
```
Let's also figure out unique user ids and movie titles present in the data.
This is important because we need to be able to map the raw values of our categorical features to embedding vectors in our models. To do that, we need a vocabulary that maps a raw feature value to an integer in a contiguous range: this allows us to look up the corresponding embeddings in our embedding tables.
```
movie_titles = ratings.batch(1_000_000).map(lambda x: x["movie_title"])
user_ids = ratings.batch(1_000_000).map(lambda x: x["user_id"])
unique_movie_titles = np.unique(np.concatenate(list(movie_titles)))
unique_user_ids = np.unique(np.concatenate(list(user_ids)))
```
## Lab Task 2: Implementing a model
### Architecture
Ranking models do not face the same efficiency constraints as retrieval models do, and so we have a little bit more freedom in our choice of architectures.
A model composed of multiple stacked dense layers is a relatively common architecture for ranking tasks. We can implement it as follows:
```
class RankingModel(tf.keras.Model):
    """Student-lab ranking model: embeds user ids and movie titles, then scores
    the pair with a stack of dense layers.

    NOTE(review): intentionally incomplete -- the TODOs below must create
    `self.user_embeddings` and `self.movie_embeddings`; until they do, `call`
    raises AttributeError.
    """

    def __init__(self):
        super().__init__()
        # Shared width of both embedding tables.
        embedding_dimension = 32
        # Compute embeddings for users.
        # TODO 2a -- your code goes here
        # Compute embeddings for movies.
        # TODO 2b -- your code goes here
        # Compute predictions.
        self.ratings = tf.keras.Sequential([
            # Learn multiple dense layers.
            tf.keras.layers.Dense(256, activation="relu"),
            tf.keras.layers.Dense(64, activation="relu"),
            # Make rating predictions in the final layer.
            tf.keras.layers.Dense(1)
        ])

    def call(self, inputs):
        """Score one (user_id, movie_title) pair and return a predicted rating."""
        user_id, movie_title = inputs
        # Both lookups rely on the embedding layers the TODOs above must create.
        user_embedding = self.user_embeddings(user_id)
        movie_embedding = self.movie_embeddings(movie_title)
        # Concatenate the two embeddings and run the dense scoring stack.
        return self.ratings(tf.concat([user_embedding, movie_embedding], axis=1))
```
This model takes user ids and movie titles, and outputs a predicted rating:
```
RankingModel()((["42"], ["One Flew Over the Cuckoo's Nest (1975)"]))
```
### Loss and metrics
The next component is the loss used to train our model. TFRS has several loss layers and tasks to make this easy.
In this instance, we'll make use of the `Ranking` task object: a convenience wrapper that bundles together the loss function and metric computation.
We'll use it together with the `MeanSquaredError` Keras loss in order to predict the ratings.
```
task = tfrs.tasks.Ranking(
loss = tf.keras.losses.MeanSquaredError(),
metrics=[tf.keras.metrics.RootMeanSquaredError()]
)
```
The task itself is a Keras layer that takes true and predicted as arguments, and returns the computed loss. We'll use that to implement the model's training loop.
### The full model
We can now put it all together into a model. TFRS exposes a base model class (`tfrs.models.Model`) which streamlines building models: all we need to do is to set up the components in the `__init__` method, and implement the `compute_loss` method, taking in the raw features and returning a loss value.
The base model will then take care of creating the appropriate training loop to fit our model.
```
class MovielensModel(tfrs.models.Model):
    """Full TFRS model: pairs `RankingModel` with a `Ranking` task.

    The TFRS base class builds the training loop around `compute_loss`.
    """

    def __init__(self):
        super().__init__()
        self.ranking_model: tf.keras.Model = RankingModel()
        # The task bundles the MSE training loss with an RMSE metric.
        self.task: tf.keras.layers.Layer = tfrs.tasks.Ranking(
            loss=tf.keras.losses.MeanSquaredError(),
            metrics=[tf.keras.metrics.RootMeanSquaredError()],
        )

    def compute_loss(self, features: Dict[Text, tf.Tensor], training=False) -> tf.Tensor:
        """Predict ratings for the batch and return the task's loss value."""
        predictions = self.ranking_model(
            (features["user_id"], features["movie_title"]))
        # The task computes the loss and the metrics.
        return self.task(labels=features["user_rating"], predictions=predictions)
```
## Lab Task 3: Fitting and evaluating
After defining the model, we can use standard Keras fitting and evaluation routines to fit and evaluate the model.
Let's first instantiate the model.
```
model = MovielensModel()
model.compile(optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.1))
```
Then shuffle, batch, and cache the training and evaluation data.
```
cached_train = train.shuffle(100_000).batch(8192).cache()
cached_test = test.batch(4096).cache()
```
Then train the model:
```
model.fit(cached_train, epochs=3)
```
As the model trains, the loss is falling and the RMSE metric is improving.
Finally, we can evaluate our model on the test set:
```
# TODO 3a -- your code goes here
```
The lower the RMSE metric, the more accurate our model is at predicting ratings.
## Next steps
The model above gives us a decent start towards building a ranking system.
Of course, making a practical ranking system requires much more effort.
In most cases, a ranking model can be substantially improved by using more features rather than just user and candidate identifiers. To see how to do that, have a look at the [side features](https://www.tensorflow.org/recommenders/examples/featurization) tutorial.
A careful understanding of the objectives worth optimizing is also necessary. To get started on building a recommender that optimizes multiple objectives, have a look at our [multitask](https://www.tensorflow.org/recommenders/examples/multitask) tutorial.
| github_jupyter |
[](https://pythonista.io)
# Seguridad y autenticación básicas.
*Django* cuenta desde su instalación con un sistema simple de:
* Gestión de usuarios.
* Gestión de grupos.
* Autenticación básica.
* Gestión de sesiones.
La documentación correspondiente puede ser consultada en la siguiente liga:
https://docs.djangoproject.com/en/3.0/topics/auth/
**Nota:** Los alcances de este curso sólo cubren los temas de gestión básica de usuarios y autenticación simple.
## Aplicaciones y middleware asociados.
Las aplicaciones instaladas para la gestión de usuarios y permisos son:
* ```django.contrib.auth``` para la gestión de usuarios y grupos.
* ```django.contrib.contenttypes``` para asignar permisos a los modelos que cree el desarrollador.
```
from tutorial.tutorial import settings
settings.INSTALLED_APPS
```
El middleware que utiliza *Django* por efecto es:
* ```django.contrib.sessions.middleware.SessionMiddleware```, el cual permite realizar la autenticación y autorización de acceso.
* ```django.contrib.auth.middleware.AuthenticationMiddleware```, el cual gestiona las sesiones.
```
settings.MIDDLEWARE
```
## El modelo ```django.contrib.auth.models.User```.
Los usuarios y grupos en *Django* se crean mediante modelos predeterminados.
* La clase ```django.contrib.auth.models.User``` es una subclase de ```django.db.models.Model``` y corresponde al modelo básico de un usuario de *Django*. A partir de este momento se hará referencia a esta clase como ```User```.
### El atributo de clase ```User.objects```.
Al ser una subclase de ```django.db.models.Model```, el modelo ```User``` cuenta con el atributo de clase ```objects```, el cual puede realizar operaciones de consulta, altas y bajas de instancias mediante métodos como:
* ```all()```.
* ```get()```.
* ```filter()```.
#### El método ```User.objects.create_user()```.
Este método permite crear la cuenta de un usuario y guardarla en la base de datos.
```
User.objects.create_user('<nombre de usuario>', '<correo electrónico>', '<contraseña>')
```
### Atributos de las instancias de ```User```.
Las instancias del modelo ```User``` cuenta con los siguientes atributos:
* ```id```, el cual corresponde a un identificador numérico que *Django* le otorga al modelo cuando es instanciado.
* ```username```, el cual corresponde al nombre de usuario. Este atributo es obligatorio para crear al modelo.
* ```password```, el cual corresponde a la contraseña del usuario. Este atributo es obligatorio para crear al modelo.
* ```first_name```, el cual corresponde al nombre del usuario.
* ```last_name```, el cual corresponde al apellido del usuario.
* ```email```, el cual corresponde a la dirección de correo electrónico del usuario.
* ```is_superuser``` es un valor de tipo ```bool``` que indica si el usuario es in superusuario.
* ```is_staff``` es un valor de tipo ```bool``` que indica si el usuario es parte del "staff" de la organización.
* ```is_active``` es un valor de tipo ```bool``` que indica si el usuario está activo.
* ```last_login``` es un valor de tipo ```datetime.datetime```con la fecha y hora del último acceso del usuario.
* ```date_joined``` es un valor de tipo ```datetime.datetime```con la fecha y hora en la que el usuario fue creado.
## Ejemplo ilustrativo de creación de un suario desde el shell.
``` ipython
In [1]: from django.contrib.auth.models import User
In [2]: User.objects.all()
Out[2]: <QuerySet []>
In [3]: User.objects.create_user('josech', 'josech@gmail.com', '0p3n5t4ck')
Out[3]: <User: josech>
In [4]: User.objects.all()
Out[4]: <QuerySet [<User: josech>]>
In [5]: usuario = User.objects.all()[0]
In [5]: usuario
Out[5]: <User: josech>
In [6]: usuario.is_superuser
Out[6]: False
In [7]: usuario.is_superuser=True
```
## Creación de un superusuario con el script ```manage.py```.
Para poder crear un superusuario se utiliza el siguiente comando desde una terminal localizada en el directorio del proyecto.
```
python manage.py createsuperuser --email="<correo electronico>" --user="<usuario>"
```
**Ejemplo:**
```
python manage.py createsuperuser --email="falso@pythonista.io" --user="admin"
```
## Tablas asociadas a usuarios y grupos en la base de datos.
La gestión de usuarios está ligada a varias tablas de la base de datos asociada a *Django*, la cuales fueron creadas al ejecutar el comando ```manage.py migrate```.
Estas tablas tienen el prefijo ```auth_```:
* ```auth_user```.
* ```auth_group```.
* ```auth_permission```.
* ```auth_group_permissions```.
* ```auth_user_groups```.
* ```auth_user_user_permissions```.
## El módulo ```django.contrib.auth.urls```.
Este módulo define los siguientes patrones de *URL* predefinidos para gestionar los accesos de un usuario:
* ```'login/'``` ingreso a la sesión de un usuario.
* ```'logout/'``` salida de la sesión de un usuario.
* ```'password_change/'```, la cual permite cambiar la contraseña de un usuario.
* ```'password_reset/'```, la cual permite recuperar una contraseña.
* ```'reset/<uid64>/<token>'```, el cual permite reiniciar a un usuario.
* ```'password_change/done/'```.
* ```'password_reset/done/'```.
* ```'reset/done/'```.
Para que estos patrones puedan ser accedidos es necesario incluir un patrón en el objeto ```urlpatterns``` del script ```urls.py``` del proyecto de la siguiente manera:
```
path('<ruta base>', include('django.contrib.auth.urls'))
```
* Donde ```<ruta base>``` corresponde a la ruta desde la cual se accederá a cada *URL* definida por ```django.contrib.auth.urls```.
Con excepción de ```'login/'``` cada función de vista relacionada con cada patrón cuenta con una plantilla predefinida. Sin embargo, es posible crear plantillas a la medida.
Por defecto las *URLs* de este módulo buscarán las plantillas correspondientes en el directorio ```registration/```, el cual se encuentra a su vez en el directorio definido para las plantillas del proyecto.
### Configuración de la función de la *URL* ```login/```
En el caso de la *URL* ligada a la regla ```'login/'```, es necesario crear una plantilla en la ruta ```registration/login.html```, la cual permita enviar el usuario y la contraseña.
Un ejemplo de esta plantilla puede ser consultado en https://docs.djangoproject.com/en/2.2/topics/auth/default/#all-authentication-views.
### Redireccionamiento en caso de un ingreso correcto.
Cuando las credenciales ingresadas y enviadas son correctas, *Django* redirigirá al navegador a la ruta ```'/accounts/profile/'```.
Para indicar un redireccionamiento a una *URL* distinta es necesario definirla con el nombre ```LOGIN_REDIRECT_URL``` en el script ```settings.py``` del proyecto.
```
LOGIN_REDIRECT_URL = '<ruta>'
```
## Ejemplo ilustrativo.
### Definición del patrón de *URLs*.
El archivo ```src/21/urls.py``` contiene la siguiente definición de ```urlpatterns``` ligando al módulo ``` django.contrib.auth.urls``` con la ruta ```usuarios/```.
```python
urlpatterns = [path('admin/', admin.site.urls),
path('main/', include('main.urls')),
path('api/', include('api.urls')),
path('accounts/', include('django.contrib.auth.urls')),
]
```
* Se sustituirá el script ```tutorial/tutorial/urls.py``` con ```src/22/urls.py```.
* Para las plataformas Linux y MacOS X.
```
!cp src/21/urls.py tutorial/tutorial/urls.py
```
* Para la plataforma Windows.
```
!copy src\21\urls.py tutorial\tutorial\urls.py
```
* La siguiente celda desplegará el archivo sustituido.
```
%pycat tutorial/tutorial/urls.py
```
### Creación del subdirectorio ```registration/```.
* La siguiente celda creará el directorio ```registration/``` dentro del directorio ```tutorial/templates```.
```
%mkdir tutorial/templates/registration
%mkdir tutorial\templates\registration
```
### La plantilla ```registration/login.html```.
La plantilla ```src/22/login.html``` contiene el siguiente código:
``` html
{% extends "base.html" %}
{% block cuerpo %}
{% if form.errors %}
<p>Ingresó una contraseña incorrecta. Vuelva a intentar.</p>
{% endif %}
{% if next %}
{% if user.is_authenticated %}
<p>Su cuenta no tiene acceso a esta página. Vuelva a intentar con un usuario autorizado.</p>
{% else %}
<p>Por favor ingrese con usuario con los permisos adecuados.</p>
{% endif %}
{% endif %}
<form method="post" action="{% url 'login' %}">
{% csrf_token %}
<table>
<tr>
<td>{{ form.username.label_tag }}</td>
<td>{{ form.username }}</td>
</tr>
<tr>
<td>{{ form.password.label_tag }}</td>
<td>{{ form.password }}</td>
</tr>
</table>
<input type="submit" value="login" />
<input type="hidden" name="next" value="{{ next }}" />
</form>
{# Assumes you setup the password_reset view in your URLconf #}
<p><a href="{% url 'password_reset' %}">¿Perdió su contraseña?</a></p>
{% endblock %}
```
* A continuacion se copiará al script ```src/21/urls.py``` en el directorio ```tutorial/templates/registration/```.
* Para las plataformas Linux y MacOS X.
```
%cp src/21/login.html tutorial/templates/registration/
```
* Para la plataforma Windows.
```
%copy src\21\login.html tutorial\templates\registration\
```
* La siguiente celda desplegará el contenido del script ```tutorial/templates/registration/login.html```
```
%pycat tutorial/templates/registration/login.html
```
### Configuración del redireccionamiento de ingreso exitoso.
El script ```src/21/settings.py``` define a ```/api/``` como la ruta a la que el navegador será redirigido en caso de que el acceso sea exitoso.
``` python
LOGIN_REDIRECT_URL = '/api/'
```
* A continuación se sustituirá al script ```tutorial/tutorial/settings.py``` con ```src/21/settings.py```.
* Para las plataformas Linux y MacOS X.
```
%cp src/21/settings.py tutorial/tutorial/settings.py
```
* Para la plataforma Windows.
```
%copy src\21\settings.py tutorial\tutorial\settings.py
```
La siguiente celda mostrará el resultado de la sustitución en ```tutorial/tutorial/settings.py```.
```
%pycat tutorial/tutorial/settings.py
```
## Arranque desde una terminal.
* Desde una terminal diríjase al directorio ```tutorial```, en el cual se encuentra el script ```manage.py```.
* Ejecute el siguiente comando:
```
python manage.py runserver 0.0.0.0:8000
```
**Nota:**
Es necesario que el firewall de su equipo esté configurado para transmitir desde el puerto ```8000```.
## Resultados.
http://localhost:8000/accounts/login/
<p style="text-align: center"><a rel="license"
href="http://creativecommons.org/licenses/by/4.0/"><img alt="Licencia Creative Commons" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/80x15.png" /></a><br />Esta obra está bajo una <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Licencia Creative Commons Atribución 4.0 Internacional</a>.</p>
<p style="text-align: center">© José Luis Chiquete Valdivieso. 2020.</p>
| github_jupyter |
```
!pip install d2l==0.17.2
```
# Concise Implementation of RNNs
Now we will see how to implement the same language model more efficiently
using functions provided by high-level PyTorch APIs.
We begin as before by reading the time machine dataset.
```
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
```
## [**Defining the Model**]
PyTorch APIs provide implementations of recurrent neural networks.
We construct the recurrent neural network layer `rnn_layer` with a single hidden layer and 256 hidden units.
```
num_hiddens = 256
rnn_layer = nn.RNN(len(vocab), num_hiddens)
```
We (**use a tensor to initialize the hidden state**),
whose shape is
(number of hidden layers, batch size, number of hidden units).
```
state = torch.zeros((1, batch_size, num_hiddens))
state.shape
```
[**With a hidden state and an input,
we can compute the output with
the updated hidden state.**]
Here, it should be emphasized that
the "output" (`Y`) of `rnn_layer`
does *not* involve computation of output layers:
it refers to
the hidden state at *each* time step,
and they can be used as the input
to the subsequent output layer.
```
X = torch.rand(size=(num_steps, batch_size, len(vocab)))
Y, state_new = rnn_layer(X, state)
Y.shape, state_new.shape
```
Similarly,
[**we define an `RNNModel` class
for a complete RNN model.**]
Note that `rnn_layer` only contains the hidden recurrent layers, we need to create a separate output layer.
```
class RNNModel(nn.Module):
    """RNN language model: a recurrent core plus a vocabulary-sized linear head.

    `rnn_layer` supplies only the hidden recurrent layers; this wrapper adds
    one-hot input encoding and the output projection to vocabulary logits.
    """

    def __init__(self, rnn_layer, vocab_size, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        self.rnn = rnn_layer
        self.vocab_size = vocab_size
        self.num_hiddens = self.rnn.hidden_size
        # A bidirectional core doubles the feature width fed to the head.
        self.num_directions = 2 if self.rnn.bidirectional else 1
        self.linear = nn.Linear(self.num_hiddens * self.num_directions,
                                self.vocab_size)

    def forward(self, inputs, state):
        """Run one forward pass.

        `inputs` is (batch_size, num_steps) of token indices; returns logits of
        shape (num_steps * batch_size, vocab_size) and the updated state.
        """
        # One-hot encode as (num_steps, batch_size, vocab_size) float input.
        encoded = F.one_hot(inputs.T.long(), self.vocab_size).to(torch.float32)
        hidden_seq, state = self.rnn(encoded, state)
        # Flatten time and batch so the head maps every step to vocab logits.
        logits = self.linear(hidden_seq.reshape((-1, hidden_seq.shape[-1])))
        return logits, state

    def begin_state(self, device, batch_size=1):
        """Return a zeroed initial state matching the recurrent core."""
        shape = (self.num_directions * self.rnn.num_layers,
                 batch_size, self.num_hiddens)
        if isinstance(self.rnn, nn.LSTM):
            # `nn.LSTM` carries a (hidden state, cell state) tuple.
            return (torch.zeros(shape, device=device),
                    torch.zeros(shape, device=device))
        # `nn.RNN` / `nn.GRU` take a single hidden-state tensor.
        return torch.zeros(shape, device=device)
```
## Training and Predicting
Before training the model, let us [**make a prediction with a model that has random weights.**]
```
device = d2l.try_gpu()
net = RNNModel(rnn_layer, vocab_size=len(vocab))
net = net.to(device)
d2l.predict_ch8('time traveller', 10, net, vocab, device)
```
As is quite obvious, this model does not work at all. Next, we call `train_ch8` with the same hyperparameters defined before and [**train our model with PyTorch APIs**].
```
num_epochs, lr = 500, 1
d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)
```
Compared with the implementation from scratch, this model achieves comparable perplexity,
albeit within a shorter period of time, due to the code being more optimized by
high-level PyTorch APIs.
| github_jupyter |
# Soccerstats Predictions v0.5
The changelog from v0.4:
* Try to implement the *MulticlassClassificationEvaluator* evaluator for the random-forest model.
* Use the *accuracy* metric to evaluate the random-forest model.
## A. Data Cleaning & Preparation
### 1. Read csv file
```
# load and cache data
stat_df = sqlContext.read\
.format("com.databricks.spark.csv")\
.options(header = True)\
.load("data/teamFixtures.csv")\
.cache()
# count hyphen nulls ("-") per column
def count_hyphen_null(df, col):
    """Return how many rows of *df* hold the '-' placeholder in column *col*."""
    hyphen_mask = df[col] == "-"
    return df.where(hyphen_mask).count()
# count cols with "-" ie. null
total_rows = stat_df.count()
hyphen_rows = count_hyphen_null(stat_df, "gameFtScore")
to_remove = total_rows - hyphen_rows
print("Total rows: {}".format(total_rows))
print("Hyphen nulls: {}".format(hyphen_rows))
print("Rows to remove: {}".format(to_remove))
```
### 2. Filter-out "gameFtScore" column values
```
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType
# replace non-"-" values with null
nullify_ft_column = udf(
lambda row_value: None if row_value != "-" else row_value,
StringType()
)
stat_df = (stat_df.withColumn("gameFtScore", nullify_ft_column(stat_df.gameFtScore)))
# drop Null values
stat_df = stat_df.dropna()
stat_df.select("gameFtScore").show(5)
print("Total rows: {}".format(stat_df.count()))
```
### 3. Write-out new dataframe to Json
```
# optional: save to file
# stat_df.coalesce(1).write.format('json').save('sstats_fixtures.json')
```
### 4. Read fixtures Json to dataframe
```
fx_df = spark.read.json('data/fixtures1.json')
fx_df.printSchema()
```
### 5. Encode "fixture_id" on stat_df dataframe
```
import hashlib
from pyspark.sql.functions import array
def encode_string(value):
    """Return the hex SHA-1 digest of *value* (encoded as UTF-8)."""
    digest = hashlib.sha1(value.encode("utf-8"))
    return digest.hexdigest()
# add an encoded col to "stat_df"; fixture_id
fxcol_df = udf(
lambda row_value: encode_string(u"".join([x for x in row_value])),
StringType()
)
stat_df = (stat_df.withColumn("fixture_id", fxcol_df(array(
"leagueName",
"leagueDivisionName",
"gamePlayDate",
"gameHomeTeamName",
"gameAwayTeamName"
))))
# display some encoded fixtures
stat_df.select("fixture_id").show(5, False)
```
### 6. Concat the two dataframes: "stat_df" and "fx_df"
```
from pyspark.sql.functions import col
# use "left-outer-join" to concat
full_df = stat_df.alias("a")\
.join(fx_df, stat_df.fixture_id == fx_df.fixture_id, "left_outer")\
.select(*[col("a."+c) for c in stat_df.columns] + [fx_df.ft_score])
full_df.select("leagueName", "leagueDivisionName", "gamePlayDate", "gameHomeTeamName", "gameAwayTeamName", "ft_score").show(5, False)
```
### 7. Assess damage on "ft_score " nulls
```
# count nulls per column
def count_null(df, col):
    """Return how many rows of *df* are null in column *col*."""
    null_rows = df.where(df[col].isNull())
    return null_rows.count()
print("Total rows: {}".format(full_df.count()))
print("Ft_score nulls: {}".format(count_null(full_df, "ft_score")))
# drop null values in ft_Score
full_df = full_df.dropna()
print("Total rows: {}".format(full_df.count()))
print("Ft_score nulls: {}".format(count_null(full_df, "ft_score")))
```
## B. Machine Learning
```
# print dataframe schema
# full_df.printSchema()
```
### 1. Clean data
```
# drop unnecessary columns
ml_df = full_df.drop(
"gameID", "gamePlayDate", "gamePlayTime", "gameHomeTeamName",
"gameAwayTeamName", "gameHomeTeamID","gameAwayTeamID", "leagueName",
"leagueDivisionName", "gameFtScore", "fixture_id"
)
# separate col types
all_cols = ml_df.columns
all_cols.remove(all_cols[-1])
# cast types to columns: string to double
ml_df = ml_df.select(*[col(c).cast("double").alias(c) for c in all_cols] + [ml_df.ft_score])
ml_df.printSchema()
# add extra column; over/under
over_under_udf = udf(
lambda r: "over" if (int(r.split("-")[0]) + int(r.split("-")[1])) > 2 else "under",
StringType()
)
ml_df = (ml_df.withColumn("over_under", over_under_udf(ml_df.ft_score)))
ml_df.select("ft_score", "over_under").show(5)
# drop "ft_score"
ml_df = ml_df.drop("ft_score")
```
### 2. Some featurization
```
from pyspark.ml.feature import StringIndexer
from pyspark.sql import Row
# index the label; "over_under"
si = StringIndexer(inputCol = "over_under", outputCol = "over_under_indx")
df_indexed = si\
.fit(ml_df)\
.transform(ml_df)\
.drop("over_under")\
.withColumnRenamed("over_under_indx", "over_under")
from pyspark.ml.feature import Normalizer
from pyspark.sql.functions import mean, stddev
# normalize feature columns; [(x - mean)/std_dev]
def normalize_col(df, cols):
    """Standardize the given columns of *df* to zero mean and unit variance.

    For each column ``c`` in *cols*, appends a new column ``c + "_norm"``
    holding ``(x - mean) / std_dev``; the original columns are kept.

    Returns:
        (df, averages, std_devs): the augmented dataframe plus the
        per-column means and standard deviations (Rows) used for the
        scaling, so the same transform can be replayed on unseen data.
    """
    # Build all aggregate expressions first so every statistic is computed
    # in a single pass over the dataframe per agg() call.
    # NOTE: the loop variable is named `c` (not `col`) to avoid shadowing
    # `pyspark.sql.functions.col` imported earlier in this notebook.
    aggExpr = []
    aggStd = []
    for c in cols:
        aggExpr.append(mean(df[c]).alias(c))
        aggStd.append(stddev(df[c]).alias(c + "_stddev"))
    averages = df.agg(*aggExpr).collect()[0]
    std_devs = df.agg(*aggStd).collect()[0]
    # Standardize: add one "<col>_norm" column per input column.
    for c in cols:
        df = df.withColumn(c + "_norm", ((df[c] - averages[c]) / std_devs[c + "_stddev"]))
    return df, averages, std_devs
# normalize dataframe
df_indexed, averages, std_devs = normalize_col(df_indexed, all_cols)
# display some normalized column
df_indexed.select("HTS_teamPosition", "HTS_teamPosition_norm").show()
from pyspark.ml.linalg import Vectors
feature_cols = [col+"_norm" for col in all_cols]
df_indexed = df_indexed[feature_cols + ["over_under"]]
# vectorize labels and features
row = Row("label", "features")
label_fts = df_indexed.rdd.map(
lambda r: (row(r[-1], Vectors.dense(r[:-1])))
).toDF()
label_fts.show(5)
# split train/test values
train, test = label_fts.randomSplit([0.8, 0.2])
# split train/validate values
train, validate = train.randomSplit([0.9, 0.1])
print("Train values: '{}'".format(train.count()))
print("Test values: '{}'".format(test.count()))
print("Validate values: '{}'".format(validate.count()))
```
### 3. Apply some ML models
```
from pyspark.ml.classification import LogisticRegression, DecisionTreeClassifier, RandomForestClassifier
from pyspark.ml.evaluation import BinaryClassificationEvaluator
# 1. Logistic regression model
logr = LogisticRegression(
maxIter = 100,
regParam = 0.05,
labelCol="label"
)
# 2. decision tree model
d_tree = DecisionTreeClassifier(
maxDepth = 10,
labelCol = "label"
)
# 3. random forest model
r_forest = RandomForestClassifier(
numTrees = 100,
labelCol = "label"
)
from time import time
# start timer
start_time = time()
# fit models
lr_model = logr.fit(train)
dt_model = d_tree.fit(train)
rf_model = r_forest.fit(train)
print("Training time taken (min): {}".format((time() - start_time)/60))
# model evaluator
def testModel(model, df):
    """Score *model* on *df* using the binary-classification evaluator."""
    predictions = model.transform(df)
    scorer = BinaryClassificationEvaluator(labelCol="label")
    return scorer.evaluate(predictions)
# Evaluate each fitted model on the held-out test set.
models = {
    "Logistic regression": lr_model,
    "Decision tree": dt_model,
    "Random forest": rf_model
}
# Use .items() and print() so this cell runs under Python 3; the original
# used the Python 2-only dict.iteritems() and print statement, which are
# syntax/attribute errors in Python 3 (the rest of the notebook is py3).
modelPerf = {k: testModel(v, test) for k, v in models.items()}
print("Accuracy:", modelPerf)
```
### 4. Try some hyper-parameter tuning
```
# from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
# from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# # tune best performing model: random forest
# paramGrid = ParamGridBuilder()\
# .addGrid(r_forest.maxDepth, [5, 10, 15, 20])\
# .addGrid(r_forest.numTrees, [30, 60, 90, 120])\
# .build()
# # define evaluation metric
# evaluator = MulticlassClassificationEvaluator(
# labelCol="label",
# predictionCol = "prediction",
# metricName="accuracy"
# )
# # start tuning
# cv = CrossValidator(
# estimator=r_forest,
# estimatorParamMaps=paramGrid,
# evaluator=evaluator,
# numFolds=5
# )
# # start timer
# cv_start_time = time()
# # fit tuned model
# cvModel = cv.fit(train)
# # calculate time taken to tune prameters
# print "Hyper-param tuning time taken (min): ", (time() - cv_start_time)/60
# # accuracy after tuning
# train_pred = cvModel.transform(train)
# test_pred = cvModel.transform(test)
# print("Random forest accuracy (train): {}".format(evaluator.evaluate(train_pred)))
# print("Random forest accuracy (test): {}".format(evaluator.evaluate(test_pred)))
```
| github_jupyter |
```
import os
import glob
import sys
import requests
import copy
import json
import pickle
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
sys.path.append(os.path.join(os.pardir,"src"))
import build_dataset as ds
import cg_optimisers as cg_opt
import plotter
```
# RSMI optimisation for the dimer model
## Download sample dataset
```
url = 'https://polybox.ethz.ch/index.php/s/bUp9a5qZWuLGXMb/download'
filename = 'configs_intdimer2d_square_L64_T15.000.npy'
data_dir = os.path.join(os.pardir, 'data')
# Download the sample dataset unless it is already cached locally.
if os.path.isfile(os.path.join(data_dir, filename)):
    print('Existing dataset found.')
else:
    print('No existing dataset found.')
    if not os.path.isdir(data_dir):
        os.mkdir(data_dir)
        print('Created the data directory.')
    print('Downloading data...')
    r = requests.get(url, allow_redirects=True)
    # Use a context manager so the file handle is flushed and closed even
    # if the write fails (the original leaked the handle from open()).
    with open(os.path.join(data_dir, filename), 'wb') as f:
        f.write(r.content)
    print('Data downloaded into /coarsegrainer/data/.')
```
## Enter system parameters
```
data_params = {
'model': 'intdimer2d',
'lattice_type': 'square',
'L': 64,
'T': 15.000,
'N_samples': 28800,
'dimension': 2,
}
generator=ds.dataset(**data_params)
print(data_params)
```
## Enter optimisation parameters
```
CG_params = {'init_temperature': 0.75,
'min_temperature': 0.1,
'relaxation_rate': 0.0001, # was 0.0005 for above BKT 0.002 for below BKT
'Nq': None,
'conv_activation': None,
'num_hiddens': 2,
'h_embed': True,
'use_probs': False,
'use_logits': True
}
ll = CG_params['ll'] = (8,8)
critic_params = {
'layers': 2,
'embed_dim': 8,
'hidden_dim': 32,
'activation': 'relu',
}
opt_params = {
"batch_size": 500, # was 800 for larger buffers
"iterations": 250, # was 400 for above BKT, 25 for below BKT
"shuffle": 100000,
"learning_rate": 9e-3 # was 4e-3
}
index = (10, 10) # index of the visible patch to be coarse-grained
buffer_size = 4
env_size = 4
V, E = generator.rsmi_data(index, ll, buffer_size=buffer_size, cap=ll[0]+2*(buffer_size+env_size))
```
## Perform the optimisation and plot results
```
estimates, _, filters, _ = cg_opt.train_RSMI_optimiser(CG_params, critic_params, opt_params, data_params,
E=E, V=V)
print('RSMI estimate is ', cg_opt.RSMI_estimate(estimates, ema_span=100))
plotter.plot_fancy_rsmimax(estimates, filters, opt_params, CG_params,
generator, N_samples=data_params['N_samples'],
mi_bound='InfoNCE', filter_lim=1.3, EMA_span=80, save=False,
series_skip=data_params['N_samples']//(opt_params['batch_size']*4)*opt_params['iterations'],
interpolation='hanning', cmap='RdBu')
print(CG_params)
print(critic_params)
print(opt_params)
```
| github_jupyter |
# Deep Q-learning
In this notebook, we'll build a neural network that can learn to play games through reinforcement learning. More specifically, we'll use Q-learning to train an agent to play a game called [Cart-Pole](https://gym.openai.com/envs/CartPole-v0). In this game, a freely swinging pole is attached to a cart. The cart can move to the left and right, and the goal is to keep the pole upright as long as possible.

We can simulate this game using [OpenAI Gym](https://gym.openai.com/). First, let's check out how OpenAI Gym works. Then, we'll get into training an agent to play the Cart-Pole game.
```
import gym
import tensorflow as tf
import numpy as np
```
>**Note:** Make sure you have OpenAI Gym cloned into the same directory with this notebook. I've included `gym` as a submodule, so you can run `git submodule update --init --recursive` to pull the contents into the `gym` repo.
```
# Create the Cart-Pole game environment
env = gym.make('CartPole-v0')
```
We interact with the simulation through `env`. To show the simulation running, you can use `env.render()` to render one frame. Passing in an action as an integer to `env.step` will generate the next step in the simulation. You can see how many actions are possible from `env.action_space` and to get a random action you can use `env.action_space.sample()`. This is general to all Gym games. In the Cart-Pole game, there are two possible actions, moving the cart left or right. So there are two actions we can take, encoded as 0 and 1.
Run the code below to watch the simulation run.
```
env.reset()
rewards = []
for _ in range(100):
env.render()
state, reward, done, info = env.step(env.action_space.sample()) # take a random action
rewards.append(reward)
if done:
rewards = []
env.reset()
#Test
env.close()
```
To shut the window showing the simulation, use `env.close()`.
If you ran the simulation above, we can look at the rewards:
```
print(rewards[-20:])
```
The game resets after the pole has fallen past a certain angle. For each frame while the simulation is running, it returns a reward of 1.0. The longer the game runs, the more reward we get. Then, our network's goal is to maximize the reward by keeping the pole vertical. It will do this by moving the cart to the left and the right.
## Q-Network
We train our Q-learning agent using the Bellman Equation:
$$
Q(s, a) = r + \gamma \max{Q(s', a')}
$$
where $s$ is a state, $a$ is an action, and $s'$ is the next state from state $s$ and action $a$.
Before we used this equation to learn values for a Q-_table_. However, for this game there are a huge number of states available. The state has four values: the position and velocity of the cart, and the position and velocity of the pole. These are all real-valued numbers, so ignoring floating point precisions, you practically have infinite states. Instead of using a table then, we'll replace it with a neural network that will approximate the Q-table lookup function.
<img src="assets/deep-q-learning.png" width=450px>
Now, our Q value, $Q(s, a)$ is calculated by passing in a state to the network. The output will be Q-values for each available action, with fully connected hidden layers.
<img src="assets/q-network.png" width=550px>
As I showed before, we can define our targets for training as $\hat{Q}(s,a) = r + \gamma \max{Q(s', a')}$. Then we update the weights by minimizing $(\hat{Q}(s,a) - Q(s,a))^2$.
For this Cart-Pole game, we have four inputs, one for each value in the state, and two outputs, one for each action. To get $\hat{Q}$, we'll first choose an action, then simulate the game using that action. This will get us the next state, $s'$, and the reward. With that, we can calculate $\hat{Q}$ then pass it back into the $Q$ network to run the optimizer and update the weights.
Below is my implementation of the Q-network. I used two fully connected layers with ReLU activations. Two seems to be good enough, three might be better. Feel free to try it out.
```
class QNetwork:
    """TF1 graph-mode Q-value approximator for the Cart-Pole agent.

    Builds placeholders for states, actions and target Q-values, two ReLU
    hidden layers, a linear output layer (one Q-value per action), and an
    Adam training op minimizing the squared TD error (targetQ - Q)^2.

    NOTE(review): this uses TensorFlow 1.x APIs (`tf.placeholder`,
    `tf.contrib.layers`, `tf.train.AdamOptimizer`) and will not run under
    TensorFlow 2.x without `tf.compat.v1` shims.
    """
    def __init__(self, learning_rate=0.01, state_size=4,
                 action_size=2, hidden_size=10,
                 name='QNetwork'):
        """Assemble the network graph inside the variable scope *name*.

        Args:
            learning_rate: Adam step size for the training op.
            state_size: dimensionality of the observation vector (4 for
                Cart-Pole: cart position/velocity, pole angle/velocity).
            action_size: number of discrete actions (2: left or right).
            hidden_size: units in each fully connected hidden layer.
            name: variable scope, so several networks can share a graph.
        """
        # state inputs to the Q-network
        with tf.variable_scope(name):
            self.inputs_ = tf.placeholder(tf.float32, [None, state_size], name='inputs')
            # One hot encode the actions to later choose the Q-value for the action
            self.actions_ = tf.placeholder(tf.int32, [None], name='actions')
            one_hot_actions = tf.one_hot(self.actions_, action_size)
            # Target Q values for training
            self.targetQs_ = tf.placeholder(tf.float32, [None], name='target')
            # ReLU hidden layers
            self.fc1 = tf.contrib.layers.fully_connected(self.inputs_, hidden_size)
            self.fc2 = tf.contrib.layers.fully_connected(self.fc1, hidden_size)
            # Linear output layer
            self.output = tf.contrib.layers.fully_connected(self.fc2, action_size,
                                                            activation_fn=None)
            ### Train with loss (targetQ - Q)^2
            # output has length 2, for two actions. This next line chooses
            # one value from output (per row) according to the one-hot encoded actions.
            self.Q = tf.reduce_sum(tf.multiply(self.output, one_hot_actions), axis=1)
            self.loss = tf.reduce_mean(tf.square(self.targetQs_ - self.Q))
            self.opt = tf.train.AdamOptimizer(learning_rate).minimize(self.loss)
```
## Experience replay
Reinforcement learning algorithms can have stability issues due to correlations between states. To reduce correlations when training, we can store the agent's experiences and later draw a random mini-batch of those experiences to train on.
Here, we'll create a `Memory` object that will store our experiences, our transitions $<s, a, r, s'>$. This memory will have a maximum capacity, so we can keep newer experiences in memory while getting rid of older experiences. Then, we'll sample a random mini-batch of transitions $<s, a, r, s'>$ and train on those.
Below, I've implemented a `Memory` object. If you're unfamiliar with `deque`, this is a double-ended queue. You can think of it like a tube open on both sides. You can put objects in either side of the tube. But if it's full, adding anything more will push an object out the other side. This is a great data structure to use for the memory buffer.
```
from collections import deque
class Memory():
    """Fixed-capacity experience-replay buffer.

    Stores transitions (state, action, reward, next_state) in a deque;
    once full, appending a new experience evicts the oldest one.
    """
    def __init__(self, max_size = 1000):
        self.buffer = deque(maxlen=max_size)

    def add(self, experience):
        """Append one transition, evicting the oldest if at capacity."""
        self.buffer.append(experience)

    def sample(self, batch_size):
        """Return *batch_size* distinct transitions drawn uniformly."""
        chosen = np.random.choice(np.arange(len(self.buffer)),
                                  size=batch_size,
                                  replace=False)
        return [self.buffer[i] for i in chosen]
```
## Exploration - Exploitation
To learn about the environment and rules of the game, the agent needs to explore by taking random actions. We'll do this by choosing a random action with some probability $\epsilon$ (epsilon). That is, with some probability $\epsilon$ the agent will make a random action and with probability $1 - \epsilon$, the agent will choose an action from $Q(s,a)$. This is called an **$\epsilon$-greedy policy**.
At first, the agent needs to do a lot of exploring. Later when it has learned more, the agent can favor choosing actions based on what it has learned. This is called _exploitation_. We'll set it up so the agent is more likely to explore early in training, then more likely to exploit later in training.
## Q-Learning training algorithm
Putting all this together, we can list out the algorithm we'll use to train the network. We'll train the network in _episodes_. One *episode* is one simulation of the game. For this game, the goal is to keep the pole upright for 195 frames. So we can start a new episode once meeting that goal. The game ends if the pole tilts over too far, or if the cart moves too far the left or right. When a game ends, we'll start a new episode. Now, to train the agent:
* Initialize the memory $D$
* Initialize the action-value network $Q$ with random weights
* **For** episode = 1, $M$ **do**
* **For** $t$, $T$ **do**
* With probability $\epsilon$ select a random action $a_t$, otherwise select $a_t = \mathrm{argmax}_a Q(s,a)$
* Execute action $a_t$ in simulator and observe reward $r_{t+1}$ and new state $s_{t+1}$
* Store transition $<s_t, a_t, r_{t+1}, s_{t+1}>$ in memory $D$
* Sample random mini-batch from $D$: $<s_j, a_j, r_j, s'_j>$
* Set $\hat{Q}_j = r_j$ if the episode ends at $j+1$, otherwise set $\hat{Q}_j = r_j + \gamma \max_{a'}{Q(s'_j, a')}$
* Make a gradient descent step with loss $(\hat{Q}_j - Q(s_j, a_j))^2$
* **endfor**
* **endfor**
## Hyperparameters
One of the more difficult aspects of reinforcement learning is the large number of hyperparameters. Not only are we tuning the network, but we're tuning the simulation.
```
train_episodes = 1000 # max number of episodes to learn from
max_steps = 200 # max steps in an episode
gamma = 0.99 # future reward discount
# Exploration parameters
explore_start = 1.0 # exploration probability at start
explore_stop = 0.01 # minimum exploration probability
decay_rate = 0.0001 # exponential decay rate for exploration prob
# Network parameters
hidden_size = 64 # number of units in each Q-network hidden layer
learning_rate = 0.0001 # Q-network learning rate
# Memory parameters
memory_size = 10000 # memory capacity
batch_size = 20 # experience mini-batch size
pretrain_length = batch_size # number experiences to pretrain the memory
tf.reset_default_graph()
mainQN = QNetwork(name='main', hidden_size=hidden_size, learning_rate=learning_rate)
```
## Populate the experience memory
Here I'm re-initializing the simulation and pre-populating the memory. The agent is taking random actions and storing the transitions in memory. This will help the agent with exploring the game.
```
# Initialize the simulation
env.reset()
# Take one random step to get the pole and cart moving
state, reward, done, _ = env.step(env.action_space.sample())
memory = Memory(max_size=memory_size)
# Make a bunch of random actions and store the experiences
for ii in range(pretrain_length):
# Uncomment the line below to watch the simulation
# env.render()
# Make a random action
action = env.action_space.sample()
next_state, reward, done, _ = env.step(action)
if done:
# The simulation fails so no next state
next_state = np.zeros(state.shape)
# Add experience to memory
memory.add((state, action, reward, next_state))
# Start new episode
env.reset()
# Take one random step to get the pole and cart moving
state, reward, done, _ = env.step(env.action_space.sample())
else:
# Add experience to memory
memory.add((state, action, reward, next_state))
state = next_state
```
## Training
Below we'll train our agent. If you want to watch it train, uncomment the `env.render()` line. This is slow because it's rendering the frames slower than the network can train. But, it's cool to watch the agent get better at the game.
```
# Now train with experiences
saver = tf.train.Saver()
rewards_list = []
with tf.Session() as sess:
# Initialize variables
sess.run(tf.global_variables_initializer())
step = 0
for ep in range(1, train_episodes):
total_reward = 0
t = 0
while t < max_steps:
step += 1
# Uncomment this next line to watch the training
# env.render()
# Explore or Exploit
explore_p = explore_stop + (explore_start - explore_stop)*np.exp(-decay_rate*step)
if explore_p > np.random.rand():
# Make a random action
action = env.action_space.sample()
else:
# Get action from Q-network
feed = {mainQN.inputs_: state.reshape((1, *state.shape))}
Qs = sess.run(mainQN.output, feed_dict=feed)
action = np.argmax(Qs)
# Take action, get new state and reward
next_state, reward, done, _ = env.step(action)
total_reward += reward
if done:
# the episode ends so no next state
next_state = np.zeros(state.shape)
t = max_steps
print('Episode: {}'.format(ep),
'Total reward: {}'.format(total_reward),
'Training loss: {:.4f}'.format(loss),
'Explore P: {:.4f}'.format(explore_p))
rewards_list.append((ep, total_reward))
# Add experience to memory
memory.add((state, action, reward, next_state))
# Start new episode
env.reset()
# Take one random step to get the pole and cart moving
state, reward, done, _ = env.step(env.action_space.sample())
else:
# Add experience to memory
memory.add((state, action, reward, next_state))
state = next_state
t += 1
# Sample mini-batch from memory
batch = memory.sample(batch_size)
states = np.array([each[0] for each in batch])
actions = np.array([each[1] for each in batch])
rewards = np.array([each[2] for each in batch])
next_states = np.array([each[3] for each in batch])
# Train network
target_Qs = sess.run(mainQN.output, feed_dict={mainQN.inputs_: next_states})
# Set target_Qs to 0 for states where episode ends
episode_ends = (next_states == np.zeros(states[0].shape)).all(axis=1)
target_Qs[episode_ends] = (0, 0)
targets = rewards + gamma * np.max(target_Qs, axis=1)
loss, _ = sess.run([mainQN.loss, mainQN.opt],
feed_dict={mainQN.inputs_: states,
mainQN.targetQs_: targets,
mainQN.actions_: actions})
saver.save(sess, "checkpoints/cartpole.ckpt")
```
## Visualizing training
Below I'll plot the total rewards for each episode. I'm plotting the rolling average too, in blue.
```
%matplotlib inline
import matplotlib.pyplot as plt
def running_mean(x, N):
    """Return the length-N rolling average of sequence *x*."""
    totals = np.cumsum(np.insert(x, 0, 0))
    return (totals[N:] - totals[:-N]) / N
eps, rews = np.array(rewards_list).T
smoothed_rews = running_mean(rews, 10)
plt.plot(eps[-len(smoothed_rews):], smoothed_rews)
plt.plot(eps, rews, color='grey', alpha=0.3)
plt.xlabel('Episode')
plt.ylabel('Total Reward')
```
## Testing
Let's checkout how our trained agent plays the game.
```
test_episodes = 10
test_max_steps = 400
env.reset()
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
for ep in range(1, test_episodes):
t = 0
while t < test_max_steps:
env.render()
# Get action from Q-network
feed = {mainQN.inputs_: state.reshape((1, *state.shape))}
Qs = sess.run(mainQN.output, feed_dict=feed)
action = np.argmax(Qs)
# Take action, get new state and reward
next_state, reward, done, _ = env.step(action)
if done:
t = test_max_steps
env.reset()
# Take one random step to get the pole and cart moving
state, reward, done, _ = env.step(env.action_space.sample())
else:
state = next_state
t += 1
env.close()
```
## Extending this
So, Cart-Pole is a pretty simple game. However, the same model can be used to train an agent to play something much more complicated like Pong or Space Invaders. Instead of a state like we're using here though, you'd want to use convolutional layers to get the state from the screen images.

I'll leave it as a challenge for you to use deep Q-learning to train an agent to play Atari games. Here's the original paper which will get you started: http://www.davidqiu.com:8888/research/nature14236.pdf.
| github_jupyter |
### Eco 100 diagrams
## Linear Demand and Supply Diagram
This is a jupyter notebook to generate a simple interactive supply and demand diagram using python and `ipywidgets` (interactive HTML widgets) to provide sliders and animations.
To run this notebook first run the code cells in the [code Section](#Code-Section) below and then return to run the cells below. If you are running this on Microsoft Azure notebook cloud service make sure you choose the kernel to be python 3.5 or above.
---
```
mkt(A=400, b=1, F=0, c=1)
```
### interactive plot
On the next cell move the sliders in the next cell to shift Supply or Demand in or out
Note: this will not display unless you are running this on a jupyter server
```
interact(mkt, A=(200,500,10),b=fixed(1),F=(0,300,10),c=fixed(1));
```
### Quantitative Restrictions
```
qrplot(150);
interact(qrplot, qr =(0,250,10));
```
End
## Code Section
We've put the code down here to keep the presentation uncluttered. Run the cells below first and then return to cells above where these functions are called.
```
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from ipywidgets import interact, fixed
```
The following is just styling for the graph
```
plt.style.use('bmh')
plt.rcParams["figure.figsize"] = [7,7]
plt.rcParams["axes.spines.right"] = False
plt.rcParams["axes.spines.top"] = False
plt.rcParams["font.size"] = 18
```
Now let's define simple linear (inverse) demand and supply functions:
```
def PD(Q, A, b):
    """Inverse demand: the maximum price at which quantity Q is demanded."""
    price = A - b * Q
    return np.array(price)
def PS(Q, F, c):
    """Inverse supply: the minimum price at which quantity Q is supplied."""
    price = F + c * Q
    return np.array(price)
def mkt(A=200, b=1, F=0, c=1):
    """Draw a linear supply/demand diagram and return the market equilibrium.

    Args:
        A: demand intercept (price at Q = 0).
        b: demand slope (price drop per unit of quantity).
        F: supply intercept (minimum supply price).
        c: supply slope.

    Returns:
        (Qe, Pe): equilibrium quantity and price where supply meets demand.
    """
    xmax = ymax = 500
    Q = np.arange(xmax)
    # For aesthetic reasons clip the demand line to end above the x-axis
    # by plotting over the shorter range Q_.
    d_end = np.round((A-50)/b)
    Q_ = np.arange(0, d_end)
    s_end = (xmax - F)/c # to keep S label inside box
    plt.figure(figsize=(7.7,7.5))
    plt.xlim(0,xmax)
    plt.ylim(0, ymax)
    plt.xlabel('Q - Quantity')
    plt.ylabel('P - Price')
    plt.plot(Q_,PD(Q_,A,b))
    plt.text(d_end, PD(d_end, A,b)-4, 'D', fontsize = 18)
    plt.plot(Q,PS(Q,F,c))
    plt.text(s_end, PS(s_end, F,c)-20, 'S', fontsize = 18)
    # Market equilibrium: P_D(Qe) = P_S(Qe)  =>  Qe = (A - F) / (b + c).
    # (The unused locals `xclip` and `CS` from the original were removed.)
    Qe = (A-F)/(c+b)
    Pe = PD(Qe, A, b)
    plt.scatter(Qe, Pe)
    plt.plot([0, Qe, Qe],[Pe, Pe, 0], ':')
    return Qe, Pe
```
#### Notes: the simple math behind the diagram
A demand curve tells us the quantity $Q$ that will be demanded of a good at any given price. This suggests a relationship of the form $Q(P)$, i.e. $Q$ as a function of $P$.
However, by historical convention economists have almost always drawn demand curves with quantity $Q$ on the horizontal axis and price $P$ on the vertical axis. For some this might suggest we are plotting a function of the form $P(Q)$. This is an 'inverse' demand function (the maximum price at which quantity Q will be demanded).
In the diagram we use a linear (inverse) **demand curve** of the form:
$$P^D(Q) = A + b \cdot Q$$
this of course corresponds to a Demand curve of the form $Q^D(P) = \frac{A}{b} - \frac{1}{b}P$
The (inverse) **supply curve** curve is of the form:
$$P^S(Q) = F + c \cdot Q$$
As will be seen later in the course the market supply curve is a marginal cost curve.
The market equilibrium price $P^e$ can be found where supply meets demand.
$$P^S(Q) = P^e = P^D(Q)$$
With the linear demand and supply system above we can easily solve for the market equilibrium quantity $Q^e$
$$A+ b \cdot Q^e = F + c \cdot Q^e$$
which leads to:
$$Q^e = \frac{A-F}{c+b}$$
And the market equilibrium price $P^e$ is then easily found from either $P^D(Q^e)$ or $P^S(Q^e)$
## Quantitative Restriction plot
```
def qrplot(qr):
    """Plot the market with a quantitative restriction (quota) at Q = qr.

    Shades consumer surplus (CS), producer surplus (PS), license rents and
    the deadweight loss relative to the unrestricted equilibrium, and
    prints the corresponding surplus figures.
    """
    A, b, F, c = 500, 1, 0, 1
    # Pass the local parameters through instead of repeating the literals,
    # so changing A, b, F, c above updates the whole diagram consistently
    # (the original hard-coded mkt(A=500, b=1, F=0, c=1) here).
    qe, pe = mkt(A=A, b=b, F=F, c=c)
    pd = PD(qr, A, b)  # demand price at the quota
    ps = PS(qr, F, c)  # supply price at the quota
    plt.scatter(qr, pd)
    plt.scatter(qr, ps)
    plt.axvline(qr, linestyle=':')
    plt.vlines(qr, ymin= ps, ymax=480, linewidth =3.5)
    plt.text(qr+10,480, "S\'")
    plt.hlines(pd,xmin=0, xmax=qr)
    plt.hlines(ps,xmin=0, xmax=qr, linestyle=':')
    # Surplus accounting under the quota.
    csurplus = (1/2) * qr*(A-pd)
    psurplus = (1/2) * qr*(ps-F)
    rents = qr *(pd-ps)
    tsurplus = csurplus + psurplus + rents
    dwl = (1/2)*(qe-qr)*(pd-ps)
    plt.text(qr/6,pd+(A-pd)/5, 'CS')
    plt.text(qr/6,ps-(ps-F)/4, 'PS')
    # Rotate the rent label when the quota is tight and the band is narrow;
    # at the equilibrium quantity there are no rents to label.
    if qr<80:
        plt.text(qr/6,(ps+pd)/2, 'Rents', rotation=90)
    elif qr == qe:
        pass
    else:
        plt.text(qr/6,(ps+pd)/2, 'LR')
    print( f'Pd = {pd:2.0f}, Ps = {ps:2.0f}, license rent = {pd-ps:2.0f}')
    print(f'CS = {csurplus:2.0f}, PS = {psurplus:2.0f}, rents = {rents:2.0f}, TS = {tsurplus:2.0f} DWL = {dwl:2.0f}')
    Q = np.arange(qr)
    Q2 = np.arange(qr, qe)
    plt.fill_between(Q, PD(Q,A,b), pd)
    plt.fill_between(Q, PS(Q,F,c), ps)
    plt.fill_between(Q, pd, ps)
    plt.fill_between(Q2, PD(Q2,A,b), PS(Q2,F,c));
```
| github_jupyter |
# Plan:
* Prepare input with extracted UASTs
* Filter data from DB (if needed)
* Prepare pairs (provide specific requirements if needed)
* Statistics & visualization
* Export datasets
```
%matplotlib inline
from collections import defaultdict, deque
from datetime import datetime
from glob import glob
import os
import sys
import bblfsh
from bblfsh import BblfshClient
from bblfsh.sdkversion import VERSION
import Levenshtein
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from matplotlib.pyplot import text as plt_text
import pandas as pd
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, size, udf
from pyspark.sql.types import BooleanType
import seaborn as sns
from sqlalchemy import Column, String, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlite3 import connect
from tqdm import tqdm_notebook as tqdm
```
## Initialize pyspark session
```
# Point PySpark at the system Python 3 interpreter.
os.environ["PYSPARK_PYTHON"] = "python3"
# Submit options: babelfish gRPC host, skip engine cleanup, 90 GB driver
# memory, scratch dir, raised driver result-size limit.
# NOTE(review): "22048" carries no unit suffix -- confirm Spark parses it as
# the intended size (maxResultSize values are usually given as e.g. "2g").
os.environ['PYSPARK_SUBMIT_ARGS'] = '--conf "spark.tech.sourced.bblfsh.grpc.host=bblfshd" ' \
                                    '--conf "spark.tech.sourced.engine.cleanup.skip=true" ' \
                                    '--driver-memory 90g ' \
                                    '--conf "spark.local.dir=/tmp/spark" ' \
                                    '--conf "spark.driver.maxResultSize=22048" pyspark-shell '
# Local session using 26 cores.
spark = SparkSession.builder.appName("near_duplicates_dataset").master("local[26]")
spark = spark.getOrCreate()
```
## Prepare input with extracted UASTs
### Expected parquet file with fields: [blob_id, repository_id, content, path, commit_hash, uast]
```
# Scratch storage root and the parquet dataset with pre-extracted UASTs.
base_path = "/storage/egor/tmp/"
data_loc = base_path + "dataset_with_uasts_full/"
print("data_loc:", data_loc)
## Read dataset
data = spark.read.parquet(data_loc)
print("number of rows:", data.count())
## Deduplicate by blob_id
# Identical blobs would otherwise yield trivial self-similar pairs.
data = data.drop_duplicates(["blob_id"])
print("number of rows after deduplication of blob_id:", data.count())
```
# Filter repositories and files to avoid duplications with DB
```
# Drop repositories that already appear in a previously exported pairs DB,
# so the new dataset does not overlap with it.
FILTER_REPOS = True
if FILTER_REPOS:
    existing_db = base_path + "near_dupl_pairs/export_db/" + "2018-03-09T09_17_59Z-export.db"
    query = "select repository_id_a, repository_id_b from file_pairs;"
    ignore_repos = set()
    with connect(existing_db) as conn:
        repos_pairs = pd.read_sql_query(query, conn)
    # Ignore every repository that appears on either side of an existing pair.
    ignore_repos.update(repos_pairs["repository_id_a"].tolist())
    ignore_repos.update(repos_pairs["repository_id_b"].tolist())
    print("number of repos to ignore:", len(ignore_repos))
    # Keep only rows whose repository is NOT in the ignore set.
    data = data[data.repository_id.isin(ignore_repos)== False]
    print("number of rows after filtering repos:", data.count())
```
## Prepare pairs of similar files
```
def uast2sequence(root):
    """Return the nodes of *root*'s tree in post-order (children before parent).

    Workaround for the broken UAST iterator in bblfsh: performs an explicit
    iterative traversal, tracking each node's unvisited children by object id.
    """
    postorder = []
    pending = defaultdict(deque)
    pending[id(root)].extend(root.children)
    path = [root]
    while path:
        top = path[-1]
        queue = pending[id(top)]
        if not queue:
            # All children emitted -- emit the node itself.
            postorder.append(path.pop())
            continue
        nxt = queue.popleft()
        pending[id(nxt)].extend(nxt.children)
        path.append(nxt)
    return postorder
def flatten_uast(uast):
    """Flatten a UAST into the post-order list of its nodes' internal types."""
    return [node.internal_type for node in uast2sequence(uast)]
def uast_to_type_seq(uast):
    """Deserialize a stored UAST (first element of *uast* is a serialized
    bblfsh Node) and return its flattened node-type sequence."""
    from bblfsh import Node
    root = Node.FromString(uast[0])
    return flatten_uast(root)
def ratio_levenshtein(seq_a, seq_b):
    """Normalized Levenshtein similarity in [0, 1] between two sequences."""
    similarity = Levenshtein.ratio(seq_a, seq_b)
    return similarity
def calc_uast_sim(uast_a, uast_b):
    """Similarity of two serialized UASTs: the Levenshtein ratio of their
    joined, flattened node-type sequences."""
    joined_a = "".join(uast_to_type_seq(uast_a))
    joined_b = "".join(uast_to_type_seq(uast_b))
    return ratio_levenshtein(joined_a, joined_b)
def extract_pairs(dataframe, filter_res=None):
    """Build candidate near-duplicate pairs from files sharing (repository_id, path).

    Rows are grouped by (repository_id, path); within each group the rows are
    shuffled and paired first-half vs second-half.  A pair is emitted as
    (row_a, row_b, text_ratio, uast_ratio) when at least one filter accepts it.

    :param dataframe: Spark DataFrame with content/uast columns.
    :param filter_res: list/tuple of callables (text_ratio, uast_ratio) -> bool,
        or None to accept every pair.
    :return: Spark RDD of (row_a, row_b, text_ratio, uast_ratio) tuples.
    """
    if filter_res is None:
        # Fix: wrap the accept-all predicate in a list.  The code below
        # iterates over filter_res, so the original bare lambda raised
        # TypeError whenever the default was used.
        filter_res = [lambda *args, **kwargs: True]
    elif not isinstance(filter_res, (list, tuple)):
        raise ValueError("Expected list or tuple of filtering functions, got %s" % type(filter_res))
    groups = dataframe.rdd.groupBy(lambda row: (row.repository_id, row.path))
    n_groups = groups.count()
    print("Number of groups:", n_groups)
    def _extract_pairs(group):
        # group is (key, iterable-of-rows); the key itself is not needed.
        rows = list(group[1])
        if len(rows) < 2:
            return
        indices = list(range(len(rows)))
        np.random.shuffle(indices)
        for a, b in zip(indices[:len(indices) // 2], indices[len(indices) // 2:]):
            row_a = rows[a].asDict()
            row_b = rows[b].asDict()
            ratio = ratio_levenshtein(row_a["content"].decode("utf-8", "ignore"),
                                      row_b["content"].decode("utf-8", "ignore"))
            uast_ratio = calc_uast_sim(row_a["uast"], row_b["uast"])
            # Keep the pair if any filter accepts it.
            if any(fil(ratio, uast_ratio) for fil in filter_res):
                yield row_a, row_b, ratio, uast_ratio
    return groups.flatMap(_extract_pairs)
```
## Sampling requirements
```
# Sampling plan: each entry is (similarity box, number of pairs to draw).
ranges = []
# Box 1: fairly similar text, mid-range UAST similarity.
similarity_ranges = {"text_lower": 0.55,
                     "text_upper": 1.0,
                     "uast_lower": 0.45,
                     "uast_upper": 0.7}
ranges.append((similarity_ranges, 250))
# Box 2: moderately similar text, highly similar UASTs.
similarity_ranges = {"text_lower": 0.55,
                     "text_upper": 0.7,
                     "uast_lower": 0.7,
                     "uast_upper": 1.}
ranges.append((similarity_ranges, 150))
# Box 3: dissimilar text, any non-trivial UAST similarity.
similarity_ranges = {"text_lower": 0.3,
                     "text_upper": 0.55,
                     "uast_lower": 0.45,
                     "uast_upper": 1.}
ranges.append((similarity_ranges, 100))
def make_filter(sim_ranges):
    """Build a predicate testing that a (text_sim, uast_sim) point lies inside
    the inclusive rectangle described by *sim_ranges*."""
    def filter_similarity(text_sim, uast_sim):
        text_ok = sim_ranges["text_lower"] <= text_sim <= sim_ranges["text_upper"]
        uast_ok = sim_ranges["uast_lower"] <= uast_sim <= sim_ranges["uast_upper"]
        return text_ok and uast_ok
    return filter_similarity
```
## Select pairs that satisfy requirements above
```
# One filter per sampling box; a pair is kept if it falls in any box.
filters = []
for sim_ranges, n_pairs in ranges:
    filters.append(make_filter(sim_ranges))
pairs = extract_pairs(data, filter_res=filters).cache()
print("n_pairs extracted:", pairs.count())
all_pairs = pairs.collect()
# (text_ratio, uast_ratio) per pair, used for plotting and sampling below.
xy = np.array([(row[2], row[3]) for row in all_pairs])
```
## Statistics
```
# Count distinct unordered pairs of blob ids among the extracted pairs.
pairs_blobs = set()
for pair in all_pairs:
    # Fix: use both sides of the pair (the original added pair[0]'s blob id
    # twice); sorting makes the pair order-insensitive.
    pairs_blobs.add(tuple(sorted((pair[0]["blob_id"], pair[1]["blob_id"]))))
print("number of unique blob id pairs:", len(pairs_blobs))
# Count distinct blob ids appearing on either side of any pair.
blobs = set()
for pair in all_pairs:
    blobs.add(pair[0]["blob_id"])
    blobs.add(pair[1]["blob_id"])
print("number of unique blob ids:", len(blobs))
```
### Lengths of texts
```
# Density of raw content lengths over both sides of every pair.
text_lengths = []
for pair in all_pairs:
    text_lengths.append(len(pair[0]["content"]))
    text_lengths.append(len(pair[1]["content"]))
sns.kdeplot(text_lengths, cut=0)
```
### Log lengths of texts
```
# Same density on a log scale to tame the long tail.
text_lengths = []
for pair in all_pairs:
    text_lengths.append(np.log(len(pair[0]["content"])))
    text_lengths.append(np.log(len(pair[1]["content"])))
sns.kdeplot(text_lengths, cut=0)
```
### Overall distribution
```
# Joint distribution of text similarity (x) vs UAST similarity (y).
ax = sns.jointplot(x=xy[:, 0], y=xy[:, 1])
```
### Distribution with colorized length of texts
```
# Scatter of (text, uast) similarity, colored by the log length of the longer
# file in each pair.
text_lengths = []
for pair in all_pairs:
    text_lengths.append(np.log(max(len(pair[0]["content"]), len(pair[1]["content"]))))
colors = text_lengths
mymap = plt.get_cmap("Reds")
# scatter maps `colors` through the colormap itself via cmap=; the original
# also pre-computed an unused `my_colors = mymap(colors)`, dropped here.
plt.scatter(xy[:, 0], xy[:, 1], s=40, c=colors, edgecolors='None', cmap=mymap)
plt.colorbar()
```
## Sampling
### Select samples based on `abs(uast_score - text_score)` - the higher the difference, the higher the probability of selection
### The reason to use random sampling instead of simply selecting the samples with the highest diff is that the latter creates unused areas (see below)
```
def dummy_sampler(similarity_ranges, xy):
    """Greedy sampler: for every (box, n) entry, take the n in-box points with
    the largest |text_sim - uast_sim| gap and stack the selections."""
    selected = []
    for box, n_samples in similarity_ranges:
        in_box = ((box["text_lower"] <= xy[:, 0]) & (xy[:, 0] <= box["text_upper"]) &
                  (box["uast_lower"] <= xy[:, 1]) & (xy[:, 1] <= box["uast_upper"]))
        candidates = xy[in_box]
        # Rank candidates by how much the two similarity scores disagree.
        gap = np.abs(candidates[:, 0] - candidates[:, 1])
        take = min(n_samples, candidates.shape[0])
        top = np.arange(candidates.shape[0])[np.argsort(gap)[-take:]]
        selected.append(candidates[top])
    return np.vstack(selected)
# Greedy top-|diff| sampling; note the unused regions it leaves in the plot.
xy_final = dummy_sampler(ranges, xy)
ax = sns.jointplot(x=xy_final[:, 0], y=xy_final[:, 1])
```
### Proper random sampling
```
def sampler(similarity_ranges, xy):
    """Randomly sample points from each similarity box, weighting each point
    by log(1 + |text_sim - uast_sim|) so larger disagreements are more likely
    to be drawn.

    Returns (sampled points, their indices into xy).
    """
    sampled = []
    sampled_ind = []
    for box, n_samples in similarity_ranges:
        mask = ((box["text_lower"] <= xy[:, 0]) & (xy[:, 0] <= box["text_upper"]) &
                (box["uast_lower"] <= xy[:, 1]) & (xy[:, 1] <= box["uast_upper"]))
        candidate_ind = np.arange(xy.shape[0])[mask]
        candidates = xy[candidate_ind]
        # Pseudo-probabilities derived from the |text - uast| gap.
        weights = np.log(np.abs(candidates[:, 0] - candidates[:, 1]) + 1)
        probas = weights / weights.sum()
        # Sample without replacement, biased toward large gaps.
        chosen = np.random.choice(np.arange(candidates.shape[0]), size=n_samples,
                                  p=probas, replace=False)
        sampled_ind.append(candidate_ind[chosen])
        sampled.append(candidates[chosen])
    return np.vstack(sampled), np.hstack(sampled_ind)
# Probabilistic sampling; list_ind maps the sampled points back into all_pairs.
xy_final, list_ind = sampler(ranges, xy)
ax = sns.jointplot(x=xy_final[:, 0], y=xy_final[:, 1])
similar_pairs = [all_pairs[i] for i in list_ind.astype(int).tolist()]
print("total number of pairs to keep:", len(similar_pairs))
```
### Distribution with colorized length of texts
```
# Same colorized scatter, restricted to the sampled pairs.
text_lengths = []
for pair in similar_pairs:
    text_lengths.append(np.log(max(len(pair[0]["content"]), len(pair[1]["content"]))))
colors = text_lengths
mymap = plt.get_cmap("Reds")
# scatter maps `colors` through the colormap itself via cmap=; the original
# also pre-computed an unused `my_colors = mymap(colors)`, dropped here.
plt.scatter(xy_final[:, 0], xy_final[:, 1], s=40, c=colors, edgecolors='None', cmap=mymap)
plt.colorbar()
```
## Add dummy features to pairs (we don't compute them now)
```
# Downstream consumers expect a "bag" feature field; fill it with a
# placeholder since features are not computed in this notebook.
for pair in similar_pairs:
    pair[0]["bag"] = "no_features"
    pair[1]["bag"] = "no_features"
```
## Export dataset
### Pickle
```
import pickle
# Pickle the sampled pairs, file stamped with today's date.
save_loc = base_path + "near_dupl_pairs/" + str(datetime.now().date()) + "_500_pairs.pkl"
with open(save_loc, "wb") as f:
    print("save similar pairs to", save_loc)
    pickle.dump(similar_pairs, f)
```
### sqlite
```
Base = declarative_base()
# Derive the column names by whitespace-splitting a pseudo-DDL string and
# dropping the "TEXT," type tokens; what remains are the 11 field names
# (ending with "score").
fields = """blob_id_a TEXT, repository_id_a TEXT, commit_hash_a TEXT, path_a TEXT, content_a TEXT,
blob_id_b TEXT, repository_id_b TEXT, commit_hash_b TEXT, path_b TEXT, content_b TEXT,
score""".split()
fields = [field for field in fields if field != "TEXT,"]
# Template for Files.__repr__: "<Files(f1='%s', f2='%s', ..., fn='%s')>".
start = "<Files("
end = "='%s')>"
repr_str = start + "='%s', ".join(fields) + end
class Files(Base):
    """ORM row for one near-duplicate file pair plus its similarity score.

    Columns ending in _a / _b describe the two files of the pair.
    """
    __tablename__ = "files"
    # Fix: the original set a bare `extend_existing=True` class attribute,
    # which SQLAlchemy's declarative system ignores; table options must be
    # passed through __table_args__.
    __table_args__ = {"extend_existing": True}
    blob_id_a = Column(String, primary_key=True)
    repository_id_a = Column(String)
    commit_hash_a = Column(String)
    path_a = Column(String)
    content_a = Column(String)
    blob_id_b = Column(String)
    repository_id_b = Column(String)
    commit_hash_b = Column(String)
    path_b = Column(String)
    content_b = Column(String)
    # Text similarity ratio of the pair (see the export loop below the class).
    score = Column(Float(precision="DOUBLE"))

    def __repr__(self):
        # repr_str is the module-level "<Files(field='%s', ...)>" template.
        return repr_str % (self.blob_id_a,
                           self.repository_id_a,
                           self.commit_hash_a,
                           self.path_a,
                           self.content_a,
                           self.blob_id_b,
                           self.repository_id_b,
                           self.commit_hash_b,
                           self.path_b,
                           self.content_b,
                           self.score)
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# from https://www.pythoncentral.io/introductory-tutorial-python-sqlalchemy/
# SQLite file named by today's date, alongside the pickle export.
engine = create_engine("sqlite:///" + base_path + "near_dupl_pairs/" +
                       str(datetime.now().date()) + "_500_pairs.db")
Base.metadata.create_all(engine)
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# One ORM row per sampled pair.  Note: only the text ratio (pair[2]) is
# stored as `score`; the UAST ratio (pair[3]) is not persisted here.
for pair in similar_pairs:
    # NOTE(review): `pair` is rebound from the (row_a, row_b, ratio,
    # uast_ratio) tuple to the ORM object; this works, but shadowing the
    # loop variable is easy to misread.
    pair = Files(blob_id_a=pair[0]["blob_id"],
                 repository_id_a=pair[0]["repository_id"],
                 commit_hash_a=pair[0]["commit_hash"],
                 path_a=pair[0]["path"],
                 content_a=pair[0]["content"],
                 blob_id_b=pair[1]["blob_id"],
                 repository_id_b=pair[1]["repository_id"],
                 commit_hash_b=pair[1]["commit_hash"],
                 path_b=pair[1]["path"],
                 content_b=pair[1]["content"],
                 score=pair[2])
    session.add(pair)
try:
    session.commit()
except Exception as e:
    # NOTE(review): dropping into pdb on a failed commit is a debugging
    # leftover; prefer logging the error and re-raising.
    import pdb;pdb.set_trace()
    pass
```
| github_jupyter |
# ClickHouse. Аналитические функции
```
import pandas as pd
import numpy as np
# Source data: sales rows; the 'dt' column is parsed as a date.
df = pd.read_csv('/content/drive/MyDrive/dataset/clickhouse_data.csv', sep=';', parse_dates=['dt'])
df.info()
%%capture
!sudo apt-get install apt-transport-https ca-certificates dirmngr
!sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E0C56BD4
!echo "deb https://repo.clickhouse.tech/deb/stable/ main/" | sudo tee \
/etc/apt/sources.list.d/clickhouse.list
!sudo apt-get update
!sudo apt-get install -y clickhouse-server clickhouse-client
!sudo service clickhouse-server start
!clickhouse-client
%%capture
!pip install clickhouse-driver
from clickhouse_driver import Client
# Connect to the local server, then recreate a clean `db` database.
client = Client(host='localhost')
client.execute('SHOW DATABASES')
client.execute('DROP DATABASE IF EXISTS db')
client.execute('CREATE DATABASE db')
client.execute('SHOW DATABASES')
# Reconnect, scoped to the freshly created database.
client = Client(host='localhost', user='default', port='9000', database='db')
client.execute('DROP TABLE IF EXISTS sales')
client.execute('CREATE TABLE sales (dt Date, \
group String, \
manager String, \
val Int32 \
) ENGINE = Memory')
client.execute('SHOW TABLES FROM db')
client.execute("INSERT INTO sales VALUES", df.to_dict('records'))
def select_clickhouse(sql):
    """Run *sql* against the global ClickHouse client; return a DataFrame."""
    result = client.query_dataframe(sql)
    return result
# Quick smoke test: fetch the whole table back as a DataFrame.
sql = '''SELECT * FROM sales'''
select_clickhouse(sql)
```
### Комбинаторы агрегатных функций
If К имени любой агрегатной функции может быть приписан суффикс -If. В этом случае, агрегатная функция принимает ещё один дополнительный аргумент - условие (типа UInt8). Агрегатная функция будет обрабатывать только те строки, для которых условие сработало. Если условие ни разу не сработало - возвращается некоторое значение по умолчанию (обычно - нули, пустые строки).
Примеры: sumIf(column, cond), countIf(cond), avgIf(x, cond), quantilesTimingIf(level1, level2)(x, cond), argMinIf(arg, val, cond) и т. п.
```
sql = '''select s.group,
sumIf(s.val,s.manager='m1') as m1,
sumIf(s.val,s.manager='m2') as m2,
sumIf(s.val,s.manager='m3') as m3,
sumIf(s.val,s.manager='m4') as m4,
sumIf(s.val,s.manager='m5') as m5
from sales as s
group by s.group'''
select_clickhouse(sql)
```
### Параметрические агрегатные функции
sequenceCount(pattern)(time, cond1, cond2, …)
Вычисляет количество цепочек событий, соответствующих шаблону. Функция обнаруживает только непересекающиеся цепочки событий. Она начинает искать следующую цепочку только после того, как полностью совпала текущая цепочка событий.
```
sql = '''SELECT sequenceCount('(?1)(?t==1)(?2)(?t==1)(?3)')(s.dt, s.group = 'A',s.group = 'A',s.group='A') as A_A_A
FROM sales as s'''
select_clickhouse(sql)
```
| github_jupyter |
##### Copyright 2021 The C-HACKMASTERS.
```
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# **C-HACK Tutorial 2: Lists, Dictionaries, Flow Control, Oh My!**
**Instructor**: Dave Beck<br>
**Contact**: dacb@uw.edu
**Acknowledgements**: Dave would like to acknowledge the entire C-Hack hackmaster team for review and comments on the tutorial. In particular, this tutorial was reviewed by:
* Koto Durkee
* Anne Farkley
---
In this tutorial, we will review basic variables and introduce lists and dictionaries as new variable types, or data structures. We will close with seeing conditional execution and functions. Hang on for the ride!
Normally, a notebook begins with `import` statements that _import_ packages or libraries that do a lot of heavy lifting for us. We'll get to that later.
---
## 2.1 A quick review from last tutorial
Recall, we discussed **_variables_** and **_comments_**.
### 2.1.1 A quick review of variables
Variables are human names we give data objects in our code. Variables in Python should be named using appropriately descriptions of their purpose. By convention, most variable names are `lower case` and may optionally contain the underscore character ('`_`').
Some names you might want to use are off-limits because they are **_reserved words_**, meaning they are words in Python that have special meaning.
Examples of _reserved words_ that you should not use as variable names are in the table below. The ones in __bold__ are ones we will use in the tutorial today. Remember, these can't be used as variable names!
| | | | | | | |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| __import__ | __True__ | __False__ | __if__ | __else__ | __def__ | __in__ |
| __not__ | __and__ | __or__ | __None__ | from | continue | pass |
| class | await | raise | del | lambda | return | elif |
| with | as | finally | nonlocal | while | assert | except |
| global | yield | break | try | global |
Let's see an example of a decimal point containing number, known to computers as a **_floating point_** number. Let's use $\pi$ as the number. (Pro tip: you can write equations in Markdown. See this [reference](https://medium.com/analytics-vidhya/writing-math-equations-in-jupyter-notebook-a-naive-introduction-a5ce87b9a214).)
```
pickles = 3.14
print(pickles)
```
```
pickles = 3.14
print(pickles)
```
Cool. We defined a variable named pickles containing a poor estimate of $\pi$. Now every place the word `pickles` appears, it will have the value `3.14`, at least until we change it.
Is `pickles` a good variable name for the value $\pi$? If not, what would be a better name? Is it in the **_reserved word_** list above? Use the `code` cell below to create a new variable with your preferred name in it.
Variables in Python have different data types. The simple ones, we've already discussed such as an integer or `int`, a string or `str`, a decimal point containing number called a **_floating point_** number. **_Floating point_** numbers are special and are stored in a computer's memory using [internal representations](http://steve.hollasch.net/cgindex/coding/ieeefloat.html). One important thing to know about **_floating point_** numbers is that to a computer, the statement below may not always be true. For now, just think about **_floating point_** numbers as approximately representing the decimal number you see.
```
10.0 * 0.1 = 1.0
```
Variables can change their value in Python so we can change the value of `pickles` to something else. For example, a definition of what pickles are.
```
pickles = "A cucumber preserved in vinegar or brine."
print(pickles)
```
```
pickles = "A cucumber preserved in vinegar or brine."
print(pickles)
```
### 2.1.2 A quick review of comments
Just like we should use good naming conventions for variables so they make sense, we should have good comments to help readers follow our code. Good comments can turn a speck of coding gold into a valuable nugget of knowledge. Bad or wrong comments are bugs. If you want to learn more about why we call computer coding problems bugs, read about [Grace Hopper](https://en.wikipedia.org/wiki/Grace_Hopper) and see [her photo of a `bug` in her notebook](https://en.wikipedia.org/wiki/Grace_Hopper#/media/File:First_Computer_Bug,_1945.jpg).
To comment out some text, use the `#` or hashtag or sometimes called the pound character. By the way, is the `#` a **_reserved word_**?
```
print(pickles)
```
```
print(pickles)
```
```
# This is an example comment. Notice it is in english and full sentences. That is good style.
# two_pi = 6.28
```
```
# This is an example comment. Notice it is in english and full sentences. That is good style.
# two_pi = 6.28
```
If I tried to execute a cell with the following contents, what would it output?
```
print(two_pi)
```
---
## 2.2 Data Structures: Variables that organize data
Many variables you will encounter are more than the above simple **_data types_** (integers, strings, floating point numbers). In fact, they may contain a few to many pieces of data rolled into one thing or **_data structure_**. Next, we'll discuss two important _data structures_: **lists** and **dictionaries**. There are many advanced **_data structures_** in Python that behave like **lists** and **dictionaries**, in different settings but their concepts are often similar. Thus, understanding them here will help us [grok](https://en.wikipedia.org/wiki/Grok) or understand more advanced concepts. Let's start with the venerable **list**.
---
### 2.2.1 Lists
_For more than just shopping._
A list is an **_ordered_ _collection_** of data. By **_collection_**, we mean that it contains multiple data. By **_ordered_**, we mean that the data are arranged so that they appear first to last like words in a sentence. The order is important for the meaning of the sentence.
Let's begin by creating a list variable named `my_list` that contains three pieces of information.
```
my_list = ['I', 'like', 'pie']
my_list
```
```
my_list = ['I', 'like', 'pie']
my_list
```
Now the value of the variable `my_list` points to a list of three strings. The use of `[`, `]`, and `,` are used to denote the begining, end and separator of the list. Like a sentence, this list of words is ordered. I like cake.
**Notice, we didn't use a `print()` on the variable. The last variable _expression_ in a cell is shown in the notebook by default.**
The elements in a list have indices. That is, to access an element in the list, you can refer to it by its index. Think of a list like a very simple table.
| index | value |
|:---:|:---|
| 0 | `'I'` |
| 1 | `'like'` |
| 2 | `'pie'` |
**People might start counting at `1`, but computers start counting at `0`. The first element in a list has the index `0`, the last element in a list has the index of the length of the list minus 1. For our list which is three elements long, the first index will be 0 and the last index will be `2`.**
**Some programming languages also start counting at 1. These include [Fortran](https://en.wikipedia.org/wiki/Fortran), [Matlab](https://en.wikipedia.org/wiki/MATLAB), and the abominable [R](https://en.wikipedia.org/wiki/R). This is unfortunate. Be extra careful if you try to [port code](https://en.wikipedia.org/wiki/Porting) from those languages to Python.**
If you want to access a list element you can use its index. The index value is designated by appending `[` and `]` to the variable name with the index between. Examples are always easier than words:
```
my_list[0]
```
Given what you know... What will this output?
```
my_list[0]
```
If you try to use an index that is larger than the length of the list minus 1, you will get an error. Try it!
The different colors mean you made a boo boo!
```
my_list[3]
```
```
my_list[3]
```
The last line of the error report is the most informative for us now. It should read:
```
IndexError: list index out of range
```
It is trying to tell you that an `IndexError` occurred because the index you tried to access is out of the range of `0` to `2`, inclusive. Translation: stay in your lane, programmer!
In addition to being able to recall or access a value of a list by the element's index in the list, we can change the value of the element. We will also use the `[`, index, `]` notation but like when we set the value of a variable, we will use the `=` character. Let's do it!
```
my_list[2] = 'cake'
my_list
```
```
my_list[2] = 'cake'
my_list
```
Sweet. Now, change the third element to your favorite food item. Is it really better than cake? Remember, don't change the number, just the string. The string is what appears between the `'` characters.
```
my_list[2] = 'toenails'
my_list
```
```
my_list[2] = 'toenails'
my_list
```
Finally, let's talk about empty lists and appending items to a list. An empty list is created by setting a variable to `[]`. This means the variable's **_data type_** is a list, but it contains no elements.
```
a_new_list = []
```
```
a_new_list = []
```
We can append items to a list by using the `.append()` **_function_**. We'll talk more about functions later, but when this **_function_** or **_method_** is used on a variable whose **_data type_** is list, it well append the value in between the `()` to the end of the list.
```
a_new_list.append("1st element")
print(a_new_list)
a_new_list.append("2nd element")
print(a_new_list)
```
```
a_new_list.append("1st element")
print(a_new_list)
a_new_list.append("2nd element")
print(a_new_list)
```
Finally, in addition to the `.append()` **_function_**, there are a lot of **_functions_** (or **_methods_**) available for **_lists_**. See a complete list of them [here](https://docs.python.org/3/tutorial/datastructures.html#more-on-lists).
One more we'll touch on quickly is the `len()` **_function_**. It returns the length of a **_list_**. Here is an example:
```
len(a_new_list)
```
Before you run this cell. What do you think it will output?
```
len(a_new_list)
```
---
#### 2.2.1.1 Slicing
Sometimes you want to make a list from consecutive elements of a list. This is called **_slicing_** where you cut up a list and get just the consecutive values you want. **_Slicing_** is done with the `:` character in the index area between the `[` and the `]`. Here is an example to pull just the last two items out of `my_list`. We use the first index, then a `:`, then the last index plus 1. Like this:
```
my_list[1:3]
```
You might be asking... **WHY +1??** This is because with _slices_ or _ranges_ in Python are known as **[_half-open intervals_](https://en.wikipedia.org/wiki/Interval_(mathematics))** where the lower bound is inclusive and the upper bound is the non-inclusive limit. **TL;DR**: add one the the upper end of a _slice_ or a _range_ in Python.
```
my_list[1:3]
```
```
my_list[1:3]
```
Just for giggles, try it with `my_list[1:2]`. You will see the _range_ of the _slice_ is only `1`. That's because `2 - 1 = 1`.
```
my_list[1:2]
```
```
my_list[1:2]
```
You don't even need to use the upper bound if all you really mean is _the end of the list_. For that, you can leave the index empty. Hot dog! Let's see an example...
```
my_list[1:]
```
```
my_list[1:]
```
**But wait, there's more!** You can set multiple elements of a list **at the same time** by _slicing_. **Dig this!**
```
my_list[1:] = ['love', 'puppies']
my_list
```
```
my_list[1:] = ['love', 'puppies']
my_list
```
And who doesn't love puppies? Recap... Lists are _ordered_ _collections_ of information that you can recognize by their use of `[` and `]`. To access or _address_ elements in the list, you can use _indices_. They start at `0` in Python. The last element of a list has the index of the length of the list minus 1. When _slicing_ a list, use two indices separated by `:`. If you leave one off, it means everying up to or beyond that element.
So, for example, the first two elements of our list could be accessed by?
```
my_list[:2]
```
**Why `2`? Why no number before the `:`?**
---
#### 2.2.1.2 Negative indices?!?!
A brief note for folks who want to learn more. You can use negative index numbers to access from the end of the list towards the front. That is, a negative 1 (`-1`) is the last element in the list. A negative 2 (`-2`) is the second to last. The same rules apply for slicing with the `:` character. For more information on this serious cool thing that you probably won't use soon, read up [here](https://googlethatforyou.com?q=negative%20indexing%20in%20python).
A quick demo example... Let's get last element in the list using negative indexing. That will be `-1`. Here goes...
```
my_list[-1]
```
```
my_list[-1]
```
We can also use _slicing_ with _negative indices_. Remember, that _slicing_ works the same way with _negative indices_, i.e. the the upper bound is non-inclusive. Here is an example using upper and lower bounds. Were you surprised by the results?
```
my_list[-3:-1]
```
```
my_list[-3:-1]
```
---
#### 2.2.1.3 Lists can contain most anything
So far, we've seen a list containing some strings. That made our sentence analogy about the _ordering_ of _objects_ or strings in a list make sense. But lists can contain a mixture of _data types_ and _data structures_. As a quick example, let's make a list that contains a integer, a string and a floating point number. This will be a four element list.
```
zoo = [ 42, 'Elephants', 'ate', 3.14 ]
```
```
zoo = [ 42, 'Elephants', 'ate', 3.14 ]
zoo
```
We can even make a list of lists. **OH. MY. GOSH! SO META!**
```
list_of_lists = [
[ 42, 43, 44, 44 ],
[ 'a', 'b', 'c', 'd' ]
]
list_of_lists[0]
list_of_lists[0][1]
a_list = list_of_lists[0]
a_list[1]
```
```
list_of_lists = [
[ 42, 43, 44, 44 ],
[ 'a', 'b', 'c', 'd' ]
]
```
The important thing here is to have the right number of `[` and `]` to embed a list in a list separated between `,`. Yeah, this **is** super meta.
```
list_of_lists[0]
list_of_lists[0][1]
list_of_lists[1][1]
a_list = list_of_lists[0]
a_list[1]
```
Make sure you understand why the above works. Take a minute and play with the first and last indices.
---
### 2.2.2 Dictionaries
_For more than just reading on a Friday night._
Sometimes we want to _access_ elements in a _data structure_ by something other than an index. Consider a [dictionary on the internet](dictionary.com). You look up the word on a seach engine and go to the web page of the entry for the word. Python has a similar concept where the index for an element in a **_collection_** is not a number, as it is in a list above, but a `key` that, may be a string like `'pickles'`.
In the case of a Python **_dictionary_**, we call the definition a **_value_** and the way we look up the definition is a **_key_**. This results in **_key_** and **_value_** pairs. One **_key_** maps to one **_value_**. In our analogy of a internet dictionary, this is the same as the word to definition pairs.
Let's create a simple dictionary with a definition we have alread seen in the tutorials... Pickles.
```
my_dict = {} # create an empty dictionary
my_dict['pickles'] = "A cucumber preserved in vinegar or brine."
my_dict
```
```
my_dict = {} # create an empty dictionary
my_dict['pickles'] = "A cucumber preserved in vinegar or brine."
my_dict
```
Notice the use of the `#` comment. Nice. Unlike the list, for dictionaries, some of their operations use the `{` and `}` brackets. Using a key to access or retrieve a value from the dictionary still uses the `[` and `]` brackets. Stick with it, for realz.
Case matters in a dictionary because the `key` is a _data type_ itself.
```
my_dict['Pickles']
```
returns an error (`KeyError: 'Pickles'`) and the following does not
```
my_dict['pickles']
```
```
my_dict['Pickles']
```
This is a lot like the `IndexError` from the list case.
Moving on... You can change the value of a dictionary **_key_** by **_reassigning_** it. For example below, we use the same key `'pickles'` to change the definition of the word in our dictionary. Notice we still use the `[` and `]` brackets but we use the **_key_** instead of the **_index_** like we did with lists. The change we made in the string is that the cucumbers in pickles are usually small.
```
my_dict['pickles'] = "A small cucumber preserved in vinegar or brine."
```
```
my_dict['pickles'] = "A small cucumber preserved in vinegar or brine."
my_dict
```
Let's add two `key` and `value` pairs to our dictionary which are in the table below:
| key | value |
|-----|-------|
| list | An ordered collection. |
| dictionary | A collection with _unique indices_. |
Something like:
```
my_dict['list'] = "An ordered collection"
print(my_dict)
```
Is probably where we want to begin.
```
my_dict['list'] = "An ordered collection"
my_dict
```
Finally, like the **_list's_** `.append()` **_function_**, there are a lot of **_functions_** (or **_methods_**) available for dictionaries. See a complete list of them [here](https://docs.python.org/3/tutorial/datastructures.html#dictionaries).
## 2.3 Flow control: If this, then that...
**_Flow control_** is a fancy phrase meaning to execute some code statements under certain conditions. The simplist case, is an `if` statement (figure right below): If a variable is `True` then do something. If it is `False` then do something else, or do nothing at all.
<img src="https://docs.oracle.com/cd/B19306_01/appdev.102/b14261/lnpls008.gif">Flow control figure</img>
In the above figure, the `selection` refers to `if` statements. `Iteration` refers to loops or repeating some statements over and over while changing a small number of variable values. `Sequence` roughly corresponds to blocks of statements in **_functions_**.
Flow control refers how to programs do loops, conditional execution, and order of functional operations. Let's start with conditionals, or the venerable ``if`` statement.
Let's start with a simple list of instructors for these sessions.
```
instructors = ['Jesse', 'Dave', 'Wes', 'Stephanie', 'C-Hacker']
instructors
```
```
instructors = ['Jesse', 'Dave', 'Wes', 'Stephanie', 'C-Hacker']
instructors
```
### 2.3.1 If
If statements can be use to execute some lines or block of code if a particular condition is satisfied. E.g. Let's print something based on the entries in the list.
```
if 'C-Hacker' in instructors:
print('#fakeinstructor')
```
```
if 'C-Hacker' in instructors:
print('#fakeinstructor')
```
Notice the use the special **_reserved word_** **in**. This returns the value `True` when a value appears in a **_list_** and `False` when it does not. Notice how it reads like English. Readability is a key feature of Python and is part of the language design philosophy.
Usually we want conditional logic on both sides of a binary condition, e.g. some action when ``True`` and some when ``False``
```
if 'C-Hacker' in instructors:
print('There are fake names for class instructors in your list!')
else:
print("Nothing to see here")
```
```
if 'C-Hacker' in instructors:
print('There are fake names for class instructors in your list!')
else:
print("Nothing to see here")
```
There is a special do nothing word: `pass` that skips over some arm of a conditional, e.g.
```
if 'Wes' in instructors:
print("Congratulations! Wes is part of your tutorial, it will be grand!")
else:
pass
```
```
if 'Wes' in instructors:
print("Congratulations! Wes is part of your tutorial, it will be grand!")
else:
pass
```
The use of `pass` here is very important. While you can actually skip the `else` and `pass` statements and the code will behave identically, using them is an important signal that you intended for the negative case to do nothing. When you are writing code, you should start thinking about reading code and how others will read your code.
In short, when you have one side of an `if` statement that has no code use an `else` and a `pass` to be a good citizen. Remember, the person you will collaborate the most about your code is yourself in 3-6 months. Love yourself, use `pass`.
_Note_: what have you noticed in this session about quotes? What is the difference between ``'`` and ``"``?
Another simple example:
```
if True is False:
print("I'm so confused")
else:
print("Everything is right with the world")
```
```
if True is False:
print("I'm so confused")
else:
print("Everything is right with the world")
```
It is always good practice to handle all cases explicitly. **_Conditional fall through_** is a common source of bugs.
Sometimes we wish to test multiple conditions. Use `if`, `elif`, and `else`.
```
my_favorite = 'pie'
if my_favorite is 'cake':
print("He likes cake! I'll start making a double chocolate velvet cake right now!")
elif my_favorite is 'pie':
print("He likes pie! I'll start making a cherry pie right now!")
else:
print("He likes " + my_favorite + ". I don't know how to make that.")
```
```
my_favorite = 'pie'
if my_favorite is 'cake':
print("He likes cake! I'll start making a double chocolate velvet cake right now!")
elif my_favorite is 'pie':
print("He likes pie! I'll start making a cherry pie right now!")
else:
print("He likes " + my_favorite + ". I don't know how to make that.")
```
**Note**: There is a big difference between the above using `elif` and this code that uses sequential `if`s:
```
if my_favorite is 'cake':
print("He likes cake! I'll start making a double chocolate velvet cake right now!")
if my_favorite is 'pie':
print("He likes pie! I'll start making a cherry pie right now!")
else:
print("He likes " + my_favorite + ". I don't know how to make that.")
```
Before you run, the cell, can you describe how these two blocks differ in their outcomes?
```
if my_favorite is 'cake':
print("He likes cake! I'll start making a double chocolate velvet cake right now!")
if my_favorite is 'pie':
print("He likes pie! I'll start making a cherry pie right now!")
else:
print("He likes " + my_favorite + ". I don't know how to make that.")
```
**Conditionals** can take ``and`` and ``or`` and ``not``. E.g.
```
my_favorite = 'pie'
if my_favorite is 'cake' or my_favorite is 'pie':
print(my_favorite + " : I have a recipe for that!")
else:
print("Ew! Who eats that?")
```
```
my_favorite = 'pie'
if my_favorite is 'cake' or my_favorite is 'pie':
print(my_favorite + " : I have a recipe for that!")
else:
print("Ew! Who eats that?")
```
### 2.3.2 For
For loops are the standard loop, though `while` is also common. For has the general form:
```
for items in list:
do stuff
```
**NOTICE THE INDENTATION! INDENTING IS AN IMPORTANT PART OF Python's SYNTAX**
For loops and collections like tuples, lists and dictionaries are natural friends.
```
instructors
for instructor in instructors:
print(instructor)
print(instructor)
```
```
instructors
for instructor in instructors:
print(instructor)
```
Note that after the **_for_** loop has ended, the `instructor` variable remains defined and contains the last value of the list that was iterated over.
```
instructor
```
You can combine loops and conditionals:
```
for instructor in instructors:
if instructor.endswith('Clown'):
print(instructor + " doesn't sound like a real instructor name!")
else:
print(instructor + " is so smart... all those gooey brains!")
```
```
for instructor in instructors:
if instructor.endswith('Hacker'):
print(instructor + " doesn't sound like a real instructor name!")
else:
print(instructor + " is so smart... all those gooey brains!")
```
Dictionaries can use the `keys` method for iterating.
```
my_dict.keys()
for key in my_dict.keys():
if len(key) > 4:
print(my_dict[key])
```
```
my_dict.keys()
for key in my_dict.keys():
if len(key) > 4:
print(my_dict[key])
```
#### 2.3.2.1 range()
Manually constructing a list of sequential numbers is a total pain. A total pain. So Python has a **_function_** called `range` that simplifies the creation of **_lists_** that contain a sequence. Let's see it in action! Note that if we want a sequence from 0 to 2, inclusive, we call the `range` function with an argument of `3`. This is like the upper bound in **_slicing_** - it is always 1 plus the maximum value you want in the list.
```
range(3)
```
```
range(3)
```
Wait up, hoss. That result doesn't look like a list! True. However, it acts identically to a list, but works a little different under the hood to save memory. The equivalent hand made list would look like this:
```
[0, 1, 2]
```
```
[0, 1, 2]
```
We can convert a `range` to a `list` by using the `list` type cast **_function_**.
```
list(range(3))
```
```
list(range(3))
```
Notice that Python (in the newest versions, e.g. 3+) has an object type that is a range. This saves memory and speeds up calculations vs. an explicit representation of a range as a list - but it can be automagically converted to a list on the fly by Python. To show the contents as a `list` we can use the type cast like with the tuple above.
Sometimes, in older Python docs, you will see `xrange`. In Python 2, `xrange` returned the memory-saving range object while `range` returned an actual list. Beware of this!
```
list(range(3))
```
Remember earlier with slicing, the syntax `:3` meant `[0, 1, 2]`? Well, the same upper bound philosophy applies here.
```
xs = [0, 1, 2]
for x in xs[0:1]:
if x < 2:
print(x)
else:
pass
```
```
xs = [0, 1, 2]
for x in xs[0:1]:
if x < 2:
print(x)
else:
pass
```
Let's use range to access our instructor list using list element indexing.
```
for index in range(3):
instructor = instructors[index]
if instructor.endswith('Clown'):
print(instructor + " doesn't sound like a real instructor name!")
else:
print(instructor + " is so smart... all those gooey brains!")
```
```
for index in range(5):
instructor = instructors[index]
if instructor.endswith('Hacker'):
print(instructor + " doesn't sound like a real instructor name!")
else:
print(instructor + " is so smart... all those gooey brains!")
```
This would probably be better written as below. Why is it better to use the `len()` function than to hard-code the length of the list?
```
for index in range(len(instructors)):
instructor = instructors[index]
if instructor.endswith('Hacker'):
print(instructor + " doesn't sound like a real instructor name!")
else:
print(instructor + " is so smart... all those gooey brains!")
```
```
for index in range(len(instructors)):
instructor = instructors[index]
if instructor.endswith('Hacker'):
print(instructor + " doesn't sound like a real instructor name!")
else:
print(instructor + " is so smart... all those gooey brains!")
```
But in all, it isn't very Pythonesque to use indexes like that (unless you have another reason in the loop) and you would opt instead for the `instructor in instructors` form.
More often, you are doing something with the numbers that requires them to be integers, e.g. math.
```
sum = 0
for i in range(10):
sum += i
print(sum)
```
Before we leave the topic of `range()`, let's take a quick look at the documentation for it [here](https://docs.python.org/3.3/library/stdtypes.html?highlight=range#ranges). Notice, it has another calling semantic than the one we have been using.
We have been using this version:
```
range(stop)
```
Where the list will end at `stop` minus 1. There is another way the `range` **_function_** can be called which is to give it an inclusive `start` and an exclusive `stop`:
```
range(start, stop)
```
This returns a list of numbers that goes from `start` to `stop` minus 1.
Let's look at a quick example:
```
range(1, 9)
```
```
range(1, 9)
```
What is the difference between `range(3)` and `range(0, 3)`? Use the cells below to experiment. It might help to **_typecast_** the resulting object to a `list` so you can see the result more clearly, e.g. `list(range(3))`
#### 2.3.2.2 For loops can be nested
You can put a for loop _inside_ another for loop. This is called _nesting_. Think of it like the [Matryoshka dolls](https://en.wikipedia.org/wiki/Matryoshka_doll). The example below has a nested loop that counts to four each time the outer loop counts up one. The `print` **_function_** shows the value of the outside loop iterator `i` and the inside loop iterator `j` and the product of the two values. Notice how the inside loop runs through 1 to 3 for each value of the outside loop.
We use some string formatting for the `print` statement. These are called **_f-strings_** because there is an `f` before the string. Don't worry too much about the `print` function statement here as it isn't the point of this example. _Advanced topic_: for more on formatting strings, see [here](https://docs.python.org/3/tutorial/inputoutput.html#fancier-output-formatting).
```
for i in range(1, 4):
for j in range(1, 4):
print(f'{i} * {j} = {i * j}')
```
```
for i in range(1, 4):
for j in range(1, 4):
print(f'{i} * {j} = {i * j}')
```
#### 2.3.2.3 You can exit loops early if a condition is met
Sometimes, in a for loop, you experience a condition where you want to terminate any further iterations of the loop. The **_reserved word_** `break` will completely exit a for loop. In this example, we exit the for loop when the iteration variable `i` is equal to the integer 4.
```
for i in range(10):
if i == 4:
break
i
```
```
for i in range(10):
if i == 4:
break
i
```
#### 2.3.2.4 You can skip stuff in a loop with `continue`
Sometimes, in a for loop, you want to skip certain elements. The `continue` statement will effectively skip any further statements for that element in a list. Below, we sum the numbers from `0` to `9` and skip the value `5`.
```
sum = 0
for i in range(10):
if i == 5:
continue
else:
print(i)
sum += i
print("sum is ", sum)
```
```
sum = 0
for i in range(10):
if i == 5:
continue
else:
print(i)
sum += i
print("sum is ", sum)
```
#### 2.3.2.5 You can iterate over letters in a string
Strings are basically a list. Therefore, you can use a for loop to iterate over the characters in a string. Note that `c` is a typical variable name for characters in a string. Generally, one letter variable names are not a good thing.
```
my_string = "caffeine"
for c in my_string:
print(c)
```
```
my_string = "caffeine"
for c in my_string:
print(c)
```
## 2.4 The Zen of Python
Finally, let's end with a philosophy of Python. This is a fun [Easter Egg](https://en.wikipedia.org/wiki/Easter_egg_(media)), to see it, `import this`:
```
import this
```
## 2.5 References
Some links to references from content in this notebook are consolidated here for easy access. Enjoy!
* [Using equations in Markdown in Jupyter notebooks](https://medium.com/analytics-vidhya/writing-math-equations-in-jupyter-notebook-a-naive-introduction-a5ce87b9a214)
* [How numbers are represented in a computer](http://steve.hollasch.net/cgindex/coding/ieeefloat.html)
* [Grace Hopper](https://en.wikipedia.org/wiki/Grace_Hopper) and [her photo of a `bug` in her notebook](https://en.wikipedia.org/wiki/Grace_Hopper#/media/File:First_Computer_Bug,_1945.jpg)
* Programmer culture touchpoint: the word [grok](https://en.wikipedia.org/wiki/Grok)
* Converting code from one programming language to another is called [Porting](https://en.wikipedia.org/wiki/Porting)
* _slices_ and _ranges_ in Python are known as **[_half-open intervals_](https://en.wikipedia.org/wiki/Interval_(mathematics))**
* [Python documentation for lists](https://docs.python.org/3/tutorial/datastructures.html#more-on-lists)
* [Negative list indices in Python](https://googlethatforyou.com?q=negative%20indexing%20in%20python)
* [Python documentation for dictionaries](https://docs.python.org/3/tutorial/datastructures.html#dictionaries)
* [Python documentation for the `range` function](https://docs.python.org/3.3/library/stdtypes.html?highlight=range#ranges)
* [Third party documentation for `if` statements](https://www.w3schools.com/python/python_conditions.asp)
* [Third party documentation for `for` loops](https://www.w3schools.com/python/python_for_loops.asp)
* [Third party documentation for functions](https://www.w3schools.com/python/python_functions.asp)
* The correct name for the "Russian Nesting Doll" is a "[Matryoshka doll](https://en.wikipedia.org/wiki/Matryoshka_doll)"
* [Formatting strings to include values of variables](https://docs.python.org/3/tutorial/inputoutput.html#fancier-output-formatting)
* [Zen of Python](https://www.python.org/dev/peps/pep-0020/)
* [Easter Egg](https://en.wikipedia.org/wiki/Easter_egg_(media))
## 2.6 Breakout for Data Structures and Flow Control
### 2.6.1 The FizzBuzz task
Let's do this! **FizzBuzz** is our first task for today. **FizzBuzz** is a common toy programming problem that is often used in software engineering job interviews. Today, we are not after the _most compact_, _most clever_, or even the _most beautiful_ solution. Your goal is to solve the problem using **_for_** and **_if_** as your primary tools. You will likely also want to use the `%` operator. Before we describe **FizzBuzz**, let's talk about the **modulo** operation.
If you recall from yesterday, you may have experimented with the **_[modulus](https://en.wikipedia.org/wiki/Modulo_operation)_** or `%` operator. When used between two **_integer_** values, it returns the integer remainder of division. Let's start with a simple example of `5` **modulo** `3`:
```
5 % 3
```
```
5 % 3
```
Let's do another example... What is the result of the following:
```
10 % 5
```
```
10 % 5
```
Now, on to **FizzBuzz**. No it isn't the high-test caffeine cola you might need right now. Instead, it is a challenge to output certain text as the code iterates over elements in a list. Here is the formal definition of the problem that we will use today.
_"Write a program that prints the numbers from 1 to 100. But for multiples of three print `Fizz` instead of the number and for the multiples of five print `Buzz`. For numbers which are multiples of both three and five print `FizzBuzz`."_
To get started, you will need a `for` loop and it will probably use a `range` list. Inside of the for loop, you will likely have at least two `if` statements that use the `%` operator followed by a `print`.
For advanced folks, a hint here is that you might be able to avoid some `if` statements by using the `end` parameter to the `print` function. Notice how these two cell blocks output differently.
```
print("Fizz")
print("Buzz")
```
In another cell:
```
print("Fizz", end="")
print("Buzz", end="")
print()
```
#### 2.6.1.1 Some possible FizzBuzz solutions
There are many many solutions for the **_FizzBuzz_** problem. Some are more compact, or elegant than others. We aren't going for compact or elegant, just working.
Here is my solution which is compact but by no means the only solution.
```
for number in range(1, 101):
if number % 3 is 0:
print('Fizz', end='')
if number % 5 is 0:
print('Buzz', end='')
if number % 3 is not 0 and number % 5 is not 0:
print(number, end='')
print()
```
```
for number in range(1, 101):
if number % 3 is 0:
print('Fizz', end='')
if number % 5 is 0:
print('Buzz', end='')
if number % 3 is not 0 and number % 5 is not 0:
print(number, end='')
print()
```
Perhaps a more common approach is to do something like:
```
for number in range(1, 101):
if number % 3 is 0 and number % 5 is 0:
print('FizzBuzz')
elif number % 3 is 0:
print('Fizz')
elif number % 5 is 0:
print('Buzz')
else:
print(number)
print()
```
```
for number in range(1, 101):
if number % 3 is 0 and number % 5 is 0:
print('FizzBuzz')
elif number % 3 is 0:
print('Fizz')
elif number % 5 is 0:
print('Buzz')
else:
print(number)
print()
```
| github_jupyter |
# Tutorial: Confidence Intervals
By Delaney Granizo-Mackenzie, Jeremiah Johnson, and Gideon Wulfsohn
Part of the Quantopian Lecture Series:
http://www.quantopian.com/lectures
http://github.com/quantopian/research_public
Notebook released under the Creative Commons Attribution 4.0 License.
## Sample Mean vs. Population Mean
Sample means and population means are different. Generally, we want to know about a population mean, but we can only calculate a sample mean. We then want to use the sample mean to estimate the population mean. We use confidence intervals in an attempt to determine how accurately our sample mean estimates the population mean.
## Confidence Interval
If I asked you to estimate the average height of a woman in the USA, you might do this by measuring 10 women and estimating that the mean of that sample was close to the population. Let's try that.
```
import numpy as np
import seaborn as sns
from scipy import stats
import matplotlib.pyplot as plt
# We'll set a seed here so our runs are consistent
np.random.seed(10)
# Let's define some 'true' population parameters, we'll pretend we don't know these.
POPULATION_MU = 64
POPULATION_SIGMA = 5
# Generate our sample by drawing from the population distribution
sample_size = 10
heights = np.random.normal(POPULATION_MU, POPULATION_SIGMA, sample_size)
print heights
mean_height = np.mean(heights)
print 'sample mean: ', mean_height
```
Unfortunately simply reporting the sample mean doesn't do much for us, as we don't know how it relates to the population mean. To get a sense for how it might relate, we can look for how much variance there is in our sample. Higher variance indicates instability and uncertainty.
```
print 'sample standard deviation: ', np.std(heights)
```
This still doesn't do that much for us, to really get a sense of how our sample mean relates to the population mean we need to compute a standard error. The standard error is a measure of the variance of the sample mean.
#### IMPORTANT
Computing a standard error involves assuming that the way you sample is unbiased, and that the data are normal and independent. If these conditions are violated, your standard error will be wrong. There are ways of testing for this and correcting.
The formula for standard error is.
$$SE = \frac{\sigma}{\sqrt{n}}$$
Where $\sigma$ is the sample standard deviation and $n$ is the number of samples.
```
SE = np.std(heights) / np.sqrt(sample_size)
print 'standard error: ', SE
```
There is a function in scipy's stats library for calculating the standard error. Note that this function by default contains a degrees-of-freedom correction that is often not necessary (for large enough samples, it is effectively irrelevant). You can omit the correction by setting the parameter ddof to 0.
```
stats.sem(heights, ddof=0)
```
Assuming our data are normally distributed, we can use the standard error to compute our confidence interval. To do this we first set our desired confidence level, say 95%, we then determine how many standard deviations contain 95% of the mass. Turns out that the 95% of the mass lies between -1.96 and 1.96 on a standard normal distribution. When the samples are large enough (generally > 30 is taken as a threshold) the Central Limit Theorem applies and normality can be safely assumed; if sample sizes are smaller, a safer approach is to use a $t$-distribution with appropriately specified degrees of freedom. The actual way to compute the values is by using a cumulative distribution function (CDF). If you are not familiar with CDFs, inverse CDFs, and their companion PDFs, you can read about them [here](https://en.wikipedia.org/wiki/Probability_density_function) and [here](https://en.wikipedia.org/wiki/Cumulative_distribution_function). Look [here](https://en.wikipedia.org/wiki/Student%27s_t-distribution) for information on the $t$-distribution. We can check the 95% number using one of the Python functions.
NOTE: Be careful when applying the Central Limit Theorem, however, as many datasets in finance are fundamentally non-normal and it is not safe to apply the theorem casually or without attention to subtlety.
We can visualize the 95% mass bounds here.
```
# Set up the x axis
x = np.linspace(-5,5,100)
# Here's the normal distribution
y = stats.norm.pdf(x,0,1)
plt.plot(x,y)
# Plot our bounds
plt.vlines(-1.96, 0, 1, colors='r', linestyles='dashed')
plt.vlines(1.96, 0, 1, colors='r', linestyles='dashed')
# Shade the area
fill_x = np.linspace(-1.96, 1.96, 500)
fill_y = stats.norm.pdf(fill_x, 0, 1)
plt.fill_between(fill_x, fill_y)
plt.xlabel('$\sigma$')
plt.ylabel('Normal PDF');
```
### Here's the trick
Now, rather than reporting our sample mean without any sense of the probability of it being correct, we can compute an interval and be much more confident that the population mean lies in that interval. To do this we take our sample mean $\mu$ and report $\left(\mu-1.96 SE , \mu+1.96SE\right)$.
This works because assuming normality, that interval will contain the population mean 95% of the time.
### SUBTLETY:
In any given case, the true value of the estimate and the bounds of the confidence interval are fixed. It is incorrect to say that "The national mean female height is between 63 and 65 inches with 95% probability," but unfortunately this is a very common misinterpretation. Rather, the 95% refers instead to the fact that over many computations of a 95% confidence interval, the true value will be in the interval in 95% of the cases (assuming correct calibration of the confidence interval, which we will discuss later). But in fact for a single sample and the single confidence interval computed from it, we have no way of assessing the probability that the interval contains the population mean. The visualization below demonstrates this.
In the code block below, there are two things to note. First, although the sample size is sufficiently large to assume normality, we're using a $t$-distribution, just to demonstrate how it is used. Second, the $t$-values needed (analogous to the $\pm1.96$ used above) are being calculated from the inverted cumulative density function, the ppf in scipy.stats. The $t$-distribution requires the extra parameter degrees of freedom (d.o.f), which is the size of the sample minus one.
```
np.random.seed(8309)
n = 100 # number of samples to take
samples = [np.random.normal(loc=0, scale=1, size=100) for _ in range(n)]
fig, ax = plt.subplots(figsize=(10, 7))
for i in np.arange(1, n, 1):
sample_mean = np.mean(samples[i]) # calculate sample mean
se = stats.sem(samples[i]) # calculate sample standard error
h = se*stats.t.ppf((1+0.95)/2, len(samples[i])-1) # calculate t; 2nd param is d.o.f.
sample_ci = [sample_mean - h, sample_mean + h]
if ((sample_ci[0] <= 0) and (0 <= sample_ci[1])):
plt.plot((sample_ci[0], sample_ci[1]), (i, i), color='blue', linewidth=1);
plt.plot(np.mean(samples[i]), i, 'bo');
else:
plt.plot((sample_ci[0], sample_ci[1]), (i, i), color='red', linewidth=1);
plt.plot(np.mean(samples[i]), i, 'ro');
plt.axvline(x=0, ymin=0, ymax=1, linestyle='--', label = 'Population Mean');
plt.legend(loc='best');
plt.title('100 95% Confidence Intervals for mean of 0');
```
### Further Reading
This is only a brief introduction, Wikipedia has excellent articles detailing these subjects in greater depth. Let's go back to our heights example. Since the sample size is small, we'll use a $t$-test.
```
# standard error SE was already calculated
t_val = stats.t.ppf((1+0.95)/2, 9) # d.o.f. = 10 - 1
print 'sample mean height:', mean_height
print 't-value:', t_val
print 'standard error:', SE
print 'confidence interval:', (mean_height - t_val * SE, mean_height + t_val * SE)
```
There is a built-in function in scipy.stats for computing the interval. Remember to specify the degrees of freedom.
```
print '99% confidence interval:', stats.t.interval(0.99, df=9,
loc=mean_height, scale=SE)
print '95% confidence interval:', stats.t.interval(0.95, df = 9,
loc=mean_height, scale=SE)
print '80% confidence interval:', stats.t.interval(0.8, df = 9,
loc=mean_height, scale=SE)
```
Note that as your confidence increases, the interval necessarily widens.
Assuming normality, there's also a built in function that will compute our interval for us. This time you don't need to specify the degrees of freedom. Note that at a corresponding level of confidence, the interval calculated using the normal distribution is narrower than the interval calculated using the $t$-distribution.
```
print stats.norm.interval(0.99, loc=mean_height, scale=SE)
print stats.norm.interval(0.95, loc=mean_height, scale=SE)
print stats.norm.interval(0.80, loc=mean_height, scale=SE)
```
## What does this mean?
Confidence intervals allow us to set our desired confidence, and then report a range that will likely contain the population mean. The higher our desired confidence, the larger range we report. In general, one can never report a single point value, because the probability that any given point is the true population mean is incredibly small. Let's see how our intervals tighten as we change sample size.
```
np.random.seed(10)
sample_sizes = [10, 100, 1000]
for s in sample_sizes:
heights = np.random.normal(POPULATION_MU, POPULATION_SIGMA, s)
SE = np.std(heights) / np.sqrt(s)
print stats.norm.interval(0.95, loc=mean_height, scale=SE)
```
## Visualizing Confidence Intervals
Here is some code to visualize a confidence interval on a graph. Feel free to play around with it.
```
sample_size = 100
heights = np.random.normal(POPULATION_MU, POPULATION_SIGMA, sample_size)
SE = np.std(heights) / np.sqrt(sample_size)
(l, u) = stats.norm.interval(0.95, loc=np.mean(heights), scale=SE)
print (l, u)
plt.hist(heights, bins=20)
plt.xlabel('Height')
plt.ylabel('Frequency')
# Just for plotting
y_height = 5
plt.plot([l, u], [y_height, y_height], '-', color='r', linewidth=4, label='Confidence Interval')
plt.plot(np.mean(heights), y_height, 'o', color='r', markersize=10);
```
## Miscalibration and Violation of Assumptions
The computation of a standard deviation, standard error, and confidence interval all rely on certain assumptions. If these assumptions are violated then the 95% confidence interval will not necessarily contain the population parameter 95% of the time. We say that in this case the confidence interval is miscalibrated. Here is an example.
### Example: Autocorrelated Data
If your data generating process is autocorrelated, then estimates of standard deviation will be wrong. This is because autocorrelated processes tend to produce more extreme values than normally distributed processes. This is due to new values being dependent on previous values; series that are already far from the mean are likely to stay far from the mean. To check this we'll generate some autocorrelated data according to the following process.
$$X_t = \theta X_{t-1} + \epsilon$$
$$\epsilon \sim \mathcal{N}(0,1)$$
```
def generate_autocorrelated_data(theta, mu, sigma, N):
# Initialize the array
X = np.zeros((N, 1))
for t in range(1, N):
# X_t = theta * X_{t-1} + epsilon
X[t] = theta * X[t-1] + np.random.normal(mu, sigma)
return X
X = generate_autocorrelated_data(0.5, 0, 1, 100)
plt.plot(X);
plt.xlabel('t');
plt.ylabel('X[t]');
```
It turns out that for larger sample sizes, you should see the sample mean asymptotically converge to zero. This is because the process is still centered around zero, but let's check if that's true. We'll vary the number of samples drawn, and look for convergence as we increase sample size.
```
sample_means = np.zeros(200-1)
for i in range(1, 200):
X = generate_autocorrelated_data(0.5, 0, 1, i * 10)
sample_means[i-1] = np.mean(X)
plt.bar(range(1, 200), sample_means);
plt.xlabel('Sample Size');
plt.ylabel('Sample Mean');
```
Definitely looks like there's some convergence, we can also check what the mean of the sample means is.
```
np.mean(sample_means)
```
Pretty close to zero. We could also derive symbolically that the mean is zero, but let's assume that we've convinced ourselves with the simple empirical analysis. Now that we know the population mean, we can check the calibration of confidence intervals. First we'll write two helper functions which compute a naive interval for some input data, and check whether the interval contains the true mean, 0.
```
def compute_unadjusted_interval(X):
T = len(X)
# Compute mu and sigma MLE
mu = np.mean(X)
sigma = np.std(X)
SE = sigma / np.sqrt(T)
# Compute the bounds
return stats.norm.interval(0.95, loc=mu, scale=SE)
# We'll make a function that returns true when the computed bounds contain 0
def check_unadjusted_coverage(X):
l, u = compute_unadjusted_interval(X)
# Check to make sure l <= 0 <= u
if l <= 0 and u >= 0:
return True
else:
return False
```
Now we'll run many trials, in each we'll sample some data, compute a confidence interval, and then check if the confidence interval contains the population mean. We'll keep a running tally, and we should expect to see 95% of the trials succeed if the intervals are calibrated correctly.
```
T = 100
trials = 500
times_correct = 0
for i in range(trials):
X = generate_autocorrelated_data(0.5, 0, 1, T)
if check_unadjusted_coverage(X):
times_correct += 1
print 'Empirical Coverage: ', times_correct/float(trials)
print 'Expected Coverage: ', 0.95
```
Clearly the coverage is wrong. In this case we'd need to do what's known as a Newey-West correction on our standard error estimate to account for the autocorrelation. In practice it's important to check for the assumptions you make. It is quick and easy to check if your data are stationary (which implies not autocorrelated), and it can save you a lot of pain and suffering to do so. A normality test such as `Jarque Bera` will also be a good idea, as it may detect certain distribution properties which may violate assumptions of many following statistical analyses.
*This presentation is for informational purposes only and does not constitute an offer to sell, a solicitation to buy, or a recommendation for any security; nor does it constitute an offer to provide investment advisory or other services by Quantopian, Inc. ("Quantopian"). Nothing contained herein constitutes investment advice or offers any opinion with respect to the suitability of any security, and any views expressed herein should not be taken as advice to buy, sell, or hold any security or as an endorsement of any security or company. In preparing the information contained herein, Quantopian, Inc. has not taken into account the investment needs, objectives, and financial circumstances of any particular investor. Any views expressed and data illustrated herein were prepared based upon information, believed to be reliable, available to Quantopian, Inc. at the time of publication. Quantopian makes no guarantees as to their accuracy or completeness. All information is subject to change and may quickly become unreliable for various reasons, including changes in market conditions or economic circumstances.*
| github_jupyter |

[](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/release_notebooks/NLU1.1.2_Bengali_ner_Hindi_Embeddings_30_new_models.ipynb)
```
import os
from sklearn.metrics import classification_report
! apt-get update -qq > /dev/null
# Install java
! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
! pip install nlu pyspark==2.4.7 > /dev/null
import nlu
import pandas as pd
```
#### [Named Entity Recognition for Bengali (GloVe 840B 300d)](https://nlp.johnsnowlabs.com/2021/01/27/ner_jifs_glove_840B_300d_bn.html)
```
# Bengali NER demo. English translation of the input:
# "It began to be widely used in the United States in the early '90s."
nlu.load("bn.ner").predict("৯০ এর দশকের শুরুর দিকে বৃহৎ আকারে মার্কিন যুক্তরাষ্ট্রে এর প্রয়োগের প্রক্রিয়া শুরু হয়'")
```
#### [Bengali Lemmatizer](https://nlp.johnsnowlabs.com/2021/01/20/lemma_bn.html)
```
# Bengali lemmatizer demo. English translation of the input:
# "One morning in the marble-decorated building of Vaidyanatha, an obese monk was engaged in the enchantment of Duis and the milk service of one and a half Vaidyanatha. Give me two to eat"
nlu.load("bn.lemma").predict("একদিন প্রাতে বৈদ্যনাথের মার্বলমণ্ডিত দালানে একটি স্থূলোদর সন্ন্যাসী দুইসের মোহনভোগ এবং দেড়সের দুগ্ধ সেবায় নিযুক্ত আছে বৈদ্যনাথ গায়ে একখানি চাদর দিয়া জোড়করে একান্ত বিনীতভাবে ভূতলে বসিয়া ভক্তিভরে পবিত্র ভোজনব্যাপার নিরীক্ষণ করিতেছিলেন এমন সময় কোনোমতে দ্বারীদের দৃষ্টি এড়াইয়া জীর্ণদেহ বালক সহিত একটি অতি শীর্ণকায়া রমণী গৃহে প্রবেশ করিয়া ক্ষীণস্বরে কহিল বাবু দুটি খেতে দাও")
```
#### [Japanese Lemmatizer](https://nlp.johnsnowlabs.com/2021/01/15/lemma_ja.html)
```
# Japanese lemmatizer demo. English translation of the input:
# "Some residents were uncomfortable with this, but it seems that no one is now openly protesting or protesting."
nlu.load("ja.lemma").predict("これに不快感を示す住民はいましたが,現在,表立って反対や抗議の声を挙げている住民はいないようです。")
```
#### [Amharic Lemmatizer](https://nlp.johnsnowlabs.com/2021/01/20/lemma_am.html)
```
# Amharic lemmatizer demo. English translation of the input (per the original
# notebook): "Bookmark the permalink." — TODO confirm this translation.
nlu.load("am.lemma").predict("መጽሐፉን መጽሐፍ ኡ ን አስያዛት አስያዝ ኧ ኣት ።")
```
#### [Bhojpuri Lemmatizer](https://nlp.johnsnowlabs.com/2021/01/18/lemma_bh.html)
```
# Bhojpuri lemmatizer demo. English translation of the input:
# "In this event, participation of World Bhojpuri Conference, Purvanchal Ekta Manch, Veer Kunwar Singh Foundation, Purvanchal Bhojpuri Mahasabha, and Herf - Media."
nlu.load("bh.lemma").predict("एह आयोजन में विश्व भोजपुरी सम्मेलन , पूर्वांचल एकता मंच , वीर कुँवर सिंह फाउन्डेशन , पूर्वांचल भोजपुरी महासभा , अउर हर्फ - मीडिया के सहभागिता बा ।")
```
#### [Named Entity Recognition - BERT Tiny (OntoNotes)](https://nlp.johnsnowlabs.com/2020/12/05/onto_small_bert_L2_128_en.html)
```
# OntoNotes NER demo (BERT Tiny, 2 layers / 128 dim) on a multi-sentence biography.
nlu.load("en.ner.onto.bert.small_l2_128").predict("""William Henry Gates III (born October 28, 1955) is an American business magnate,
software developer, investor, and philanthropist. He is best known as the co-founder of Microsoft Corporation. During his career at Microsoft,
Gates held the positions of chairman, chief executive officer (CEO), president and chief software architect,
while also being the largest individual shareholder until May 2014.
He is one of the best-known entrepreneurs and pioneers of the microcomputer revolution of the 1970s and 1980s. Born and raised in Seattle, Washington, Gates co-founded Microsoft with childhood friend Paul Allen in 1975, in Albuquerque, New Mexico;
it went on to become the world's largest personal computer software company. Gates led the company as chairman and CEO until stepping down as CEO in January 2000, but he remained chairman and became chief software architect.
During the late 1990s, Gates had been criticized for his business tactics, which have been considered anti-competitive. This opinion has been upheld by numerous court rulings. In June 2006, Gates announced that he would be transitioning to a part-time
role at Microsoft and full-time work at the Bill & Melinda Gates Foundation, the private charitable foundation that he and his wife, Melinda Gates, established in 2000.
He gradually transferred his duties to Ray Ozzie and Craig Mundie.
He stepped down as chairman of Microsoft in February 2014 and assumed a new post as technology adviser to support the newly appointed CEO Satya Nadella.""",output_level = "document")
```
#### [Named Entity Recognition - BERT Mini (OntoNotes)](https://nlp.johnsnowlabs.com/2020/12/05/onto_small_bert_L4_256_en.html)
```
# OntoNotes NER demo (BERT Mini, 4 layers / 256 dim) on the same biography passage.
nlu.load("en.ner.onto.bert.small_l4_256").predict("""William Henry Gates III (born October 28, 1955) is an American business magnate,
software developer, investor, and philanthropist. He is best known as the co-founder of Microsoft Corporation. During his career at Microsoft,
Gates held the positions of chairman, chief executive officer (CEO), president and chief software architect,
while also being the largest individual shareholder until May 2014.
He is one of the best-known entrepreneurs and pioneers of the microcomputer revolution of the 1970s and 1980s. Born and raised in Seattle, Washington, Gates co-founded Microsoft with childhood friend Paul Allen in 1975, in Albuquerque, New Mexico;
it went on to become the world's largest personal computer software company. Gates led the company as chairman and CEO until stepping down as CEO in January 2000, but he remained chairman and became chief software architect.
During the late 1990s, Gates had been criticized for his business tactics, which have been considered anti-competitive. This opinion has been upheld by numerous court rulings. In June 2006, Gates announced that he would be transitioning to a part-time
role at Microsoft and full-time work at the Bill & Melinda Gates Foundation, the private charitable foundation that he and his wife, Melinda Gates, established in 2000.
He gradually transferred his duties to Ray Ozzie and Craig Mundie.
He stepped down as chairman of Microsoft in February 2014 and assumed a new post as technology adviser to support the newly appointed CEO Satya Nadella.""",output_level = "document")
```
#### [Named Entity Recognition - BERT Small (OntoNotes)](https://nlp.johnsnowlabs.com/2020/12/05/onto_small_bert_L4_512_en.html)
```
# OntoNotes NER demo (BERT Small, 4 layers / 512 dim) on the same biography passage.
nlu.load("en.ner.onto.bert.small_l4_512").predict("""William Henry Gates III (born October 28, 1955) is an American business magnate,
software developer, investor, and philanthropist. He is best known as the co-founder of Microsoft Corporation. During his career at Microsoft,
Gates held the positions of chairman, chief executive officer (CEO), president and chief software architect,
while also being the largest individual shareholder until May 2014.
He is one of the best-known entrepreneurs and pioneers of the microcomputer revolution of the 1970s and 1980s. Born and raised in Seattle, Washington, Gates co-founded Microsoft with childhood friend Paul Allen in 1975, in Albuquerque, New Mexico;
it went on to become the world's largest personal computer software company. Gates led the company as chairman and CEO until stepping down as CEO in January 2000, but he remained chairman and became chief software architect.
During the late 1990s, Gates had been criticized for his business tactics, which have been considered anti-competitive. This opinion has been upheld by numerous court rulings. In June 2006, Gates announced that he would be transitioning to a part-time
role at Microsoft and full-time work at the Bill & Melinda Gates Foundation, the private charitable foundation that he and his wife, Melinda Gates, established in 2000.
He gradually transferred his duties to Ray Ozzie and Craig Mundie.
He stepped down as chairman of Microsoft in February 2014 and assumed a new post as technology adviser to support the newly appointed CEO Satya Nadella.""",output_level = "document")
```
#### [Named Entity Recognition - BERT Medium (OntoNotes)](https://nlp.johnsnowlabs.com/2020/12/05/onto_small_bert_L8_512_en.html)
```
# OntoNotes NER demo (BERT Medium, 8 layers / 512 dim) on the same biography passage.
nlu.load("en.ner.onto.bert.small_l8_512").predict("""William Henry Gates III (born October 28, 1955) is an American business magnate,
software developer, investor, and philanthropist. He is best known as the co-founder of Microsoft Corporation. During his career at Microsoft,
Gates held the positions of chairman, chief executive officer (CEO), president and chief software architect,
while also being the largest individual shareholder until May 2014.
He is one of the best-known entrepreneurs and pioneers of the microcomputer revolution of the 1970s and 1980s. Born and raised in Seattle, Washington, Gates co-founded Microsoft with childhood friend Paul Allen in 1975, in Albuquerque, New Mexico;
it went on to become the world's largest personal computer software company. Gates led the company as chairman and CEO until stepping down as CEO in January 2000, but he remained chairman and became chief software architect.
During the late 1990s, Gates had been criticized for his business tactics, which have been considered anti-competitive. This opinion has been upheld by numerous court rulings. In June 2006, Gates announced that he would be transitioning to a part-time
role at Microsoft and full-time work at the Bill & Melinda Gates Foundation, the private charitable foundation that he and his wife, Melinda Gates, established in 2000.
He gradually transferred his duties to Ray Ozzie and Craig Mundie.
He stepped down as chairman of Microsoft in February 2014 and assumed a new post as technology adviser to support the newly appointed CEO Satya Nadella.""",output_level = "document")
```
#### [Named Entity Recognition - BERT Base (OntoNotes)](https://nlp.johnsnowlabs.com/2020/12/05/onto_bert_base_cased_en.html)
```
# OntoNotes NER demo (BERT Base, cased) on the same biography passage.
nlu.load("en.ner.onto.bert.cased_base").predict("""William Henry Gates III (born October 28, 1955) is an American business magnate,
software developer, investor, and philanthropist. He is best known as the co-founder of Microsoft Corporation. During his career at Microsoft,
Gates held the positions of chairman, chief executive officer (CEO), president and chief software architect,
while also being the largest individual shareholder until May 2014.
He is one of the best-known entrepreneurs and pioneers of the microcomputer revolution of the 1970s and 1980s. Born and raised in Seattle, Washington, Gates co-founded Microsoft with childhood friend Paul Allen in 1975, in Albuquerque, New Mexico;
it went on to become the world's largest personal computer software company. Gates led the company as chairman and CEO until stepping down as CEO in January 2000, but he remained chairman and became chief software architect.
During the late 1990s, Gates had been criticized for his business tactics, which have been considered anti-competitive. This opinion has been upheld by numerous court rulings. In June 2006, Gates announced that he would be transitioning to a part-time
role at Microsoft and full-time work at the Bill & Melinda Gates Foundation, the private charitable foundation that he and his wife, Melinda Gates, established in 2000.
He gradually transferred his duties to Ray Ozzie and Craig Mundie.
He stepped down as chairman of Microsoft in February 2014 and assumed a new post as technology adviser to support the newly appointed CEO Satya Nadella.""",output_level = "document")
```
#### [Named Entity Recognition - BERT Large (OntoNotes)](https://nlp.johnsnowlabs.com/2020/12/05/onto_bert_large_cased_en.html)
```
# OntoNotes NER demo (BERT Large, cased) on the same biography passage.
nlu.load("en.ner.onto.bert.cased_large").predict("""William Henry Gates III (born October 28, 1955) is an American business magnate,
software developer, investor, and philanthropist. He is best known as the co-founder of Microsoft Corporation. During his career at Microsoft,
Gates held the positions of chairman, chief executive officer (CEO), president and chief software architect,
while also being the largest individual shareholder until May 2014.
He is one of the best-known entrepreneurs and pioneers of the microcomputer revolution of the 1970s and 1980s. Born and raised in Seattle, Washington, Gates co-founded Microsoft with childhood friend Paul Allen in 1975, in Albuquerque, New Mexico;
it went on to become the world's largest personal computer software company. Gates led the company as chairman and CEO until stepping down as CEO in January 2000, but he remained chairman and became chief software architect.
During the late 1990s, Gates had been criticized for his business tactics, which have been considered anti-competitive. This opinion has been upheld by numerous court rulings. In June 2006, Gates announced that he would be transitioning to a part-time
role at Microsoft and full-time work at the Bill & Melinda Gates Foundation, the private charitable foundation that he and his wife, Melinda Gates, established in 2000.
He gradually transferred his duties to Ray Ozzie and Craig Mundie.
He stepped down as chairman of Microsoft in February 2014 and assumed a new post as technology adviser to support the newly appointed CEO Satya Nadella.""",output_level = "document")
```
#### [Named Entity Recognition - ELECTRA Small (OntoNotes)](https://nlp.johnsnowlabs.com/2020/12/05/onto_electra_small_uncased_en.html)
```
# OntoNotes NER demo (ELECTRA Small, uncased) on the same biography passage.
nlu.load("en.ner.onto.electra.uncased_small").predict("""William Henry Gates III (born October 28, 1955) is an American business magnate,
software developer, investor, and philanthropist. He is best known as the co-founder of Microsoft Corporation. During his career at Microsoft,
Gates held the positions of chairman, chief executive officer (CEO), president and chief software architect,
while also being the largest individual shareholder until May 2014.
He is one of the best-known entrepreneurs and pioneers of the microcomputer revolution of the 1970s and 1980s. Born and raised in Seattle, Washington, Gates co-founded Microsoft with childhood friend Paul Allen in 1975, in Albuquerque, New Mexico;
it went on to become the world's largest personal computer software company. Gates led the company as chairman and CEO until stepping down as CEO in January 2000, but he remained chairman and became chief software architect.
During the late 1990s, Gates had been criticized for his business tactics, which have been considered anti-competitive. This opinion has been upheld by numerous court rulings. In June 2006, Gates announced that he would be transitioning to a part-time
role at Microsoft and full-time work at the Bill & Melinda Gates Foundation, the private charitable foundation that he and his wife, Melinda Gates, established in 2000.
He gradually transferred his duties to Ray Ozzie and Craig Mundie.
He stepped down as chairman of Microsoft in February 2014 and assumed a new post as technology adviser to support the newly appointed CEO Satya Nadella.""",output_level = "document")
```
#### [Named Entity Recognition - ELECTRA Base (OntoNotes)](https://nlp.johnsnowlabs.com/2020/12/05/onto_electra_base_uncased_en.html)
```
# OntoNotes NER demo (ELECTRA Base, uncased) on the same biography passage.
# Fix: the example text previously ended in "Satya Nadellabase" — a copy/paste
# typo that corrupted the person entity this NER demo is meant to recognize.
nlu.load("en.ner.onto.electra.uncased_base").predict("""William Henry Gates III (born October 28, 1955) is an American business magnate,
software developer, investor, and philanthropist. He is best known as the co-founder of Microsoft Corporation. During his career at Microsoft,
Gates held the positions of chairman, chief executive officer (CEO), president and chief software architect,
while also being the largest individual shareholder until May 2014.
He is one of the best-known entrepreneurs and pioneers of the microcomputer revolution of the 1970s and 1980s. Born and raised in Seattle, Washington, Gates co-founded Microsoft with childhood friend Paul Allen in 1975, in Albuquerque, New Mexico;
it went on to become the world's largest personal computer software company. Gates led the company as chairman and CEO until stepping down as CEO in January 2000, but he remained chairman and became chief software architect.
During the late 1990s, Gates had been criticized for his business tactics, which have been considered anti-competitive. This opinion has been upheld by numerous court rulings. In June 2006, Gates announced that he would be transitioning to a part-time
role at Microsoft and full-time work at the Bill & Melinda Gates Foundation, the private charitable foundation that he and his wife, Melinda Gates, established in 2000.
He gradually transferred his duties to Ray Ozzie and Craig Mundie.
He stepped down as chairman of Microsoft in February 2014 and assumed a new post as technology adviser to support the newly appointed CEO Satya Nadella.""",output_level = "document")
```
#### [Named Entity Recognition - ELECTRA Large (OntoNotes)](https://nlp.johnsnowlabs.com/2020/12/05/onto_electra_large_uncased_en.html)
```
# OntoNotes NER demo (ELECTRA Large, uncased) on the same biography passage.
# Fix: the example text previously ended in "Satya Nadellabase" — a copy/paste
# typo that corrupted the person entity this NER demo is meant to recognize.
nlu.load("en.ner.onto.electra.uncased_large").predict("""William Henry Gates III (born October 28, 1955) is an American business magnate,
software developer, investor, and philanthropist. He is best known as the co-founder of Microsoft Corporation. During his career at Microsoft,
Gates held the positions of chairman, chief executive officer (CEO), president and chief software architect,
while also being the largest individual shareholder until May 2014.
He is one of the best-known entrepreneurs and pioneers of the microcomputer revolution of the 1970s and 1980s. Born and raised in Seattle, Washington, Gates co-founded Microsoft with childhood friend Paul Allen in 1975, in Albuquerque, New Mexico;
it went on to become the world's largest personal computer software company. Gates led the company as chairman and CEO until stepping down as CEO in January 2000, but he remained chairman and became chief software architect.
During the late 1990s, Gates had been criticized for his business tactics, which have been considered anti-competitive. This opinion has been upheld by numerous court rulings. In June 2006, Gates announced that he would be transitioning to a part-time
role at Microsoft and full-time work at the Bill & Melinda Gates Foundation, the private charitable foundation that he and his wife, Melinda Gates, established in 2000.
He gradually transferred his duties to Ray Ozzie and Craig Mundie.
He stepped down as chairman of Microsoft in February 2014 and assumed a new post as technology adviser to support the newly appointed CEO Satya Nadella.""",output_level = "document")
```
#### [Recognize Entities OntoNotes - BERT Tiny](https://nlp.johnsnowlabs.com/2020/12/09/onto_recognize_entities_bert_tiny_en.html)
```
# Full OntoNotes NER pipeline (BERT Tiny backbone) on a short biography sentence.
nlu.load("en.ner.onto.bert.tiny").predict("Johnson first entered politics when elected in 2001 as a member of Parliament. He then served eight years as the mayor of London, from 2008 to 2016, before rejoining Parliament.",output_level="document")
```
#### [Recognize Entities OntoNotes - BERT Mini](https://nlp.johnsnowlabs.com/2020/12/09/onto_recognize_entities_bert_mini_en.html)
```
# Full OntoNotes NER pipeline (BERT Mini backbone) on the same sentence.
nlu.load("en.ner.onto.bert.mini").predict("Johnson first entered politics when elected in 2001 as a member of Parliament. He then served eight years as the mayor of London, from 2008 to 2016, before rejoining Parliament.",output_level="document")
```
#### [Recognize Entities OntoNotes - BERT Small](https://nlp.johnsnowlabs.com/2020/12/09/onto_recognize_entities_bert_small_en.html)
```
# Full OntoNotes NER pipeline (BERT Small backbone) on the same sentence.
nlu.load("en.ner.onto.bert.small").predict("Johnson first entered politics when elected in 2001 as a member of Parliament. He then served eight years as the mayor of London, from 2008 to 2016, before rejoining Parliament.",output_level="document")
```
#### [Recognize Entities OntoNotes - BERT Medium](https://nlp.johnsnowlabs.com/2020/12/09/onto_recognize_entities_bert_medium_en.html)
```
# Full OntoNotes NER pipeline (BERT Medium backbone) on the same sentence.
nlu.load("en.ner.onto.bert.medium").predict("Johnson first entered politics when elected in 2001 as a member of Parliament. He then served eight years as the mayor of London, from 2008 to 2016, before rejoining Parliament.",output_level="document")
```
#### [Recognize Entities OntoNotes - BERT Base](https://nlp.johnsnowlabs.com/2020/12/09/onto_recognize_entities_bert_base_en.html)
```
# Full OntoNotes NER pipeline (BERT Base backbone) on the same sentence.
nlu.load("en.ner.onto.bert.base").predict("Johnson first entered politics when elected in 2001 as a member of Parliament. He then served eight years as the mayor of London, from 2008 to 2016, before rejoining Parliament.",output_level="document")
```
#### [Recognize Entities OntoNotes - BERT Large](https://nlp.johnsnowlabs.com/2020/12/09/onto_recognize_entities_bert_large_en.html)
```
# Full OntoNotes NER pipeline (BERT Large backbone) on the same sentence.
nlu.load("en.ner.onto.bert.large").predict("Johnson first entered politics when elected in 2001 as a member of Parliament. He then served eight years as the mayor of London, from 2008 to 2016, before rejoining Parliament.",output_level="document")
```
#### [Recognize Entities OntoNotes - ELECTRA Small](https://nlp.johnsnowlabs.com/2020/12/09/onto_recognize_entities_electra_small_en.html)
```
# Full OntoNotes NER pipeline (ELECTRA Small backbone) on the same sentence.
nlu.load("en.ner.onto.electra.small").predict("Johnson first entered politics when elected in 2001 as a member of Parliament. He then served eight years as the mayor of London, from 2008 to 2016, before rejoining Parliament.",output_level="document")
```
#### [Recognize Entities OntoNotes - ELECTRA Base](https://nlp.johnsnowlabs.com/2020/12/09/onto_recognize_entities_electra_base_en.html)
```
# Full OntoNotes NER pipeline (ELECTRA Base backbone) on the same sentence.
nlu.load("en.ner.onto.electra.base").predict("Johnson first entered politics when elected in 2001 as a member of Parliament. He then served eight years as the mayor of London, from 2008 to 2016, before rejoining Parliament.",output_level="document")
```
#### [Recognize Entities OntoNotes - ELECTRA Large](https://nlp.johnsnowlabs.com/2020/12/09/onto_recognize_entities_electra_large_en.html)
```
# Full OntoNotes NER pipeline on the same sentence.
# NOTE(review): the heading says "ELECTRA Large" but this alias is "en.ner.onto.large"
# (no "electra"), unlike the sibling cells — verify it maps to the ELECTRA Large model.
nlu.load("en.ner.onto.large").predict("Johnson first entered politics when elected in 2001 as a member of Parliament. He then served eight years as the mayor of London, from 2008 to 2016, before rejoining Parliament.",output_level="document")
```
| github_jupyter |
# GOES-16 IR channel subregion plot (using reprojected data)
This jupyter notebook shows how to make a sub-region plot of a **reprojected** IR channel of GOES-16.
Import the GOES package.
```
import GOES
```
Set path and name of file that will be read.
```
# Directory and file name of the GOES-16 ABI L2 CMIPF product (channel 13) to read.
path = '/home/joao/Downloads/GOES-16/ABI/'
file = 'OR_ABI-L2-CMIPF-M6C13_G16_s20200782000176_e20200782009496_c20200782010003.nc'
```
Reads the file.
```
# Open the netCDF file through the GOES package (path already ends with '/').
ds = GOES.open_dataset(path+file)
```
Prints the contents of the file.
```
# Inspect the file contents (variables and attributes).
print(ds)
```
Set the map domain.
```
# Map domain in degrees: [lon_min, lon_max, lat_min, lat_max]
# (indices 0/1 are used as longitudes and 2/3 as latitudes by the plotting cell).
domain = [-90.0,-30.0,-60.0,15.0]
```
Gets image with the coordinates of the centers of their pixels (the code below requests `lonlat='center'`).
```
# Extract the CMI field cropped to the domain, with pixel-center lon/lat arrays.
CMI, LonCen, LatCen = ds.image('CMI', lonlat='center', domain=domain)
```
Gets information about data.
```
# Metadata used later for the plot title and colorbar label.
sat = ds.attribute('platform_ID')
band = ds.variable('band_id').data[0]            # ABI channel number
wl = ds.variable('band_wavelength').data[0]      # central wavelength
standard_name = CMI.standard_name
units = CMI.units
time_bounds = CMI.time_bounds                    # scan start/end times
```
Creates a grid map with cylindrical equidistant projection and 2 km of spatial resolution.
```
# Regular cylindrical-equidistant target grid, 2 km pixel spacing, covering the domain.
LonCenCyl, LatCenCyl = GOES.create_gridmap(domain, PixResol=2.0)
```
Calculates the coordinates of corners of pixels.
```
# Pixel-corner coordinates of the target grid (needed by pcolormesh below).
LonCorCyl, LatCorCyl = GOES.calculate_corners(LonCenCyl, LatCenCyl)
```
Calculates the parameters for reprojection. For this we need install the **pyproj** and **pyresample** packages. Try with ***pip install pyproj*** and ***pip install pyresample***.
```
# Build the pyresample AreaDefinition of the target cylindrical grid.
# Changes from the original: dropped the redundant alias `import pyproj as pyproj`,
# and the proj4 string is defined once and reused (it previously appeared twice as
# identical literals, which could silently drift apart).
import pyproj
from pyresample import utils

# Equidistant-cylindrical projection; coordinates in km (Earth radius 6378.137 km).
Proj4Args = '+proj=eqc +lat_ts=0 +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +a=6378.137 +b=6378.137 +units=km'
Prj = pyproj.Proj(Proj4Args)
AreaID = 'cyl'
AreaName = 'cyl'
ProjID = 'cyl'
ny, nx = LonCenCyl.data.shape
# Project the SW and NE grid corners to get the extent in projection coordinates.
SW = Prj(LonCenCyl.data.min(), LatCenCyl.data.min())
NE = Prj(LonCenCyl.data.max(), LatCenCyl.data.max())
area_extent = [SW[0], SW[1], NE[0], NE[1]]
AreaDef = utils.get_area_def(AreaID, AreaName, ProjID, Proj4Args, nx, ny, area_extent)
```
Reprojects images.
```
from pyresample.geometry import SwathDefinition
from pyresample.kd_tree import resample_nearest
import numpy as np
# Source geometry: the satellite pixel centers as 2D lon/lat arrays (a swath).
SwathDef = SwathDefinition(lons=LonCen.data, lats=LatCen.data)
# Nearest-neighbour resampling onto the cylindrical grid; unmatched pixels become NaN.
# radius_of_influence=6000 — presumably meters; confirm against pyresample docs.
CMICyl = resample_nearest(SwathDef, CMI.data, AreaDef, radius_of_influence=6000,
                          fill_value=np.nan, epsilon=3, reduce_data=True)
```
Deletes unnecessary data.
```
# Free the original swath arrays — only the reprojected grid is needed from here on.
del CMI, LonCen, LatCen, SwathDef, LonCenCyl, LatCenCyl
```
Creates a custom color palette using the [custom_color_palette](https://github.com/joaohenry23/custom_color_palette) package.
```
# import packages
import custom_color_palette as ccp
import matplotlib.pyplot as plt

# set the colors of the custom palette
# Lower segment (values 180-240): discrete rainbow-like colors, 1.0 steps.
lower_colors = ['maroon','red','darkorange','#ffff00','forestgreen','cyan','royalblue',(148/255,0/255,211/255)]
lower_palette = [lower_colors, ccp.range(180.0,240.0,1.0)]
# Upper segment (values 240-330): greys sampled from the sub-range of the full span.
upper_colors = plt.cm.Greys
upper_palette = [upper_colors, ccp.range(240.0,330.0,1.0), [ccp.range(180.0,330.0,1.0),240.0,330.0]]

# pass parameters to the creates_palette module
cmap, cmticks, norm, bounds = ccp.creates_palette([lower_palette, upper_palette], extend='both')
# creating colorbar labels (every 10 units from 180 to 330)
ticks = ccp.range(180,330,10)
```
Creates plot.
```
# import packages
import numpy as np
import cartopy.crs as ccrs
from cartopy.feature import NaturalEarthFeature
from cartopy.mpl.ticker import LatitudeFormatter, LongitudeFormatter

# calculates the central longitude of the plot (domain[0]/domain[1] are lon_min/lon_max)
lon_cen = 360.0+(domain[0]+domain[1])/2.0

# creates the figure
fig = plt.figure('map', figsize=(4,4), dpi=200)
ax = fig.add_axes([0.1, 0.16, 0.80, 0.75], projection=ccrs.PlateCarree(lon_cen))
# NOTE(review): outline_patch was deprecated/removed in recent cartopy releases
# (replaced by ax.spines['geo']) — confirm the cartopy version in use.
ax.outline_patch.set_linewidth(0.3)

# add the geographic boundaries (country borders, 1:50m scale)
l = NaturalEarthFeature(category='cultural', name='admin_0_countries', scale='50m', facecolor='none')
ax.add_feature(l, edgecolor='gold', linewidth=0.25)

# plot the data (pcolormesh requires the pixel-corner coordinates computed earlier)
img = ax.pcolormesh(LonCorCyl.data, LatCorCyl.data, CMICyl.data, cmap=cmap, norm=norm,
                    transform=ccrs.PlateCarree())

# add the colorbar in its own axes below the map
cb = plt.colorbar(img, ticks=ticks, orientation='horizontal', extend='both',
                  cax=fig.add_axes([0.12, 0.05, 0.76, 0.02]))
cb.ax.tick_params(labelsize=5, labelcolor='black', width=0.5, length=1.5, direction='out', pad=1.0)
cb.set_label(label='{} [{}]'.format(standard_name, units), size=5, color='black', weight='normal')
cb.outline.set_linewidth(0.5)

# set the title: satellite/channel/wavelength on the left, scan start time on the right
ax.set_title('{} - C{:02d} [{:.1f} μm]'.format(sat,band, wl), fontsize=7, loc='left')
ax.set_title(time_bounds.data[0].strftime('%Y/%m/%d %H:%M UTC'), fontsize=7, loc='right')

# Sets X axis characteristics (ticks every 15 degrees of longitude)
dx = 15
xticks = np.arange(domain[0], domain[1]+dx, dx)
ax.set_xticks(xticks, crs=ccrs.PlateCarree())
ax.xaxis.set_major_formatter(LongitudeFormatter(dateline_direction_label=True))
ax.set_xlabel('Longitude', color='black', fontsize=7, labelpad=3.0)

# Sets Y axis characteristics (ticks every 15 degrees of latitude)
dy = 15
yticks = np.arange(domain[2], domain[3]+dy, dy)
ax.set_yticks(yticks, crs=ccrs.PlateCarree())
ax.yaxis.set_major_formatter(LatitudeFormatter())
ax.set_ylabel('Latitude', color='black', fontsize=7, labelpad=3.0)

# Sets tick characteristics
ax.tick_params(left=True, right=True, bottom=True, top=True,
               labelleft=True, labelright=False, labelbottom=True, labeltop=False,
               length=0.0, width=0.05, labelsize=5.0, labelcolor='black')

# Sets grid characteristics (dashed graticule at the same tick locations)
ax.gridlines(xlocs=xticks, ylocs=yticks, alpha=0.6, color='gray',
             draw_labels=False, linewidth=0.25, linestyle='--')

# set the map limits
ax.set_extent([domain[0]+360.0, domain[1]+360.0, domain[2], domain[3]], crs=ccrs.PlateCarree())
plt.show()
```
| github_jupyter |
# Project 2: inverse kinematics and resolved rate control
In this project, we will implement an inverse kinematics algorithm and controllers for the Kuka iiwa 14 robot using the results from Project 1.
## Instructions
* Answer all questions in the notebook
* You will need to submit on Brightspace:
1. the code you wrote to answer the questions in a Jupyter Notebook. The code should be runnable as is.
2. a 2-3 pages report in pdf format (pdf only) detailing the methodology you followed to answer the questions as well as answers to the questions that require a typeset answer. You may add the plots in the report (does not count for the page limit) or in the Jupyter notebook.
As a reminder, the [Kuka iiwa 14 robot](https://www.kuka.com/en-us/products/robotics-systems/industrial-robots/lbr-iiwa) has 7 revolute joints and its kinematics is described in the picture below:

# Setup
Run the cell below only once when resetting the runtime in Colab - this will not do anything when running on a local Jupyter Notebook.
```
## check if we are in Google Colab
# (broad bare `except:` clauses here are deliberate best-effort detection/install)
try:
    import google.colab
    RUNNING_IN_COLAB = True
    print('detected Colab - setting up environment')
    # then we need to install the conda environment
    try:
        import condacolab
        condacolab.check()
    except:
        !pip install -q condacolab
        import condacolab
        condacolab.install()
except:
    RUNNING_IN_COLAB = False

# after installing condacolab, the runtime restarts
# -> need to check for colab env once more here
try:
    import google.colab
    RUNNING_IN_COLAB = True
except Exception as e:
    RUNNING_IN_COLAB = False

if RUNNING_IN_COLAB:
    try:
        # Check if packages are installed or not. If not, install them.
        import pinocchio
    except:
        # Install pinocchio, meshcat-python
        !conda install pinocchio meshcat-python
    # get the class repo - first check if it exists
    import os, sys
    if not os.path.isdir('/content/ROB6003/Project2'):
        print('cloning LAB repository')
        os.chdir('/content')
        !git clone https://github.com/righetti/ROB6003.git
        print('cloning done')
    else:
        print('lab repos was found, skipping cloning')
    print('done configuring for Colab')
    # make the project code importable and work from its directory
    sys.path.append('/content/ROB6003/Project2/')
    os.chdir('/content/ROB6003/Project2/')
    print('done adding system path and changing directory.')
```
# Starting the visualization environment
The following code will start a visualization environment (click on the printed address to see the robot)
You need to run this only ONCE. Each time you run this cell you will get a new display environment (so you need to close the previous one!)
This should work out of the box on Google Colab and you local Jupyter Notebook (make sure you have installed the right libraries in your local computer if you do not use Colab).
```
import numpy as np
import robot_visualizer
import time
import matplotlib.pyplot as plt

# Start the robot visualization environment (run this cell only ONCE per session —
# each run opens a new display environment, per the notebook instructions above).
robot_visualizer.start_robot_visualizer()
```
# Displaying an arbitrary configuration
As in the previous project, you can use the following function to display arbitrary configurations of the robot
```
# here we display an arbitrary configuration of the robot
q = np.random.sample([7])
print(f'we show the configuration for the angles {q}')
robot_visualizer.display_robot(q)
```
## Question 1: inverse kinematics
* Write a function ``compute_IK_position`` that gets a desired end-effector 3D position (in spatial frame) and returns a vector of joint angles that solves the inverse kinematics problem
* The file ``desired_end_effector_positions.npy`` contains a sequence of 10 desired end-effector positions. For all the positions attainable by the robot, compute an inverse kinematics solution. For the positions for which an inverse kinematics solution does not exist, what is the issue and how close can you get the end-effector to the desired position?
* Write a function ``compute_IK_position_nullspace`` that solves the inverse kinematics problem and additionally uses joint redundancy (i.e. the nullspace) to try and keep the joints close to the following configuration $[1,1,-1,-1,1,1,1]$. Explain how you used the nullspace to implement this function.
* Use this new function to reach the positions set in the file ``desired_end_effector_positions.npy``, how do the solutions compare to the first ones you found?
```
## a script to load the desired end effector positions and display each of them every second
## you maybe modify this script to test your code
# load the file
with open('desired_end_effector_positions.npy', 'rb') as f:
desired_endeff = np.load(f)
# first we display the robot in 0 position
robot_visualizer.display_robot(np.zeros([7,1]))
# for each end-eff position
for i in range(desired_endeff.shape[1]):
# displays the desired endeff position
robot_visualizer.display_ball(desired_endeff[:,i])
time.sleep(1.)
```
## Question 2: Joint control and joint trajectories generation
We would like the robot to go from its initial configuration to the desired end-effector positions (in spatial coordinates) $[0.7, 0.2,0.7]$ in 5 seconds and then to the configuration $[0.3, 0.5,0.9]$ during the following 5 seconds.
* Compute inverse kinematics solutions to reach both goals
* Write a function ``get_point_to_point_motion`` that returns a desired position and velocity and takes as input the total motion duration T, the desired initial position and the desired final position. The generated trajectory needs to ensure that at t=0 and t=T both the velocity and acceleration are 0. You can use this function to interpolate between desired positions in both joint and end-effector space.
* Modify the ``robot_controller`` function below to move the robot from its initial configuration to reach the first goal (displayed in pink) at t=5 and the second goal (in yellow) at t=10 by interpolating joint positions using the function ``get_point_to_point_motion`` you wrote above.
* Plot the resulting joint simulated and desired positions and velocities
* Plot the resulting end-effector positions and velocities
```
# Total simulation duration [s] and the two end-effector goals as 3x1 column vectors.
T = 10.
end_effector_goal1 = np.array([[0.7], [0.2],[0.7]])
end_effector_goal2 = np.array([[0.3], [0.5],[0.9]])

## this code is to save what the controller is doing for plotting and analysis after the simulation
# Buffers sized for one sample per 0.001 s control step over T seconds (+1 slot).
# NOTE(review): `global` at module level is a no-op; it is kept here to mirror the
# declarations inside robot_controller.
global save_joint_positions, save_joint_velocities, save_t, ind
global save_des_joint_positions, save_des_joint_velocities
save_joint_positions = np.zeros([7,int(np.ceil(T / 0.001))+1])
save_joint_velocities = np.zeros_like(save_joint_positions)
save_des_joint_positions = np.zeros_like(save_joint_positions)
save_des_joint_velocities = np.zeros_like(save_joint_positions)
save_t = np.zeros([int(np.ceil(T / 0.001))+1])
ind=0   # write index into the buffers, advanced by robot_controller
# end of saving code
def robot_controller(t, joint_positions, joint_velocities):
    """Joint-space PD controller invoked by the simulator at every time step.

    Receives the current joint positions and velocities (both [7,1] arrays)
    and returns a [7,1] vector of desired torque commands. As an example,
    after t = 5s joints 2 and 3 are driven along sine curves.
    """
    q_des = np.zeros([7,1])
    dq_des = np.zeros([7,1])

    # Example reference: after t = 5s, joints 2 and 3 follow sine trajectories.
    if t > 5.:
        q_des[2] = 1. - np.cos(2*np.pi/5.*t)
        dq_des[2] = 2*np.pi/5. * np.sin(2*np.pi/5.*t)
        q_des[3] = .5 - 0.5*np.cos(2*np.pi/5.*t)
        dq_des[3] = np.pi/5. * np.sin(2*np.pi/5.*t)

    # PD law: tau = P (q_des - q) + D (dq_des - dq)
    gain_p = np.diag(np.array([100., 100., 100., 100., 100., 100., 100.]))
    gain_d = np.diag(np.array([2.,2,2,2,2,2,2.]))
    tau = gain_p @ (q_des - joint_positions) + gain_d @ (dq_des - joint_velocities)

    # Record controller state so it can be plotted after the simulation.
    global save_joint_positions, save_joint_velocities, save_t, ind
    global save_des_joint_positions, save_des_joint_velocities
    save_joint_positions[:,ind] = joint_positions[:,0]
    save_joint_velocities[:,ind] = joint_velocities[:,0]
    save_des_joint_positions[:,ind] = q_des[:,0]
    save_des_joint_velocities[:,ind] = dq_des[:,0]
    save_t[ind] = t
    ind += 1

    return tau
robot_visualizer.display_ball(end_effector_goal1[:,0])
robot_visualizer.display_ball2(end_effector_goal2[:,0])
robot_visualizer.simulate_robot(robot_controller, T=T)
# we plot the simulated vs. actual position of the robot
plt.figure(figsize=[9,12])
for i in range(7):
plt.subplot(7,1,i+1)
plt.plot(save_t, save_joint_positions[i,:])
plt.plot(save_t, save_des_joint_positions[i,:])
plt.ylim([-np.pi,np.pi])
plt.ylabel(f'q {i}')
plt.xlabel('Desired vs. actual joint positions - Time [s]')
# we plot the simulated vs. actual position of the robot
plt.figure(figsize=[9,12])
for i in range(7):
plt.subplot(7,1,i+1)
plt.plot(save_t, save_joint_velocities[i,:])
plt.plot(save_t, save_des_joint_velocities[i,:])
plt.ylim([-3,3])
plt.ylabel(f'dq {i}')
plt.xlabel('Desired vs. actual joint velocities - Time [s]')
```
## Question 3: End-effector control
As in Question 2, we would like the robot to go from its initial configuration to the desired end-effector positions (in spatial coordinates) $[0.7, 0.2,0.7]$ in 5 seconds and then to the configuration $[0.3, 0.5,0.9]$ during the following 5 seconds.
* Modify the ``robot_controller2`` function below to move the robot from its initial configuration to the first goal (reaching at t=5) and the second goal (t=10) by interpolating the desired end effector positions and directly mapping end-effector error to desired joint velocities (i.e. use P gains equal to 0 in joint space and do resolved-rate control).
* Plot the resulting joint simulated and desired positions and velocities
* Plot the resulting end-effector positions and velocities
* Compare results with Question 2
* Add a nullspace term to optimize a desired configuration of your choice and discuss the results
```
T = 10.
## this code is to save what the controller is doing for plotting and analysis after the simulation
global save_joint_positions, save_joint_velocities, save_t, ind
global save_des_joint_positions, save_des_joint_velocities
save_joint_positions = np.zeros([7,int(np.ceil(T / 0.001))+1])
save_joint_velocities = np.zeros_like(save_joint_positions)
save_des_joint_positions = np.zeros_like(save_joint_positions)
save_des_joint_velocities = np.zeros_like(save_joint_positions)
save_t = np.zeros([int(np.ceil(T / 0.001))+1])
ind=0
# end of saving code
def robot_controller2(t, joint_positions, joint_velocities):
    """Resolved-rate (end-effector velocity) controller template.

    Called by the simulator at every time t with the current joint positions
    and velocities ([7,1] arrays); must return a [7,1] vector of desired
    torque commands. Only the D (velocity) term is implemented so far —
    computing the desired joint velocities from the end-effector error is
    left as the exercise (see TODO below).
    """
    desired_joint_positions = np.zeros([7,1])
    desired_joint_velocities = np.zeros([7,1])
    # here we will only use a D controller (i.e. on the desired joint velocities)
    # we increased the D gain for that purpose compared to the previous controller
    D = np.array([4.,4,4,4,4,4,4.])
    ##TODO - find the desired joint velocities
    desired_joint_torques = np.diag(D) @ (desired_joint_velocities - joint_velocities)
    ## this code is to save what the controller is doing for plotting and analysis after the simulation
    global save_joint_positions, save_joint_velocities, save_t, ind
    global save_des_joint_positions, save_des_joint_velocities
    save_joint_positions[:,ind] = joint_positions[:,0]
    save_joint_velocities[:,ind] = joint_velocities[:,0]
    save_des_joint_positions[:,ind] = desired_joint_positions[:,0]
    save_des_joint_velocities[:,ind] = desired_joint_velocities[:,0]
    save_t[ind] = t
    ind += 1
    ## end of saving code
    return desired_joint_torques
robot_visualizer.display_ball(end_effector_goal1[:,0])
robot_visualizer.display_ball2(end_effector_goal2[:,0])
robot_visualizer.simulate_robot(robot_controller2, T=T)
```
## Question 4: Impedance control and gravity compensation
As in Question 2 and 3, we would like the robot to go from its initial configuration to the desired end-effector positions (in spatial coordinates) $[0.7, 0.2,0.7]$ in 5 seconds and then to the configuration $[0.3, 0.5,0.9]$ during the following 5 seconds.
In the previous questions, a gravity compensation controller was running "in the background" in addition to the control law you were computing. In this question, we remove this and implement a complete impedance controller with gravity compensation.
You are given a function ``robot_visualizer.rnea(q,dq,ddq)`` which implements the Recursive Newton Euler Algorithm (RNEA). It takes as arguments a vector of positions, velocities and accelerations, and computes (and returns) the following $M(q) \cdot \ddot{q} + C(q,\dot{q}) + G(q)$
* Modify the ``robot_controller3`` function below to implement an impedance controller with gravity compensation (add a small amount of joint damping, using a joint-space D gain of 0.1). Use this controller to move the robot from its initial configuration to the first goal (reaching at t=5) and the second goal (t=10) by interpolating the desired end effector positions as in the previous questions.
* Plot the resulting joint simulated and desired positions and velocities
* Plot the resulting end-effector positions and velocities
* Compare the controller when the small joint damping is on or off - can you explain the difference?
* Compare results with Question 2 and 3. Which controller would you rather choose and why?
```
T = 10.
## this code is to save what the controller is doing for plotting and analysis after the simulation
global save_joint_positions, save_joint_velocities, save_t, ind
global save_des_joint_positions, save_des_joint_velocities
save_joint_positions = np.zeros([7,int(np.ceil(T / 0.001))+1])
save_joint_velocities = np.zeros_like(save_joint_positions)
save_des_joint_positions = np.zeros_like(save_joint_positions)
save_des_joint_velocities = np.zeros_like(save_joint_positions)
save_t = np.zeros([int(np.ceil(T / 0.001))+1])
ind=0
# end of saving code
def robot_controller3(t, joint_positions, joint_velocities):
    """Impedance controller with gravity compensation (exercise template).

    Called by the simulator at every time t with the current joint positions
    and velocities ([7,1] arrays); must return a [7,1] vector of desired
    torque commands. Only a small joint-damping term is implemented so far;
    gravity compensation and the impedance terms are left as the exercise
    (see TODO below). The simulator is invoked with gravity_comp=False, so
    this controller is responsible for the full torque command.
    """
    desired_joint_positions = np.zeros([7,1])
    desired_joint_velocities = np.zeros([7,1])
    # here we will only use the D controller to inject small joint damping
    D = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
    ##TODO - implement gravity compensation and impedance control
    desired_joint_torques = -np.diag(D) @ joint_velocities
    ## this code is to save what the controller is doing for plotting and analysis after the simulation
    global save_joint_positions, save_joint_velocities, save_t, ind
    global save_des_joint_positions, save_des_joint_velocities
    save_joint_positions[:,ind] = joint_positions[:,0]
    save_joint_velocities[:,ind] = joint_velocities[:,0]
    save_des_joint_positions[:,ind] = desired_joint_positions[:,0]
    save_des_joint_velocities[:,ind] = desired_joint_velocities[:,0]
    save_t[ind] = t
    ind += 1
    ## end of saving code
    return desired_joint_torques
robot_visualizer.display_ball([0.7, 0.2,0.7])
robot_visualizer.display_ball2([0.3, 0.5,0.9])
robot_visualizer.simulate_robot(robot_controller3, T=T, gravity_comp = False)
```
| github_jupyter |
# Gathering historical data about the addition of newspaper titles to Trove
The number of digitised newspapers available through Trove has increased dramatically since 2009. Understanding when newspapers were added is important for historiographical purposes, but there's no data about this available directly from Trove. This notebook uses web archives to extract lists of newspapers in Trove over time, and chart Trove's development.
Trove has always provided a browseable list of digitised newspaper titles. The url and format of this list has changed over time, but it's possible to find captures of this page in the Internet Archive and extract the full list of titles. The pages are also captured in the Australian Web Archive, but the Wayback Machine has a more detailed record.
The pages that I'm looking for are:
* [http://trove.nla.gov.au/ndp/del/titles](https://web.archive.org/web/*/http://trove.nla.gov.au/ndp/del/titles)
* [https://trove.nla.gov.au/newspaper/about](https://web.archive.org/web/*/https://trove.nla.gov.au/newspaper/about)
This notebook creates the following data files:
* [trove_newspaper_titles_2009_2021.csv](https://github.com/GLAM-Workbench/trove-newspapers/blob/master/trove_newspaper_titles_2009_2021.csv) – complete dataset of captures and titles
* [trove_newspaper_titles_first_appearance_2009_2021.csv](https://github.com/GLAM-Workbench/trove-newspapers/blob/master/trove_newspaper_titles_first_appearance_2009_2021.csv) – filtered dataset, showing only the first appearance of each title / place / date range combination
I've also created a [browseable list of titles](https://gist.github.com/wragge/7d80507c3e7957e271c572b8f664031a), showing when they first appeared in Trove.
```
import requests
import json
import re
from surt import surt
from bs4 import BeautifulSoup
import arrow
import pandas as pd
import altair as alt
from IPython.display import display, HTML
from pathlib import Path
```
## Code for harvesting web archive captures
We're using the Memento protocol to get a list of captures. See the [Web Archives section](https://glam-workbench.net/web-archives/) of the GLAM Workbench for more details.
```
# The code in this cell is copied from notebooks in the Web Archives section of the GLAM Workbench (https://glam-workbench.net/web-archives/)
# In particular see: https://glam-workbench.net/web-archives/#find-all-the-archived-versions-of-a-web-page
# These are the repositories we'll be using
TIMEGATES = {
'awa': 'https://web.archive.org.au/awa/',
'nzwa': 'https://ndhadeliver.natlib.govt.nz/webarchive/wayback/',
'ukwa': 'https://www.webarchive.org.uk/wayback/en/archive/',
'ia': 'https://web.archive.org/web/'
}
def convert_lists_to_dicts(results):
    '''
    Converts an IA-style timemap (a JSON array of arrays) to a list of dictionaries.
    Renames keys to standardise IA with other Timemaps.
    '''
    if not results:
        return results
    header, *rows = results
    # IA key -> standardised key used by the other Timemap formats.
    renames = {'statuscode': 'status', 'mimetype': 'mime', 'original': 'url'}
    records = []
    for row in rows:
        record = dict(zip(header, row))
        for old_key, new_key in renames.items():
            record[new_key] = record.pop(old_key)
        records.append(record)
    return records
def get_capture_data_from_memento(url, request_type='head'):
    '''
    For OpenWayback systems this can get some extra capture info to insert into Timemaps.
    '''
    # A HEAD request is enough for the headers; fall back to GET on request.
    fetch = requests.head if request_type == 'head' else requests.get
    headers = fetch(url).headers

    def _first_token(header_name, separator):
        # Return the part of the header before `separator`, or None if absent.
        value = headers.get(header_name)
        return value.split(separator)[0] if value else None

    return {
        'length': headers.get('x-archive-orig-content-length'),
        'status': _first_token('x-archive-orig-status', ' '),
        'mime': _first_token('x-archive-orig-content-type', ';'),
    }
def convert_link_to_json(results, enrich_data=False):
    '''
    Converts a link-formatted Timemap to a list of capture dictionaries.

    Parameters:
        results: raw Timemap string in application/link-format.
        enrich_data: if True, make one extra request per memento to add
            length/status/mime fields (slow).

    Returns a list of dicts with 'urlkey', 'timestamp' and 'url' keys.
    '''
    data = []
    for line in results.splitlines():
        parts = line.split('; ')
        if len(parts) < 2:
            continue
        rel_match = re.search(r'rel="(original|self|timegate|first memento|last memento|memento)"', parts[1])
        if not rel_match:
            # Skip lines with unrecognised rel values instead of crashing
            # (previously .group(1) was called on a possible None match).
            continue
        if rel_match.group(1) == 'memento':
            link = parts[0].strip('<>')
            timestamp, original = re.search(r'/(\d{14})/(.*)$', link).groups()
            capture = {'urlkey': surt(original), 'timestamp': timestamp, 'url': original}
            if enrich_data:
                capture.update(get_capture_data_from_memento(link))
            # (Removed leftover debug print of every capture.)
            data.append(capture)
    return data
def get_timemap_as_json(timegate, url, enrich_data=False):
    '''
    Get a Timemap then normalise results (if necessary) to return a list of dicts.

    Parameters:
        timegate: key into TIMEGATES ('awa', 'nzwa', 'ukwa' or 'ia').
        url: the archived page to request a Timemap for.
        enrich_data: passed through to convert_link_to_json.

    Raises ValueError if the repository returns an unrecognised content type.
    '''
    tg_url = f'{TIMEGATES[timegate]}timemap/json/{url}/'
    response = requests.get(tg_url)
    response_type = response.headers['content-type']
    if response_type == 'text/x-ndjson':
        data = [json.loads(line) for line in response.text.splitlines()]
    elif response_type == 'application/json':
        data = convert_lists_to_dicts(response.json())
    elif response_type in ['application/link-format', 'text/html;charset=utf-8']:
        data = convert_link_to_json(response.text, enrich_data=enrich_data)
    else:
        # Previously an unrecognised content type left `data` unbound and
        # raised a confusing UnboundLocalError; fail with a clear message.
        raise ValueError(f'Unexpected Timemap content type: {response_type!r}')
    return data
```
## Harvest the title data from the Internet Archive
This gets the web page captures from the Internet Archive, scrapes the list of titles from the page, then does a bit of normalisation of the title data.
```
# Harvest newspaper-title lists from every Internet Archive capture of the
# Trove title pages, normalising place/date details for later deduplication.
titles = []
# These are the pages that listed available titles.
# There was a change in 2016
pages = [{'url': 'http://trove.nla.gov.au/ndp/del/titles', 'path': '/ndp/del/title/'},
         {'url': 'https://trove.nla.gov.au/newspaper/about', 'path': '/newspaper/title/'}]
for page in pages:
    # One entry per archived capture of the page.
    for capture in get_timemap_as_json('ia', page['url']):
        if capture['status'] == '200':
            # 'id_' asks the Wayback Machine for the original page content
            # without the archive banner markup.
            url = f'https://web.archive.org/web/{capture["timestamp"]}id_/{capture["url"]}'
            #print(url)
            capture_date = arrow.get(capture['timestamp'][:8], 'YYYYMMDD').format('YYYY-MM-DD')
            #print(capture_date)
            response = requests.get(url)
            soup = BeautifulSoup(response.content)
            # Links whose href matches the per-era title-detail path.
            title_links = soup.find_all('a', href=re.compile(page['path']))
            for title in title_links:
                # Get the title text
                full_title = title.get_text().strip()
                # Get the title id
                title_id = re.search(r'\/(\d+)\/?$', title['href']).group(1)
                # Most of the code below is aimed at normalising the publication place and dates values to allow for easy grouping & deduplication
                brief_title = re.sub(r'\(.+\)\s*$', '', full_title).strip()
                try:
                    details = re.search(r'\((.+)\)\s*$', full_title).group(1).split(':')
                except AttributeError:
                    # No parenthesised details in the title at all.
                    place = ''
                    dates = ''
                else:
                    try:
                        place = details[0].strip()
                        # Normalise states
                        try:
                            place = re.sub(r'(, )?([A-Za-z]+)[\.\s]*$', lambda match: f'{match.group(1) if match.group(1) else ""}{match.group(2).upper()}', place)
                        except AttributeError:
                            pass
                        # Normalise dates
                        dates = ' - '.join([d.strip() for d in details[1].strip().split('-')])
                    except IndexError:
                        # No ':' separator — the parenthesised text is only dates.
                        place = ''
                        dates = ' - '.join([d.strip() for d in details[0].strip().split('-')])
                titles.append({'title_id': title_id, 'full_title': full_title, 'title': brief_title, 'place': place, 'dates': dates, 'capture_date': capture_date, 'capture_timestamp': capture['timestamp']})
```
## Convert the title data to a DataFrame for analysis
```
df = pd.DataFrame(titles)
df
# Number of captures
len(df['capture_timestamp'].unique())
# Number of days on which the pages were captured
len(df['capture_date'].unique())
```
Save this dataset as a CSV file.
```
df.to_csv('trove_newspaper_titles_2009_2021.csv', index=False)
```
## How did the number of titles change over time?
```
# Drop duplicates in cases where there were mutiple captures on a single day
captures_df = df.drop_duplicates(subset=['capture_date', 'full_title'])
# Calculate totals per capture
capture_totals = captures_df['capture_date'].value_counts().to_frame().reset_index()
capture_totals.columns = ['capture_date', 'total']
capture_totals
alt.Chart(capture_totals).mark_line(point=True).encode(
x=alt.X('capture_date:T', title='Date captured'),
y=alt.Y('total:Q', title='Number of newspaper titles'),
tooltip=[alt.Tooltip('capture_date:T', format='%e %b %Y'), 'total:Q'],
).properties(width=700)
```
## When did titles first appear?
For historiographical purposes, it's useful to know when a particular title first appeared in Trove. Here we'll only keep the first appearance of each title (or any subsequent changes to its date range / location).
```
first_appearance = df.drop_duplicates(subset=['title', 'place', 'dates'])
first_appearance
```
Find when a particular newspaper first appeared.
```
first_appearance.loc[first_appearance['title'] == 'Canberra Times']
```
Generate an alphabetical list for easy browsing. View the [results as a Gist](https://gist.github.com/wragge/7d80507c3e7957e271c572b8f664031a).
```
# Write an alphabetical markdown list: one heading per title (linked to its
# persistent nla.gov.au identifier) followed by a table of captured changes.
with Path('titles_list.md').open('w') as titles_list:
    for title, group in first_appearance.groupby(['title', 'title_id']):
        places = ' | '.join(group['place'].unique())
        titles_list.write(f'<h4><a href="http://nla.gov.au/nla.news-title{title[1]}">{title[0]} ({places})</a></h4>')
        titles_list.write(group.sort_values(by='capture_date')[['capture_date','dates', 'place']].to_html(index=False))
```
Save this dataset to CSV.
```
first_appearance.to_csv('trove_newspaper_titles_first_appearance_2009_2021.csv', index=False)
```
----
Created by [Tim Sherratt](https://timsherratt.org/) for the [GLAM Workbench](https://glam-workbench.github.io/).
Support this project by becoming a [GitHub sponsor](https://github.com/sponsors/wragge?o=esb).
| github_jupyter |
```
# # download example data
# # !pip install gdown
# from downloaddata import download_example_data
# download_example_data()
%config IPCompleter.use_jedi = False
from wholeslidedata.iterators import create_batch_iterator
from wholeslidedata.image.wholeslideimagewriter import WholeSlideMaskWriter, HeatmapTileCallback, PredictionTileCallback
from wholeslidedata.image.wholeslideimage import WholeSlideImage
import time
from pprint import pprint
from tqdm.notebook import tqdm
from matplotlib import pyplot as plt
from hooknet.configuration.config import create_hooknet
from pathlib import Path
user_config = './inference_configs/tlsgcinference.yml'
output_folder = Path('/home/user/output')
mode='training'
model = create_hooknet(user_config=user_config)
training_iterator = create_batch_iterator(mode=mode,
user_config='./inference_configs/tlsgcinference.yml',
presets=('slidingwindow',),
cpus=4,
number_of_batches=-1,
return_info=True)
# Output-writer settings for the whole-slide mask/heatmap files.
spacing = 0.5
tile_size = 1024
output_size = 1030
image_path = None
wsm_writer = None
tls_heat_writer = None
for x_batch, y_batch, info in tqdm(training_iterator):
    # HookNet expects a list of per-branch inputs, so move the branch axis first.
    x_batch = list(x_batch.transpose(1,0,2,3,4))
    predictions = model.predict_on_batch(x_batch, argmax=False)
    for idx, prediction in enumerate(predictions):
        sample_reference = info['sample_references'][idx]['reference']
        point = info['sample_references'][idx]['point']
        image = training_iterator.dataset.get_image_from_reference(sample_reference)
        # A new slide started: flush the previous writers and open fresh ones.
        if image_path is None or image.path != image_path:
            if image_path is not None:
                wsm_writer.save()
                tls_heat_writer.save()
            image_path = image.path
            wsm_writer = WholeSlideMaskWriter(callbacks=(PredictionTileCallback(),))
            tls_heat_writer = WholeSlideMaskWriter(callbacks=(HeatmapTileCallback(heatmap_index=1),))
            with WholeSlideImage(image_path) as wsi:
                shape = wsi.shapes[wsi.get_level_from_spacing(spacing)]
                real_spacing = wsi.get_real_spacing(spacing)
            wsm_writer.write(path=output_folder / (image_path.stem + '_hooknet.tif') , spacing=real_spacing, dimensions=shape, tile_shape=(tile_size,tile_size))
            tls_heat_writer.write(path=output_folder / (image_path.stem + '_hooknet_tls_heat.tif') , spacing=real_spacing, dimensions=shape, tile_shape=(tile_size,tile_size))
        # Top-left coordinate of the written tile; the quarter-size offset
        # assumes `prediction` covers the centre of the sampled region —
        # TODO confirm against the sampler configuration.
        c, r = point.x-output_size//4, point.y-output_size//4
        wsm_writer.write_tile(tile=prediction,coordinates=(int(c),int(r)), mask=y_batch[idx][0])
        tls_heat_writer.write_tile(tile=prediction,coordinates=(int(c),int(r)), mask=y_batch[idx][0])
# Flush the writers for the last slide and stop the iterator workers.
wsm_writer.save()
tls_heat_writer.save()
training_iterator.stop()
```
| github_jupyter |
```
# 计算每个点之间的距离
from collections import defaultdict
class Solution:
    def shortestSuperstring(self, A):
        """Brute-force shortest-superstring solver.

        Builds a pairwise cost table dis[i][j] (extra characters of A[j]
        needed when A[j] follows A[i]), enumerates every permutation of the
        words, and returns the words concatenated along the cheapest order.

        NOTE(review): the final string is built by plain concatenation, so
        overlapping characters are not merged in the output (the DP version
        below does merge them); behaviour preserved as in the notebook.
        """
        def calc(word1, word2):
            # Extra characters of word2 required when word2 is appended
            # after word1 (len(word2) minus the matched overlap).
            n1, n2 = len(word1), len(word2)
            right = 0  # NOTE(review): not reset between match attempts — confirm intended.
            for i in range(n1):
                if right < n2 and word1[i] == word2[right]:
                    ans = n2
                    idx = i
                    # Bounds are tested BEFORE indexing; the original
                    # evaluated word1[idx] == word2[right] first and raised
                    # IndexError once idx reached len(word1).
                    while idx < n1 and right < n2 and word1[idx] == word2[right]:
                        idx += 1
                        right += 1
                        ans -= 1
                    if idx == n1 or right == n2:
                        return ans
            return n2

        def get_min_path():
            # Enumerate all permutations of word indices via DFS and return
            # the ordering with the smallest total cost.
            def dfs(arr, path):
                if not arr:
                    res.append(path[:])
                    return
                for i in range(len(arr)):
                    path.append(arr[i])
                    dfs(arr[:i] + arr[i+1:], path)
                    path.pop()

            res = []
            nums = [i for i in range(n)]
            dfs(nums, [])
            outs = []
            min_dis = float('inf')
            for per in res:
                cur_dis = sum(dis[per[k]][per[k+1]] for k in range(n - 1))
                if cur_dis < min_dis:
                    min_dis = cur_dis
                    outs = per
            return outs

        n = len(A)
        dis = defaultdict(dict)
        # Pairwise append costs between every ordered pair of words.
        for i in range(n):
            for j in range(i+1, n):
                dis[i][j] = calc(A[i], A[j])
                dis[j][i] = calc(A[j], A[i])
        # (Removed leftover debug prints of dis and each permutation.)
        path = get_min_path()
        return ''.join(A[i] for i in path)
class Solution:
    def shortestSuperstring(self, A):
        """Shortest superstring via Held-Karp-style bitmask DP.

        dp[state][last] = length of the shortest superstring that covers the
        words in `state` (a bitmask) and ends with word `last`. The
        transition cost dis[i][j] is the number of extra characters A[j]
        adds when appended after A[i]. The optimal order is recovered from
        `parent` and the words are merged with overlaps removed.
        """
        def calc_dis(w1, w2):
            # Extra characters of w2 needed after w1: len(w2) minus the
            # longest suffix of w1 that is also a prefix of w2.
            n1, n2 = len(w1), len(w2)
            for k in range(min(n1, n2), -1, -1):
                if w1[-k:] == w2[:k]:
                    return n2 - k
            return n2

        def combine(cur_res, word):
            # Append `word` to `cur_res`, dropping the overlapping prefix.
            n1, n2 = len(cur_res), len(word)
            for k in range(min(n1, n2), -1, -1):
                if cur_res[-k:] == word[:k]:
                    return cur_res + word[k:]
            return cur_res + word

        N = len(A)
        M = 1 << N
        dp = [[float('inf')] * N for _ in range(M)]
        parent = [[-1] * N for _ in range(M)]

        # Pairwise transition costs between every ordered pair of words.
        dis = [[0] * N for _ in range(N)]
        for i in range(N):
            for j in range(N):
                if i != j:
                    dis[i][j] = calc_dis(A[i], A[j])

        # Base case: visiting a single word costs its full length.
        for i in range(N):
            dp[1 << i][i] = len(A[i])

        # Sweep states from "no words used" to "all words used".
        for cur_state in range(M):
            for cur in range(N):
                if (cur_state & (1 << cur)) == 0:
                    continue  # `cur` is not part of this state
                pre_state = cur_state - (1 << cur)
                if pre_state == 0:
                    continue  # single-word states are the base case
                for prev in range(N):
                    if (pre_state & (1 << prev)) == 0:
                        continue
                    if dp[pre_state][prev] + dis[prev][cur] < dp[cur_state][cur]:
                        dp[cur_state][cur] = dp[pre_state][prev] + dis[prev][cur]
                        parent[cur_state][cur] = prev

        # Pick the cheapest ending word over the full state.
        start = None
        min_dis = float('inf')
        for i in range(N):
            if dp[-1][i] < min_dis:
                min_dis = dp[-1][i]
                start = i

        # Walk parents backwards to recover the visiting order.
        # (Removed leftover debug prints of the mask at each step.)
        mask = M - 1
        path = [start]
        while parent[mask][start] != -1:
            nxt = parent[mask][start]
            path.append(nxt)
            mask = mask ^ (1 << start)
            start = nxt

        # Merge the words along the recovered order.
        path = path[::-1]
        res = A[path[0]]
        for i in range(1, len(path)):
            res = combine(res, A[path[i]])
        return res
```
### 12-10 第二遍
```
class Solution:
    def shortestSuperstring(self, A):
        """Shortest superstring via bitmask DP (second-pass re-implementation)."""
        def calc_dis(w1, w2):
            # Distance from w1 to w2: extra characters of w2 needed when it
            # follows w1 (len(w2) minus the longest suffix/prefix overlap).
            n1, n2 = len(w1), len(w2)
            for k in range(min(n1, n2), -1, -1):
                if w1[-k:] == w2[:k]:
                    return n2 - k
            return n2
        def combine(cur_res, word):
            # Concatenate, dropping the overlapping prefix of `word`.
            n1, n2 = len(cur_res), len(word)
            for k in range(min(n1, n2), -1, -1):
                if cur_res[-k:] == word[:k]:
                    return cur_res + word[k:]
            return cur_res + word
        n = len(A)
        m = 1 << n  # total number of visited-word subsets
        # dp[state][cur]: shortest length covering the words in `state`
        # (bitmask) with word `cur` visited last.
        dp = [[float('inf')] * n for _ in range(m)]
        par = [[-1] * n for _ in range(m)]  # predecessor word, for backtracking
        for i in range(n):  # visiting a single word costs its full length
            dp[1 << i][i] = len(A[i])
        dis = [[0] * n for _ in range(n)]
        for i in range(n):
            for j in range(n):
                if i != j:
                    dis[i][j] = calc_dis(A[i], A[j])
        for state in range(m):  # current subset of visited words
            for cur in range(n):
                if (state & (1 << cur)) == 0:
                    continue
                prev_state = state - (1 << cur)
                if prev_state == 0:  # single-word states are the base case
                    continue
                for prev in range(n):
                    if (prev_state & (1 << prev)) == 0:
                        continue
                    if dp[prev_state][prev] + dis[prev][cur] < dp[state][cur]:
                        dp[state][cur] = dp[prev_state][prev] + dis[prev][cur]
                        par[state][cur] = prev
        min_dis = float('inf')
        for i in range(n):
            if dp[-1][i] < min_dis:
                last_city = i  # last word in the optimal order
                min_dis = dp[-1][i]  # length of the shortest superstring
        # Backtrack the optimal visiting order from the full mask.
        path = [last_city]
        mask = m - 1
        while par[mask][last_city] != -1:
            prev_city = par[mask][last_city]
            path.append(prev_city)
            mask = mask ^ (1 << last_city)
            last_city = prev_city
        path = path[::-1]
        # Merge the words along the recovered order.
        res = A[path[0]]
        for i in range(1, len(path)):
            res = combine(res, A[path[i]])
        return res
solution = Solution()
solution.shortestSuperstring(["sssv","svq","dskss","sksss"])
a = 'abcd'
a[-1:]
len("dsksssvq")
bin(8)
```
| github_jupyter |
SOP0125 - Delete Key For Encryption At Rest
===========================================
Description
-----------
Use this notebook to connect to the `controller` database and delete a
key from controller database. For restoring keys with names which exist
in the target deployment, the key with the conflicting name needs to be
deleted before it can be restored. This notebook can be used to delete
keys with conflicting names.
Steps
-----
### Parameters
Set the `key_to_be_deleted` to the key name to be deleted from Big Data
Cluster.
```
# Name of the key to delete from the controller database; replace the
# placeholder before running the rest of the notebook.
key_to_be_deleted = "your_key_name_here"
print(f"Key with name '{key_to_be_deleted}' will be deleted.")
```
### Instantiate Kubernetes client
```
# Instantiate the Python Kubernetes client into 'api' variable
import os
from IPython.display import Markdown
try:
    from kubernetes import client, config
    from kubernetes.stream import stream
    # These env vars are only set when running inside a Kubernetes pod, so
    # they distinguish in-cluster config from local kubeconfig access.
    if "KUBERNETES_SERVICE_PORT" in os.environ and "KUBERNETES_SERVICE_HOST" in os.environ:
        config.load_incluster_config()
    else:
        try:
            config.load_kube_config()
        except:
            display(Markdown(f'HINT: Use [TSG118 - Configure Kubernetes config](../repair/tsg118-configure-kube-config.ipynb) to resolve this issue.'))
            raise
    api = client.CoreV1Api()
    print('Kubernetes client instantiated')
except ImportError:
    display(Markdown(f'HINT: Use [SOP059 - Install Kubernetes Python module](../install/sop059-install-kubernetes-module.ipynb) to resolve this issue.'))
    raise
```
### Get the namespace for the big data cluster
Get the namespace of the Big Data Cluster from the Kuberenetes API.
**NOTE:**
If there is more than one Big Data Cluster in the target Kubernetes
cluster, then either:
- set \[0\] to the correct value for the big data cluster.
- set the environment variable AZDATA\_NAMESPACE, before starting
Azure Data Studio.
```
# Place Kubernetes namespace name for BDC into 'namespace' variable
if "AZDATA_NAMESPACE" in os.environ:
    namespace = os.environ["AZDATA_NAMESPACE"]
else:
    try:
        # BDC namespaces carry the MSSQL_CLUSTER label; [0] assumes a single
        # Big Data Cluster per Kubernetes cluster (see the note above).
        namespace = api.list_namespace(label_selector='MSSQL_CLUSTER').items[0].metadata.name
    except IndexError:
        from IPython.display import Markdown
        display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
        display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
        display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
        raise
print('The kubernetes namespace for your big data cluster is: ' + namespace)
```
### Python function queries `controller` database and return results.
### Create helper function to run `sqlcmd` against the controller database
```
import pandas
from io import StringIO

# 'display.max_colwidth' must be None (not -1) for "no limit": -1 was
# deprecated in pandas 1.0 and rejected by later versions.
pandas.set_option('display.max_colwidth', None)

# Pod / container that hosts the controller SQL Server instance.
name = 'controldb-0'
container = 'mssql-server'

def run_sqlcmd(query):
    """Run a T-SQL query inside the controller database pod via sqlcmd.

    The query executes with the 'sa' credentials mounted in the pod against
    the 'controller' database, with '^'-separated output; the sed call
    strips the underline row sqlcmd prints beneath the header. Returns
    sqlcmd's stdout as a string.
    """
    command=f"""export SQLCMDPASSWORD=$(cat /var/run/secrets/credentials/mssql-sa-password/password);
/opt/mssql-tools/bin/sqlcmd -b -S . -U sa -Q "SET NOCOUNT ON;
{query}" -d controller -s"^" -W > /tmp/out.csv; sed -i 2d /tmp/out.csv;
cat /tmp/out.csv"""
    output=stream(api.connect_get_namespaced_pod_exec, name, namespace, command=['/bin/sh', '-c', command], container=container, stderr=True, stdout=True)
    return str(output)

print("Function defined")
```
### Delete a key
The following cell will delete a key for HDFS encryption at rest. Once
the key is deleted, it cannot be undone. If the provided key name for
deletion exists, it will be deleted but if the key doesn’t exist, then
no operation will be performed.
```
# Count matching keys first so we can report when nothing needs deleting.
select_query = """ select count(*) from Credentials where application_metadata like '%hdfsvault-svc%'
and type in ('2', '3') and account_name = '{key_name_for_deletion}' """.format(key_name_for_deletion=key_to_be_deleted)
result = run_sqlcmd(select_query)
# NOTE(review): run_sqlcmd returns sqlcmd's raw stdout (header line and
# trailing newline included), so this exact comparison with "0" looks
# fragile — verify against real output.
if(result == "0"):
    print(f"Key name not exists: {key_to_be_deleted}")
else:
    # Delete every credential row matching the key name (types 2 and 3 only).
    delete_query = """delete from Credentials where application_metadata like '%hdfsvault-svc%'
and type in ('2', '3') and account_name = '{key_name_for_deletion}' """.format(key_name_for_deletion=key_to_be_deleted)
    run_sqlcmd(delete_query)
    print("Key with name {keyToBeDeleted} has been deleted.".format(keyToBeDeleted = key_to_be_deleted));
print("Delete Key(s) Operation Completed.")
print('Notebook execution complete.')
```
| github_jupyter |
[View in Colaboratory](https://colab.research.google.com/github/iconix/openai/blob/master/Interactive_textgenrnn_Demo_w_GPU.ipynb)
# Interactive textgenrnn Demo w/ GPU
by [Max Woolf](http://minimaxir.com)
Generate text using a pretrained neural network with a few lines of code, or easily train your own text-generating neural network of any size and complexity, **for free on a GPU using Collaboratory!**
For more about textgenrnn, you can visit [this GitHub repository](https://github.com/minimaxir/textgenrnn).
To get started:
1. Copy this notebook to your Google Drive to keep it and save your changes.
2. Make sure you're running the notebook in Google Chrome.
3. Run the cells below:
```
!pip install -q textgenrnn
from google.colab import files
from textgenrnn import textgenrnn
import os
```
Set the textgenrnn model configuration here. (see the [demo notebook](https://github.com/minimaxir/textgenrnn/blob/master/docs/textgenrnn-demo.ipynb) for more information about these parameters)
If you are using an input file where documents are line-delimited, set `line_delimited` to `True`.
```
model_cfg = {
'rnn_size': 128,
'rnn_layers': 4,
'rnn_bidirectional': True,
'max_length': 40,
'max_words': 10000,
'dim_embeddings': 100,
'word_level': True,
}
train_cfg = {
'line_delimited': False,
'num_epochs': 10,
'gen_epochs': 2,
'batch_size': 1024,
'train_size': 0.8,
'dropout': 0.0,
'max_gen_length': 300,
'validation': False,
'is_csv': False
}
```
After running the next cell, the cell will ask you to upload a file. Upload **any text file** and textgenrnn will start training and generating creative text based on that file!
The cell after that will start the training. And thanks to the power of Keras's CuDNN layers, training is super-fast! When the training is done, running the cell after this will automatically download the weights, the vocab, and the config.
(N.B. the uploaded file is only stored in the Colaboratory VM and no one else can see it)
```
# NOTE(review): the three files.download(...) calls below reference
# `model_name`, which is only assigned further down this cell — as ordered
# here they raise NameError. They presumably belong after training completes
# (as the markdown above describes); confirm the intended cell order.
# `latest_file` is computed but never used afterwards.
uploaded = files.upload()
all_files = [(name, os.path.getmtime(name)) for name in os.listdir()]
latest_file = sorted(all_files, key=lambda x: -x[1])[0][0]
files.download('{}_weights.hdf5'.format(model_name))
files.download('{}_vocab.json'.format(model_name))
files.download('{}_config.json'.format(model_name))
all_files = [(name, os.path.getmtime(name)) for name in os.listdir()]
reviews_file = 'reviews_sample.txt'
model_name = 'colaboratory'
textgen = textgenrnn(name=model_name)
# Line-delimited inputs train per line; otherwise treat the file as one large text.
train_function = textgen.train_from_file if train_cfg['line_delimited'] else textgen.train_from_largetext_file
train_function(
    file_path=reviews_file,
    new_model=True,
    num_epochs=train_cfg['num_epochs'],
    gen_epochs=train_cfg['gen_epochs'],
    batch_size=train_cfg['batch_size'],
    train_size=train_cfg['train_size'],
    dropout=train_cfg['dropout'],
    max_gen_length=train_cfg['max_gen_length'],
    validation=train_cfg['validation'],
    is_csv=train_cfg['is_csv'],
    rnn_layers=model_cfg['rnn_layers'],
    rnn_size=model_cfg['rnn_size'],
    rnn_bidirectional=model_cfg['rnn_bidirectional'],
    max_length=model_cfg['max_length'],
    dim_embeddings=model_cfg['dim_embeddings'],
    word_level=model_cfg['word_level'])
```
To recreate the model on your own computer, you can do:
```
from textgenrnn import textgenrnn
textgen = textgenrnn(weights_path='colaboratory_weights.hdf5',
vocab_path='colaboratory_vocab.json',
config_path='colaboratory_config.json')
textgen.generate_samples(max_gen_length=1000)
textgen.generate_to_file('textgenrnn_texts.txt', max_gen_length=1000)
```
Have fun with your new model! :)
If the notebook has errors (e.g. GPU Sync Fail), force-kill the virtual machine with the command below:
```
!kill -9 -1
model_cfg = {
'rnn_size': 128,
'rnn_layers': 4,
'rnn_bidirectional': True,
'max_length': 40,
'max_words': 10000,
'dim_embeddings': 100,
'word_level': False,
}
train_cfg = {
'line_delimited': False,
'num_epochs': 5,
'gen_epochs': 2,
'batch_size': 64,
'train_size': 0.8,
'dropout': 0.0,
'max_gen_length': 300,
'validation': False,
'is_csv': False
}
all_files = [(name, os.path.getmtime(name)) for name in os.listdir()]
reviews_file = 'reviews_sample.txt'
model_name = 'colaboratory'
textgen = textgenrnn(name=model_name)
train_function = textgen.train_from_file if train_cfg['line_delimited'] else textgen.train_from_largetext_file
train_function(
file_path=reviews_file,
new_model=True,
num_epochs=train_cfg['num_epochs'],
gen_epochs=train_cfg['gen_epochs'],
batch_size=train_cfg['batch_size'],
train_size=train_cfg['train_size'],
dropout=train_cfg['dropout'],
max_gen_length=train_cfg['max_gen_length'],
validation=train_cfg['validation'],
is_csv=train_cfg['is_csv'],
rnn_layers=model_cfg['rnn_layers'],
rnn_size=model_cfg['rnn_size'],
rnn_bidirectional=model_cfg['rnn_bidirectional'],
max_length=model_cfg['max_length'],
dim_embeddings=model_cfg['dim_embeddings'],
word_level=model_cfg['word_level'])
```
| github_jupyter |
# About this Jupyter Notebook
@author: Yingding Wang
### Useful JupyterLab Basics
Before starting, you may consider updating JupyterLab with the command
<code>python
!{sys.executable} -m pip install --upgrade --user jupyterlab
</code>
1. Autocomplete syntax with "Tab"
2. View Doc String with "Shift + Tab"
3. mark the code snippet -> select with right mouse -> Show Contextual Help (see the function code)
```
# import sys, os
# %env
import sys, os
print(f"Sys version: {sys.version}")
# os.environ["KF_PIPELINES_SA_TOKEN_PATH"]="/var/run/secrets/kubernetes.io/serviceaccount/token"
# os.environ["KF_PIPELINES_SA_TOKEN_PATH"]="/var/run/secrets/kubeflow/pipelines/token"
!{sys.executable} -m pip show jupyterlab # 3.0.16
# !{sys.executable} -m pip show jupyter_contrib_nbextensions
# update the jupyter lab
#!{sys.executable} -m pip install --upgrade --user jupyterlab
"""upgrade the kfp server api version to 1.7.0 for KF 1.4"""
# !{sys.executable} -m pip uninstall -y kfp-server-api
# !{sys.executable} -m pip install --user --upgrade kfp-server-api==1.7.0
import sys
!{sys.executable} -m pip install --upgrade --user kfp==1.8.12
!{sys.executable} -m pip install --upgrade --user kubernetes==18.20.0
#!{sys.executable} -m pip install --upgrade --user kubernetes==21.7.0
```
# Restart the kernel
After updating kfp, restart this notebook kernel.
Jupyter notebook: Menu -> Kernel -> Restart Kernel
## Check the KubeFlow Pipeline version on the server side
```
!{sys.executable} -m pip list | grep kfp
```
### Check my KubeFlow namespace total resource limits
```
# run command line to see the quota
!kubectl describe quota
```
## Setup
Example Pipeline from
https://github.com/kubeflow/examples/tree/master/pipelines/simple-notebook-pipeline
## Getting started with Python function-based components
https://www.kubeflow.org/docs/components/pipelines/sdk/python-function-components/
```
from platform import python_version
EXPERIMENT_NAME = 'core kf test' # Name of the experiment in the UI
EXPERIMENT_DESC = 'testing KF platform'
# BASE_IMAGE = f"library/python:{python_version()}" # Base image used for components in the pipeline, which has not root
BASE_IMAGE = "python:3.8.13"
NAME_SPACE = "kubeflow-kindfor" # change namespace if necessary
import kfp
import kubernetes
import kfp.dsl as dsl
import kfp.compiler as compiler
import kfp.components as components
```
## Connecting KFP Python SDK from Notebook to Pipeline
* https://www.kubeflow.org/docs/components/pipelines/sdk/connect-api/
```
print(kfp.__version__)
print(kubernetes.__version__)
def add(a: float, b: float) -> float:
    """Return the sum of two floats, printing the computed equation."""
    total = a + b
    print(a, '+', b, '=', total)
    return total
```
### Create component from function
https://kubeflow-pipelines.readthedocs.io/en/latest/source/kfp.components.html
```
# returns a task factory function
add_op = components.create_component_from_func(
add,
output_component_file='add_component.yaml',
base_image=BASE_IMAGE,
packages_to_install=None
)
```
### Add pod memory and cpu restriction
https://github.com/kubeflow/pipelines/pull/5695
```
'''
def pod_defaults_transformer(op: dsl.ContainerOp):
op.set_memory_request('100Mi') # op.set_memory_limit('1000Mi')
op.set_memory_limit('100Mi')
op.set_cpu_request('100m') # 1 core, # op.set_cpu_limit('1000m')
op.set_cpu_limit('1000m')
return op
'''
def pod_defaults_transformer(op: dsl.ContainerOp):
    """Apply default CPU/memory requests and limits to a pipeline op.

    Reference points: set_memory_limit('1000Mi') is 1 GB and
    set_cpu_limit('1000m') is one CPU core.
    """
    # Each setter returns the op, so rebinding preserves the original chain.
    op = op.set_memory_request('200Mi')
    op = op.set_memory_limit('1000Mi')
    op = op.set_cpu_request('2000m')
    return op.set_cpu_limit('2000m')
@dsl.pipeline(
    name='Calculation pipeline',
    description='A toy pipeline that performs arithmetic calculations.'
)
def calc_pipeline(
    a: float = 0,
    b: float = 7
):
    """Chain two `add_op` steps: first (a + 4), then (previous result + b).

    Each step is wrapped in `pod_defaults_transformer` to set default
    CPU/memory requests and limits, and result caching is disabled on both.
    """
    # First step: pipeline parameter `a` plus the constant 4.
    first_add_task = pod_defaults_transformer(add_op(a, 4))
    # "P0D" (zero-duration max staleness) forces a fresh run instead of a cached result.
    first_add_task.execution_options.caching_strategy.max_cache_staleness = "P0D"
    # Second step: consumes the first step's output and adds `b`.
    second_add_task = pod_defaults_transformer(add_op(first_add_task.output, b))
    # Disable caching for this step as well.
    second_add_task.execution_options.caching_strategy.max_cache_staleness = "P0D"
```
### (optional step) Compile the pipeline to see the settings
```
PIPE_LINE_FILE_NAME="calc_pipeline_with_resource_limit"
kfp.compiler.Compiler().compile(calc_pipeline, f"{PIPE_LINE_FILE_NAME}.yaml")
```
# Run Pipeline with Multi-user Isolation
https://www.kubeflow.org/docs/components/pipelines/multi-user/
```
# get the pipeline host from env set up be the notebook instance
client = kfp.Client()
# Make sure the volume is mounted /run/secrets/kubeflow/pipelines
# client.get_experiment(experiment_name=EXPERIMENT_NAME, namespace=NAME_SPACE)
# client.list_pipelines()
# print(NAME_SPACE)
# client.list_experiments(namespace=NAME_SPACE)
client.set_user_namespace(NAME_SPACE)
print(client.get_user_namespace())
exp = client.create_experiment(EXPERIMENT_NAME, description=EXPERIMENT_DESC)
# Specify pipeline argument values
arguments = {'a': '7', 'b': '8'}
# added a default pod transformer to all the pipeline ops
pipeline_config: dsl.PipelineConf = dsl.PipelineConf()
#pipeline_config.add_op_transformer(
# pod_defaults_transformer
#)
client.create_run_from_pipeline_func(pipeline_func=calc_pipeline, arguments=arguments,
experiment_name=EXPERIMENT_NAME, namespace=NAME_SPACE,
pipeline_conf=pipeline_config)
# The generated links below lead to the Experiment page and the pipeline run details page, respectively
```
| github_jupyter |
# Introduction to zfit
In this notebook, we will have a walk through the main components of zfit and their features. Especially the extensive model building part will be discussed separately.
zfit consists of 5 mostly independent parts. Other libraries can rely on these parts to do plotting or statistical inference, as hepstats does. Therefore we will discuss two libraries in this tutorial: zfit to build models, data and a loss, minimize it and get a fit result, and hepstats, to use the loss we built here and do inference.
<img src="attachment:screenshot%20from%202020-07-16%2014-29-15.png" style="max-width:50%">
## Data
This component in general plays a minor role in zfit: it is mostly to provide a unified interface for data.
Preprocessing is therefore not part of zfit and should be done beforehand. Python offers many great possibilities to do so (e.g. Pandas).
zfit `Data` can load data from various sources, most notably from Numpy, Pandas DataFrame, TensorFlow Tensor and ROOT (using uproot). It is also possible, for convenience, to convert it directly `to_pandas`. The constructors are named `from_numpy`, `from_root` etc.
```
import zfit
from zfit import z
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
```
A `Data` needs not only the data itself but also the observables: the human readable string identifiers of the axes (corresponding to "columns" of a Pandas DataFrame). It is convenient to define the `Space` not only with the observable but also with a limit: this can directly be re-used as the normalization range in the PDF.
First, let's define our observables
```
obs = zfit.Space('obs1', (-5, 10))
```
This `Space` has limits. Next to the effect of handling the observables, we can also play with the limits: multiple `Spaces` can be added to provide disconnected ranges. More importantly, `Space` offers functionality:
- limit1d: return the lower and upper limit in the 1 dimensional case (raises an error otherwise)
- rect_limits: return the n dimensional limits
- area(): calculate the area (e.g. distance between upper and lower)
- inside(): return a boolean Tensor corresponding to whether the value is _inside_ the `Space`
- filter(): filter the input values to only return the one inside
```
size_normal = 10000
data_normal_np = np.random.normal(size=size_normal, scale=2)
data_normal = zfit.Data.from_numpy(obs=obs, array=data_normal_np)
```
The main functionality is
- nevents: attribute that returns the number of events in the object
- data_range: a `Space` that defines the limits of the data; if outside, the data will be cut
- n_obs: defines the number of dimensions in the dataset
- with_obs: returns a subset of the dataset with only the given obs
- weights: event based weights
Furthermore, `value` returns a Tensor with shape `(nevents, n_obs)`.
To retrieve values, in general `z.unstack_x(data)` should be used; this returns a single Tensor with shape (nevents) or a list of tensors if `n_obs` is larger than 1.
```
print(f"We have {data_normal.nevents} events in our dataset with the minimum of {np.min(data_normal.unstack_x())}") # remember! The obs cut out some of the data
data_normal.n_obs
```
## Model
Building models is by far the largest part of zfit. We will therefore cover an essential part, the possibility to build custom models, in an extra chapter. Let's start out with the idea that you define your parameters and your observable space; the latter is the expected input data.
There are two types of models in zfit:
- functions, which are rather simple and "underdeveloped"; their usage is often not required.
- PDFs, which are functions that are normalized (over a specified range); this is the main model and is what we are going to use throughout the tutorials.
A PDF is defined by
\begin{align}
\mathrm{PDF}_{f(x)}(x; \theta) = \frac{f(x; \theta)}{\int_{a}^{b} f(x; \theta)}
\end{align}
where a and b define the normalization range (`norm_range`), over which (by inserting into the above definition) the integral of the PDF is unity.
zfit has a modular approach to things and this is also true for models. While the normalization itself (e.g. what are parameters, what is normalized data) will already be pre-defined in the model, models are composed of functions that are transparently called inside. For example, a Gaussian would usually be implemented by writing a Python function `def gauss(x, mu, sigma)`, which does not care about the normalization and then be wrapped in a PDF, where the normalization and what is a parameter is defined.
In principle, we can go far by using simply functions (e.g. [TensorFlowAnalysis/AmpliTF](https://github.com/apoluekt/AmpliTF) by Anton Poluektov uses this approach quite successfully for Amplitude Analysis), but this design has limitations for a more general fitting library such as zfit (or even [TensorWaves](https://github.com/ComPWA/tensorwaves), being built on top of AmpliTF).
The main thing is to keep track of the different ordering of the data and parameters, especially the dependencies.
Let's create a simple Gaussian PDF. We already defined the `Space` for the data before, now we only need the parameters. These are different objects than a `Space`.
### Parameter
A `Parameter` (there are different kinds actually, more on that later) takes the following arguments as input:
`Parameter(human readable name, initial value[, lower limit, upper limit])` where the limits are recommended but not mandatory. Furthermore, `step_size` can be given (which is useful to be around the given uncertainty, e.g. for large yields or small values it can help a lot to set this). Also, a `floating` argument is supported, indicating whether the parameter is allowed to float in the fit or not (just omitting the limits does _not_ make a parameter constant).
Parameters have a unique name. This is served as the identifier for e.g. fit results. However, a parameter _cannot_ be retrieved by its string identifier (its name) but the object itself should be used. In places where a parameter maps to something, the object itself is needed, not its name.
```
mu = zfit.Parameter('mu', 1, -3, 3, step_size=0.2)
sigma_num = zfit.Parameter('sigma42', 1, 0.1, 10, floating=False)
```
These attributes can be changed:
```
print(f"sigma is float: {sigma_num.floating}")
sigma_num.floating = True
print(f"sigma is float: {sigma_num.floating}")
```
*PITFALL NOTEBOOKS: since the parameters have a unique name, a second parameter with the same name cannot be created; the behavior is undefined and therefore it raises an error.
While this does not pose a problem in a normal Python script, it does in a Jupyter-like notebook, since it is an often practice to "rerun" a cell as an attempt to "reset" things. Bear in mind that this does not make sense, from a logic point of view. The parameter already exists. Best practice: write a small wrapper, do not rerun the parameter creation cell or simply rerun the notebook (restart kernel & run all). For further details, have a look at the discussion and arguments [here](https://github.com/zfit/zfit/issues/186)*
Now we have everything to create a Gaussian PDF:
```
gauss = zfit.pdf.Gauss(obs=obs, mu=mu, sigma=sigma_num)
```
Since this holds all the parameters and the observables are well defined, we can retrieve them
```
gauss.n_obs # dimensions
gauss.obs
gauss.space
gauss.norm_range
```
As we've seen, the `obs` we defined is the `space` of Gauss: this acts as the default limits whenever needed (e.g. for sampling). `gauss` also has a `norm_range`, which equals by default as well to the `obs` given, however, we can explicitly change that with `set_norm_range`.
We can also access the parameters of the PDF in two ways, depending on our intention:
either by _name_ (the parameterization name, e.g. `mu` and `sigma`, as defined in the `Gauss`), which is useful if we are interested in the parameter that _describes_ the shape
```
gauss.params
```
or to retrieve all the parameters that the PDF depends on. As this now may sounds trivial, we will see later that models can depend on other models (e.g. sums) and parameters on other parameters. There is one function that automatically retrieves _all_ dependencies, `get_params`. It takes three arguments to filter:
- floating: whether to filter only floating parameters, only non-floating or don't discriminate
- is_yield: if it is a yield, or not a yield, or both
- extract_independent: whether to recursively collect all parameters. This, and the explanation for why independent, can be found later on in the `Simultaneous` tutorial.
Usually, the default is exactly what we want if we look for _all free parameters that this PDF depends on_.
```
gauss.get_params()
```
The difference will also be clear if we e.g. use the same parameter twice:
```
gauss_only_mu = zfit.pdf.Gauss(obs=obs, mu=mu, sigma=mu)
print(f"params={gauss_only_mu.params}")
print(f"get_params={gauss_only_mu.get_params()}")
```
## Functionality
PDFs provide a few useful methods. The main features of a zfit PDF are:
- `pdf`: the normalized value of the PDF. It takes an argument `norm_range` that can be set to `False`, in which case we retrieve the unnormalized value
- `integrate`: given a certain range, the PDF is integrated. As `pdf`, it takes a `norm_range` argument that integrates over the unnormalized `pdf` if set to `False`
- `sample`: samples from the pdf and returns a `Data` object
```
integral = gauss.integrate(limits=(-1, 3)) # corresponds to 2 sigma integral
integral
```
### Tensors
As we see, many zfit functions return Tensors. This is however no magical thing! If we're outside of models, then we can always safely convert them to a numpy array by calling `zfit.run(...)` on it (or any structure containing potentially multiple Tensors). However, this may not even be required often! They can be added just like numpy arrays and interact well with Python and Numpy:
```
np.sqrt(integral)
```
They also have shapes, dtypes, can be sliced, etc. So do not convert them unless you need to. More on this can be seen in the talk later on about zfit and TensorFlow 2.0.
```
sample = gauss.sample(n=1000) # default space taken as limits
sample
sample.unstack_x()[:10]
sample.n_obs
sample.obs
```
We see that sample returns also a zfit `Data` object with the same space as it was sampled in. This can directly be used e.g.
```
probs = gauss.pdf(sample)
probs[:10]
```
**NOTE**: In case you want to do this repeatedly (e.g. for toy studies), there is a way more efficient way (see later on)
## Plotting
so far, we have a dataset and a PDF. Before we go for fitting, we can make a plot. This functionality is not _directly_ provided in zfit (but can be added to [zfit-physics](https://github.com/zfit/zfit-physics)). It is however simple enough to do it:
```
def plot_model(model, data, scale=1, plot_data=True):
    """Plot the model pdf scaled to event counts, optionally with the data histogram.

    NOTE(review): the curve is normalized with the module-level ``size_normal``,
    not with ``data`` itself — assumes the dataset holds about that many events;
    ``scale`` additionally rescales the curve (used for component fractions).
    """
    n_bins = 50
    low, high = data.data_range.limit1d
    grid = tf.linspace(low, high, num=1000)  # np.linspace would work as well
    # Turn pdf values into expected counts per bin, then apply the extra scale.
    curve = scale * (model.pdf(grid) * size_normal / n_bins * data.data_range.area())
    plt.plot(grid, curve)
    values_np = zfit.run(z.unstack_x(data))  # `to_pandas` would be an alternative
    if plot_data:
        plt.hist(values_np, bins=n_bins)
plot_model(gauss, data_normal)
```
We can of course do better (and will see that later on, continuously improve the plots), but this is quite simple and gives us the full power of matplotlib.
### Different models
zfit offers a selection of predefined models (and extends with models from zfit-physics that contain physics specific models such as ARGUS shaped models).
```
print(zfit.pdf.__all__)
```
To create a more realistic model, we can build some components for a mass fit with a
- signal component: CrystalBall
- combinatorial background: Exponential
- partial reconstructed background on the left: Kernel Density Estimation
```
mass_obs = zfit.Space('mass', (0, 1000))
# Signal component
mu_sig = zfit.Parameter('mu_sig', 400, 100, 600)
sigma_sig = zfit.Parameter('sigma_sig', 50, 1, 100)
alpha_sig = zfit.Parameter('alpha_sig', 300, 100, 400)
n_sig = zfit.Parameter('n sig', 4, 0.1, 30)
signal = zfit.pdf.CrystalBall(obs=mass_obs, mu=mu_sig, sigma=sigma_sig, alpha=alpha_sig, n=n_sig)
# combinatorial background
lam = zfit.Parameter('lambda', -0.01, -0.05, -0.001)
comb_bkg = zfit.pdf.Exponential(lam, obs=mass_obs)
part_reco_data = np.random.normal(loc=200, scale=150, size=700)
part_reco_data = zfit.Data.from_numpy(obs=mass_obs, array=part_reco_data) # we don't need to do this but now we're sure it's inside the limits
part_reco = zfit.pdf.GaussianKDE1DimV1(obs=mass_obs, data=part_reco_data, bandwidth='adaptive')
```
## Composing models
We can also compose multiple models together. Here we'll stick to one dimensional models, the extension to multiple dimensions is explained in the "custom models tutorial".
Here we will use a `SumPDF`. This takes pdfs and fractions. If we provide n pdfs and:
- n - 1 fracs: the nth fraction will be 1 - sum(fracs)
- n fracs: no normalization attempt is done by `SumPDF`. If the fracs are not implicitly normalized, this can lead to bad fitting
behavior if there is a degree of freedom too much
```
sig_frac = zfit.Parameter('sig_frac', 0.3, 0, 1)
comb_bkg_frac = zfit.Parameter('comb_bkg_frac', 0.25, 0, 1)
model = zfit.pdf.SumPDF([signal, comb_bkg, part_reco], [sig_frac, comb_bkg_frac])
```
In order to have a corresponding data sample, we can just create one. Since we want to fit to this dataset later on, we will create it with slightly different values. Therefore, we can use the ability of a parameter to be set temporarily to a certain value with
```
print(f"before: {sig_frac}")
with sig_frac.set_value(0.25):
print(f"new value: {sig_frac}")
print(f"after 'with': {sig_frac}")
```
While this is useful, it does not fully scale up. We can use the `zfit.param.set_values` helper therefore.
(_Sidenote: instead of a list of values, we can also use a `FitResult`, the given parameters then take the value from the result_)
```
with zfit.param.set_values([mu_sig, sigma_sig, sig_frac, comb_bkg_frac, lam], [370, 34, 0.18, 0.15, -0.006]):
data = model.sample(n=10000)
plot_model(model, data);
```
Plotting the components is not difficult now: we can either just plot the pdfs separately (as we still can access them) or in a generalized manner by accessing the `pdfs` attribute:
```
def plot_comp_model(model, data):
    """Overlay each component pdf of a sum model, weighted by its fraction."""
    components = model.pdfs
    fractions = model.params.values()
    for component, fraction in zip(components, fractions):
        plot_model(component, data, scale=fraction, plot_data=False)
plot_model(model, data)
plot_comp_model(model, data)
```
Now we can add legends etc. Btw, did you notice that actually, the `frac` params are zfit `Parameters`? But we just used them as if they were Python scalars and it works.
```
print(model.params)
```
### Extended PDFs
So far, we have only looked at normalized PDFs that do contain information about the shape but not about the _absolute_ scale. We can make a PDF extended by adding a yield to it.
The behavior of the new, extended PDF does **NOT change**, any methods we called before will act the same. Only exception, some may require an argument _less_ now. All the methods we used so far will return the same values. What changes is that the flag `model.is_extended` now returns `True`. Furthermore, we have now a few more methods that we can use which would have raised an error before:
- `get_yield`: return the yield parameter (notice that the yield is _not_ added to the shape parameters `params`)
- `ext_{pdf,integrate}`: these methods return the same as the versions used before, however, multiplied by the yield
- `sample` is still the same, but does not _require_ the argument `n` anymore. By default, this will now equal to a _poissonian sampled_ n around the yield.
The `SumPDF` now does not strictly need `fracs` anymore: if _all_ input PDFs are extended, the sum will be as well and use the (normalized) yields as fracs
The preferred way to create an extended PDf is to use `PDF.create_extended(yield)`. However, since this relies on copying the PDF (which may does not work for different reasons), there is also a `set_yield(yield)` method that sets the yield in-place. This won't lead to ambiguities, as everything is supposed to work the same.
```
yield_model = zfit.Parameter('yield_model', 10000, 0, 20000, step_size=10)
model_ext = model.create_extended(yield_model)
```
alternatively, we can create the models as extended and sum them up
```
sig_yield = zfit.Parameter('sig_yield', 2000, 0, 10000, step_size=1)
sig_ext = signal.create_extended(sig_yield)
comb_bkg_yield = zfit.Parameter('comb_bkg_yield', 6000, 0, 10000, step_size=1)
comb_bkg_ext = comb_bkg.create_extended(comb_bkg_yield)
part_reco_yield = zfit.Parameter('part_reco_yield', 2000, 0, 10000, step_size=1)
part_reco.set_yield(part_reco_yield) # unfortunately, `create_extended` does not work here. But no problem, it won't change anyting.
part_reco_ext = part_reco
model_ext_sum = zfit.pdf.SumPDF([sig_ext, comb_bkg_ext, part_reco_ext])
```
# Loss
A loss combines the model and the data, for example to build a likelihood. Furthermore, it can contain constraints, additions to the likelihood. Currently, if the `Data` has weights, these are automatically taken into account.
```
nll_gauss = zfit.loss.UnbinnedNLL(gauss, data_normal)
```
The loss has several attributes to be transparent to higher level libraries. We can calculate the value of it using `value`.
```
nll_gauss.value()
```
Notice that due to graph building, this will take significantly longer on the first run. Rerun the cell above and it will be way faster.
Furthermore, the loss also provides a possibility to calculate the gradients or, often used, the value and the gradients.
We can access the data and models (and possible constraints)
```
nll_gauss.model
nll_gauss.data
nll_gauss.constraints
```
Similar to the models, we can also get the parameters via `get_params`.
```
nll_gauss.get_params()
```
### Extended loss
More interestingly, we can now build a loss for our composite sum model using the sampled data. Since we created an extended model, we can now also create an extended likelihood, taking into account a Poisson term to match the yield to the number of events.
```
nll = zfit.loss.ExtendedUnbinnedNLL(model_ext_sum, data)
nll.get_params()
```
# Minimization
While a loss is interesting, we usually want to minimize it. Therefore we can use the minimizers in zfit, most notably `Minuit`, a wrapper around the [iminuit minimizer](https://github.com/scikit-hep/iminuit).
The philosophy is to create a minimizer instance that is mostly _stateless_, e.g. does not remember the position (there are considerations to make it possible to have a state, in case you feel interested, [contact us](https://github.com/zfit/zfit#contact))
Given that iminuit provides us with a very reliable and stable minimizer, it is usually recommended to use this. Others are implemented as well and could easily be wrapped, however, the convergence is usually not as stable.
Minuit has a few options:
- `tolerance`: the Estimated Distance to Minimum (EDM) criteria for convergence (default 1e-3)
- `verbosity`: between 0 and 10, 5 is normal, 7 is verbose, 10 is maximum
- `use_minuit_grad`: if True, uses the Minuit numerical gradient instead of the TensorFlow gradient. This is usually more stable for smaller fits; furthermore the TensorFlow gradient _can_ (experience based) sometimes be wrong.
```
minimizer = zfit.minimize.Minuit(use_minuit_grad=True)
```
For the minimization, we can call `minimize`, which takes a
- loss as we created above
- optionally: the parameters to minimize
By default, `minimize` uses all the free floating parameters (obtained with `get_params`). We can also explicitly specify which ones to use by giving them (or better, objects that depend on them) to `minimize`; note however that non-floating parameters, even if given explicitly to `minimize`, won't be minimized.
## Pre-fit parts of the PDF
Before we want to fit the whole PDF however, it can be useful to pre-fit it. A way can be to fix the combinatorial background by fitting the exponential to the right tail.
Therefore we create a new data object with an additional cut and furthermore, set the normalization range of the background pdf to the range we are interested in.
```
values = z.unstack_x(data)
obs_right_tail = zfit.Space('mass', (700, 1000))
data_tail = zfit.Data.from_tensor(obs=obs_right_tail, tensor=values)
with comb_bkg.set_norm_range(obs_right_tail):
nll_tail = zfit.loss.UnbinnedNLL(comb_bkg, data_tail)
minimizer.minimize(nll_tail)
```
Since we now fit the lambda parameter of the exponential, we can fix it.
```
lam.floating = False
lam
result = minimizer.minimize(nll)
plot_comp_model(model_ext_sum, data)
```
# Fit result
The result of every minimization is stored in a `FitResult`. This is the last stage of the zfit workflow and serves as the interface to other libraries. Its main purpose is to store the values of the fit, to reference to the objects that have been used and to perform (simple) uncertainty estimation.
```
print(result)
```
This gives an overview over the whole result. Often we're mostly interested in the parameters and their values, which we can access with a `params` attribute.
```
print(result.params)
```
This is a `dict` which stores any knowledge about the parameters and can be accessed by the parameter (object) itself:
```
result.params[mu_sig]
```
'value' is the value at the minimum. To obtain other information about the minimization process, `result` contains more attributes:
- fmin: the function minimum
- edm: estimated distance to minimum
- info: contains a lot of information, especially the original information returned by a specific minimizer
- converged: if the fit converged
```
result.fmin
```
## Estimating uncertainties
The `FitResult` has mainly two methods to estimate the uncertainty:
- a profile likelihood method (like MINOS)
- Hessian approximation of the likelihood (like HESSE)
When using `Minuit`, this currently uses its own implementation. However, zfit has its own implementation as well, which is likely to become the standard and can be invoked by changing the method name.
Hesse is also [on the way to implement](https://github.com/zfit/zfit/pull/244) the [corrections for weights](https://inspirehep.net/literature/1762842).
We can explicitly specify which parameters to calculate, by default it does for all.
```
result.hesse()
# result.hesse(method='hesse_np')
```
We get the result directly returned. This is also added to `result.params` for each parameter and is nicely displayed with an added column
```
print(result.params)
errors, new_result = result.errors(params=[sig_yield, part_reco_yield, mu_sig]) # just using three for speed reasons
# errors, new_result = result.errors(params=[yield_model, sig_frac, mu_sig], method='zfit_error')
print(errors)
print(result.params)
```
#### What is 'new_result'?
When profiling a likelihood, such as done in the algorithm used in `errors`, a new minimum can be found. If this is the case, this new minimum will be returned, otherwise `new_result` is `None`. Furthermore, the current `result` would be rendered invalid by setting the flag `valid` to `False`. _Note_: this behavior only applies to the zfit internal error estimator.
### A simple profile
There is no default function (yet) for simple profiling plot. However, again, we're in Python and it's simple enough to do that for a parameter. Let's do it for `sig_yield`
```
x = np.linspace(1600, 2000, num=50)
y = []
sig_yield.floating = False
for val in x:
sig_yield.set_value(val)
y.append(nll.value())
sig_yield.floating = True
zfit.param.set_values(nll.get_params(), result)
plt.plot(x, y)
```
We can also access the covariance matrix of the parameters
```
result.covariance()
```
# End of zfit
This is where zfit finishes and other libraries take over.
# Beginning of hepstats
`hepstats` is a library containing statistical tools and utilities for high energy physics. In particular, you can do statistical inference using the models and likelihood functions constructed in `zfit`.
Short example: let's compute for instance a confidence interval at 68 % confidence level on the mean of the gaussian defined above.
```
from hepstats.hypotests.parameters import POIarray
from hepstats.hypotests.calculators import AsymptoticCalculator
from hepstats.hypotests import ConfidenceInterval
calculator = AsymptoticCalculator(input=result, minimizer=minimizer)
value = result.params[mu_sig]["value"]
error = result.params[mu_sig]["minuit_hesse"]["error"]
mean_scan = POIarray(mu_sig, np.linspace(value - 1.5*error, value + 1.5*error, 10))
ci = ConfidenceInterval(calculator, mean_scan)
ci.interval()
from utils import one_minus_cl_plot
ax = one_minus_cl_plot(ci)
ax.set_xlabel("mean")
```
There will be more of `hepstats` later.
| github_jupyter |
```
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###################################################
# William M. Kirby, 2021
# Theory references: https://arxiv.org/abs/1904.02260, https://arxiv.org/abs/2002.05693, and https://arxiv.org/abs/2011.10027.
###################################################
```
# How to use cs_vqe.py...
In order to import cs_vqe as a module, you may need to add it to `sys.path` via
> `import sys`
> `sys.path.insert(1, 'replace/with/path/to/ContextualSubspaceVQE')`
```
import numpy as np
import cs_vqe as c
```
Let's use as our example the following 8-qubit Hamiltonian, expressed as a dict mapping Pauli terms to their coefficients.
Note that Pauli terms are expressed as strings composed of `'I'`, `'X'`, `'Y'`, and `'Z'`, which respectively denote single-qubit identity, $\sigma_x$, $\sigma_y$, and $\sigma_z$.
```
ham = {'IIIIIIII': -5.001425458221718, 'IIIIIIIX': -0.0005602367691690761, 'IIIIIIIZ': 1.0104418175624195, 'IIIIIIXI': 0.01115672755331298, 'IIIIIIXZ': 0.0002823107691970577, 'IIIIIIYY': -0.0018494520120760328, 'IIIIIIZI': 1.0104418175624192, 'IIIIIIZZ': 0.41455546243330654, 'IIIIIXIX': -0.02906042590649248, 'IIIIIXXI': 9.308532583860961e-05, 'IIIIIXYY': -0.0020610093726644778, 'IIIIIXZX': -0.015075372035589736, 'IIIIIXZZ': -0.03495312426599749, 'IIIIIYIY': -0.02906042590649248, 'IIIIIYYX': 0.0020610093726644778, 'IIIIIYZY': -0.015075372035589736, 'IIIIIZII': -0.11536854093063532, 'IIIIIZIZ': 0.0908717615562679, 'IIIIIZXZ': -0.0015283726900879773, 'IIIIIZZI': 0.09450446372057475, 'IIIIXIXI': 0.001783048244458605, 'IIIIXIZZ': -0.03290395741989794, 'IIIIXXIX': 0.001407793599187085, 'IIIIXXYY': -0.0036327021643068054, 'IIIIXYIY': 0.001407793599187085, 'IIIIXYYX': 0.0036327021643068054, 'IIIIXZIZ': 0.005477331994841993, 'IIIIXZXI': -0.015075372035589731, 'IIIIXZXZ': -0.02906042590649248, 'IIIIXZZI': 0.007538341367506473, 'IIIIXZZZ': 0.006252994193872944, 'IIIIYIYI': 0.001783048244458605, 'IIIIYXXY': 0.0036327021643068054, 'IIIIYYII': -0.030761705333347874, 'IIIIYYXX': -0.0036327021643068054, 'IIIIYZYI': -0.015075372035589731, 'IIIIYZYZ': -0.02906042590649248, 'IIIIYZZY': 9.308532583860961e-05, 'IIIIZIII': -0.11536854093063532, 'IIIIZIIZ': 0.09450446372057475, 'IIIIZIXZ': -0.002936166289275064, 'IIIIZIZI': 0.0908717615562679, 'IIIIZXZX': 0.0017830482444586048, 'IIIIZYZY': 0.0017830482444586048, 'IIIIZZII': 0.12338082226742374, 'IIIXIIII': 0.009009667437306818, 'IIIXIXII': 0.011777126885190555, 'IIIXIZZX': -0.004237537587205241, 'IIIXXIII': 0.007785194491376199, 'IIIXXIXX': -0.002876078434933135, 'IIIXXYYI': -0.0009055645739293965, 'IIIXYIIY': 0.00014546236418240159, 'IIIXYIYX': -0.002876078434933135, 'IIIXYYXI': 0.0009055645739293965, 'IIIXYYZZ': -0.008401378470806105, 'IIIXZIZX': -0.0033319730132758425, 'IIIXZXII': -0.007722138887627405, 'IIIXZXIZ': 
-3.4257263552726085e-05, 'IIIXZXXZ': -0.0003236922436263174, 'IIIXZXZI': -0.0029103356984858603, 'IIIXZZIX': -0.03443680350741975, 'IIIXZZXI': 0.001050560600682628, 'IIIXZZYY': -0.0003501492223109143, 'IIIXZZZX': -0.025703421671713647, 'IIIXZZZZ': 0.010904255360997208, 'IIIYIYII': 0.011777126885190555, 'IIIYIZZY': -0.004237537587205241, 'IIIYXIXY': -0.002876078434933135, 'IIIYXXYI': 0.0009055645739293965, 'IIIYYIIX': -0.00014546236418240159, 'IIIYYIYY': -0.002876078434933135, 'IIIYYXXI': -0.0009055645739293965, 'IIIYYXZZ': 0.008401378470806105, 'IIIYZIZY': -0.0033319730132758425, 'IIIYZYII': -0.007722138887627405, 'IIIYZYIZ': -3.4257263552726085e-05, 'IIIYZYXZ': -0.0003236922436263174, 'IIIYZYZI': -0.0029103356984858603, 'IIIYZZIY': -0.03443680350741975, 'IIIYZZYX': 0.0003501492223109143, 'IIIYZZZY': -0.025703421671713647, 'IIIZIIII': -0.19741969863049835, 'IIIZIIIZ': 0.09360099466745045, 'IIIZIIXZ': 0.00242624352162051, 'IIIZIIZI': 0.09898377244525777, 'IIIZIXZX': -0.002956741912550393, 'IIIZIYZY': -0.002956741912550393, 'IIIZIZII': 0.053512399991895616, 'IIIZXZXI': -0.002899815560653565, 'IIIZXZZZ': 0.002455848829893685, 'IIIZYZYI': -0.002899815560653565, 'IIIZZIII': 0.056570323857124016, 'IIIZZXII': -0.002455848829893685, 'IIIZZZZX': -0.00242624352162051, 'IIXIIIII': -0.010904255360997208, 'IIXIIIXX': -0.0003501492223109143, 'IIXIIYYI': 0.00014546236418240159, 'IIXIXIII': -0.0015766376238054121, 'IIXIXXII': -0.008401378470806105, 'IIXIXZZX': -0.0003236922436263174, 'IIXIZZXI': 0.0004952892952564533, 'IIXIZZZZ': -0.009009667437306818, 'IIXXIIIX': 0.0011230560926772924, 'IIXXIIYY': -0.005382777777807297, 'IIXXIXXI': 5.692635189682806e-05, 'IIXXIXZZ': -0.002135874095436016, 'IIXXYYII': -0.0030579238652283926, 'IIXXYZZY': 5.692635189682806e-05, 'IIXYIIIY': 0.0011230560926772924, 'IIXYIIYX': 0.005382777777807297, 'IIXYIYXI': 5.692635189682806e-05, 'IIXYIYZZ': -0.002135874095436016, 'IIXYYIII': 0.002135874095436016, 'IIXYYXII': 0.0030579238652283926, 'IIXYYZZX': 
-5.692635189682806e-05, 'IIXYZZYI': -0.0011230560926772924, 'IIXZIZXI': -0.0033319730132758425, 'IIXZIZZZ': 0.00434212837164487, 'IIXZXIII': -0.0077221388876273925, 'IIXZXIIZ': -0.0029103356984858603, 'IIXZXIXZ': -0.0013717943100752413, 'IIXZXIZI': -3.4257263552726085e-05, 'IIXZXXZX': 0.0009055645739293965, 'IIXZXYZY': 0.0009055645739293965, 'IIXZXZII': 0.011777126885190555, 'IIXZYIYZ': -0.0009026397022665242, 'IIXZZIXI': -0.004237537587205241, 'IIXZZIZZ': 0.012743506842450984, 'IIXZZXIX': -0.001048102066448925, 'IIXZZXYY': -0.002876078434933135, 'IIXZZYIY': -0.001048102066448925, 'IIXZZYYX': 0.002876078434933135, 'IIXZZZIZ': -0.004704676047649981, 'IIXZZZXI': -0.025703421671713637, 'IIXZZZXZ': -0.03443680350741975, 'IIXZZZZI': -0.004354526825339065, 'IIXZZZZZ': 0.014552460686322212, 'IIYIIIIY': 0.001050560600682628, 'IIYIIIYX': -0.0003501492223109143, 'IIYIIYXI': -0.00014546236418240159, 'IIYIIYZZ': 0.007785194491376198, 'IIYIYIII': -0.0015766376238054121, 'IIYIYXII': -0.008401378470806105, 'IIYIYZZX': -0.0003236922436263174, 'IIYIZZYI': 0.0004952892952564533, 'IIYXIIXY': 0.005382777777807297, 'IIYXIXYI': 5.692635189682806e-05, 'IIYXXYII': 0.0030579238652283926, 'IIYXXZZY': -5.692635189682806e-05, 'IIYYIIII': -0.006577661355747096, 'IIYYIIXX': -0.005382777777807297, 'IIYYIYYI': 5.692635189682806e-05, 'IIYYXIII': -0.002135874095436016, 'IIYYXXII': -0.0030579238652283926, 'IIYYXZZX': 5.692635189682806e-05, 'IIYYZZXI': 0.0011230560926772924, 'IIYYZZZZ': -0.006577661355747096, 'IIYZIZYI': -0.0033319730132758425, 'IIYZYIII': -0.0077221388876273925, 'IIYZYIIZ': -0.0029103356984858603, 'IIYZYIXZ': -0.0004691546078087188, 'IIYZYIZI': -3.4257263552726085e-05, 'IIYZYXZX': 0.0009055645739293965, 'IIYZYYZY': 0.0009055645739293965, 'IIYZYZII': 0.011777126885190555, 'IIYZZIYI': -0.004237537587205241, 'IIYZZXXY': 0.002876078434933135, 'IIYZZYII': 0.007785194491376198, 'IIYZZYXX': -0.002876078434933135, 'IIYZZZYI': -0.025703421671713637, 'IIYZZZYZ': -0.03443680350741975, 
'IIYZZZZY': 0.001050560600682628, 'IIZIIIII': -0.1974196986304983, 'IIZIIIIZ': 0.09898377244525777, 'IIZIIIXZ': 0.001303187428943218, 'IIZIIIZI': 0.09360099466745045, 'IIZIIXZX': -0.002899815560653565, 'IIZIIYZY': -0.002899815560653565, 'IIZIIZII': 0.056570323857124016, 'IIZIXZXI': -0.002956741912550393, 'IIZIXZZZ': 0.0045917229253296995, 'IIZIYZYI': -0.002956741912550393, 'IIZIZIII': 0.053512399991895616, 'IIZIZXII': -0.0045917229253296995, 'IIZIZZZX': -0.001303187428943218, 'IIZXIIII': -0.014552460686322212, 'IIZXIIIZ': 0.004704676047649981, 'IIZXIIXZ': 0.001050560600682628, 'IIZXIIZI': 0.004354526825339065, 'IIZXIXZX': -0.0013717943100752413, 'IIZXIYZY': -0.0009026397022665242, 'IIZXIZII': -0.00434212837164487, 'IIZXXZXI': -0.001048102066448925, 'IIZXXZZZ': 0.007785194491376199, 'IIZXYZYI': -0.001048102066448925, 'IIZXZIII': -0.012743506842450984, 'IIZXZXII': -0.0015766376238054121, 'IIZXZZZX': 0.0004952892952564533, 'IIZYIYZX': -0.0004691546078087188, 'IIZYZYII': -0.0015766376238054121, 'IIZYZZZY': 0.0004952892952564533, 'IIZZIIII': 0.08468170939963947, 'IIZZIXII': 0.03290395741989794, 'IIZZIZZX': 0.0015283726900879773, 'IIZZXIII': 0.03495312426599749, 'IIZZXIXX': -0.0020610093726644778, 'IIZZXYYI': -0.0014077935991870852, 'IIZZYIIY': 9.308532583860961e-05, 'IIZZYIYX': -0.0020610093726644778, 'IIZZYYXI': 0.0014077935991870852, 'IIZZYYZZ': -0.030761705333347874, 'IIZZZIZX': 0.002936166289275064, 'IIZZZXII': -0.006252994193872958, 'IIZZZXIZ': -0.005477331994841993, 'IIZZZXXZ': 9.308532583860961e-05, 'IIZZZXZI': -0.007538341367506473, 'IIZZZZIX': -0.011156727553312975, 'IIZZZZXI': 0.0005602367691690761, 'IIZZZZYY': -0.0018494520120760328, 'IIZZZZZX': -0.0002823107691970594, 'IIZZZZZZ': 0.11404443186403874, 'IXIIIIII': 0.004877026695939432, 'IXIIIIIX': -0.0015024523676698824, 'IXIIIIXX': 0.0024547625653344985, 'IXIIIIYY': -0.0024547625653344985, 'IXIIIXXI': 0.0018915758280655941, 'IXIIIXZZ': 0.004882949606014626, 'IXIIIYYI': -0.0018915758280655941, 'IXIIXXII': 
0.00598108191541999, 'IXIIXZZX': -0.0018915758280655941, 'IXIIYYII': -0.00598108191541999, 'IXIIYZZY': 0.0018915758280655941, 'IXIXXIII': -0.0048038632604819175, 'IXIXZZXI': 0.0025611654019874314, 'IXIXZZZZ': 0.0034636381008790766, 'IXIYYIII': 0.0048038632604819175, 'IXIYZZYI': -0.0025611654019874314, 'IXXIIIII': -0.003463638100879079, 'IXXXIIII': 0.010327309992822217, 'IXXZZXII': 0.004803863260481915, 'IXXZZZZX': -0.0025611654019874314, 'IXYYIIII': -0.010327309992822217, 'IXYZZYII': -0.004803863260481915, 'IXYZZZZY': 0.0025611654019874314, 'IXZZXIII': -0.004882949606014626, 'IXZZZZXI': 0.0015024523676698824, 'IXZZZZZZ': 0.004877026695939432, 'IYIIIIIY': -0.0015024523676698824, 'IYIIIIXY': 0.0024547625653344985, 'IYIIIIYX': 0.0024547625653344985, 'IYIIIXYI': 0.0018915758280655941, 'IYIIIYXI': 0.0018915758280655941, 'IYIIIYZZ': 0.004882949606014626, 'IYIIXYII': 0.00598108191541999, 'IYIIXZZY': -0.0018915758280655941, 'IYIIYXII': 0.00598108191541999, 'IYIIYZZX': -0.0018915758280655941, 'IYIXYIII': -0.0048038632604819175, 'IYIXZZYI': 0.0025611654019874314, 'IYIYXIII': -0.0048038632604819175, 'IYIYZZXI': 0.0025611654019874314, 'IYIYZZZZ': 0.0034636381008790766, 'IYXYIIII': 0.010327309992822217, 'IYXZZYII': 0.004803863260481915, 'IYXZZZZY': -0.0025611654019874314, 'IYYIIIII': -0.003463638100879079, 'IYYXIIII': 0.010327309992822217, 'IYYZZXII': 0.004803863260481915, 'IYYZZZZX': -0.0025611654019874314, 'IYZZYIII': -0.004882949606014626, 'IYZZZZYI': 0.0015024523676698824, 'IZIIIIII': -0.4579624763565425, 'IZIIIIIZ': 0.19570005174416316, 'IZIIIIXZ': 0.0016203413844881676, 'IZIIIIZI': 0.19570005174416316, 'IZIIIXZX': -0.00417410539426637, 'IZIIIYZY': -0.00417410539426637, 'IZIIIZII': 0.1313183119904171, 'IZIIXZXI': -0.00417410539426637, 'IZIIXZZZ': 0.0008821359371306427, 'IZIIYZYI': -0.00417410539426637, 'IZIIZIII': 0.1313183119904171, 'IZIXZXII': 0.002385574658706054, 'IZIXZZZX': -0.0050332548281435945, 'IZIYZYII': 0.002385574658706054, 'IZIYZZZY': -0.0050332548281435945, 
'IZIZIIII': 0.13077016464474733, 'IZIZIZIZ': 0.12971451752077234, 'IZXZIZIZ': 0.004210307119683613, 'IZXZXIII': 0.002385574658706054, 'IZXZZZXI': -0.0050332548281435945, 'IZXZZZZZ': -0.004210307119683613, 'IZYZYIII': 0.002385574658706054, 'IZYZZZYI': -0.0050332548281435945, 'IZZIIIII': 0.13077016464474733, 'IZZIXZIZ': -0.0008821359371306427, 'IZZIZIXZ': -0.0016203413844881676, 'IZZIZIZI': 0.12971451752077234, 'IZZIZIZX': 0.0016203413844881676, 'IZZIZXIZ': 0.0008821359371306427, 'IZZXIIII': 0.004210307119683613, 'IZZXIZIZ': -0.004210307119683613, 'IZZZZXII': -0.0008821359371306427, 'IZZZZZZX': -0.0016203413844881676, 'XIIIIIII': 0.004877026695939432, 'XIIIIIIX': -0.0015024523676698824, 'XIIIIIXX': 0.0024547625653344985, 'XIIIIIYY': -0.0024547625653344985, 'XIIIIXXI': 0.0018915758280655941, 'XIIIIXZZ': 0.004882949606014626, 'XIIIIYYI': -0.0018915758280655941, 'XIIIXXII': 0.00598108191541999, 'XIIIXZZX': -0.0018915758280655941, 'XIIIYYII': -0.00598108191541999, 'XIIIYZZY': 0.0018915758280655941, 'XIIXXIII': -0.0048038632604819175, 'XIIXZZXI': 0.0025611654019874314, 'XIIXZZZZ': 0.0034636381008790766, 'XIIYYIII': 0.0048038632604819175, 'XIIYZZYI': -0.0025611654019874314, 'XIXIIIII': -0.003463638100879079, 'XIXXIIII': 0.010327309992822217, 'XIXZZXII': 0.004803863260481915, 'XIXZZZZX': -0.0025611654019874314, 'XIYYIIII': -0.010327309992822217, 'XIYZZYII': -0.004803863260481915, 'XIYZZZZY': 0.0025611654019874314, 'XIZZXIII': -0.004882949606014626, 'XIZZZZXI': 0.0015024523676698824, 'XIZZZZZZ': 0.004877026695939432, 'XXIIIIII': 0.008434569756845523, 'XZIZIZIZ': -0.004877026695939432, 'XZXZIZIZ': 0.003463638100879079, 'XZZIXZIZ': 0.004882949606014626, 'XZZIZIXZ': -0.0015024523676698824, 'XZZIZIZI': -0.004877026695939432, 'XZZIZIZX': 0.0015024523676698824, 'XZZIZXIZ': -0.004882949606014626, 'XZZXIZIZ': -0.0034636381008790766, 'YIIIIIIY': -0.0015024523676698824, 'YIIIIIXY': 0.0024547625653344985, 'YIIIIIYX': 0.0024547625653344985, 'YIIIIXYI': 0.0018915758280655941, 'YIIIIYXI': 
0.0018915758280655941, 'YIIIIYZZ': 0.004882949606014626, 'YIIIXYII': 0.00598108191541999, 'YIIIXZZY': -0.0018915758280655941, 'YIIIYXII': 0.00598108191541999, 'YIIIYZZX': -0.0018915758280655941, 'YIIXYIII': -0.0048038632604819175, 'YIIXZZYI': 0.0025611654019874314, 'YIIYXIII': -0.0048038632604819175, 'YIIYZZXI': 0.0025611654019874314, 'YIIYZZZZ': 0.0034636381008790766, 'YIXYIIII': 0.010327309992822217, 'YIXZZYII': 0.004803863260481915, 'YIXZZZZY': -0.0025611654019874314, 'YIYIIIII': -0.003463638100879079, 'YIYXIIII': 0.010327309992822217, 'YIYZZXII': 0.004803863260481915, 'YIYZZZZX': -0.0025611654019874314, 'YIZZYIII': -0.004882949606014626, 'YIZZZZYI': 0.0015024523676698824, 'YYIIIIII': 0.008434569756845523, 'YZYZIZIZ': 0.003463638100879079, 'YZZIYZIZ': 0.004882949606014626, 'YZZIZIYZ': -0.0015024523676698824, 'YZZIZIZY': 0.0015024523676698824, 'YZZIZYIZ': -0.004882949606014626, 'YZZYIZIZ': -0.0034636381008790766, 'ZIIIIIII': -0.4579624763565428, 'ZIIIIIIZ': 0.19570005174416316, 'ZIIIIIXZ': 0.0016203413844881676, 'ZIIIIIZI': 0.19570005174416316, 'ZIIIIXZX': -0.00417410539426637, 'ZIIIIYZY': -0.00417410539426637, 'ZIIIIZII': 0.1313183119904171, 'ZIIIXZXI': -0.00417410539426637, 'ZIIIXZZZ': 0.0008821359371306427, 'ZIIIYZYI': -0.00417410539426637, 'ZIIIZIII': 0.1313183119904171, 'ZIIXZXII': 0.002385574658706054, 'ZIIXZZZX': -0.0050332548281435945, 'ZIIYZYII': 0.002385574658706054, 'ZIIYZZZY': -0.0050332548281435945, 'ZIIZIIII': 0.13077016464474733, 'ZIIZIZIZ': 0.12971451752077234, 'ZIXZIZIZ': 0.004210307119683613, 'ZIXZXIII': 0.002385574658706054, 'ZIXZZZXI': -0.0050332548281435945, 'ZIXZZZZZ': -0.004210307119683613, 'ZIYZYIII': 0.002385574658706054, 'ZIYZZZYI': -0.0050332548281435945, 'ZIZIIIII': 0.13077016464474733, 'ZIZIXZIZ': -0.0008821359371306427, 'ZIZIZIXZ': -0.0016203413844881676, 'ZIZIZIZI': 0.12971451752077234, 'ZIZIZIZX': 0.0016203413844881676, 'ZIZIZXIZ': 0.0008821359371306427, 'ZIZXIIII': 0.004210307119683613, 'ZIZXIZIZ': -0.004210307119683613, 
'ZIZZZXII': -0.0008821359371306427, 'ZIZZZZZX': -0.0016203413844881676, 'ZXIZIZIZ': -0.004877026695939432, 'ZXXZIZIZ': 0.003463638100879079, 'ZXZIXZIZ': 0.004882949606014626, 'ZXZIZIXZ': -0.0015024523676698824, 'ZXZIZIZI': -0.004877026695939432, 'ZXZIZIZX': 0.0015024523676698824, 'ZXZIZXIZ': -0.004882949606014626, 'ZXZXIZIZ': -0.0034636381008790766, 'ZYYZIZIZ': 0.003463638100879079, 'ZYZIYZIZ': 0.004882949606014626, 'ZYZIZIYZ': -0.0015024523676698824, 'ZYZIZIZY': 0.0015024523676698824, 'ZYZIZYIZ': -0.004882949606014626, 'ZYZYIZIZ': -0.0034636381008790766, 'ZZIIIIII': 0.2707726623751819, 'ZZIIIZIZ': 0.05391367939397465, 'ZZIIXZIZ': -0.0045917229253296995, 'ZZIIZIXZ': -0.001303187428943218, 'ZZIIZIZI': 0.05391367939397465, 'ZZIIZIZX': 0.00242624352162051, 'ZZIIZXIZ': 0.002455848829893685, 'ZZIXIZIZ': -0.009009667437306818, 'ZZIXXZIZ': -0.007785194491376199, 'ZZIXZIXZ': -0.001050560600682628, 'ZZIXZIZI': -0.010904255360997208, 'ZZIXZIZX': 0.0038911166426645587, 'ZZIXZXIZ': -0.0027874839498146504, 'ZZIYZIZY': 0.0038911166426645587, 'ZZIYZYIZ': -0.0027874839498146504, 'ZZIZIIIZ': 0.08349406473042321, 'ZZIZIXXZ': -9.308532583860961e-05, 'ZZIZIXZI': 0.03495312426599749, 'ZZIZIXZX': -0.0009548391315205742, 'ZZIZIYZY': -0.0009548391315205742, 'ZZIZIZII': 0.08857801254613343, 'ZZIZIZIX': 0.0005602367691690761, 'ZZIZIZIZ': -0.39690605678468144, 'ZZIZIZXI': -0.0005602367691690761, 'ZZIZIZYY': 0.0018494520120760328, 'ZZIZIZZZ': 0.09042746455820946, 'ZZIZXIXZ': 0.0010479244573591837, 'ZZIZXIZI': -0.03495312426599749, 'ZZIZYIYZ': 0.0010479244573591837, 'ZZIZYIZY': -9.308532583860961e-05, 'ZZIZYYIZ': 0.030761705333347874, 'ZZIZZZIZ': 0.11425577006377113, 'ZZXIIZIZ': 0.009009667437306818, 'ZZXIXZIZ': 0.010572678441190855, 'ZZXIZIXZ': -0.002840556041981928, 'ZZXIZIZI': 0.010904255360997208, 'ZZXXZIZX': -0.0011230560926772924, 'ZZXXZXIZ': 0.002135874095436016, 'ZZXYYZIZ': -0.002135874095436016, 'ZZXYZIYZ': 0.0011230560926772924, 'ZZXYZIZY': -0.0011230560926772924, 'ZZXYZYIZ': 
0.002135874095436016, 'ZZXZIIIZ': -0.012743506842450984, 'ZZXZIXZX': 0.001048102066448925, 'ZZXZIYYZ': -0.00014546236418240159, 'ZZXZIYZY': 0.001048102066448925, 'ZZXZIZII': 0.004354526825339065, 'ZZXZIZIZ': -0.014552460686322212, 'ZZXZIZXI': 0.0038911166426645587, 'ZZXZIZXX': 0.0003501492223109143, 'ZZXZIZZZ': 0.004704676047649981, 'ZZXZXIXZ': -0.0013717943100752413, 'ZZXZXIZI': -0.0027874839498146504, 'ZZXZXIZX': 0.0003236922436263174, 'ZZXZXXIZ': 0.008401378470806105, 'ZZXZYIYZ': -0.0009026397022665242, 'ZZXZZZIZ': -0.00434212837164487, 'ZZYIYZIZ': 0.010572678441190855, 'ZZYIZIYZ': -0.002840556041981928, 'ZZYIZIZY': -0.001050560600682628, 'ZZYIZYIZ': -0.007785194491376198, 'ZZYYIZIZ': 0.006577661355747096, 'ZZYYXZIZ': 0.002135874095436016, 'ZZYYZIXZ': -0.0011230560926772924, 'ZZYYZIZI': 0.006577661355747096, 'ZZYZIYXZ': 0.00014546236418240159, 'ZZYZIYZI': -0.007785194491376198, 'ZZYZIZIY': -0.001050560600682628, 'ZZYZIZYI': 0.0038911166426645587, 'ZZYZIZYX': 0.0003501492223109143, 'ZZYZYIXZ': -0.0004691546078087188, 'ZZYZYIZI': -0.0027874839498146504, 'ZZYZYIZX': 0.0003236922436263174, 'ZZYZYXIZ': 0.008401378470806105, 'ZZZIIIXZ': 0.002936166289275064, 'ZZZIIIZI': 0.08349406473042321, 'ZZZIIIZX': -0.0015283726900879773, 'ZZZIIXIZ': -0.03290395741989794, 'ZZZIXIIZ': 0.03290395741989794, 'ZZZIXXZX': -0.001407793599187085, 'ZZZIXYYZ': 0.0014077935991870852, 'ZZZIXYZY': -0.001407793599187085, 'ZZZIXZII': -0.007538341367506473, 'ZZZIXZIZ': -0.006252994193872944, 'ZZZIXZXI': -0.0009548391315205742, 'ZZZIXZXX': 0.0020610093726644778, 'ZZZIXZZZ': -0.005477331994841993, 'ZZZIYYXZ': -0.0014077935991870852, 'ZZZIYYZI': 0.030761705333347874, 'ZZZIYZIY': -9.308532583860961e-05, 'ZZZIYZYI': -0.0009548391315205742, 'ZZZIYZYX': 0.0020610093726644778, 'ZZZIZIII': 0.08857801254613343, 'ZZZIZIIX': 0.011156727553312975, 'ZZZIZIXI': -0.01115672755331298, 'ZZZIZIXZ': -0.0002823107691970577, 'ZZZIZIYY': 0.0018494520120760328, 'ZZZIZIZI': -0.39690605678468144, 'ZZZIZIZX': 
0.0002823107691970594, 'ZZZIZIZZ': 0.09042746455820946, 'ZZZIZXII': 0.005477331994841993, 'ZZZIZXIX': 0.0010479244573591837, 'ZZZIZXIZ': 0.006252994193872958, 'ZZZIZXXI': -9.308532583860961e-05, 'ZZZIZXYY': 0.0020610093726644778, 'ZZZIZXZZ': 0.007538341367506473, 'ZZZIZYIY': 0.0010479244573591837, 'ZZZIZYYX': -0.0020610093726644778, 'ZZZIZZXZ': 0.0015283726900879773, 'ZZZIZZZI': 0.11425577006377113, 'ZZZIZZZX': -0.002936166289275064, 'ZZZXIIIZ': 0.00434212837164487, 'ZZZXIXXZ': 0.0003236922436263174, 'ZZZXIXZI': 0.010572678441190855, 'ZZZXIXZX': -0.0013717943100752413, 'ZZZXIYZY': -0.0009026397022665242, 'ZZZXIZII': -0.004704676047649981, 'ZZZXIZIX': -0.002840556041981928, 'ZZZXIZIZ': 0.014552460686322212, 'ZZZXIZXI': -0.001050560600682628, 'ZZZXIZYY': 0.0003501492223109143, 'ZZZXIZZZ': -0.004354526825339065, 'ZZZXXIXZ': 0.001048102066448925, 'ZZZXXIZI': -0.007785194491376199, 'ZZZXYIYZ': 0.001048102066448925, 'ZZZXYIZY': -0.00014546236418240159, 'ZZZXYYIZ': 0.008401378470806105, 'ZZZXZZIZ': 0.012743506842450984, 'ZZZYIYXZ': 0.0003236922436263174, 'ZZZYIYZI': 0.010572678441190855, 'ZZZYIYZX': -0.0004691546078087188, 'ZZZYIZIY': -0.002840556041981928, 'ZZZYIZYX': -0.0003501492223109143, 'ZZZYYIZX': 0.00014546236418240159, 'ZZZYYXIZ': -0.008401378470806105, 'ZZZZIZIZ': 0.060491340749721755, 'ZZZZXZIZ': -0.002455848829893685, 'ZZZZZIXZ': -0.00242624352162051, 'ZZZZZIZI': 0.060491340749721755, 'ZZZZZIZX': 0.001303187428943218, 'ZZZZZXIZ': 0.0045917229253296995}
```
## Testing contextuality
To test whether this Hamiltonian is contextual, run the following function, which takes $O(N^3)$ time where $N$ is the number of terms in the Hamiltonian.
See <https://arxiv.org/abs/1904.02260> for the definition of contextuality.
```
c.contextualQ_ham(ham)
```
You should find that the Hamiltonian is contextual. If you just want to check whether a list S of Pauli terms expressed as strings is contextual, use `c.contextualQ(S)` instead.
To find a large noncontextual sub-Hamiltonian (i.e., supported on a subset of the terms), run the following cell.
The second argument is the number of seconds to continue the depth-first search for (note that the depth-first search only starts after the initial candidate sub-Hamiltonian is found, so the total runtime may be longer). The third, optional argument can be either `'weight'` or `'size'` (the default is `'weight'`), determining whether the largest noncontextual subset is chosen by total term weight (sum of magnitudes of coefficients) or number of terms.
```
print(c.greedy_dfs(ham, 10, criterion='weight'))
```
The output is a list of subsets of the terms in `ham`, each of which is noncontextual.
Every time the depth-first search identifies a new largest noncontextual subset, it is added to the end of the output list, so the final item in the list is the largest noncontextual subset found.
Assuming you ran the above code and only obtained one nontrivial subset, it should give the following noncontextual Hamiltonian:
```
terms_noncon = ['IIIIIIII', 'IIIIIIIZ', 'IIIIIIZI', 'ZIIIIIII', 'IZIIIIII', 'IIIIIIZZ', 'ZZIZIZIZ', 'ZZZIZIZI', 'ZZIIIIII', 'IIIZIIII', 'IIZIIIII', 'IZIIIIIZ', 'IZIIIIZI', 'ZIIIIIIZ', 'ZIIIIIZI', 'IZIIIZII', 'IZIIZIII', 'ZIIIIZII', 'ZIIIZIII', 'IZIZIIII', 'IZZIIIII', 'ZIIZIIII', 'ZIZIIIII', 'IZIZIZIZ', 'IZZIZIZI', 'ZIIZIZIZ', 'ZIZIZIZI', 'IIIIZZII', 'IIIIIZII', 'IIIIZIII', 'ZZIZZZIZ', 'ZZZIZZZI', 'IIZZZZZZ', 'IIIZIIZI', 'IIZIIIIZ', 'IIIIIZZI', 'IIIIZIIZ', 'IIIZIIIZ', 'IIZIIIZI', 'IIIIIZIZ', 'IIIIZIZI', 'ZZIZIZZZ', 'ZZZIZIZZ', 'ZZIZIZII', 'ZZZIZIII', 'IIZZIIII', 'ZZIZIIIZ', 'ZZZIIIZI', 'ZZZZIZIZ', 'ZZZZZIZI', 'IIIZZIII', 'IIZIIZII', 'ZZIIIZIZ', 'ZZIIZIZI', 'IIIZIZII', 'IIZIZIII', 'IIIIIXZZ', 'ZZIZIXZI', 'IIZZIXII', 'ZZZIIXIZ', 'IIZZZXZI', 'ZZZIZXZZ', 'IIZZZXII', 'ZZZIZXIZ', 'IIZZZXIZ', 'ZZZIZXII', 'IIZIZXII', 'ZZZZZXIZ', 'IIIZZXII', 'ZZIIZXIZ', 'IZZIZXIZ', 'IZZZZXII', 'ZIZIZXIZ', 'ZIZZZXII']
ham_noncon = {p:ham[p] for p in terms_noncon}
```
Let's make sure it's really noncontextual:
```
print(c.contextualQ(terms_noncon))
print(c.contextualQ_ham(ham_noncon))
```
## Noncontextual models
cs_vqe.py provides three functions for generating and handling noncontextual (quasi-quantized) models of noncontextual Hamiltonians (as described in <https://arxiv.org/abs/2002.05693>).
The first is `quasi_model(ham_noncon)`:
```
model = c.quasi_model(ham_noncon)
print('commuting generators:',model[0], '\n')
print('anticommuting generators:',model[1], '\n')
print('term reconstruction:',model[2], '\n')
```
`quasi_model(ham_noncon)` returns a list containing three elements (printed above), which are:
* list of the commuting generators ($G$) for the noncontextual model,
* list of the anticommuting generators (the $C_{i1}$) for the noncontextual model,
* dict mapping the terms in `ham_noncon` to their reconstructions in terms of the generators. Each term $P$ is mapped to a list containing three entries:
1. list of the commuting generators that are factors of $P$,
2. list of the anticommuting generators that are factors of $P$ (should contain at most one element),
3. overall sign ($\pm1$).
Second, `energy_function_form(ham_noncon, model)` returns an encoding of the Hamiltonian expectation value expressed as a function of the parameters of the noncontextual model. The argument `model` must be the output of `quasi_model(ham_noncon)`.
```
fn_form = c.energy_function_form(ham_noncon, model)
print('dim of q: ', fn_form[0])
print('dim of r: ', fn_form[1])
print('encoding of function: ', fn_form[2], '\n')
```
`energy_function_form(ham_noncon, model)` returns a list containing three elements (printed above), which are:
* the number of parameters $q_j$ (number of commuting generators),
* the number of parameters $r_i$ (number of anticommuting generators),
* a list containing one element per term $P$ in `ham_noncon`, whose elements have the form `[coeff, commuting_generators, anticommuting_generators, P]`, where
1. `coeff` is the coefficient of $P$,
2. `commuting_generators` is a list of the indices of the commuting generators that are factors in $P$,
3. `anticommuting_generators` is a list containing the index of the anticommuting generator that is a factor in $P$ (if any),
4. `P` is the string representation of $P$.
Third, `energy_function(fn_form)` returns an explicit function for the expectation value of the Hamiltonian. The argument `fn_form` must be the output of `energy_function_form(ham_noncon, model)`.
```
energy = c.energy_function(fn_form)
print(energy, '\n')
print(energy(1, 1, 1, -1, -1, 1, -1, 1/np.sqrt(2), -1/np.sqrt(2)))
```
`energy_function(fn_form)` returns a function of `fn_form[0] + fn_form[1]` parameters, i.e., the total number of parameters for a noncontextual state. The first `fn_form[0]` of these parameters are the values $q_j$, which must be $\pm1$, and the remainder are the $r_i$, which must form the components of a unit vector $\vec{r}$.
Note: letting the parameters $q_j$ vary continuously in $[-1,1]$ doesn't break anything, and may be useful depending on the optimization scheme. The resulting energies will still be possible expectation values of the Hamiltonian, but the parameter settings for $q_j\neq\pm1$ may not correspond to individual term expectation values that are realized by any actual quantum state.
Using the noncontextual model, `find_gs_noncon(ham_noncon, method = 'differential_evolution', model = None, fn_form = None, energy = None, timer = False)` approximates the ground state energy of a noncontextual Hamiltonian `ham_noncon` by brute-force search over the parameters $q_j=\pm1$, and numerical minimization over the unit vector $\vec{r}$.
Optional arguments:
* `method` specifies the numerical optimization method for $\vec{r}$,
* `model`, `fn_form`, and `energy` are the outputs of `quasi_model`, `energy_function_form`, and `energy_function`, respectively, if these have already been computed,
* the boolean `timer` determines whether the runtime of the optimization should be output as a second return.
```
gs_noncon = c.find_gs_noncon(ham_noncon)
print('ground state energy estimate:', gs_noncon[0], '\n')
print('energy-minimizing parameter setting:', gs_noncon[1])
ep_state = gs_noncon[1]
```
`find_gs_noncon(ham_noncon)` returns a list whose first element is the estimated ground state energy of `ham_noncon`, whose second element is the energy-minimizing parameter setting for $[\vec{q},\vec{r}]$, and whose remaining elements are the outputs of `quasi_model` and `energy_function_form`.
## Computing quantum (contextual) corrections to noncontextual ground state approximations
See <https://arxiv.org/abs/2011.10027>.
Given an arbitrary Hamiltonian `ham` whose noncontextual part `ham_noncon` has a quasi-quantized model given by
> `model = quasi_model(ham_noncon)`,
> `fn_form = energy_function_form(ham_noncon, model)`,
> `ep_state` given by the energy-minimizing parameter setting for `ham_noncon`, i.e., `ep_state = find_gs_noncon(ham_noncon)[1]`,
the function `quantum_correction(ham, model, fn_form, ep_state)` returns the minimum energy for the full Hamiltonian `ham` in the subspace corresponding to the noncontextual ground state of `ham_noncon`. This is equivalent to returning the classically-estimated ground state energy of `ham_noncon` plus the energy of the remaining terms in `ham` minimized within the noncontextual ground space, which would be computed on the quantum computer in the actual protocol.
```
c.quantum_correction(ham, model, fn_form, ep_state)
```
Using our current working example, the quantum correction so far should not provide any improvement over the pure noncontextual approximation `gs_noncon[0]` that we obtained above:
```
gs_noncon[0]
```
That is because for this Hamiltonian, the noncontextual states turn out to uniquely identify quantum states, in which case no quantum correction is possible (see <https://arxiv.org/abs/2011.10027>, Corollary 1.1). That provides the motivation for expanding the search space via CS-VQE...
## CS-VQE
The function `get_reduced_hamiltonians(ham, model, fn_form, ep_state, order)` returns a list of the quantum correction Hamiltonians for CS-VQE using $0,1,2,...,n$ qubits on the quantum computer (for `ham` a Hamiltonian on $n$ qubits).
These Hamiltonians include the noncontextual approximation to the ground state energy in their constant terms, so the CS-VQE approximation for each number of qubits on the quantum computer is obtained by finding the ground state energy of each Hamiltonian in this list.
See <https://arxiv.org/abs/2011.10027> for theory.
The first four arguments are as we have defined above, while `order` is a list specifying the order in which the qubits should be moved from the noncontextual part to the quantum part.
```
order = [0,1,2,3,4,5,6,7]
reduced_hamiltonians = c.get_reduced_hamiltonians(ham, model, fn_form, ep_state, order)
print('pure noncontextual approximation:',reduced_hamiltonians[0],'\n')
print('using four qubits on quantum processor:',reduced_hamiltonians[4])
```
The function `contextual_subspace_approximations(ham, model, fn_form, ep_state, order)` returns a list of the approximations obtained by applying CS-VQE using 0, 1, 2,... ,$n$ qubits on the quantum processor (for `ham` a Hamiltonian on $n$ qubits).
See <https://arxiv.org/abs/2011.10027> for theory.
The first four arguments are as we have defined above, while `order` is a list specifying the order in which the qubits should be moved from the noncontextual part to the quantum part.
The quantum part of the algorithm is simulated classically, so this function does take **exponential time** in the number of qubits.
If only a partial order is specified, the method is only simulated up to moving those specified qubits to the quantum part.
```
# Full order example:
order = [0,1,2,3,4,5,6,7]
c.contextual_subspace_approximations(ham, model, fn_form, ep_state, order)
```
For a full order, the first entry in the output of `contextual_subspace_approximations` should be the pure noncontextual approximation, while the last entry should be the true ground state energy of the full Hamiltonian `ham` (up to numerical precision), which in our working example is:
```
true_gs = -8.933276065676191
# Partial order example:
order = [0,1,2,3,4]
c.contextual_subspace_approximations(ham, model, fn_form, ep_state, order)
```
cs_vqe.py also provides classical simulation of a heuristic `csvqe_approximations_heuristic` that can be used to generate `order`s for `contextual_subspace_approximations`. See <https://arxiv.org/abs/2011.10027> for details of how it works. The arguments are
* `ham`,
* `ham_noncon`,
* `n_qubits`, the number of qubits that the Hamiltonians act on, and
* `true_gs`, the actual ground state energy of the full Hamiltonian `ham`.
This function takes **exponential time** in the number of qubits, since the quantum parts of the algorithm are simulated classically.
For our working example and on a laptop, you should expect runtime of at most about a minute for the cell below.
```
n_qubits = 8
csvqe = c.csvqe_approximations_heuristic(ham, ham_noncon, n_qubits, true_gs)
print('true ground state energy:', csvqe[0], '\n')
print('CS-VQE approximations:', csvqe[1], '\n')
print('CS-VQE errors:', csvqe[2], '\n')
print('chosen order:', csvqe[3])
```
Finally, to get the numbers of terms in the reduced contextual sub-Hamiltonians (i.e., the numbers of terms for the quantum corrections), use `num_of_terms(ham, ham_noncon, order, ep_state)`. Using the order output by the above heuristic...
```
order = [7, 4, 2, 3, 1, 0, 6, 5]
c.num_of_terms(ham, ham_noncon, order, ep_state)
```
The output is a dict mapping the number of qubits used on the quantum computer to the number of terms simulated on the quantum computer.
## Reproducing the results in <https://arxiv.org/abs/2011.10027>
The file `'hamiltonians.txt'` contains all of the Hamiltonians that were used to test CS-VQE in <https://arxiv.org/abs/2011.10027>, plus a few extras that we left out of the paper because they are either too small, or the full Hamiltonians themselves are noncontextual (and thus not interesting for CS-VQE).
To load the Hamiltonians, run the following:
```
import ast
import matplotlib
import matplotlib.pyplot as plt
f = open("hamiltonians.txt","r")
hamiltonians = ast.literal_eval(f.read())
f.close()
```
`hamiltonians` is now a dict mapping species names to information about them.
Here is a list of all of the species:
```
for k in hamiltonians.keys():
print(k)
```
If we pick out a particular one of these, we can read off the information about it as follows:
```
speciesname = 'H2-S1_STO-3G_singlet'
encoding = hamiltonians[speciesname][0] # in this dataset, always 'JW' for Jordan-Wigner, but leaves room for trying Bravyi-Kitaev as well
n_qubits = hamiltonians[speciesname][1] # number of qubits (all of these Hamiltonians have been tapered for molecular symmetries)
ham = hamiltonians[speciesname][2] # full Hamiltonian
ham_noncon = hamiltonians[speciesname][3] # noncontextual part of Hamiltonian, found by greedy DFS
true_gs = hamiltonians[speciesname][4] # ground state energy of full Hamiltonian (in Hartree)
gs_noncon = hamiltonians[speciesname][5] # list containing information about noncontextual ground state: zeroth entry is ground state energy of noncontextual part of Hamiltonian
print('number of qubits:', n_qubits)
print('true ground state energy:', true_gs)
print('noncontextual approximation to ground state energy:', gs_noncon[0])
```
You can now run `csvqe_approximations_heuristic(ham, ham_noncon, n_qubits, true_gs)` to reproduce the results of CS-VQE for the above Hamiltonian. Alternatively, you can run the loop below to iterate over all of the Hamiltonians. **Beware:** the largest Hamiltonians (18 qubits) take a few days to run on my laptop (everything else runs in a few hours).
```
## CAUTION: only run this cell if you actually want to reproduce the results of CS-VQE
## for all of the Hamiltonians in <https://arxiv.org/abs/2011.10027>!
## This takes a while.
## It will go in order of increasing qubits, though, so you can abort part way through or cap the number of qubits
## that you want to try.
csvqe_results = {}
max_n_qubits = 18 # max number of qubits to run for: the largest Hamiltonian is 18 qubits
for n in range(max_n_qubits+1):
for speciesname in hamiltonians.keys():
n_qubits = hamiltonians[speciesname][1]
if n_qubits == n:
ham = hamiltonians[speciesname][2]
ham_noncon = hamiltonians[speciesname][3]
true_gs = hamiltonians[speciesname][4]
print(speciesname,n_qubits)
csvqe_out = c.csvqe_approximations_heuristic(ham, ham_noncon, n_qubits, true_gs)
csvqe_results[speciesname] = csvqe_out
print(' best order:',csvqe_out[3])
print(' resulting errors:',csvqe_out[2],'\n')
# Uncomment and run the code below to save your results:
# f = open('csvqe_results_new.txt', 'w')
# f.write(str(csvqe_results))
# f.close()
```
If instead you just want to load the results that we computed and reproduce our plot as in Fig. 2 in <https://arxiv.org/abs/2011.10027>, you can run the following cells:
```
# Load the precomputed CS-VQE results and reproduce Fig. 2 of
# <https://arxiv.org/abs/2011.10027>: six panels, one per group of total qubit
# counts, each plotting approximation error vs. number of qubits used.
import numpy as np  # needed for np.arange below (not imported by the earlier cell)

with open('csvqe_results.txt', 'r') as f:
    csvqe_results = ast.literal_eval(f.read())

fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(21, 12))
for i in range(6):
    # Total qubit counts shown in this panel, and the panel's grid position.
    n_qubits = [[5, 10], [9, 14, 17], [8, 13], [6, 15], [16], [18]][i]
    loca = [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)][i]
    for filename in csvqe_results.keys():
        errors_heuristic = csvqe_results[filename][2]
        l = len(filename)
        # A species belongs in this panel if its qubit count matches.
        if len(errors_heuristic) - 1 in n_qubits:
            axs[loca[0], loca[1]].plot([i for i in range(len(errors_heuristic))], errors_heuristic, label=filename, marker='.', markeredgecolor='k')
    max_range = int(max(n_qubits))
    # Horizontal reference line at chemical accuracy (0.0016 Ha).
    axs[loca[0], loca[1]].plot([x for x in np.arange(-max_range/20, 21*max_range/20+1)], [0.0016 for x in np.arange(-max_range/20, 21*max_range/20+1)], '-', c='k')
    axs[loca[0], loca[1]].set_xlim((-max_range/20, 21*max_range/20))
    axs[loca[0], loca[1]].set_xticks([2*i for i in range(int((max_range+2)/2))])
    if loca[0] == 1:
        axs[loca[0], loca[1]].set_xlabel('Qubits', fontsize=22)
    if loca[1] == 0:
        axs[loca[0], loca[1]].set_ylabel('Error (Ha)', fontsize=22)
    axs[loca[0], loca[1]].tick_params(labelsize=18)
    axs[loca[0], loca[1]].legend(fontsize=20)
fig.show()
```
| github_jupyter |
**Chapter 10 – Introduction to Artificial Neural Networks with Keras**
_This notebook contains all the sample code and solutions to the exercises in chapter 10._
<table align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/ageron/handson-ml2/blob/master/10_neural_nets_with_keras.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
</table>
# Setup
First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0.
```
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
# TensorFlow ≥2.0 is required
import tensorflow as tf
assert tf.__version__ >= "2.0"
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "ann"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current Matplotlib figure under IMAGES_PATH.

    The file is written as ``<fig_id>.<fig_extension>`` at the given DPI;
    by default the layout is tightened before saving.
    """
    target = os.path.join(IMAGES_PATH, "{}.{}".format(fig_id, fig_extension))
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(target, format=fig_extension, dpi=resolution)
# Ignore useless warnings (see SciPy issue #5998)
import warnings
warnings.filterwarnings(action="ignore", message="^internal gelsd")
```
# Perceptrons
**Note**: we set `max_iter` and `tol` explicitly to avoid warnings about the fact that their default value will change in future versions of Scikit-Learn.
```
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
# Train a Perceptron to separate Iris-Setosa using two petal features.
iris = load_iris()
X = iris.data[:, (2, 3)]  # petal length, petal width
# Binary target: 1 for Iris-Setosa, 0 otherwise.  Use the builtin `int`:
# `np.int` was deprecated in NumPy 1.20 and removed in NumPy 1.24.
y = (iris.target == 0).astype(int)
per_clf = Perceptron(max_iter=1000, tol=1e-3, random_state=42)
per_clf.fit(X, y)
y_pred = per_clf.predict([[2, 0.5]])
y_pred
a = -per_clf.coef_[0][0] / per_clf.coef_[0][1]
b = -per_clf.intercept_ / per_clf.coef_[0][1]
axes = [0, 5, 0, 2]
x0, x1 = np.meshgrid(
np.linspace(axes[0], axes[1], 500).reshape(-1, 1),
np.linspace(axes[2], axes[3], 200).reshape(-1, 1),
)
X_new = np.c_[x0.ravel(), x1.ravel()]
y_predict = per_clf.predict(X_new)
zz = y_predict.reshape(x0.shape)
plt.figure(figsize=(10, 4))
plt.plot(X[y==0, 0], X[y==0, 1], "bs", label="Not Iris-Setosa")
plt.plot(X[y==1, 0], X[y==1, 1], "yo", label="Iris-Setosa")
plt.plot([axes[0], axes[1]], [a * axes[0] + b, a * axes[1] + b], "k-", linewidth=3)
from matplotlib.colors import ListedColormap
custom_cmap = ListedColormap(['#9898ff', '#fafab0'])
plt.contourf(x0, x1, zz, cmap=custom_cmap)
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.legend(loc="lower right", fontsize=14)
plt.axis(axes)
save_fig("perceptron_iris_plot")
plt.show()
```
# Activation functions
```
def sigmoid(z):
    """Logistic sigmoid 1 / (1 + e^(-z)), applied element-wise."""
    denominator = 1 + np.exp(-z)
    return 1 / denominator
def relu(z):
    """Rectified linear unit: element-wise max(0, z)."""
    return np.where(z > 0, z, 0)
def derivative(f, z, eps=0.000001):
    """Central-difference estimate of f'(z) using step size eps."""
    upper = f(z + eps)
    lower = f(z - eps)
    return (upper - lower) / (2 * eps)
z = np.linspace(-5, 5, 200)
plt.figure(figsize=(11,4))
plt.subplot(121)
plt.plot(z, np.sign(z), "r-", linewidth=1, label="Step")
plt.plot(z, sigmoid(z), "g--", linewidth=2, label="Sigmoid")
plt.plot(z, np.tanh(z), "b-", linewidth=2, label="Tanh")
plt.plot(z, relu(z), "m-.", linewidth=2, label="ReLU")
plt.grid(True)
plt.legend(loc="center right", fontsize=14)
plt.title("Activation functions", fontsize=14)
plt.axis([-5, 5, -1.2, 1.2])
plt.subplot(122)
plt.plot(z, derivative(np.sign, z), "r-", linewidth=1, label="Step")
plt.plot(0, 0, "ro", markersize=5)
plt.plot(0, 0, "rx", markersize=10)
plt.plot(z, derivative(sigmoid, z), "g--", linewidth=2, label="Sigmoid")
plt.plot(z, derivative(np.tanh, z), "b-", linewidth=2, label="Tanh")
plt.plot(z, derivative(relu, z), "m-.", linewidth=2, label="ReLU")
plt.grid(True)
#plt.legend(loc="center right", fontsize=14)
plt.title("Derivatives", fontsize=14)
plt.axis([-5, 5, -0.2, 1.2])
save_fig("activation_functions_plot")
plt.show()
def heaviside(z):
    """Step function: 1 where z >= 0, else 0 (cast back to z's dtype)."""
    mask = z >= 0
    return mask.astype(z.dtype)

def mlp_xor(x1, x2, activation=heaviside):
    """Two-layer perceptron computing XOR of x1 and x2 (inputs in {0, 1})."""
    # Hidden units approximate OR (threshold 0.5) and AND (threshold 1.5);
    # the output fires when OR is true but AND is not.
    hidden_or = activation(x1 + x2 - 0.5)
    hidden_and = activation(x1 + x2 - 1.5)
    return activation(hidden_or - hidden_and - 0.5)
x1s = np.linspace(-0.2, 1.2, 100)
x2s = np.linspace(-0.2, 1.2, 100)
x1, x2 = np.meshgrid(x1s, x2s)
z1 = mlp_xor(x1, x2, activation=heaviside)
z2 = mlp_xor(x1, x2, activation=sigmoid)
plt.figure(figsize=(10,4))
plt.subplot(121)
plt.contourf(x1, x2, z1)
plt.plot([0, 1], [0, 1], "gs", markersize=20)
plt.plot([0, 1], [1, 0], "y^", markersize=20)
plt.title("Activation function: heaviside", fontsize=14)
plt.grid(True)
plt.subplot(122)
plt.contourf(x1, x2, z2)
plt.plot([0, 1], [0, 1], "gs", markersize=20)
plt.plot([0, 1], [1, 0], "y^", markersize=20)
plt.title("Activation function: sigmoid", fontsize=14)
plt.grid(True)
```
# Building an Image Classifier
First let's import TensorFlow and Keras.
```
import tensorflow as tf
from tensorflow import keras
tf.__version__
keras.__version__
```
Let's start by loading the fashion MNIST dataset. Keras has a number of functions to load popular datasets in `keras.datasets`. The dataset is already split for you between a training set and a test set, but it can be useful to split the training set further to have a validation set:
```
fashion_mnist = keras.datasets.fashion_mnist
(X_train_full, y_train_full), (X_test, y_test) = fashion_mnist.load_data()
```
The training set contains 60,000 grayscale images, each 28x28 pixels:
```
X_train_full.shape
```
Each pixel intensity is represented as a byte (0 to 255):
```
X_train_full.dtype
```
Let's split the full training set into a validation set and a (smaller) training set. We also scale the pixel intensities down to the 0-1 range and convert them to floats, by dividing by 255.
```
X_valid, X_train = X_train_full[:5000] / 255., X_train_full[5000:] / 255.
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_test = X_test / 255.
```
You can plot an image using Matplotlib's `imshow()` function, with a `'binary'`
color map:
```
plt.imshow(X_train[0], cmap="binary")
plt.axis('off')
plt.show()
```
The labels are the class IDs (represented as uint8), from 0 to 9:
```
y_train
```
Here are the corresponding class names:
```
class_names = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
"Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]
```
So the first image in the training set is a coat:
```
class_names[y_train[0]]
```
The validation set contains 5,000 images, and the test set contains 10,000 images:
```
X_valid.shape
X_test.shape
```
Let's take a look at a sample of the images in the dataset:
```
n_rows = 4
n_cols = 10
plt.figure(figsize=(n_cols * 1.2, n_rows * 1.2))
for row in range(n_rows):
for col in range(n_cols):
index = n_cols * row + col
plt.subplot(n_rows, n_cols, index + 1)
plt.imshow(X_train[index], cmap="binary", interpolation="nearest")
plt.axis('off')
plt.title(class_names[y_train[index]], fontsize=12)
plt.subplots_adjust(wspace=0.2, hspace=0.5)
save_fig('fashion_mnist_plot', tight_layout=False)
plt.show()
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
model.add(keras.layers.Dense(300, activation="relu"))
model.add(keras.layers.Dense(100, activation="relu"))
model.add(keras.layers.Dense(10, activation="softmax"))
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="relu"),
keras.layers.Dense(100, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
model.layers
model.summary()
keras.utils.plot_model(model, "my_fashion_mnist_model.png", show_shapes=True)
hidden1 = model.layers[1]
hidden1.name
model.get_layer(hidden1.name) is hidden1
weights, biases = hidden1.get_weights()
weights
weights.shape
biases
biases.shape
model.compile(loss="sparse_categorical_crossentropy",
optimizer="sgd",
metrics=["accuracy"])
```
This is equivalent to:
```python
model.compile(loss=keras.losses.sparse_categorical_crossentropy,
optimizer=keras.optimizers.SGD(),
metrics=[keras.metrics.sparse_categorical_accuracy])
```
```
history = model.fit(X_train, y_train, epochs=30,
validation_data=(X_valid, y_valid))
history.params
print(history.epoch)
history.history.keys()
import pandas as pd
pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.gca().set_ylim(0, 1)
save_fig("keras_learning_curves_plot")
plt.show()
model.evaluate(X_test, y_test)
X_new = X_test[:3]
y_proba = model.predict(X_new)
y_proba.round(2)
# `Sequential.predict_classes()` was deprecated and removed in TF 2.6; for a
# softmax output, take the argmax over the class-probability axis instead.
y_pred = np.argmax(model.predict(X_new), axis=-1)
y_pred
np.array(class_names)[y_pred]
y_new = y_test[:3]
y_new
plt.figure(figsize=(7.2, 2.4))
for index, image in enumerate(X_new):
plt.subplot(1, 3, index + 1)
plt.imshow(image, cmap="binary", interpolation="nearest")
plt.axis('off')
plt.title(class_names[y_test[index]], fontsize=12)
plt.subplots_adjust(wspace=0.2, hspace=0.5)
save_fig('fashion_mnist_images_plot', tight_layout=False)
plt.show()
```
# Regression MLP
Let's load, split and scale the California housing dataset (the original one, not the modified one as in chapter 2):
```
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
housing = fetch_california_housing()
X_train_full, X_test, y_train_full, y_test = train_test_split(housing.data, housing.target, random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(X_train_full, y_train_full, random_state=42)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_valid = scaler.transform(X_valid)
X_test = scaler.transform(X_test)
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Dense(30, activation="relu", input_shape=X_train.shape[1:]),
keras.layers.Dense(1)
])
model.compile(loss="mean_squared_error", optimizer=keras.optimizers.SGD(lr=1e-3))
history = model.fit(X_train, y_train, epochs=20, validation_data=(X_valid, y_valid))
mse_test = model.evaluate(X_test, y_test)
X_new = X_test[:3]
y_pred = model.predict(X_new)
plt.plot(pd.DataFrame(history.history))
plt.grid(True)
plt.gca().set_ylim(0, 1)
plt.show()
y_pred
```
# Functional API
Not all neural network models are simply sequential. Some may have complex topologies. Some may have multiple inputs and/or multiple outputs. For example, a Wide & Deep neural network (see [paper](https://ai.google/research/pubs/pub45413)) connects all or part of the inputs directly to the output layer.
```
np.random.seed(42)
tf.random.set_seed(42)
input_ = keras.layers.Input(shape=X_train.shape[1:])
hidden1 = keras.layers.Dense(30, activation="relu")(input_)
hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)
concat = keras.layers.concatenate([input_, hidden2])
output = keras.layers.Dense(1)(concat)
model = keras.models.Model(inputs=[input_], outputs=[output])
model.summary()
model.compile(loss="mean_squared_error", optimizer=keras.optimizers.SGD(lr=1e-3))
history = model.fit(X_train, y_train, epochs=20,
validation_data=(X_valid, y_valid))
mse_test = model.evaluate(X_test, y_test)
y_pred = model.predict(X_new)
```
What if you want to send different subsets of input features through the wide or deep paths? We will send 5 features through the wide path (features 0 to 4), and 6 through the deep path (features 2 to 7). Note that 3 features will go through both (features 2, 3 and 4).
```
np.random.seed(42)
tf.random.set_seed(42)
input_A = keras.layers.Input(shape=[5], name="wide_input")
input_B = keras.layers.Input(shape=[6], name="deep_input")
hidden1 = keras.layers.Dense(30, activation="relu")(input_B)
hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)
concat = keras.layers.concatenate([input_A, hidden2])
output = keras.layers.Dense(1, name="output")(concat)
model = keras.models.Model(inputs=[input_A, input_B], outputs=[output])
model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=1e-3))
X_train_A, X_train_B = X_train[:, :5], X_train[:, 2:]
X_valid_A, X_valid_B = X_valid[:, :5], X_valid[:, 2:]
X_test_A, X_test_B = X_test[:, :5], X_test[:, 2:]
X_new_A, X_new_B = X_test_A[:3], X_test_B[:3]
history = model.fit((X_train_A, X_train_B), y_train, epochs=20,
validation_data=((X_valid_A, X_valid_B), y_valid))
mse_test = model.evaluate((X_test_A, X_test_B), y_test)
y_pred = model.predict((X_new_A, X_new_B))
```
Adding an auxiliary output for regularization:
```
np.random.seed(42)
tf.random.set_seed(42)
input_A = keras.layers.Input(shape=[5], name="wide_input")
input_B = keras.layers.Input(shape=[6], name="deep_input")
hidden1 = keras.layers.Dense(30, activation="relu")(input_B)
hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)
concat = keras.layers.concatenate([input_A, hidden2])
output = keras.layers.Dense(1, name="main_output")(concat)
aux_output = keras.layers.Dense(1, name="aux_output")(hidden2)
model = keras.models.Model(inputs=[input_A, input_B],
outputs=[output, aux_output])
model.compile(loss=["mse", "mse"], loss_weights=[0.9, 0.1], optimizer=keras.optimizers.SGD(lr=1e-3))
history = model.fit([X_train_A, X_train_B], [y_train, y_train], epochs=20,
validation_data=([X_valid_A, X_valid_B], [y_valid, y_valid]))
total_loss, main_loss, aux_loss = model.evaluate(
[X_test_A, X_test_B], [y_test, y_test])
y_pred_main, y_pred_aux = model.predict([X_new_A, X_new_B])
```
# The subclassing API
```
class WideAndDeepModel(keras.models.Model):
    """Wide & Deep model built with the Keras subclassing API.

    The "deep" path (input_B) runs through two hidden layers; its output is
    concatenated with the raw "wide" input (input_A) before the main output
    layer.  An auxiliary output taps the last hidden layer directly, used
    here as a regularizer during training.
    """
    def __init__(self, units=30, activation="relu", **kwargs):
        super().__init__(**kwargs)  # forwards standard kwargs, e.g. `name`
        self.hidden1 = keras.layers.Dense(units, activation=activation)
        self.hidden2 = keras.layers.Dense(units, activation=activation)
        self.main_output = keras.layers.Dense(1)
        self.aux_output = keras.layers.Dense(1)
    def call(self, inputs):
        # `inputs` is a pair: (wide features, deep features).
        input_A, input_B = inputs
        hidden1 = self.hidden1(input_B)
        hidden2 = self.hidden2(hidden1)
        concat = keras.layers.concatenate([input_A, hidden2])
        main_output = self.main_output(concat)
        aux_output = self.aux_output(hidden2)
        return main_output, aux_output
model = WideAndDeepModel(30, activation="relu")
model.compile(loss="mse", loss_weights=[0.9, 0.1], optimizer=keras.optimizers.SGD(lr=1e-3))
history = model.fit((X_train_A, X_train_B), (y_train, y_train), epochs=10,
validation_data=((X_valid_A, X_valid_B), (y_valid, y_valid)))
total_loss, main_loss, aux_loss = model.evaluate((X_test_A, X_test_B), (y_test, y_test))
y_pred_main, y_pred_aux = model.predict((X_new_A, X_new_B))
model = WideAndDeepModel(30, activation="relu")
```
# Saving and Restoring
```
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Dense(30, activation="relu", input_shape=[8]),
keras.layers.Dense(30, activation="relu"),
keras.layers.Dense(1)
])
model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=1e-3))
history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))
mse_test = model.evaluate(X_test, y_test)
model.save("my_keras_model.h5")
model = keras.models.load_model("my_keras_model.h5")
model.predict(X_new)
model.save_weights("my_keras_weights.ckpt")
model.load_weights("my_keras_weights.ckpt")
```
# Using Callbacks during Training
```
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Dense(30, activation="relu", input_shape=[8]),
keras.layers.Dense(30, activation="relu"),
keras.layers.Dense(1)
])
model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=1e-3))
checkpoint_cb = keras.callbacks.ModelCheckpoint("my_keras_model.h5", save_best_only=True)
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid),
callbacks=[checkpoint_cb])
model = keras.models.load_model("my_keras_model.h5") # rollback to best model
mse_test = model.evaluate(X_test, y_test)
model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=1e-3))
early_stopping_cb = keras.callbacks.EarlyStopping(patience=10,
restore_best_weights=True)
history = model.fit(X_train, y_train, epochs=100,
validation_data=(X_valid, y_valid),
callbacks=[checkpoint_cb, early_stopping_cb])
mse_test = model.evaluate(X_test, y_test)
class PrintValTrainRatioCallback(keras.callbacks.Callback):
    """Print the ratio of validation loss to training loss after each epoch."""
    def on_epoch_end(self, epoch, logs):
        # A ratio well above 1 is a sign of overfitting.
        print("\nval/train: {:.2f}".format(logs["val_loss"] / logs["loss"]))
val_train_ratio_cb = PrintValTrainRatioCallback()
history = model.fit(X_train, y_train, epochs=1,
validation_data=(X_valid, y_valid),
callbacks=[val_train_ratio_cb])
```
# TensorBoard
```
root_logdir = os.path.join(os.curdir, "my_logs")

def get_run_logdir():
    """Return a fresh, timestamped run subdirectory path under root_logdir."""
    import time
    stamp = time.strftime("run_%Y_%m_%d-%H_%M_%S")
    return os.path.join(root_logdir, stamp)
run_logdir = get_run_logdir()
run_logdir
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Dense(30, activation="relu", input_shape=[8]),
keras.layers.Dense(30, activation="relu"),
keras.layers.Dense(1)
])
model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=1e-3))
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
history = model.fit(X_train, y_train, epochs=30,
validation_data=(X_valid, y_valid),
callbacks=[checkpoint_cb, tensorboard_cb])
```
To start the TensorBoard server, one option is to open a terminal, if needed activate the virtualenv where you installed TensorBoard, go to this notebook's directory, then type:
```bash
$ tensorboard --logdir=./my_logs --port=6006
```
You can then open your web browser to [localhost:6006](http://localhost:6006) and use TensorBoard. Once you are done, press Ctrl-C in the terminal window, this will shutdown the TensorBoard server.
Alternatively, you can load TensorBoard's Jupyter extension and run it like this:
```
%load_ext tensorboard
%tensorboard --logdir=./my_logs --port=6006
run_logdir2 = get_run_logdir()
run_logdir2
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Dense(30, activation="relu", input_shape=[8]),
keras.layers.Dense(30, activation="relu"),
keras.layers.Dense(1)
])
model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=0.05))
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir2)
history = model.fit(X_train, y_train, epochs=30,
validation_data=(X_valid, y_valid),
callbacks=[checkpoint_cb, tensorboard_cb])
```
Notice how TensorBoard now sees two runs, and you can compare the learning curves.
Check out the other available logging options:
```
help(keras.callbacks.TensorBoard.__init__)
```
# Hyperparameter Tuning
```
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
def build_model(n_hidden=1, n_neurons=30, learning_rate=3e-3, input_shape=[8]):
    """Build and compile an MLP regressor (for wrapping in KerasRegressor).

    Stacks `n_hidden` Dense layers of `n_neurons` ReLU units on top of an
    input layer of the given shape, ends with a single linear output unit,
    and compiles with SGD at `learning_rate` and MSE loss.
    """
    net = keras.models.Sequential()
    net.add(keras.layers.InputLayer(input_shape=input_shape))
    for _ in range(n_hidden):
        net.add(keras.layers.Dense(n_neurons, activation="relu"))
    net.add(keras.layers.Dense(1))
    sgd = keras.optimizers.SGD(lr=learning_rate)
    net.compile(loss="mse", optimizer=sgd)
    return net
keras_reg = keras.wrappers.scikit_learn.KerasRegressor(build_model)
keras_reg.fit(X_train, y_train, epochs=100,
validation_data=(X_valid, y_valid),
callbacks=[keras.callbacks.EarlyStopping(patience=10)])
mse_test = keras_reg.score(X_test, y_test)
y_pred = keras_reg.predict(X_new)
np.random.seed(42)
tf.random.set_seed(42)
from scipy.stats import reciprocal
from sklearn.model_selection import RandomizedSearchCV
param_distribs = {
"n_hidden": [0, 1, 2, 3],
"n_neurons": np.arange(1, 100),
"learning_rate": reciprocal(3e-4, 3e-2),
}
rnd_search_cv = RandomizedSearchCV(keras_reg, param_distribs, n_iter=10, cv=3, verbose=2)
rnd_search_cv.fit(X_train, y_train, epochs=100,
validation_data=(X_valid, y_valid),
callbacks=[keras.callbacks.EarlyStopping(patience=10)])
rnd_search_cv.best_params_
rnd_search_cv.best_score_
rnd_search_cv.best_estimator_
rnd_search_cv.score(X_test, y_test)
model = rnd_search_cv.best_estimator_.model
model
model.evaluate(X_test, y_test)
```
# Exercise solutions
## 1. to 9.
See appendix A.
## 10.
*Exercise: Train a deep MLP on the MNIST dataset (you can load it using `keras.datasets.mnist.load_data()`). See if you can get over 98% accuracy. Try searching for the optimal learning rate by using the approach presented in this chapter (i.e., by growing the learning rate exponentially, plotting the loss, and finding the point where the loss shoots up). Try adding all the bells and whistles—save checkpoints, use early stopping, and plot learning curves using TensorBoard.*
Let's load the dataset:
```
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()
```
Just like for the Fashion MNIST dataset, the MNIST training set contains 60,000 grayscale images, each 28x28 pixels:
```
X_train_full.shape
```
Each pixel intensity is also represented as a byte (0 to 255):
```
X_train_full.dtype
```
Let's split the full training set into a validation set and a (smaller) training set. We also scale the pixel intensities down to the 0-1 range and convert them to floats, by dividing by 255, just like we did for Fashion MNIST:
```
X_valid, X_train = X_train_full[:5000] / 255., X_train_full[5000:] / 255.
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_test = X_test / 255.
```
Let's plot an image using Matplotlib's `imshow()` function, with a `'binary'`
color map:
```
plt.imshow(X_train[0], cmap="binary")
plt.axis('off')
plt.show()
```
The labels are the class IDs (represented as uint8), from 0 to 9. Conveniently, the class IDs correspond to the digits represented in the images, so we don't need a `class_names` array:
```
y_train
```
The validation set contains 5,000 images, and the test set contains 10,000 images:
```
X_valid.shape
X_test.shape
```
Let's take a look at a sample of the images in the dataset:
```
n_rows = 4
n_cols = 10
plt.figure(figsize=(n_cols * 1.2, n_rows * 1.2))
for row in range(n_rows):
for col in range(n_cols):
index = n_cols * row + col
plt.subplot(n_rows, n_cols, index + 1)
plt.imshow(X_train[index], cmap="binary", interpolation="nearest")
plt.axis('off')
plt.title(y_train[index], fontsize=12)
plt.subplots_adjust(wspace=0.2, hspace=0.5)
plt.show()
```
Let's build a simple dense network and find the optimal learning rate. We will need a callback to grow the learning rate at each iteration. It will also record the learning rate and the loss at each iteration:
```
K = keras.backend
class ExponentialLearningRate(keras.callbacks.Callback):
    """Grow the learning rate by `factor` after every batch.

    Records the learning rate and training loss at each iteration so that
    the loss-vs-learning-rate curve can be plotted afterwards.
    """
    def __init__(self, factor):
        self.factor = factor  # multiplicative growth per batch, e.g. 1.005
        self.rates = []       # learning rate observed at each batch
        self.losses = []      # training loss observed at each batch
    def on_batch_end(self, batch, logs):
        self.rates.append(K.get_value(self.model.optimizer.lr))
        self.losses.append(logs["loss"])
        # Exponentially increase the learning rate for the next batch.
        K.set_value(self.model.optimizer.lr, self.model.optimizer.lr * self.factor)
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="relu"),
keras.layers.Dense(100, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
```
We will start with a small learning rate of 1e-3, and grow it by 0.5% at each iteration:
```
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
expon_lr = ExponentialLearningRate(factor=1.005)
```
Now let's train the model for just 1 epoch:
```
history = model.fit(X_train, y_train, epochs=1,
validation_data=(X_valid, y_valid),
callbacks=[expon_lr])
```
We can now plot the loss as a function of the learning rate:
```
plt.plot(expon_lr.rates, expon_lr.losses)
plt.gca().set_xscale('log')
plt.hlines(min(expon_lr.losses), min(expon_lr.rates), max(expon_lr.rates))
plt.axis([min(expon_lr.rates), max(expon_lr.rates), 0, expon_lr.losses[0]])
plt.xlabel("Learning rate")
plt.ylabel("Loss")
```
The loss starts shooting back up violently around 3e-1, so let's try using 2e-1 as our learning rate:
```
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="relu"),
keras.layers.Dense(100, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=2e-1),
metrics=["accuracy"])
run_index = 1 # increment this at every run
run_logdir = os.path.join(os.curdir, "my_mnist_logs", "run_{:03d}".format(run_index))
run_logdir
early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)
checkpoint_cb = keras.callbacks.ModelCheckpoint("my_mnist_model.h5", save_best_only=True)
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
history = model.fit(X_train, y_train, epochs=100,
validation_data=(X_valid, y_valid),
callbacks=[early_stopping_cb, checkpoint_cb, tensorboard_cb])
model = keras.models.load_model("my_mnist_model.h5") # rollback to best model
model.evaluate(X_test, y_test)
```
We got over 98% accuracy. Finally, let's look at the learning curves using TensorBoard:
```
%tensorboard --logdir=./my_mnist_logs --port=6006
```
| github_jupyter |
# Iterators and Generators
In the section on loops we introduced the `range` function, and said that you should think about it as creating a list of numbers. In Python `2.X` this is exactly what it does. In Python `3.X` this is *not* what it does. Instead it creates the numbers one at a time. The difference in speed and memory usage is enormous for very large lists - examples are given [here](http://justindailey.blogspot.se/2011/09/python-range-vs-xrange.html) and [here](https://asmeurer.github.io/python3-presentation/slides.html#42).
We can recreate one of the examples from [Meuer's slides](https://asmeurer.github.io/python3-presentation/slides.html#44) in detail:
```
def naivesum_list(N):
    """
    Naively sum the first N integers, first materializing them as a list.

    The explicit list() call is deliberate: it demonstrates the O(N)
    memory cost of building the whole sequence up front.
    """
    total = 0
    for value in list(range(N + 1)):
        total += value
    return total
```
We will now see how much memory this uses:
```
%load_ext memory_profiler
%memit naivesum_list(10**4)
%memit naivesum_list(10**5)
%memit naivesum_list(10**6)
%memit naivesum_list(10**7)
%memit naivesum_list(10**8)
```
We see that the memory usage is growing very rapidly - as the list gets large it's growing as $N$.
Instead we can use the `range` function that yields one integer at a time:
```
def naivesum(N):
    """
    Naively sum the first N integers, produced one at a time by range.

    Unlike naivesum_list, no list is materialized, so memory use is O(1).
    """
    total = 0
    for value in range(N + 1):
        total += value
    return total
%memit naivesum(10**4)
%memit naivesum(10**5)
%memit naivesum(10**6)
%memit naivesum(10**7)
%memit naivesum(10**8)
```
We see that the *memory* usage is unchanged with $N$, making it practical to run much larger calculations.
## Iterators
The `range` function is returning an [*iterator*](https://docs.python.org/3/glossary.html#term-iterator) here. This is an object - a general thing - that represents a stream, or a sequence, of data. The iterator knows how to create the first element of the stream, and it knows how to get the next element. It does not, in general, need to know all of the elements at once.
As we've seen above this can save a lot of memory. It can also save time: the code does not need to construct all of the members of the sequence before starting, and it's quite possible you don't need all of them (think about the "Shortest published mathematical paper" exercise).
An iterator such as `range` is very useful, and there's a lot more useful ways to work with iterators in the `itertools` module. These functions that return iterators, such as `range`, are called [*generators*](https://docs.python.org/3/glossary.html#term-generator), and it's useful to be able to make your own.
## Making your own generators
Let's look at an example: finding all primes less than $N$ that can be written in the form $4 k - 1$, where $k$ is an integer.
We're going to need to calculate all prime numbers less than or equal to $N$. We could write a function that returns all these numbers as a list. However, if $N$ gets large then this will be expensive, both in time and memory. As we only need one number at a time, we can use a generator.
```
def all_primes(N):
    """
    Generate, in increasing order, every prime less than or equal to N.

    Parameters
    ----------
    N : int
        Maximum number

    Returns
    -------
    prime : generator
        Prime numbers

    Each candidate is tested against the primes found so far; a candidate
    with no prime divisor is itself prime, so it is recorded and yielded
    immediately rather than collected into a list.
    """
    found = []
    for candidate in range(2, N + 1):
        if not any(candidate % p == 0 for p in found):
            found.append(candidate)
            yield candidate
This code needs careful examination. First it defines the list of all prime numbers that it currently knows, `primes` (which is initially empty). Then it loops through all integers $n$ from $2$ to $N$ (ignoring $1$ as we know it's not prime).
Inside this loop it initially assumes that $n$ is prime. It then checks if any of the known primes exactly divides $n$ (`n%p == 0` checks if $n \bmod p = 0$). As soon as it finds such a prime divisor it knows that $n$ is not prime: it resets the assumption with this new knowledge, then `break`s out of the loop. This statement stops the `for p in primes` loop early, as we don't need to look at later primes.
If no known prime ever divides $n$ then at the end of the `for p in primes` loop we will still have `is_n_prime` being `True`. In this case we must have $n$ being prime, so we add it to the list of known primes and return it.
It is precisely this point which makes the code above define a generator. We return the value of the prime number found
1. using the `yield` keyword, not the `return` keyword, and
2. we return the value as soon as it is known.
It is the use of the `yield` keyword that makes this function a generator.
This means that each prime number is handed back to the caller as soon as it is found, rather than the whole list of results being built up and returned at the end.
To use the iterator within a loop, we code it in the same way as with the `range` function:
```
print("All prime numbers less than or equal to 20:")
for p in all_primes(20):
print(p)
```
To see what the generator is actually doing, we can step through it one call at a time using the built in `next` function:
```
a = all_primes(10)
next(a)
next(a)
next(a)
next(a)
next(a)
```
So, when the generator gets to the end of its iteration it raises an exception. As seen in previous sections, we could surround the `next` call with a `try` block to capture the `StopIteration` so that we can continue after it finishes. This is effectively what the `for` loop is doing.
We can now find all primes (less than or equal to 100, for example) that have the form $4 k - 1$ using
```
for p in all_primes(100):
if (1+p)%4 == 0:
print("The prime {} is 4 * {} - 1.".format(p, int((1+p)/4)))
```
## Exercise : twin primes
A *twin prime* is a pair $(p_1, p_2)$ such that both $p_1$ and $p_2$ are prime and $p_2 = p_1 + 2$.
### Exercise 1
Write a generator that returns twin primes. You can use the generators above, and may want to look at the [itertools](https://docs.python.org/3/library/itertools.html) module together with [its recipes](https://docs.python.org/3/library/itertools.html#itertools-recipes), particularly the `pairwise` recipe.
### Exercise 2
Find how many twin primes there are with $p_2 < 1000$.
### Exercise 3
Let $\pi_N$ be the number of twin primes such that $p_2 < N$. Plot how $\pi_N / N$ varies with $N$ for $N=2^k$ and $k = 4, 5, \dots 16$. (You should use a logarithmic scale where appropriate!)
## Exercise : a basis for the polynomials
In the section on classes we defined a `Monomial` class to represent a polynomial with leading coefficient $1$. As the $N+1$ monomials $1, x, x^2, \dots, x^N$ form a basis for the vector space of polynomials of order $N$, $\mathbb{P}^N$, we can use the `Monomial` class to return this basis.
### Exercise 1
Define a generator that will iterate through this basis of $\mathbb{P}^N$ and test it on $\mathbb{P}^3$.
### Exercise 2
An alternative basis is given by the monomials
$$ \begin{aligned} p_0(x) &= 1, \\ p_1(x) &= 1-x, \\ p_2(x) &= (1-x)(2-x), \\ \dots & \quad \dots, \\ p_N(x) &= \prod_{n=1}^N (n-x). \end{aligned} $$
Define a generator that will iterate through this basis of $\mathbb{P}^N$ and test it on $\mathbb{P}^4$.
### Exercise 3
Use these generators to write another generator that produces a basis of $\mathbb{P}^3 \times \mathbb{P}^4$.
| github_jupyter |
```
from google.colab import drive
drive.mount('/content/drive')
#!pip install tensorflow keras gensim scikit-learn
import numpy as np
import tensorflow as tf
from keras.datasets import imdb
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from gensim.models import Word2Vec
from gensim.models import FastText
def load_data(vocab_size, max_len):
    """
    Load the Keras IMDB sentiment dataset, keeping only the most
    frequent `vocab_size` words.

    Args:
        vocab_size = {int} the size of the vocabulary
        max_len = {int} the maximum length of input considered for padding
                  (accepted for interface compatibility; the actual padding
                  is performed later by `padding_input`)

    Returns:
        X_train = tokenized train data (lists of word ids)
        X_test = tokenized test data (lists of word ids)
        y_train, y_test = binary sentiment labels
    """
    INDEX_FROM = 3  # word ids are shifted by 3 (reserved: 0=pad, 1=start, 2=unknown)
    # Newer numpy versions default to allow_pickle=False, which breaks
    # imdb.load_data on some keras versions; temporarily patch np.load.
    np_load_old = np.load
    np.load = lambda *a, **k: np_load_old(*a, allow_pickle=True)
    try:
        (X_train, y_train), (X_test, y_test) = imdb.load_data(
            num_words=vocab_size, index_from=INDEX_FROM)
    finally:
        # Restore np.load even if loading fails, so the monkey-patch can
        # never leak into the rest of the program.
        np.load = np_load_old
    print(len(X_train), len(X_test), len(y_train), len(y_test), "#####################################")
    return X_train, X_test, y_train, y_test
def prepare_data_for_word_vectors_imdb(X_train):
    """
    Convert id-encoded IMDB reviews back into lists of word tokens.

    Args:
        X_train = tokenized train data (lists of word ids)

    Returns:
        sentences = {list} one list of word tokens per review
        word_index = {dict} word -> id over the whole imdb corpus,
                     shifted to match the ids produced by `imdb.load_data`
    """
    INDEX_FROM = 3
    # Shift the raw corpus index so it lines up with load_data's offsets,
    # then register the special marker tokens.
    word_to_index = {word: rank + INDEX_FROM
                     for word, rank in imdb.get_word_index().items()}
    word_to_index["<START>"] = 1
    word_to_index["<UNK>"] = 2
    index_to_word = {idx: word for word, idx in word_to_index.items()}
    # Translate every review from word ids back into word tokens.
    sentences = [[index_to_word[token_id] for token_id in review]
                 for review in X_train]
    return sentences, word_to_index
def building_word_vector_model(option, sentences, embed_dim, window):
    """
    Train a word-vector model on the training corpus.

    Args:
        option = {int} 0 for Word2vec, 1 for gensim FastText
        sentences = {list} list of tokenized sentences (lists of words)
        embed_dim = {int} embedding dimension of the word vectors
        window = {int} max distance between current and predicted word

    Returns:
        model = Word2vec / gensim FastText model trained on `sentences`

    Raises:
        ValueError: if `option` is not 0 or 1.
    """
    if option == 0:
        print("Training a word2vec model")
        model = Word2Vec(sentences=sentences, size=embed_dim, window=window)
        print("Training complete")
    elif option == 1:
        print("Training a Gensim FastText model")
        model = FastText(sentences=sentences, size=embed_dim, window=window)
        print("Training complete")
    else:
        # Previously an unrecognised option fell through to `return model`
        # and raised a confusing UnboundLocalError; fail fast instead.
        raise ValueError(
            "option must be 0 (Word2vec) or 1 (FastText), got {}".format(option))
    return model
# Configuration for the word-vector experiment.
# specify "option" as 0 - Word2vec, 1 - FastText
option = 1
embed_dim = 300  # dimensionality of each word vector
max_len= 200  # reviews are padded/truncated to this many tokens later
vocab_size= 1000  # keep only the most frequent 1000 words
window = 1  # context window for the word-vector model
# Run the pipeline: load the data, rebuild token sentences, train vectors.
x_train,x_test,y_train,y_test = load_data(vocab_size,max_len)
sentences,word_ix = prepare_data_for_word_vectors_imdb(x_train)
model_wv = building_word_vector_model(option,sentences,embed_dim, window)
# Quick sanity check: dataset shapes plus one review's length, label and ids.
x_train.shape, y_train.shape, len(x_train[0]), y_train[0], x_train[0]
def padding_input(X_train, X_test, maxlen):
    """
    Pad (or truncate) every review to exactly `maxlen` tokens.

    Args:
        X_train = tokenized train data
        X_test = tokenized test data
        maxlen = {int} target sequence length; shorter reviews are
                 zero-padded at the end ("post"), longer ones truncated

    Returns:
        X_train_pad = padded tokenized train data
        X_test_pad = padded tokenized test data
    """
    print(X_train.shape, X_test.shape, "before padding")
    train_padded, test_padded = (
        pad_sequences(split, maxlen=maxlen, padding="post")
        for split in (X_train, X_test)
    )
    print(train_padded.shape, test_padded.shape, "after padding")
    return train_padded, test_padded
x_train_pad,x_test_pad = padding_input(x_train,x_test,max_len)
# Build the embedding matrix: row i holds the trained vector for the word
# whose id is i; words without a trained vector keep an all-zero row.
embedding_matrix = np.zeros((vocab_size, embed_dim))
for word, i in word_ix.items():
    if i >= vocab_size:
        # word ids beyond the vocabulary cutoff have no row in the matrix
        continue
    try:
        # BUG FIX: the original looked up `w2vmodel`, which is never defined
        # anywhere (the trained model is `model_wv`); its bare excepts then
        # silently swallowed the NameError, leaving the matrix all zeros.
        embedding_matrix[i] = model_wv[word]
    except KeyError:
        # word was never seen by the word-vector model; leave the zero row
        pass
print(embedding_matrix.shape, "embedding_matrix")
from keras.models import Sequential
from keras.layers import Dense, Embedding
from keras.layers import Flatten
from keras.initializers import Constant
print('Training model.')
# define the model: a frozen embedding layer initialised from the trained
# word vectors, flattened and fed through a small dense classifier
model = Sequential()
model.add(Embedding(vocab_size,
                    embed_dim,
                    embeddings_initializer=Constant(embedding_matrix),
                    input_length=max_len,
                    trainable=False))  # keep the pretrained vectors fixed
model.add(Flatten())
model.add(Dense(512, activation='tanh'))
model.add(Dense(256, activation='tanh'))
model.add(Dense(128, activation='tanh'))
# single sigmoid unit -> probability that the review is positive
model.add(Dense(1, activation='sigmoid'))
# compile the model for binary classification
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['acc'])
# summarize the model
print(model.summary())
# train for a single epoch, validating on the held-out test split
model.fit(x_train_pad,y_train,
          batch_size=2048,
          epochs=1,
          validation_data=(x_test_pad,y_test))
# loss, accuracy = model.evaluate(x_test, y_test, verbose=0)
# print('Accuracy: %f' % (accuracy))
# print('Loss: %f' % (loss))
# sanity check: predict the sentiment of one held-out review...
model.predict(x_test_pad[6].reshape(1, x_test_pad.shape[1]))
# ...and compare against its true label
y_test[6]
```
| github_jupyter |
# Python syntax cheat sheet
This is a quick overview and reference for many of the fundamentals that we might not have time to stop and go through in detail:
- [Basic data types](#Basic-data-types)
- [Strings](#Strings)
- [Numbers](#Numbers)
- [Booleans](#Booleans)
- [Variable assignment](#Variable-assignment)
- [String methods](#String-methods)
- [Comments](#Comments)
- [The print() function](#The-print()-function)
- [Doing math in Python](#Doing-math-in-Python)
- [Collections of data](#Collections-of-data)
- [Lists](#Lists)
- [Dictionaries](#Dictionaries)
- [`for` loops](#for-loops)
- [`if` statements](#if-statements)
- [List comprehensions](#List-comprehensions)
- [Dealing with errors](#Dealing-with-errors)
### Basic data types
Just like Excel and other data processing software, Python recognizes a variety of data types, including three we'll focus on here:
- Strings (text)
- Numbers (integers, numbers with decimals and more)
- Booleans (`True` and `False`).
You can use the [`type()`](https://docs.python.org/3/library/functions.html#type) function to check the data type of a value.
```
type(5)
type('Hello friends')
type(True)
```
#### Strings
A string is a group of characters -- letters, numbers, whatever -- enclosed within single or double quotes (doesn't matter as long as they match). The code in these notebooks uses single quotes. (The Python style guide doesn't recommend one over the other: ["Pick a rule and stick to it."](https://www.python.org/dev/peps/pep-0008/#string-quotes))
If your string _contains_ apostrophes or quotes, you have two options: _Escape_ the offending character with a backslash `\`:
```python
'Isn\'t it nice here?'
```
... or change the surrounding punctuation:
```python
"Isn't it nice here?"
```
The style guide recommends the latter over the former.
When you check the `type()` of a string, Python will return `str`.
Calling [`str()`](https://docs.python.org/3/library/stdtypes.html#str) on a value will return the string version of that value (see example below).
```
'Investigative Reporters and Editors'
type('hello!')
str(45)
str(True)
```
If you "add" strings together with a plus sign `+`, it will concatenate them:
```
'IRE' + '/' + 'NICAR'
```
#### Numbers
Python recognizes a variety of numeric data types. Two of the most common are integers (whole numbers) and floats (numbers with decimals).
Calling `int()` on a piece of numeric data will attempt to coerce it to an integer; calling `float()` will try to convert it to a float.
```
12
12.4
type(12)
type(12.4)
int(35.6)
float(46)
```
#### Booleans
Just like in Excel, which has `TRUE` and `FALSE` data types, Python has boolean data types. They are `True` and `False` -- note that only the first letter is capitalized and the word is not surrounded by quotes or apostrophes.
Boolean values are typically returned when you're evaluating a logical statement.
```
True
False
4 > 6
'ell' in 'Hello'
type(True)
```
### Variable assignment
The `=` sign assigns a value to a variable name that you choose. Later, you can retrieve that value by referencing its variable name. Variable names can be pretty much anything you want ([as long as you follow some basic rules](https://thehelloworldprogram.com/python/python-variable-assignment-statements-rules-conventions-naming/)).
In a Jupyter notebook, any value assigned to a variable will be available once you _run_ the cell. Otherwise it won't be available.
This can be a tricky concept at first! For more detail, [here's a pretty good explainer from Digital Ocean](https://www.digitalocean.com/community/tutorials/how-to-use-variables-in-python-3).
```
my_name = 'Cody'
my_name
```
You can also _reassign_ a different value to a variable, though it's better practice to create a new variable.
```
my_name = 'Jacob'
my_name
```
For reference, here's a list of "Python keywords" that you should not use as variable names:
```
import keyword
print(keyword.kwlist)
```
### String methods
Let's go back to strings for a second. String objects have a number of useful [methods](https://docs.python.org/3/library/stdtypes.html#string-methods) -- let's use an example string to demonstrate a few common ones.
```
my_cool_string = ' Hello, Newport Beach!'
```
`upper()` converts the string to uppercase:
```
my_cool_string.upper()
```
`lower()` converts to lowercase:
```
my_cool_string.lower()
```
`replace()` will replace a piece of text with other text that you specify:
```
my_cool_string.replace('Newport', 'Long')
```
`count()` will count the number of occurrences of a character or group of characters:
```
my_cool_string.count('H')
```
Note that `count()` is case-sensitive. If your task is "count all the h's," convert your original string to upper or lowercase first:
```
my_cool_string.upper().count('H')
```
[`split()`](https://docs.python.org/3/library/stdtypes.html#str.split) will split the string into a [_list_](#Lists) (more on these in a second) on a given delimiter (if you don't specify a delimiter, it'll default to splitting on a space):
```
my_cool_string.split()
my_cool_string.split(',')
my_cool_string.split('Pitt')
```
`strip()` removes whitespace from either side of your string (but not internal whitespace):
```
my_cool_string.strip()
```
You can use a cool thing called "method chaining" to combine methods -- just tack 'em onto the end. Let's say we wanted to strip whitespace from our string _and_ make it uppercase:
```
my_cool_string.strip().upper()
```
Notice, however, that our original string is unchanged:
```
my_cool_string
```
Why? Because we haven't assigned the results of anything we've done to a variable. A common thing to do, especially when you're cleaning data, would be to assign the results to a new variable:
```
my_cool_string_clean = my_cool_string.strip().upper()
my_cool_string_clean
```
### Comments
A line with a comment -- a note that you don't want Python to interpret -- starts with a `#` sign. These are notes to collaborators and to your future self about what's happening at this point in your script, and why.
Typically you'd put this on the line right above the line of code you're commenting on:
```
# coercing this to an int because we don't need any decimal precision
avg_settlement = 40827348.34328237
int(avg_settlement)
```
Multi-line comments are sandwiched between triple quotes (or triple apostrophes):
`'''
this
is a long
comment
'''`
or
`"""
this
is a long
comment
"""`
Here's a real-live comment I used in a script:
```
'''
Given a price, a base year index and the current year index, this will return the adjusted value
See: https://www.bls.gov/cpi/factsheets/cpi-math-calculations.pdf#page=2
Ctrl+F for "constant dollars"
'''
```
### The `print()` function
So far, we've just been running the notebook cells to get the last value returned by the code we write. Using the [`print()`](https://docs.python.org/3/library/functions.html#print) function is a way to print specific things in your script to the screen.
To print multiple things on the same line, separate them with a comma.
```
print('Hello!')
print(my_name)
print('Hello,', my_name)
```
### Doing math in Python
You can do [basic math](https://www.digitalocean.com/community/tutorials/how-to-do-math-in-python-3-with-operators) in Python. You can also do [more advanced math](https://docs.python.org/3/library/math.html).
```
4+2
10-9
5*10
1000/10
# ** raises a number to the power of another number
5**2
# % returns the remainder of a division problem
100 % 8
# divmod() returns the quotient ~and~ the remainder
divmod(100, 8)
```
## Collections of data
Now we're going to talk about two ways you can use Python to group data into a collection: lists and dictionaries.
### Lists
A _list_ is a comma-separated list of items inside square brackets: `[]`.
Here's a list of ingredients, each one a string, that together makes up a salsa recipe.
```
salsa_ingredients = ['tomato', 'onion', 'jalapeño', 'lime', 'cilantro']
```
To get an item out of a list, you'd refer to its numerical position in the list -- its _index_ (1, 2, 3, etc.) -- inside square brackets immediately following your reference to that list. In Python, as in many other programming languages, counting starts at 0. That means the first item in a list is item `0`.
```
salsa_ingredients[0]
salsa_ingredients[1]
```
You can use _negative indexing_ to grab things from the right-hand side of the list -- and in fact, `[-1]` is a common idiom for getting "the last item in a list" when it's not clear how many items are in your list.
```
salsa_ingredients[-1]
```
If you wanted to get a slice of multiple items out of your list, you'd use colons (just like in Excel, kind of!).
If you wanted to get the first three items, you'd do this:
```
salsa_ingredients[0:3]
```
You could also have left off the initial 0 -- when you leave out the first number, Python defaults to "the first item in the list." In the same way, if you leave off the last number, Python defaults to "the last item in the list."
```
salsa_ingredients[:3]
```
Note, too, that this slice is giving us items 0, 1 and 2. The `3` in our slice is the first item we _don't_ want. That can be kind of confusing at first. Let's try a few more:
```
# everything in the list except the first item
salsa_ingredients[1:]
# the second, third and fourth items
salsa_ingredients[1:4]
# the last two items
salsa_ingredients[-2:]
```
To see how many items are in a list, use the `len()` function:
```
len(salsa_ingredients)
```
To add an item to a list, use the [`append()`](https://docs.python.org/3/tutorial/datastructures.html#more-on-lists) method:
```
salsa_ingredients
salsa_ingredients.append('mayonnaise')
salsa_ingredients
```
Haha _gross_. To remove an item from a list, use the `pop()` method. If you don't specify the index number of the item you want to pop out, it will default to "the last item."
```
salsa_ingredients.pop()
salsa_ingredients
```
You can use the [`in` and `not in`](https://docs.python.org/3/reference/expressions.html#membership-test-operations) expressions, among others, to test membership in a list (they'll return a boolean):
```
'lime' in salsa_ingredients
'cilantro' not in salsa_ingredients
```
### Dictionaries
A _dictionary_ is a comma-separated list of key/value pairs inside curly brackets: `{}`. Let's make an entire salsa recipe:
```
salsa = {
'ingredients': salsa_ingredients,
'instructions': 'Chop up all the ingredients and cook them for awhile.',
'oz_made': 12
}
```
To retrieve a value from a dictionary, you'd refer to the name of its key inside square brackets `[]` immediately after your reference to the dictionary:
```
salsa['oz_made']
salsa['ingredients']
```
You can also use the `get()` method to retrieve an item from a dictionary. The benefit of using `get()` instead of square brackets: Your script won't throw an error if the key doesn't exist, and this is sometimes what you want. Instead, it will return `None`.
```
salsa.get('instructions')
print(salsa['cooking_duration'])
print(salsa.get('cooking_duration'))
```
To add a new key/value pair to a dictionary, assign a new key to the dictionary inside square brackets and set the value of that key with `=`:
```
salsa['tastes_great'] = True
salsa
```
To delete a key/value pair out of a dictionary, use the `del` command and reference the key:
```
del salsa['tastes_great']
salsa
```
### Indentation
Whitespace matters in Python. Sometimes you'll need to indent bits of code to make things work. This can be confusing! `IndentationError`s are common even for experienced programmers. (FWIW, Jupyter will try to be helpful and insert the correct amount of "significant whitespace" for you.)
You can use tabs or spaces, just don't mix them. [The Python style guide](https://www.python.org/dev/peps/pep-0008/) recommends indenting your code in groups of four spaces, so that's what we'll use.
### `for` loops
You would use a `for` loop to iterate over a collection of things. The statement begins with the keyword `for` (lowercase), then a temporary `variable_name` of your choice to represent the items in the thing you're looping over, then the Python keyword `in`, then the collection you're looping over (or its variable name), then a colon, then the indented block of code with instructions about what to do with each item in the collection.
Let's say we have a list of numbers, `ls`.
```
ls = [1, 2, 3, 4, 5, 6]
```
We could loop over the list and print out each number:
```
for number in ls:
print(number)
```
We could print out each number _times 6_:
```
for number in ls:
print(number*6)
```
... whatever you need to do in your loop. Note that the variable name `number` in our loop is totally arbitrary. This also would work:
```
for banana in ls:
print(banana)
```
It can be hard, at first, to figure out what's a "Python word" and what's a variable name that you get to define. This comes with practice.
Strings are iterable, too. Let's loop over the letters in a sentence:
```
sentence = 'We are in Newport Beaaaaaaaach!'
for letter in sentence:
print(letter)
```
To this point: Because strings are iterable, like lists, you can use the same kinds of methods:
```
# get the first five characters
sentence[:5]
# get the length of the sentence
len(sentence)
'Newport' in sentence
```
You can iterate over dictionaries, too -- just remember that dictionaries _don't keep track of the order that items were added to it_.
When you're looping over a dictionary, the variable name in your `for` loop will refer to the keys. Let's loop over our `salsa` dictionary from up above to see what I mean.
```
for key in salsa:
print(key)
```
To get the _value_ of a dictionary item in a for loop, you'd need to use the key to retrieve it from the dictionary:
```
for key in salsa:
print(key, '=>', salsa[key])
```
### `if` statements
Just like in Excel, you can use the "if" keyword to handle conditional logic.
These statements begin with the keyword `if` (lowercase), then the condition to evaluate, then a colon, then a new line with a block of indented code to execute if the condition resolves to `True`.
```
if 4 < 6:
print('4 is less than 6')
```
You can also add an `else` statement (and a colon) with an indented block of code you want to run if the condition resolves to `False`.
```
if 4 > 6:
print('4 is greater than 6?!')
else:
print('4 is not greater than 6.')
```
If you need to, you can add multiple conditions with `elif`.
```
HOME_SCORE = 6
AWAY_SCORE = 8
if HOME_SCORE > AWAY_SCORE:
print('we won!')
elif HOME_SCORE == AWAY_SCORE:
print('we tied!')
else:
print('we lost!')
```
### List comprehensions
Sometimes, you want to _do something_ to a list of data but you need to save the results of your operation under a new variable. A common scenario would be filtering a list or transforming the items somehow.
A list comprehension happens inside square brackets and includes the keywords `for` and `in`. It also has placeholder variable names (that you define) to stand in for each item in your list.
```python
[item for item in your_list]
```
Let's say you want to strip whitespace and upcase every item in your list.
```
my_gross_list = [' McDonalds Corp.', 'ARBYS ', ' wendys', ' tHe KrUsTy KrAb ']
```
You could do something like this:
```
my_clean_list = [x.upper().strip() for x in my_gross_list]
my_clean_list
```
You can also add _conditions_ to your list comprehensions. Let's say we want to keep only list items that end with 'S' (using a string method called [`endswith()`](https://docs.python.org/3/library/stdtypes.html#str.endswith)):
```
ends_with_s = [x for x in my_clean_list if x.endswith('S')]
ends_with_s
```
### Dealing with errors
Run the code in the following cell:
```
print(salsa_ingredients[0])
print(salsa_ingredients[-1])
print(salsa_ingredients[100])
```
Hooray! Our first error (maybe). Errors are extremely common, happen to literally every person who writes code and are not evidence that you are dumb or that this kind of work isn't for you or whatever other terrible thing you tell yourself when errors pop up.
They can be frustrating, though! Luckily, there is a strategy for solving them. Let's see if we can figure this one out.
First thing: Read error messages (called "tracebacks") from the bottom up. We're getting something called an `IndexError`, and it's saying "list index out of range."
Moving upward: The error message points to the offending line of code: 3.
Maybe, from here, we can figure out the error. (Answer: We don't have 100 items in our list.) If not, I would Google the exact text of the error on the first line we read, and maybe the word "python": ["IndexError: list index out of range" python](https://www.google.com/search?q=%22IndexError%3A+list+index+out+of+range%22+python). You'll get _very_ acquainted with StackOverflow.
| github_jupyter |
## Comparing Image Data Structures
```
# OpenCV supports reading of images in most file formats, such as JPEG, PNG, and TIFF. Most image and
# video analysis requires converting images into grayscale first. This simplifies the image and reduces
# noise allowing for improved analysis. Let's write some code that reads an image of as person, Floyd
# Mayweather and converts it into greyscale.
# First we will import the open cv package cv2
import cv2 as cv
# We'll load the floyd.jpg image
img = cv.imread('readonly/floyd.jpg')
# And we'll convert it to grayscale using the cvtColor image
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# Now, before we get to the result, lets talk about docs. Just like tesseract, opencv is an external
# package written in C++, and the docs for python are really poor. This is unfortunatly quite common
# when python is being used as a wrapper. Thankfully, the web docs for opencv are actually pretty good,
# so hit the website docs.opencv.org when you want to learn more about a particular function. In this
# case cvtColor converts from one color space to another, and we are convering our image to grayscale.
# Of course, we already know at least two different ways of doing this, using binarization and PIL
# color spaces conversions
# Lets instpec this object that has been returned.
import inspect
inspect.getmro(type(gray))
# We see that it is of type ndarray, which is a fundamental list type coming from the numerical
# python project. That's a bit surprising - up until this point we have been used to working with
# PIL.Image objects. OpenCV, however, wants to represent an image as a two dimensional sequence
# of bytes, and the ndarray, which stands for n dimensional array, is the ideal way to do this.
# Lets look at the array contents.
gray
# The array is shown here as a list of lists, where the inner lists are filled with integers.
# The dtype=uint8 definition indicates that each of the items in an array is an 8 bit unsigned
# integer, which is very common for black and white images. So this is a pixel by pixel definition
# of the image.
#
# The display package, however, doesn't know what to do with this image. So lets convert it
# into a PIL object to render it in the browser.
from PIL import Image
# PIL can take an array of data with a given color format and convert this into a PIL object.
# This is perfect for our situation, as the PIL color mode, "L" is just an array of luminance
# values in unsigned integers
image = Image.fromarray(gray, "L")
display(image)
# Lets talk a bit more about images for a moment. Numpy arrays are multidimensional. For
# instance, we can define an array in a single dimension:
import numpy as np
single_dim = np.array([25, 50 , 25, 10, 10])
# In an image, this is analagous to a single row of 5 pixels each in grayscale. But actually,
# all imaging libraries tend to expect at least two dimensions, a width and a height, and to
# show a matrix. So if we put the single_dim inside of another array, this would be a two
# dimensional array with element in the height direction, and five in the width direction
double_dim = np.array([single_dim])
double_dim
# This should look pretty familiar, it's a lot like a list of lists! Lets see what this new
# two dimensional array looks like if we display it
display(Image.fromarray(double_dim, "L"))
# Pretty unexciting - it's just a little line. Five pixels in a row to be exact, of different
# levels of black. The numpy library has a nice attribute called shape that allows us to see how
# many dimensions big an array is. The shape attribute returns a tuple that shows the height of
# the image, by the width of the image
double_dim.shape
# Lets take a look at the shape of our initial image which we loaded into the img variable
img.shape
# This image has three dimensions! That's because it has a width, a height, and what's called
# a color depth. In this case, the color is represented as an array of three values. Lets take a
# look at the color of the first pixel
first_pixel=img[0][0]
first_pixel
# Here we see that the color value is provided in full RGB using an unsigned integer. This
# means that each color can have one of 256 values, and the total number of unique colors
# that can be represented by this data is 256 * 256 *256 which is roughly 16 million colors.
# We call this 24 bit color, which is 8+8+8.
#
# If you find yourself shopping for a television, you might notice that some expensive models
# are advertised as having 10 bit or even 12 bit panels. These are televisions where each of
# the red, green, and blue color channels are represented by 10 or 12 bits instead of 8. For
# ten bit panels this means that there are 1 billion colors capable, and 12 bit panels are
# capable of over 68 billion colors!
# We're not going to talk much more about color in this course, but it's a fun subject. Instead,
# lets go back to this array representation of images, because we can do some interesting things
# with this.
#
# One of the most common things to do with an ndarray is to reshape it -- to change the number
# of rows and columns that are represented so that we can do different kinds of operations.
# Here is our original two dimensional image
# NOTE(review): `gray`, `np`, `display` and `Image` are assumed to come from
# earlier notebook cells (numpy, IPython.display, PIL) — confirm against the full file.
print("Original image")
print(gray)
# If we wanted to represent that as a one dimensional image, we just call reshape
print("New image")
# And reshape takes the image as the first parameter, and a new shape as the second
image1d=np.reshape(gray,(1,gray.shape[0]*gray.shape[1]))
print(image1d)
# So, why are we talking about these nested arrays of bytes, we were supposed to be talking
# about OpenCV as a library. Well, I wanted to show you that often libraries working on the
# same kind of principles, in this case images stored as arrays of bytes, are not representing
# data in the same way in their APIs. But, by exploring a bit you can learn how the internal
# representation of data is stored, and build routines to convert between formats.
#
# For instance, remember in the last lecture when we wanted to look for gaps in an image so
# that we could draw lines to feed into kraken? Well, we use PIL to do this, using getpixel()
# to look at individual pixels and see what the luminosity was, then ImageDraw.rectangle to
# actually fill in a black bar separator. This was a nice high level API, and let us write
# routines to do the work we wanted without having to understand too much about how the images
# were being stored. But it was computationally very slow.
#
# Instead, we could write the code to do this using matrix features within numpy. Lets take
# a look.
import cv2 as cv
# We'll load the 2 column image
img = cv.imread('readonly/two_col.png')
# And we'll convert it to grayscale using the cvtColor function
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# Now, remember how slicing on a list works, if you have a list of number such as
# a=[0,1,2,3,4,5] then a[2:4] will return the sublist of numbers at position 2 through 4
# inclusive - don't forget that lists start indexing at 0!
# If we have a two dimensional array, we can slice out a smaller piece of that using the
# format a[2:4,1:3]. You can think of this as first slicing along the rows dimension, then
# in the columns dimension. So in this example, that would be a matrix of rows 2, and 3,
# and columns 1, and 2. Here's a look at our image.
gray[2:4,1:3]
# So we see that it is all white. We can use this as a "window" and move it around our
# our big image.
#
# Finally, the ndarray library has lots of matrix functions which are generally very fast
# to run. One that we want to consider in this case is count_nonzero(), which just returns
# the number of entries in the matrix which are not zero.
np.count_nonzero(gray[2:4,1:3])
# Ok, the last benefit of going to this low level approach to images is that we can change
# pixels very fast as well. Previously we were drawing rectangles and setting a fill and line
# width. This is nice if you want to do something like change the color of the fill from the
# line, or draw complex shapes. But we really just want a line here. That's really easy to
# do - we just want to change a number of luminosity values from 255 to 0.
#
# As an example, lets create a big white matrix
white_matrix=np.full((12,12),255,dtype=np.uint8)
display(Image.fromarray(white_matrix,"L"))
white_matrix
# looks pretty boring, it's just a giant white square we can't see. But if we want, we can
# easily color a column to be black
white_matrix[:,6]=np.full((1,12),0,dtype=np.uint8)
display(Image.fromarray(white_matrix,"L"))
white_matrix
# And that's exactly what we wanted to do. So, why do it this way, when it seems so much
# more low level? Really, the answer is speed. This paradigm of using matricies to store
# and manipulate bytes of data for images is much closer to how low level API and hardware
# developers think about storing files and bytes in memory.
#
# How much faster is it? Well, that's up to you to discover; there's an optional assignment
# for this week to convert our old code over into this new format, to compare both the
# readability and speed of the two different approaches.
```
| github_jupyter |
# Object oriented programming
## Using an object
Below is the definition of an object. Run the cell and create at least two instances of it.
```
class Car:
    """A car that tracks mileage and fuel consumption.

    New cars start with a full tank. ``drive`` spends fuel at the car's
    mpg rate and refuses (returning 0) when the tank cannot cover the trip.
    """

    def __init__(self, make, model, year, mpg=25, tank_capacity=30.0, miles=0):
        self.make = make
        self.model = model
        self.year = year
        self.mpg = mpg
        self.gallons_in_tank = tank_capacity  # cars start with a full tank
        self.tank_capacity = tank_capacity
        self.miles = miles

    def __str__(self):
        return "{} {} ({}), {} miles and {} gallons in tank".format(
            self.make, self.model, self.year, self.miles, self.gallons_in_tank)

    def drive(self, new_miles):
        """Drive the car X miles and return number of miles driven.
        If there is not enough fuel, drive 0 miles."""
        required_fuel = new_miles / self.mpg
        if required_fuel > self.gallons_in_tank:
            return 0
        self.gallons_in_tank -= required_fuel
        self.miles += new_miles
        return new_miles
```
## Simple modification to class
OK, our car has a major problem: it can't be filled up.
Add a method called `fill_up()` to your class. It is up to you if you want to enable filling by an arbitrary number or only back to the full state.
If you allow arbitrary amounts of liquid remember to consider overfilling the tank.
Once you edit your class, the old objects do not automatically adopt to the changes you made. You will need to re-create them.
## Exceptions
Now make a modification to the `drive`-method: if an attempt is made to drive more than the gas will allow, create and raise an exception.
Instead of creating your own exception you may use a [ValueError](https://docs.python.org/3/library/exceptions.html#ValueError) for this case, as it is a logical choice.
Then add a try-except clause to the following:
```
suv = Car("Ford", "Escape", 2017, mpg=18, tank_capacity=30)
suv.drive(600)
```
## Bonus exercises
### Magic methods
Create a class called element for storing the following data
* name
* symbol
* atomic number
* molecular weight
You can use the following data for creating instances of a few elements:
| Element | symbol | atomic number | molecular weight |
|----------|--------|---------------|---------------|
| Hydrogen | H | 1 | 1.01 |
| Iron | Fe | 26 | 55.85 |
| Silver | Ag | 47 | 107.87 |
Next, we would like to be able to sort elements according to their atomic number. In order to do this, let's implement magic methods ``__lt__`` and ``__eq__`` as described [here](https://docs.python.org/3.5/reference/datamodel.html#object.__lt__).
Once finished, store a few instances of the elements in a list, and try to sort it using list's ``sort`` method.
| github_jupyter |
```
#Import the math function for calculations
import math
import tensorflow as tf
import numpy as np
#Class that defines the behavior of the RBM
class RBM(object):
    """Restricted Boltzmann Machine trained with one-step contrastive divergence (CD-1)."""

    def __init__(self, input_size, output_size, lr=1.0, batchsize=100):
        """
        input_size: number of neurons in the visible layer
        output_size: number of neurons in the hidden layer
        lr: step size used in the gradient update
        batchsize: number of samples used per training sub-iteration
        """
        # Hyperparameters
        self._input_size = input_size    # Size of the visible layer
        self._output_size = output_size  # Size of the hidden layer
        self.learning_rate = lr
        self.batchsize = batchsize
        # Weights and biases are initialized to zero.
        self.w = tf.zeros([input_size, output_size], tf.float32)
        self.hb = tf.zeros([output_size], tf.float32)
        self.vb = tf.zeros([input_size], tf.float32)

    def prob_h_given_v(self, visible, w, hb):
        """Hidden-unit activation probabilities given a visible state (sigmoid of affine map)."""
        return tf.nn.sigmoid(tf.matmul(visible, w) + hb)

    def prob_v_given_h(self, hidden, w, vb):
        """Visible-unit activation probabilities given a hidden state."""
        return tf.nn.sigmoid(tf.matmul(hidden, tf.transpose(w)) + vb)

    def sample_prob(self, probs):
        """Draw a binary sample from per-unit Bernoulli probabilities."""
        return tf.nn.relu(tf.sign(probs - tf.random.uniform(tf.shape(probs))))

    def train(self, X, epochs=5):
        """Train with CD-1 and return the per-epoch reconstruction errors.

        X: 2-D float tensor of training samples (rows are samples).
        """
        loss = []
        for epoch in range(epochs):
            # BUGFIX: the second range previously stopped at len(X), which
            # silently dropped the final full batch whenever len(X) is an
            # exact multiple of batchsize. Using len(X) + 1 includes it.
            # (A trailing partial batch is still skipped, as before.)
            for start, end in zip(range(0, len(X), self.batchsize),
                                  range(self.batchsize, len(X) + 1, self.batchsize)):
                batch = X[start:end]
                # One Gibbs up-down-up pass with sampled intermediate states
                h0 = self.sample_prob(self.prob_h_given_v(batch, self.w, self.hb))
                v1 = self.sample_prob(self.prob_v_given_h(h0, self.w, self.vb))
                h1 = self.prob_h_given_v(v1, self.w, self.hb)
                # Positive and negative phase statistics
                positive_grad = tf.matmul(tf.transpose(batch), h0)
                negative_grad = tf.matmul(tf.transpose(v1), h1)
                # Parameter updates, normalized by the actual batch size
                self.w = self.w + self.learning_rate * (positive_grad - negative_grad) / tf.dtypes.cast(tf.shape(batch)[0], tf.float32)
                self.vb = self.vb + self.learning_rate * tf.reduce_mean(batch - v1, 0)
                self.hb = self.hb + self.learning_rate * tf.reduce_mean(h0 - h1, 0)
            # Reconstruction error of the last processed batch of the epoch
            err = tf.reduce_mean(tf.square(batch - v1))
            print ('Epoch: %d' % epoch,'reconstruction error: %f' % err)
            loss.append(err)
        return loss

    def rbm_output(self, X):
        """Hidden-layer activations: the input for the next stacked RBM."""
        out = tf.nn.sigmoid(tf.matmul(X, self.w) + self.hb)
        return out

    def rbm_reconstruct(self, X):
        """Reconstruct the visible layer from X by one up-down pass."""
        h = tf.nn.sigmoid(tf.matmul(X, self.w) + self.hb)
        reconstruct = tf.nn.sigmoid(tf.matmul(h, tf.transpose(self.w)) + self.vb)
        return reconstruct
RESHAPED = 784  # flattened 28x28 MNIST image
NB_CLASSES = 10 # number of outputs = number of digits
(train_data, Y_train), (test_data, Y_test) = tf.keras.datasets.mnist.load_data()
# Scale pixel values to [0, 1] and flatten each image to a 784-vector.
train_data = train_data/np.float32(255)
train_data = np.reshape(train_data, (train_data.shape[0], RESHAPED))
Y_train = tf.keras.utils.to_categorical(Y_train, NB_CLASSES)
Y_test = tf.keras.utils.to_categorical(Y_test, NB_CLASSES)
test_data = test_data/np.float32(255)
test_data = np.reshape(test_data, (test_data.shape[0], RESHAPED))
RBM_hidden_sizes = [500, 200 , 50 ] # three stacked RBM layers of sizes 500, 200 and 50
#Since we are training, set input as training data
inpX = train_data
#Create list to hold our RBMs
rbm_list = []
#Size of inputs is the number of inputs in the training set
input_size = train_data.shape[1]
# Build the stack: each RBM's hidden size becomes the next one's input size.
for i, size in enumerate(RBM_hidden_sizes):
    print ('RBM: ',i,' ',input_size,'->', size)
    rbm_list.append(RBM(input_size, size))
    input_size = size
# Greedy layer-wise training: each RBM trains on the previous layer's output.
for rbm in rbm_list:
    print ('New RBM:')
    #Train a new one
    rbm.train(tf.cast(inpX,tf.float32))
    #Return the output layer
    inpX = rbm.rbm_output(inpX)
```
| github_jupyter |
# Кратчайшие пути
Существуют вариации в зависимости от конкретного случая, но обычно базовой задачей о кратчайших путях считают следующую: дана вершина $s$, найти пути минимальной длины (длина пути -- сумма длин образующих его ребер) до всех остальных вершин. В случае, если длины всех ребер одинаковы, то эта задача решается с помощью базового обхода в ширину, в общем случае его недостаточно, однако есть похожий по простоте метод, на котором основано подавляющее большинство методов нахождения кратчайших путей.
### Обозначения
\begin{align}
u\rightarrow v~-~& ребро~из~u~в~v \\
u\rightsquigarrow v~-~& путь~из~u~в~v \\
u\rightsquigarrow z\rightsquigarrow v~-~& путь~из~u~в~v,~проходящий~через~z \\
u\rightsquigarrow z\rightarrow v~-~& путь~из~u~в~v,~в~котором~последнее~ребро~начинается~в~z \\
u\rightarrow z\rightsquigarrow v~-~& путь~из~u~в~v,~в~котором~первое~ребро~ведет~в~z \\
u_i\rightsquigarrow_{i=1}^{n-1} u_{i+1}=u_1\rightarrow\ldots\rightarrow u_n ~-~& путь,~состоящий~из~ребер~u_1\rightarrow u_2,~u_2\rightarrow u_3,\ldots,~u_{n-1}\rightarrow~u_n. \\
\omega(u\rightarrow v)~-~ & длина~ребра~u\rightarrow v. \\
\omega(u_i\rightsquigarrow_{i=1}^{n-1} u_{i+1})=\sum_{i=1}^{n-1}\omega(u_i\rightarrow u_{i+1})~-~& длина~пути, состоящего~из~ребер~u_1\rightarrow u_2,~u_2\rightarrow u_3,\ldots,~u_{n-1}\rightarrow~u_n. \\
d(u, v)~-~& минимальная~длина~пути~из~u~в~v.
\end{align}
Так как путей из одной вершины в другую может быть много, то обозначение $u\rightsquigarrow v$ предполагает какой-то путь. Если нужно будет уточнить промежуточные вершины в этом пути, то будут использованы правила конкатенации, указанные выше. Длина пути определяется как сумма длин ребер и, соответственно, используется только для путей с явным указанием последовательности ребер.
## Дерево кратчайших путей и динамическое программирование
У кратчайших путей есть одно чрезвычайно важное свойство: если $u_1\rightarrow\ldots\rightarrow u_n$ -- кратчайший путь из $u_1$ в $u_n$, то для всех $1\leq i<j\leq n$: $u_i\rightarrow\ldots\rightarrow u_j$ -- кратчайший путь из $u_i$ в $u_j$, иначе говоря любой кусок оптимального пути также является оптимальным, это свойство обычно называют оптимальной подструктурой. Для решений задач с подобным свойством оказался очень успешным метод динамического программирования, который в целом можно описать следующими принципами:
* Вместе с $d(u, v)$ вычисляем еще какие-то вспомагательные величины, также обозначающие кратчайшие пути, но обычно дополнительно чем-то ограниченые.
* Последовательно вычислять вспомагательные величины начиная с коротких путей постепенно переходя к более длинным.
Подробней уже в конкретных алгоритмах.
Теперь предположим, что мы каким-то образом научились вычислять $d$ для одной пары вершин или нескольких, обычно нам не только интересно само расстояние, но и сам путь. Возникает следующая проблема: $d(u, v)$ -- это одно число, а вот путь -- это уже последовательность. Вопрос в том, можно компактно представить себе этот набор оптимальных путей? В случае, если нас интересуют оптимальные пути от одной вершины до всех остальных, то это удается сделать довольно изящно, очень сильно помагает оптимальная подструктура: если
$$
d(u_1, u_n)=\omega(u_1\rightarrow\ldots\rightarrow u_n),
$$
то
\begin{align}
d(u_1, u_n)=\omega(u_1\rightarrow\ldots\rightarrow u_n)=\omega(u_1\rightarrow\ldots\rightarrow u_{n-1})+\omega(u_{n-1}\rightarrow u_n)=d(u_1, u_{n-1})+\omega(u_{n-1}\rightarrow u_n)
\end{align}
По сути мы просто выделили последнее ребро на оптимальном пути. Предположим, что мы как-то умеем выделять оптимальный путь $u_1\rightsquigarrow u_{n-1}$, тогда чтобы выделить оптимальный путь $u_1\rightsquigarrow u_{n}$, то достаточно запомнить последнее ребро на этом оптимальном пути $u_{n-1}\rightarrow u_n$ и дописать его в конец оптимального пути $u_1\rightsquigarrow u_{n-1}$. Пример этой концепции покажу чуть позже
## Сканирующий метод
Базовый метод для нахождения кратчайших путей заключается в следующем:
\begin{align}
&scan\_arc(v\rightarrow u): \\
&~ ~ ~ ~if (l(v) + \omega(v\rightarrow u) < l(u)) \\
&~ ~ ~ ~ ~ ~ ~ ~l(u)\leftarrow l(v)+\omega(v\rightarrow u) \\
&~ ~ ~ ~ ~ ~ ~ ~p(u)\leftarrow v \\
&~ ~ ~ ~ ~ ~ ~ ~mark(u)\leftarrow labelled \\
& \\
&scan\_vertex(v): \\
&~ ~ ~ ~for~u~such~that~v\rightarrow u~exists: \\
&~ ~ ~ ~~ ~ ~ ~scan\_arc(v\rightarrow u) \\
&~ ~ ~ ~mark(v)\leftarrow scanned \\
& \\
& l(s)\leftarrow 0,~mark(s)\leftarrow labelled \\
& while~exists~v~with~mark(v)=labelled:\\
&~ ~ ~ ~scan\_vertex(v)
\end{align}
Здесь $l(v)$ -- это некоторое расстояние до вершины $v$, которое изменяется по ходу работы алгоритма какждый раз немного улучшаясь, $p(v)$ обновляется вместе с $l$ и запоминает предыдущую вершиу на пути, который мы сейчас считаем минимальным, $mark(v)$ -- пометка вершины: labelled означает, что с момента последнего сканирования этой вершины расстояние до нее изменилось (т.е. уменьшилось), а значит нужно её отсканировать заново; scanned означает, что мы отсканировали вершину, попытались с помощью неё уменьшить расстояния до её соседей, и при этом после этого расстояние до этой вершины не изменялось.
Большинство известных методов нахождения кратчайших путей являются частыми случаями сканирующего метода и отличаются только тем, как выбирать вершины для сканирования в цикле.
### Анализ сканирующего метода
Основные два утверждения касательно сканирующего метода заключается в том, что
* Вне зависимости от выбора вершина для сканирования алгоритм завершается тогда и только тогда, когда в графе нет отрицательных циклов. Это также является необходимым и достаточным условием корректности задачи о кратчайших путях.
* По завершению алгоритма для всех $v$ выполняется
$$
l(v)=d(s, v)
$$
* По завершению $p(v)$ содежит последнее ребро на кратчайшем пути из $s$ в $v$, соответственно ребра $p(v)\rightarrow v$ образуют дерево кратчайших путей.
Первый факт обосновывается следующим образом: если нет отрицательных циклов, значит минимальный путь существует и является простым (без повторений вершин); промежуточные пути в алгоритме являются простыми, вершина помечается $labelled$ только если путь до неё уменьшился, а значит количество раз, когда мы вызовем сканирование вершины конечно.
Обоснования второго факта: для начала заметим, что сканирующий метод обязательно просмотрит все достижимые вершины, так как если запретить повторно сканировать вершину, то сканирущий метод вырождается в BFS. Для недостижимых вершин $l(v)=d(s, v)=+\infty$. Далее заметим, что так как $l(v)$ соответствует длине какого-то пути $s\rightsquigarrow v$, то по определению $d$
$$
l(v)\geq d(s, v).
$$
Пусть оптимальный путь $s\rightsquigarrow v$ состоит из вершин $s=u_1, \ldots, u_k=v$, каждая из этих вершин была просканирована хотя бы раз, а значит по завершению работы алгоритма выполняется
$$
l(u_i)-l(u_{i-1})\leq \omega(u_{i-1}\rightarrow u_i)
$$
(так как $l(u_i)$ может только уменьшаться и при этом после последнего сканирования $u_{i-1}$ это неравенство выполнялось). Просуммировав эти неравенства получаем
$$
l(v)=l(v)-l(s)\leq \sum_{i=2}^{k}\omega(u_{i-1}\rightarrow u_i)=d(s, v).
$$
Третее утверждение вытекает из второго и того факта, что $p(v)$ всегда обновляется вместе с $l(v)$.
```
import random
import graphviz
from interactive_visualization.graph_utils import Graph, Arc, Node
from interactive_visualization.animation_utils import animate_list
def mark_labelled(node):
    # Red: the node's distance decreased since it was last scanned.
    node.SetColor('red')

def mark_scanned(node):
    # Green: the node has been scanned and is unchanged since.
    node.SetColor('green')

def process_node(node):
    # Blue: the node currently being scanned.
    node.SetColor('blue')

def set_previous(arc):
    # Green arc: currently part of the tentative shortest-path tree.
    arc.SetColor('green')

def unset_previous(arc):
    # Black arc: no longer on a tentative shortest path.
    arc.SetColor('black')
def scan_arc(graph, arc, l, p, mark):
    """Relax one arc: if going through arc.beginning shortens the tentative
    distance to arc.end, update l/p and re-label the end node for scanning."""
    relaxed = l[arc.beginning] + arc.weight
    if relaxed < l[arc.end]:
        l[arc.end] = relaxed
        previous = p[arc.end]
        if previous is not None:
            unset_previous(previous)
        # Сохраняем arc, а не arc.beginning, чтобы было больше информации
        p[arc.end] = arc
        set_previous(arc)
        mark[arc.end] = True
        mark_labelled(graph.nodes[arc.end])
def scan_node(graph, node_id, l, p, mark):
    """Scan a node: relax every outgoing arc, then mark the node as scanned."""
    node = graph.nodes[node_id]
    for outgoing in node.arcs:
        scan_arc(graph, outgoing, l, p, mark)
    mark[node_id] = False
    mark_scanned(node)
def random_choice(l, mark):
    """Return a uniformly random labelled node id, or None when none remain.

    The distance map `l` is accepted for interface compatibility but unused.
    """
    candidates = [node_id for node_id, labelled in mark.items() if labelled]
    if not candidates:
        return None
    return random.choice(candidates)
def base_scanning_method(graph, s, choice_function):
    """Generic scanning method for single-source shortest paths.

    graph: Graph to process (node/arc colors are mutated for visualization).
    s: id of the source node.
    choice_function: callable (l, mark) -> next node id to scan, or None to stop.
    Returns (l, p, out_lst): tentative distances, predecessor arcs, and the
    visualization frames captured before and after each scan.
    """
    # l: tentative distance, p: predecessor arc, mark: True == "labelled"
    l = {key: float('Inf') for key in graph.nodes.keys()}
    p = {key: None for key in graph.nodes.keys()}
    mark = {key: False for key in graph.nodes.keys()}
    l[s] = 0
    mark[s] = True
    mark_labelled(graph.nodes[s])
    out_lst = []
    while True:
        node_id = choice_function(l, mark)
        if node_id is None:
            # No labelled nodes left: all distances are final.
            break
        process_node(graph.nodes[node_id])
        out_lst.append(graph.Visualize(l))
        scan_node(graph, node_id, l, p, mark)
        out_lst.append(graph.Visualize(l))
    return l, p, out_lst
# Weighted example graph: Arc(beginning, end, weight).
arcs = [
    Arc(1, 3, 3),
    Arc(1, 4, 7),
    Arc(4, 3, 2),
    Arc(4, 5, 3),
    Arc(1, 5, 2),
    Arc(6, 4, 2),
    Arc(5, 6, 2),
    Arc(6, 7, 1),
    Arc(7, 2, 7),
    Arc(4, 2, 2),
    Arc(3, 2, 5)
]
Graph(arcs).Visualize()
graph = Graph(arcs)
random_scanning_shortest_path_lst = []
# Run the scanning method picking labelled nodes uniformly at random.
l, p, random_scanning_shortest_path_lst = \
    base_scanning_method(graph, 1, random_choice)
animate_list(random_scanning_shortest_path_lst);
```
### Кратчайшие пути на ациклических графах
В случае, если в графе нет не только отрицательных циклов, но вообще любых других, то возникает интересная ситуация: если обрабатывать вершины в топологическом порядке, то каждая вершина будет просканирована ровно один раз. Доказать это очень просто: если в графе допустим топологический порядок, то при сканировании вершины $v$ расстояние обновится только для тех вершин, которые в топологическом порядке идут позже $v$.
Если вершины в графе пронумерованы в топологическом порядке, то в указанной выше процедуре для этого достаточно будет просто вместо случайной вершины выбирать наименьшую по номеру
```
def least_id_choice(l, mark):
    """Return the labelled node with the smallest id, or None when none remain.

    With topologically-numbered DAG nodes this yields a single scan per node.
    """
    candidates = [node_id for node_id, flagged in mark.items() if flagged]
    return min(candidates) if candidates else None
# DAG whose node ids already form a topological order.
graph = Graph([
    Arc(0, 1, 4),
    Arc(0, 2, 2),
    Arc(1, 2, 5),
    Arc(2, 3, 3),
    Arc(1, 4, 10),
    Arc(3, 4, 4),
    Arc(4, 5, 11)
])
# Scanning in topological (= id) order visits each node exactly once.
l, p, topological_scanning_shortest_path_lst = \
    base_scanning_method(graph, 0, least_id_choice)
animate_list(topological_scanning_shortest_path_lst);
```
Это один из двух случаев, когда достигается асимптотическая оценка в $\mathcal{O}(E)$ ($E$ -- количество ребер): во-первых, топологическая сортировка осуществляется обходом в глубину за $\mathcal{O}(E)$; во-вторых каждая вершины сканируется ровно один раз, а значит какждое ребро также сканируется ровно один раз; в-третьих если вместо выбора минимальной вершины просто итерироваться в по всем вершинам в топологическом порядке пропуская недостижимые, то результат будет тот же, и при этом суммарно на это мы тратим $\mathcal{O}(V)$ ($V$ -- количество вершин).
На практике ациклические графы часто возникают при моделировании каких-либо протекающих во времени событий. Например одним из промежуточных результатов распознавания речи называе "словные сетки" -- это граф, в котором вершины помечены временнОй меткой, а на ребрах написано слово и информация об акустическом/языковом правдоподобие; ребра всегда ведут из вершин с меньшей временной отметков в вершину с большей и означают, что на этом интервале с такой-то вероятностью было произнесено такое-то слово. Пути в этом графе соответствуют временному интервалу, начинающемуся от временной отметки первой вершины, заканчивающемуся во временной отметке последней вершины. Forward-backward алгоритмы по сути являются частными случаями нахождения кратчайших путей в ациклическом графа, в том числе и алгоритм Витерби для нахождения наиболее правдоподобной последовательности состояний в марковской цепи.
### Обход в ширину и алгоритм Дейкстры
Второй случай, в котором оказывается возможной минимальная оценка для поиска кратчайших путей -- это случай единичных весов, т.е. когда длины всех ребер одинаковы. С точки зрения сканирующего метода в этом случае можно добиться того, чтобы каждая вершина сканировалась ровно один раз, все, что для этого нужно сделать, а это выбирать для очередного сканирования вершину с минимальным расстоянием.
```
def least_distance_choice(l, mark):
    """Return the labelled node with the smallest tentative distance,
    or None when no labelled node remains (Dijkstra's selection rule)."""
    candidates = [node_id for node_id, flagged in mark.items() if flagged]
    if not candidates:
        return None
    return min(candidates, key=l.__getitem__)
# Same topology as before but with unit weights: Dijkstra's rule
# degenerates into a breadth-first search.
graph = Graph([
    Arc(1, 3, 1),
    Arc(1, 4, 1),
    Arc(4, 3, 1),
    Arc(4, 5, 1),
    Arc(1, 5, 1),
    Arc(6, 4, 1),
    Arc(5, 6, 1),
    Arc(6, 7, 1),
    Arc(7, 2, 1),
    Arc(4, 2, 1),
    Arc(3, 2, 1)
])
l, p, bfs_shortest_path_lst = \
    base_scanning_method(graph, 1, least_distance_choice)
animate_list(bfs_shortest_path_lst);
```
На самом деле для того, чтобы при выборе вершины с минимальным расстоянием каждая вершина сканировалась единожды, достаточно более слабого условия -- чтобы в графе не было ребер с отрицательным весом. Ключевое соображение: кратчайший путь до вершины $v$ может проходить по тем вершинам, которые ближе к $s$, чем $v$. Из этого утверждения следует другое, чуть менее очевидное: если мы уже каким-то образом нашли первые $k$ ближайших вершин и посчитали минимальные пути, проходящие только через них, то расстояние до ближайшей из оставшихся вершин посчитано корректно. Если посмотреть на величины значения $l(v)$, которые мы получаем после $k$-ого сканирования (обозначим её за $l(v, k)$), то получится величина минимального пути от $s$ до $v$, имеющая в качестве промежуточных вершин только $k$ ближайших к $s$. Если обозначить за $u(k)$ -- $k$-ую ближайшую вершину к $s$, то мы получаем следующее соотношение
$$
l(v, k)=\min\{l(v, k-1), l(u(k), k-1)+\omega(u(k)\rightarrow v)\}
$$
Левая величина в минимуме -- это минимум среди путей, которые не используют $u(k)$, правая -- минимум среди путей, которые используют $u(k)$.
```
# Dijkstra's algorithm on the earlier weighted graph (all weights non-negative).
graph = Graph(arcs)
l, p, dijkstra_shortest_path_lst = \
    base_scanning_method(graph, 1, least_distance_choice)
animate_list(dijkstra_shortest_path_lst);
```
В общем виде этот метод впервые обосновал Эдсгер Дейкстра. Ключевым вопросом в этом алгоритме является нахождение минимальной вершины для сканирования. В случае, если все длины одинаковы, то это можно сделать обычной очередью: сохраняем в очереди все вершины с пометкой $labelled$, при сканировании ребра помещаем вершину в конец очереди если расстояние уменьшилось, при выборе новой вершины берем первую из очереди. В общем-то в этом случае алгоритм вырождается в обход в ширину.
В общем случае для работы с вершинами, помеченными $labelled$ необходима специальная структура данных, позволяющая быстро делать следующие операции:
* Добавить вершину в множество
* Обновить расстояние для вершины из множества
* Найти миимальную по расстоянию вершину из множества
* Удалить вершину из множества
Одним из простых способов реализовать все эти операции -- использовании бинарной кучи или бинарного дерева, все операции выполняются за $\mathcal{O}(\log N)$, где $N$ -- размер множества, что ведет к общей сложности алгоритма в $\mathcal{O}(E\log V)$.
Наиболее асимптотически оптимальный известный алгоритм использует [кучу фибоначчи](https://en.wikipedia.org/wiki/Fibonacci_heap), которая умеет делать первые три операции за $\mathcal{O}(1)$ и последнюю за $\mathcal{O}(\log N)$, что ведет к общей сложности $\mathcal{O}(E+V\log V)$
К сожалению все описанные рассуждения ломаются, если в графе возникают ребра отрицательной длины, например единственность сканирования пропадает например на следующем примере
```
# The negative arc 3 -> 2 breaks Dijkstra's single-scan-per-node guarantee:
# node 2 gets re-labelled after it has already been scanned.
graph = Graph([
    Arc(1, 2, 2),
    Arc(1, 3, 3),
    Arc(3, 2, -2),
    Arc(2, 4, 1),
])
l, p, dijkstra_shortest_path_lst = \
    base_scanning_method(graph, 1, least_distance_choice)
animate_list(dijkstra_shortest_path_lst);
```
### Алгоритм Форда-Беллмана
Частично этот алгоритм похож на перидыдущий тем, что по сути является просто применением обычной очереди для общего случая. Обычно алгоритм Форда-Беллмана имеет следующий вид
```
def ford_bellman(graph, s):
    """Bellman-Ford: V-1 rounds, each scanning every node once.

    graph: Graph to process; s: id of the source node.
    Returns (l, p, out_lst) like base_scanning_method: final distances,
    predecessor arcs, and one visualization frame per round.
    """
    l = {key: float('Inf') for key in graph.nodes.keys()}
    p = {key: None for key in graph.nodes.keys()}
    mark = {key: False for key in graph.nodes.keys()}
    l[s] = 0
    mark[s] = True
    mark_labelled(graph.nodes[s])
    out_lst = [graph.Visualize(l)]
    # V-1 full rounds are always enough when there is no negative cycle.
    for k in range(len(graph.nodes)-1):
        for node_id in graph.nodes.keys():
            # BUGFIX: scan_node takes (graph, node_id, l, p, mark) and expects
            # a node id; the original called scan_node(node) with a node
            # object and one argument, which raises TypeError.
            scan_node(graph, node_id, l, p, mark)
        out_lst.append(graph.Visualize(l))
    return l, p, out_lst
```
Здесь я специально сделал так, что внешний цикл итерируется фиксированное число раз -- $V-1$, при этом переменная $k$ не используется. Внутренний цикл сканирует каждую вершину по одному разу, это эквивалентно тому, чтобы просканировать по одному разу каждое ребро. Оказывается, что такого количества проходов всегда достаточно, даже если в графе есть отрицательные ребра. Идея следующая: давайте рассмотрим величину $l(v, k)$ -- минимальная длина пути от $s$ до $v$, состоящего из не более, чем $k$ ребер, тогда для неё справедливо следующее соотношение
$$
l(v, k)=\min \{l(v, k-1), \min_u[l(u, k-1)+\omega(u\rightarrow v)]\}.
$$
Иначе говоря, либо оптимальный путь использует меньше, чем $k$ ребер, либо из него можно выделить последнее ребро, а оставшаяся часть использует $k-1$ ребро.
Один проход по всем ребрам гарантирует нам переход от $l(v, k-1)$ к $l(v, k)$, однако из-за того, что мы не считаем непосредственно эти величины, а храним их в одном массиве $l$, то получается что на итерации $k$ у нас обычно чуть лучше, чем $l(v, k)$, охватывают пути не только длины $k$, но гарантировать мы может охват только таких путей. Наконец, после $V-1$ итераций мы обязательно охватим все простые пути. Если же оказалось, что после $V-1$ итерации сканирование ребер продолжит уменьшать вес, то в графе есть отрицательный цикл. Этот факт позволяет использовать Алгоритм Форда-Беллмана для нахождения отрицательных циклов.
Возвращаясь к сканирующему методу: мы можем спокойно пропускать в цикле вершины, которые помечены $scanned$. С точки зрения вызовов $scan\_node$ это будет эквивалентно тому, чтобы использовать в сканирующем методе обычную очередь, но при этом использование очереди эффективнее.
# Примеры из МО
### Алгоритм Витерби
Одна из основных задач для марковских цепей заключается в том, чтобы найти последовательность состояний в марковской цепи наиболее правдоподобно соответствующую некоторой последовательности наблюдений
$$
H(O)=argmax_{v=\{v_0, \ldots, v_n\}}\prod_{i=1}^nP(v_{i-1}\rightarrow v_i~|~O_i)
$$
Например в распознавании речи $P(u\rightarrow v~|~O)$ -- это акустическая модель, классифицирующая кусок звука по нескольким языковым единицам, например фонемам. Из наиболее правдоподобной последовательности состояний можно извлечь последовательность фонем и в конце концов последовательность слов.
Если немного переписать величину выше, можно получить в точности задачу о кратчайшем пути
$$
argmax_{v=\{v_0, \ldots, v_n\}}\prod_{i=1}^nP(v_{i-1}\rightarrow v_i~|~O_i)=argmax_{v=\{v_0, \ldots, v_n\}}\sum_{i=1}^n\mathcal \log{P}(v_{i-1}\rightarrow v_i~|~O_i)=argmin_{v=\{v_0, \ldots, v_n\}}\sum_{i=1}^n(-\log P(v_{i-1}\rightarrow v_i~|~O_i))
$$
Стоит отметить, если $P$ -- вероятность, то что $-\log P\geq 0$, что, как мы уже обсудили, имеет значение для кратчайших путей, но не в этом случае.
Пока что мы перешли от умножения к сложению, и от максимума к минимуму, но мы пока еще не получили задачу о кратчайшем пути: пока что у нас задача нахождения минимального пути, содержащую фиксированное число переходов, котором веса ребер зависят от времени. Эта задачу можно свести к обычной задаче о кратчачйшем пути на так расгиренном графе как на следующих примерах.
```
# A small Markov chain viewed as a graph (including self-loops) ...
graph = Graph([
    Arc(0, 1),
    Arc(1, 2),
    Arc(2, 0),
    Arc(1, 1),
    Arc(2, 2),
    Arc(0, 0)
])
graph.Visualize()
# ... and its time-expanded version over 4 steps: a copy of the node set per
# time step, with arcs only between consecutive copies. The result is acyclic.
expanded_graph = graph.expand_in_time(4)
expanded_graph.Visualize()
```
## Отслеживание объекта на плоскости
Предположим, что у нас есть набор из $m$ сенсоров, расположенныз в точках $x_1, \ldots, x_m$ соответственно, которые умеют измерять расстояния до объекта (не важно, каким образом) и выдавать его в качестве функции правдоподобия (если сенсор выдает одно число, то можно считать, что он выдает индикатор-функцию для этого числа). Допустим мы снимаем показание на равных промежутках времени, на замере $t$ мы получаем функции правдоподобия $g_i(t, \|x-x_i\|)$. Оценка максимального правдоподобия положения объекта в пространстве c учетом сделанных замеров выглядит как-то так
\begin{align*}
x&=argmax_{y}\sum_{i=1}^m\log P(y~|~g_i) \\
&=argmax_{y}\sum_{i=1}^m\log [P(g_i~|~y)P(y)] \\
&=argmax_{y}\sum_{i=1}^m\sum_{t=0}^T\log [g_i(t, \|y(t)-x_i\|)P(y)]
\end{align*}
Если не накладывать никакие априорные ограничения ограничения на $x$, то вероятность $P(y)$ можно убрать и получить, что
$$
x(t)=argmax_{z}\sum_{i=1}^T \log g_i(t, \|z-x_i\|),
$$
т.е. оценка координат происходит раздельно. Такой подход например может привести к тому, что оценка будет сильно болтаться, в то время, как мы ожидаем, что между двумя последовательными оценками разница должна быть небольшой.
```
import numpy as np
import scipy, scipy.stats, scipy.optimize
from scipy.stats import chi2
class Sensor:
    """Range sensor at a fixed position reporting a chi-squared log-likelihood.

    The reading is deliberately noisy: each call mixes in a second chi-squared
    term with a random number of degrees of freedom, scaled by distortion_scale.
    """

    def __init__(self, x, y, r=10, scale=4):
        self.position = np.array([x, y])
        self.max_distortion_range = r
        self.distortion_scale = scale

    def loglikelihood(self, x, y, x_real, y_real):
        """Log-likelihood that the object is at (x, y) given it is truly at
        (x_real, y_real). Stochastic: draws a fresh distortion on every call."""
        measured = np.linalg.norm(self.position - np.array([x, y]))
        true_distance = int(np.linalg.norm(self.position - np.array([x_real, y_real])))
        distortion_df = random.randint(1, self.max_distortion_range)
        return chi2.logpdf(measured, true_distance) + self.distortion_scale * chi2.logpdf(measured, distortion_df)
```
Здесь сенсоры моделируются с помощью $\chi$-квадрат распределения. Используется тот факт, что максимум
```
import matplotlib.pyplot as plt
plt.rcParams['font.size'] = 15
plt.rcParams["figure.figsize"] = [11,11]
# Simulate the true trajectory: 30 steps of a random walk starting at (5, 5),
# each step uniform in the unit box [-1, 1] x [-1, 1].
path = [np.array([5, 5])]
for i in range(30):
    dx = 2 * np.random.rand(2) - 1
    path.append(path[-1] + dx)
def paths(path):
    """Build one (closed) matplotlib figure per prefix of the trajectory.

    Frame i draws path[0..i] as a line and marks the current point in black.
    """
    frames = []
    for step in range(len(path)):
        fig = plt.figure()
        axes = fig.add_axes([0, 0, 1, 1])
        axes.set(xlim=(-10, 20), ylim=(-10, 20))
        prefix = path[:step + 1]
        axes.plot([px for px, py in prefix], [py for px, py in prefix])
        current = path[step:step + 1]
        axes.scatter([px for px, py in current], [py for px, py in current], color='black')
        plt.close(fig)
        frames.append(fig)
    return frames
animate_list(paths(path));
distortion_level = 0.25
distortion_radius = 20
bottom_left = -10
top_right = 20
# Four identically-configured sensors, one at each corner of the working area.
sensors = [
    Sensor(bottom_left, bottom_left, r = distortion_radius, scale=distortion_level),
    Sensor(bottom_left, top_right, r = distortion_radius,scale=distortion_level),
    Sensor(top_right, bottom_left, r = distortion_radius, scale=distortion_level),
    Sensor(top_right, top_right, r = distortion_radius, scale=distortion_level)
]
def minimize_simple(f, x_min, y_min, x_max, y_max):
    """Crude 2-D minimizer: a coarse 10x10 grid scan over the box followed by
    10 random refinement steps with a shrinking step size.

    Returns the best (x, y) found; f takes a [x, y] pair and returns a cost.
    """
    best_x, best_y, best_cost = x_min, y_min, float('inf')
    # Coarse grid search (note: the upper bounds themselves are not sampled).
    for i in range(10):
        for j in range(10):
            grid_x = x_min + i * (x_max - x_min) / 10
            grid_y = y_min + j * (y_max - y_min) / 10
            cost = f([grid_x, grid_y])
            if cost < best_cost:
                best_x, best_y, best_cost = grid_x, grid_y, cost
    # Random local refinement around the incumbent with step 1/(i+1).
    for i in range(10):
        step = 1.0 / (i + 1)
        dx, dy = step * (np.random.rand(2) * 2 - 1)
        cost = f([best_x + dx, best_y + dy])
        if cost < best_cost:
            best_x, best_y, best_cost = best_x + dx, best_y + dy, cost
    return (best_x, best_y)
# Maximum-likelihood estimate at every time step, searching the whole area.
estimates = []
prev = [0, 0]
for x in path:
    # Negative joint log-likelihood of candidate position y over all sensors.
    def J(y):
        return -sum([sensor.loglikelihood(y[0], y[1], x[0], x[1]) for sensor in sensors])
    #estimates.append(scipy.optimize.minimize(J, prev, method='Nelder-Mead').x)
    estimates.append(minimize_simple(J, bottom_left, bottom_left, top_right, top_right))
    cur = estimates[-1]
    #print(J(cur), J([cur[0] - 1, cur[1]]), J([cur[0], cur[1] - 1]), J([cur[0] + 1, cur[1]]), J([cur[0], cur[1] + 1]))
    prev = estimates[-1]
def two_paths(path, estimates):
    """Build one figure per step showing the true trajectory and the
    estimated trajectory together, with both current points in black."""
    frames = []
    for step in range(len(path)):
        fig = plt.figure()
        ax = fig.add_axes([0, 0, 1, 1])
        ax.set(xlim=(-10, 20), ylim=(-10, 20))
        for track in (path, estimates):
            prefix = track[:step + 1]
            ax.plot([p[0] for p in prefix], [p[1] for p in prefix])
        for track in (path, estimates):
            current = track[step:step + 1]
            ax.scatter([p[0] for p in current], [p[1] for p in current],
                       color='black')
        plt.close(fig)
        frames.append(fig)
    return frames
# Per-step distance between the true position and the unconstrained
# maximum-likelihood estimate (used in the comparison plot at the end).
max_like_distances = []
for x, y in zip(path, estimates):
max_like_distances.append(np.linalg.norm(x - y))
animate_list(two_paths(path, estimates));
```
Из-за помех получается так себе. Тем не менее мы можем попытаться как-то ограничить эти скачки, так как в реальном мире движение происходит планомерно. Простой способ сделать это — сузить поиск максимума правдоподобия до фиксированного расстояния от предыдущей оценки.
```
# Same tracking loop, but the likelihood search is restricted to a 2 x 2
# box around the previous estimate, which suppresses large jumps.
estimates = []
prev = [0, 0]
for x in path:
def J(y):
return -sum([sensor.loglikelihood(y[0], y[1], x[0], x[1]) for sensor in sensors])
#estimates.append(scipy.optimize.minimize(J, prev, method='Nelder-Mead').x)
estimates.append(minimize_simple(J, prev[0] - 1, prev[1] - 1, prev[0] + 1, prev[1] + 1))
cur = estimates[-1]
prev = estimates[-1]
animate_list(two_paths(path, estimates));
base_distances = []
for x, y in zip(path, estimates):
base_distances.append(np.linalg.norm(x - y))
```
Уже лучше, но есть проблема: этот алгоритм всё-таки жадный и учитывает только предыдущую оценку. Можно ограничить возможные точки поиска до заранее посчитанной сетки и смоделировать $P(y)$ марковской цепью на этой сетке: переходы разрешаются только между узлами, расстояние между которыми не больше некоторого порога. Таким образом мы получаем задачу поиска наиболее правдоподобной последовательности переходов в марковской цепи, где вероятность перехода соответствует правдоподобию нахождения в узле на конце этого перехода.
```
# Beam search over a Markov chain defined on a 31 x 31 grid of candidate
# positions.  Transitions are allowed only between nodes at most 1.0 apart,
# so the estimate can move at most one grid "step" per observation.
pre_grid = [[(bottom_left + i * (top_right - bottom_left) / 30, bottom_left + j * (top_right - bottom_left) / 30) for j in range(31)] for i in range(31)]
grid = []
for row in pre_grid:
grid.extend(row)
# NOTE(review): building the transition table is O(|grid|^2) pairwise
# distance checks; fine for 961 nodes, worth a spatial index at larger sizes.
allowed_transitions = []
for i in range(len(grid)):
allowed_transitions.append([])
for j in range(len(grid)):
if np.linalg.norm(np.array(grid[i]) - np.array(grid[j])) <= 1.0:
allowed_transitions[-1].append(j)
beam = 5.0  # tokens worse than best_cost + beam are pruned
history_importance = 1.0  # weight of the accumulated (historic) cost
estimates = []
# Start token: the grid node within 0.5 of the origin on both coordinates.
for i in range(len(grid)):
if abs(grid[i][0]) < 0.5 and abs(grid[i][1]) < 0.5:
token = (i, 0.0, None)
break
# A token is (grid-node index, accumulated cost, backpointer to parent token).
tokens = [token]
for x in path:
def J(y):
return -sum([sensor.loglikelihood(y[0], y[1], x[0], x[1]) for sensor in sensors])
new_tokens = dict()
J_cache = dict()  # J is stochastic, so memoize it once per node per step
best_cost = float('inf')
for token in tokens:
state, cost, prev = token
for j in allowed_transitions[state]:
if j not in J_cache:
new_cost = J(grid[j])
J_cache[j] = new_cost
else:
new_cost = J_cache[j]
if new_cost + history_importance * cost > best_cost + beam:
continue
# NOTE(review): this comparison uses new_cost + cost while the stored
# value uses new_cost + history_importance * cost; identical only
# because history_importance == 1.0 — confirm which is intended.
if j not in new_tokens or new_tokens[j][1] > new_cost + cost:
new_tokens[j] = (j, new_cost + history_importance * cost, token)
best_cost = min(best_cost, new_cost + history_importance * cost)
tokens = new_tokens.values()
#print(best_cost)
# Backtrace from the cheapest surviving token to recover the node sequence.
reversed_estimates = []
tmp = min(tokens, key=lambda x: x[1])
while tmp is not None:
reversed_estimates.append(grid[tmp[0]])
tmp = tmp[2]
estimates = list(reversed(reversed_estimates))
markov_beam_search_distances = []
for x, y in zip(path, estimates):
markov_beam_search_distances.append(np.linalg.norm(x - y))
animate_list(two_paths(path, estimates));
```
К сожалению, довольно часто этот код уводит приближение на бесконечность; скорее всего, это происходит из-за ошибок вычисления и помех.
```
# Compare the per-step tracking error of the three estimators.
plt.plot([i for i in range(len(base_distances))], max_like_distances, label='Расстояние оценом МП')
plt.plot([i for i in range(len(base_distances))], base_distances, label='Расстояние в простом режиме')
plt.plot([i for i in range(len(base_distances))], markov_beam_search_distances, label='Расстояние при лучевом поиске')
plt.legend()
plt.show()
```
| github_jupyter |
# Model factory
Single-link models can be easily generated using a few parameters.
```
from pcg_gazebo.generators.creators import create_models_from_config
from pcg_gazebo.task_manager import Server
import os
# Start an empty-world Gazebo simulation and keep a handle to it.
server = Server()
server.create_simulation('default')
simulation = server.get_simulation('default')
simulation.create_gazebo_empty_world_task()
print(simulation.get_task_list())
print('Is Gazebo running: {}'.format(
simulation.is_task_running('gazebo')))
simulation.run_all_tasks()
# Create a Gazebo proxy used below to spawn models into the simulation.
gazebo_proxy = simulation.get_gazebo_proxy()
import random
def create_and_spawn(config, pos=None):
    """Build models from *config* and spawn each one in the simulation.

    When *pos* is None every model gets a fresh random position with
    x, y in [-10, 10) and z in [0, 2); otherwise all models share *pos*.
    """
    for model in create_models_from_config(config):
        if pos is not None:
            spawn_pos = pos
        else:
            spawn_pos = [
                20 * random.random() - 10,
                20 * random.random() - 10,
                2 * random.random()
            ]
        model.spawn(
            gazebo_proxy=gazebo_proxy,
            robot_namespace=model.name,
            pos=spawn_pos)
```
## Extruded models
```
from pcg_gazebo.generators.shapes import random_rectangle, \
random_points_to_triangulation, random_rectangles
# Extruded-polygon models spawned at three fixed positions: a random
# rectangle, a triangulated random point set, and random merged rectangles.
config = [
dict(
type='extrude',
args=dict(
polygon=random_rectangle(),
height=0.2,
extrude_boundaries=False,
name='extruded_poly_random_rectangle',
color=None,
)
)
]
create_and_spawn(config, [-20, 0, 0.1])
# Triangulation of random points, extruded with hollow walls (thickness 0.1).
config = [
dict(
type='extrude',
args=dict(
polygon=random_points_to_triangulation(),
height=0.8,
thickness=0.1,
extrude_boundaries=True,
name='extruded_poly_triangulation',
color='random'
)
)
]
create_and_spawn(config, [0, 0, 0.4])
# Random rectangles merged into a wall-like layout, xkcd color palette.
config = [
dict(
type='extrude',
args=dict(
polygon=random_rectangles(),
height=0.5,
thickness=0.15,
extrude_boundaries=True,
name='extruded_poly_walls',
color='xkcd'
)
)
]
create_and_spawn(config, [20, 0, 0.25])
```
## Box-shaped models
```
# Box models from the box_factory: static and dynamic (mass-bearing)
# variants; with use_permutation=True every size/mass combination is built.
config = [
dict(
type='box_factory',
args=dict(
size=[
[0.1, 0.4, 0.5],
[1, 2, 3]
],
name='box_static_var_size',
use_permutation=True,
color='xkcd'
)
),
dict(
type='box_factory',
args=dict(
size=[
[0.1, 0.4, 0.5],
[1, 2, 3]
],
mass=12,
name='box_dynamic_var_size',
use_permutation=False,
color='xkcd'
)
),
dict(
type='box_factory',
args=dict(
size=[
[0.2, 0.4, 0.15],
[1.2, 0.25, 0.7]
],
mass=[5, 2],
name='box_dynamic_permutate_size_mass',
use_permutation=True,
color='xkcd'
)
)
]
create_and_spawn(config)
```
Creating multiple boxes with lambda arguments
```
# "Lambda" arguments: these strings are presumably evaluated by the model
# factory to produce arrays at creation time.
# NOTE(review): string evaluation implies eval()-like behavior inside
# pcg_gazebo — never build these strings from untrusted input.
config = [
dict(
type='box_factory',
args=dict(
size="__import__('numpy').random.random((2, 3))",
use_permutation=True,
name='box_static_lambdas',
color='random'
)
),
dict(
type='box_factory',
args=dict(
size="__import__('numpy').random.random((4, 3))",
mass="__import__('numpy').arange(1, 10, 2)",
use_permutation=True,
name='box_dynamic_lambdas',
color='random'
)
)
]
create_and_spawn(config)
```
## Cylinder-shaped models
```
# Cylinder models: one fixed dynamic cylinder, a factory permutation over
# length/radius/mass lists, and a factory fed by evaluated string arguments.
config = [
dict(
type='cylinder',
args=dict(
radius=3,
length=2,
mass=10,
name='cylinder',
pose=[0, 0, 1, 0, 0, 0]
)
),
dict(
type='cylinder_factory',
args=dict(
length=[0.3, 0.5],
radius=[0.2, 0.4],
mass=[5, 2],
name='cylinder_dynamic_permutate_radius_length_mass',
use_permutation=True,
color='xkcd'
)
),
dict(
type='cylinder_factory',
args=dict(
length="__import__('numpy').linspace(0.1, 10, 2)",
radius="__import__('numpy').random.random(2)",
mass="__import__('numpy').arange(1, 2, 1)",
use_permutation=True,
name='cylinder_dynamic_lambdas',
color='xkcd'
)
)
]
create_and_spawn(config)
```
## Sphere-shaped models
```
# Sphere models, mirroring the cylinder examples: fixed sphere, list
# permutations, and evaluated string arguments.
config = [
dict(
type='sphere',
args=dict(
radius=3,
mass=10,
name='sphere',
pose=[0, 0, 1.5, 0, 0, 0]
)
),
dict(
type='sphere_factory',
args=dict(
radius=[0.3, 0.9],
mass=[5, 2],
name='sphere_dynamic_permutate_radius_mass',
use_permutation=True,
color='xkcd'
)
),
dict(
type='sphere_factory',
args=dict(
radius="__import__('numpy').random.random(2) * 3",
mass="__import__('numpy').arange(1, 4, 1)",
use_permutation=True,
name='sphere_dynamic_lambdas',
color='xkcd'
)
)
]
create_and_spawn(config)
```
## Mesh models
```
# Mesh-based models all built from the same STL, demonstrating exact vs
# approximated (box/cylinder/sphere) collision geometry and, in the last
# entry, a user-supplied inertia tensor instead of the approximated one.
mesh_filename = os.path.abspath('./meshes/monkey_offset.stl')
config = [
dict(
type='mesh',
args=dict(
visual_mesh=mesh_filename,
visual_mesh_scale=[1, 1, 1],
use_approximated_collision=False,
mass=10,
name='monkey_dynamic_no_approx_collision',
color='xkcd'
)
),
dict(
type='mesh',
args=dict(
visual_mesh=mesh_filename,
visual_mesh_scale=[1, 1, 1],
use_approximated_collision=True,
approximated_collision_model='box',
mass=20,
name='monkey_dynamic_with_approx_collision_box',
color='xkcd'
)
),
dict(
type='mesh',
args=dict(
visual_mesh=mesh_filename,
visual_mesh_scale=[1, 1, 1],
use_approximated_collision=True,
mass=15,
approximated_collision_model='cylinder',
name='monkey_dynamic_with_approx_collision_cylinder',
color='xkcd'
)
),
dict(
type='mesh',
args=dict(
visual_mesh=mesh_filename,
visual_mesh_scale=[1, 1, 1],
use_approximated_collision=True,
mass=3,
approximated_collision_model='sphere',
name='monkey_dynamic_with_approx_collision_sphere',
color='xkcd'
)
),
dict(
type='mesh',
args=dict(
visual_mesh=mesh_filename,
visual_mesh_scale=[1, 1, 1],
use_approximated_collision=True,
mass=3,
approximated_collision_model='sphere',
name='monkey_dynamic_defined_inertia',
color='xkcd',
use_approximated_inertia=False,
inertia=dict(
ixx=0.1,
iyy=0.1,
izz=0.1
)
)
)
]
create_and_spawn(config)
# List every model currently known to Gazebo, then stop all tasks.
for name in gazebo_proxy.get_model_names():
print('\t - {}'.format(name))
simulation.kill_all_tasks()
```
The final result of the creation of models can be seen below

| github_jupyter |
### - Split Compounds into Train & Test data based on the number of MOAs that are attributed to them.
```
import os
import pathlib
import requests
import pickle
import argparse
import pandas as pd
import numpy as np
import re
from os import walk
from collections import Counter
import random
import shutil
from split_compounds import split_cpds_moas
# Load the compounds significant in both assays; they define the compound
# universe used for the train/test split below.
common_file = pathlib.Path(
"..", "..", "6.paper_figures", "data", "significant_compounds_by_threshold_both_assays.tsv.gz"
)
common_df = pd.read_csv(common_file, sep="\t")
common_compounds = common_df.compound.unique()
print(len(common_compounds))
print(common_df.shape)
common_df.head(2)
# Level-4 compound replicate profiles for Cell Painting ...
data_path = '../../1.Data-exploration/Profiles_level4/cell_painting/cellpainting_lvl4_cpd_replicate_datasets/'
df_level4_cp = pd.read_csv(
os.path.join(data_path, 'cp_level4_cpd_replicates.csv.gz'),
compression='gzip',
low_memory = False
)
# ... and for L1000.
data_path = '../../1.Data-exploration/Profiles_level4/L1000/L1000_lvl4_cpd_replicate_datasets/'
df_level4_L1 = pd.read_csv(
os.path.join(data_path, 'L1000_level4_cpd_replicates.csv.gz'),
compression='gzip',
low_memory = False
)
### We are interested in compounds found both in L1000 and Cell painting
cp_cpd = df_level4_cp['pert_iname'].unique().tolist()
L1_cpd = df_level4_L1['pert_iname'].unique().tolist()
# Keep only the common (significant) compounds in both datasets.
df_level4_cp = df_level4_cp.loc[df_level4_cp['pert_iname'].isin(common_df.compound)].reset_index(drop=True)
df_level4_L1 = df_level4_L1.loc[df_level4_L1['pert_iname'].isin(common_df.compound)].reset_index(drop=True)
# Sanity check: every Cell Painting compound must also be in L1000.
# NOTE(review): unique() is recomputed inside the loop on every iteration;
# hoisting it into a set would make this check O(n) instead of O(n^2).
for cpd in df_level4_cp['pert_iname'].unique():
if cpd not in df_level4_L1['pert_iname'].unique():
print('Something is Wrong!!')
len(df_level4_cp['pert_iname'].unique())
len(df_level4_L1['pert_iname'].unique())
##Exclude DMSO
df_level4_cp = df_level4_cp[df_level4_cp['pert_iname'] != 'DMSO'].reset_index(drop=True)
df_level4_L1 = df_level4_L1[df_level4_L1['pert_iname'] != 'DMSO'].reset_index(drop=True)
df_level4_cp.shape
df_level4_L1.shape
# Normalize MOA labels to lowercase in both datasets.
df_level4_cp['moa'] = df_level4_cp['moa'].apply(lambda x: x.lower())
df_level4_L1['moa'] = df_level4_L1['moa'].apply(lambda x: x.lower())
#compounds and their respective MOAs -- using either df_level4_cp or df_level4_L1 is okay
df_cpds_moas = df_level4_cp.drop_duplicates(['pert_iname','moa'])[['pert_iname','moa']]
cpds_moa = dict(zip(df_cpds_moas['pert_iname'], df_cpds_moas['moa']))
len(cpds_moa)
# Perform the actual train/test split of compounds by MOA membership.
df_pert_cpds_moas = split_cpds_moas(cpds_moa)
df_pert_cpds_moas
len(df_pert_cpds_moas[df_pert_cpds_moas['test']]['moa'].unique()) ##moas in the test data
def get_moa_count(df):
    """Count, per MOA, how many compounds fall in the train and test splits.

    Parameters
    ----------
    df : pd.DataFrame
        One row per compound, with a 'pert_iname' column, a 'moa' column and
        summable (boolean) 'train'/'test' membership columns.

    Returns
    -------
    pd.DataFrame
        One row per MOA with the summed train/test counts as integers.
    """
    # Direct groupby-sum replaces the original agg(['sum']) + column
    # droplevel dance; the result is identical.
    return df.drop(columns=['pert_iname']).groupby('moa').sum().reset_index()
def get_test_ratio(df):
    """Return the train/test compound-count ratio for one MOA row.

    *df* is a row (Series) with numeric 'train' and 'test' fields, as
    produced by ``get_moa_count``.  Returns 0.0 when the MOA has no test
    compounds, so the division is always safe and the result is uniformly
    float-typed (the original mixed an int 0 with float ratios).
    """
    return df["train"] / df["test"] if df["test"] > 0 else 0.0
# Per-MOA train/test counts plus the train/test ratio column.
df_moa_count = get_moa_count(df_pert_cpds_moas)
df_moa_count['test_ratio'] = df_moa_count.apply(get_test_ratio, axis=1)
##All MOAs found in test should be found in train data, so this should output nothing...GOOD!
df_moa_count[(df_moa_count['train'] == 0) & (df_moa_count['test'] >= 1)]
##moas that are represented in more than one compounds (> 1), present in train set but not present in test set
df_moa_count[(df_moa_count['train'] > 1) & (df_moa_count['test'] == 0)]
len(df_pert_cpds_moas[df_pert_cpds_moas['train']]['pert_iname'].unique()) ##no of compounds in train data
len(df_pert_cpds_moas[df_pert_cpds_moas['test']]['pert_iname'].unique()) ##no of compounds in test data
def save_to_csv(df, path, file_name, compress=None):
    """Save *df* as CSV under *path*, creating the directory if needed.

    Parameters
    ----------
    df : pd.DataFrame
        Dataframe to write (without the index).
    path : str
        Target directory; created (including parents) when missing.
    file_name : str
        Name of the CSV file inside *path*.
    compress : str, optional
        Passed to ``DataFrame.to_csv`` as ``compression``.
    """
    # makedirs(exist_ok=True) handles nested paths and avoids the
    # check-then-create race; the original os.mkdir failed on both.
    os.makedirs(path, exist_ok=True)
    df.to_csv(os.path.join(path, file_name), index=False, compression=compress)
# Persist the compound train/test split for downstream notebooks.
save_to_csv(df_pert_cpds_moas, "data", 'split_moas_cpds.csv')
```
| github_jupyter |
# Basic MNIST Example
This basic example shows loading from a YAML file. You can specify all the parameters in the yaml file, but we're going to load the raw data using tensorflow.
```
import numpy as np
import tensorflow as tf
from pygoko import CoverTree
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
# Scale pixels to [0, 1] and flatten each 28x28 image to a 784-vector;
# the cover tree is fit on float32 points with int64 labels.
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train = x_train.astype(np.float32)
y_train = y_train.astype(np.int64)
x_train = x_train.reshape(-1, 28*28)
```
Here we build the covertree, with a leaf cutoff and a minimum resolution index to control the size of the tree.
The minimum resolution index is the scale at which the tree stops splitting. This can be used to control the L2 error (we use the standard fast implementation, which is not the most accurate), or to specify a scale at which the KNN doesn't matter to you.
The leaf cutoff controls the size of individual leafs of the tree. If a node covers fewer than this number of points, the splitting stops and the node becomes a leaf.
The scale base controls the down-step of each split. 1.3 is a good default. It is usually close to the fastest at creating the tree but can be hard to reason about. Another popular choice is 2, which means the radius halves at each step.
```
# Build the cover tree; parameter meanings are described in the text above.
tree = CoverTree()
tree.set_leaf_cutoff(10)  # nodes covering <= 10 points become leaves
tree.set_scale_base(1.3)  # radius shrink factor per level
tree.set_min_res_index(-20)  # scale at which splitting stops
tree.fit(x_train,y_train)
```
Here's the basic KNN for this data structure.
```
# 5 nearest neighbors of the all-zeros (blank) image.
point = np.zeros([784], dtype=np.float32)
tree.knn(point,5)
```
The nodes are addressable by specifying the scale index and the point index (in the originating dataset). This errors out if you supply an address that isn't known to the tree. (Currently this surfaces as Rust panicking about unwrapping an option that is a None.) Only use this creation method with known, correct addresses.
```
# Nodes are addressed by (scale index, point index); tree.node() re-resolves
# an address back into a node object.
root = tree.root()
print(f"Root address: {root.address()}")
for child in root.children():
child_address = child.address()
# The following is the same node as the child:
copy_of_child = tree.node(child_address)
print(f" Child address: {copy_of_child.address()}")
```
If a query point were to belong to the dataset that the tree was constructed from, but was never selected as a routing node, then it would end up at a particular leaf node. This leaf node is deterministic (given the pre-built tree). The path for the query point is the addresses of the nodes from the root node to this leaf.
```
# Root-to-leaf path a query point would take, with the label summary of
# each node along the way.
path = tree.path(point)
print(path)
print("Summary of the labels of points covered by the node at address")
for dist, address in path:
node = tree.node(address)
label_summary = node.label_summary()
print(f"Address: {address} \t Summary: {label_summary}")
```
We can also query for the path of known points, by index in the original dataset.
```
# Same, but for a point already in the training set, addressed by its
# index in the original dataset.
path = tree.known_path(40000)
print("Summary of the labels of points covered by the node at address")
for dist, address in path:
node = tree.node(address)
label_summary = node.label_summary()
print(f"Address: {address} \t Summary: {label_summary}")
```
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
import random
from torch import optim
from tqdm import tqdm
import numpy as np
import pandas as pd
import string
import torch
import re
from torch.utils.data import Dataset, DataLoader
from transformers import AutoTokenizer
from transformers import BertConfig, BertTokenizer
from nltk.tokenize import word_tokenize
class NerIDCardDataset(Dataset):
"""Token-classification dataset for ID-card (KTP) NER.

Reads a CSV with ``sentence_idx``, ``word`` and ``tag`` columns and yields
(subword ids, subword-to-word index map, word-level label ids, raw words).
"""
# Static constant variable: BILOU-style tag set for the ID-card fields.
LABELS = [
'U-FLD_PROV', 'B-VAL_PROV', 'L-VAL_PROV', 'U-FLD_KAB', 'U-VAL_KAB',
'U-FLD_NIK', 'U-VAL_NIK', 'U-FLD_NAMA', 'B-VAL_NAMA', 'L-VAL_NAMA',
'B-FLD_TTL', 'L-FLD_TTL', 'B-VAL_TTL', 'L-VAL_TTL', 'B-FLD_GDR',
'L-FLD_GDR', 'U-VAL_GDR', 'B-FLD_GLD', 'L-FLD_GLD', 'U-VAL_GLD',
'U-FLD_ADR', 'B-VAL_ADR', 'I-VAL_ADR', 'L-VAL_ADR', 'U-FLD_RTW',
'U-VAL_RTW', 'U-FLD_KLH', 'U-VAL_KLH', 'U-FLD_KCM', 'U-VAL_KCM',
'U-FLD_RLG', 'U-VAL_RLG', 'B-FLD_KWN', 'L-FLD_KWN', 'B-VAL_KWN',
'L-VAL_KWN', 'U-FLD_KRJ', 'U-VAL_KRJ', 'U-FLD_WRG', 'U-VAL_WRG',
'B-FLD_BLK', 'L-FLD_BLK', 'B-VAL_BLK', 'L-VAL_BLK', 'U-VAL_SGP',
'U-VAL_SGD', 'B-VAL_KAB', 'L-VAL_KAB', 'U-VAL_NAMA', 'B-VAL_KLH',
'L-VAL_KLH', 'B-VAL_KRJ', 'I-VAL_KRJ', 'L-VAL_KRJ', 'B-VAL_SGP',
'L-VAL_SGP', 'I-VAL_TTL', 'L-VAL_KCM', 'B-VAL_KCM', 'U-VAL_KWN',
'U-VAL_PROV', 'I-VAL_NAMA', 'I-VAL_PROV', 'I-VAL_KAB', 'I-VAL_KCM',
'I-VAL_SGP', 'U-VAL_ADR', 'I-VAL_KLH', 'O'
]
# Bidirectional label <-> integer-id lookup tables.
LABEL2INDEX = dict((label,idx) for idx, label in enumerate(LABELS))
INDEX2LABEL = dict((idx, label) for idx, label in enumerate(LABELS))
NUM_LABELS = len(LABELS)
def __init__(self, dataset_path, tokenizer, *args, **kwargs):
self.data = self.load_dataset(dataset_path)
self.tokenizer = tokenizer
def load_dataset(self, path):
"""Parse the CSV into a list of {'sentence': [words], 'seq_label': [ids]}."""
dframe = pd.read_csv(path)
dataset, sentence, seq_label = [], [], []
# NOTE(review): assumes sentence_idx values are exactly 0..n-1 with no
# gaps; a gap would yield an empty entry and drop trailing sentences --
# confirm against the CSV.
length_sentence = len(dframe.sentence_idx.unique())
for idx in range(length_sentence):
sframe = dframe[dframe.sentence_idx == idx]
for sidx in range(len(sframe)):
line = sframe.iloc[sidx]
word = str(line.word)
label = str(line.tag)
sentence.append(word)
seq_label.append(self.LABEL2INDEX[label])
dataset.append({
'sentence': sentence,
'seq_label': seq_label
})
sentence, seq_label = [], []
return dataset
def __getitem__(self, index):
data = self.data[index]
sentence, seq_label = data['sentence'], data['seq_label']
# Add CLS token
subwords = [self.tokenizer.cls_token_id]
subword_to_word_indices = [-1] # For CLS
# Add subwords; each subword records which word it came from so word-level
# labels can be aligned with subword-level model outputs.
for word_idx, word in enumerate(sentence):
subword_list = self.tokenizer.encode(word, add_special_tokens=False)
subword_to_word_indices += [word_idx for i in range(len(subword_list))]
subwords += subword_list
# Add last SEP token
subwords += [self.tokenizer.sep_token_id]
subword_to_word_indices += [-1]
return np.array(subwords), np.array(subword_to_word_indices), np.array(seq_label), data['sentence']
def __len__(self):
return len(self.data)
class NerDataLoader(DataLoader):
"""DataLoader that pads and collates variable-length NER examples."""
def __init__(self, max_seq_len=512, *args, **kwargs):
super(NerDataLoader, self).__init__(*args, **kwargs)
self.collate_fn = self._collate_fn
self.max_seq_len = max_seq_len
def _collate_fn(self, batch):
# Each item: (subwords, subword_to_word_indices, seq_label, raw_seq).
batch_size = len(batch)
# Pad to the longest sequence in the batch, capped at self.max_seq_len.
max_seq_len = max(map(lambda x: len(x[0]), batch))
max_seq_len = min(self.max_seq_len, max_seq_len)
max_tgt_len = max(map(lambda x: len(x[2]), batch))
subword_batch = np.zeros((batch_size, max_seq_len), dtype=np.int64)
mask_batch = np.zeros((batch_size, max_seq_len), dtype=np.float32)
subword_to_word_indices_batch = np.full((batch_size, max_seq_len), -1, dtype=np.int64)
# -100 padding -- presumably the ignore index of the downstream loss;
# confirm against the training code.
seq_label_batch = np.full((batch_size, max_tgt_len), -100, dtype=np.int64)
seq_list = []
# NOTE(review): subwords are truncated to max_seq_len but seq_label is
# not, so truncated examples keep labels for words whose subwords were
# dropped -- confirm the model masks these out.
for i, (subwords, subword_to_word_indices, seq_label, raw_seq) in enumerate(batch):
subwords = subwords[:max_seq_len]
subword_to_word_indices = subword_to_word_indices[:max_seq_len]
subword_batch[i,:len(subwords)] = subwords
mask_batch[i,:len(subwords)] = 1
subword_to_word_indices_batch[i,:len(subwords)] = subword_to_word_indices
seq_label_batch[i,:len(seq_label)] = seq_label
seq_list.append(raw_seq)
return subword_batch, mask_batch, subword_to_word_indices_batch, seq_label_batch, seq_list
# Build the dataset and loader from the ID-card NER CSV with an uncased
# BERT tokenizer, then peek at one example to sanity-check the arrays.
dataset_path = 'data/idcard/ktp_ner_dataset.csv'
pretrained_model = 'bert-base-uncased'
tokenizer = AutoTokenizer.from_pretrained(pretrained_model)
trainset = NerIDCardDataset(dataset_path, tokenizer)
# NerIDCardDataset.LABEL2INDEX
for subwords, subword_to_word_indices, seq_label, sentence in trainset:
print(subwords, len(subwords))
print(subword_to_word_indices, len(subword_to_word_indices))
print(seq_label, len(seq_label))
print(sentence, len(sentence))
break
loader = NerDataLoader(dataset=trainset, batch_size=32, num_workers=0)
# %%time
# for i, (subwords, mask, subword_to_word_indices, seq_label, seq_list) in enumerate(loader):
# print(subwords, mask, subword_to_word_indices, seq_label, seq_list)
# if i == 2:
# break
```
| github_jupyter |
# 20. Transfer Learning with Inception v3
```
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import models
import torchvision.utils
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
## 20.1 Prepare Data
```
# https://pytorch.org/docs/stable/torchvision/models.html
# https://github.com/pytorch/vision/tree/master/torchvision/models
# Check the expected input size of the model before building transforms!
# Inception v3 expects 299x299 inputs, hence the crops/resizes below.
train_transform = transforms.Compose([
transforms.RandomResizedCrop(299),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(), # ToTensor : [0, 255] -> [0, 1]
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
test_transform = transforms.Compose([
transforms.Resize((299, 299)),
transforms.ToTensor(), # ToTensor : [0, 255] -> [0, 1]
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
# Folder-per-class image datasets for the squirrel/chipmunk task.
train_data = dsets.ImageFolder('data/squir_chip_data/train/', train_transform)
test_data = dsets.ImageFolder('data/squir_chip_data/val/', test_transform)
batch_size = 5
train_loader = DataLoader(train_data,
batch_size=batch_size,
shuffle=True)
test_loader = DataLoader(test_data,
batch_size=batch_size,
shuffle=True)
def imshow(img, title):
    """Render a batch of image tensors as one titled grid figure."""
    grid = torchvision.utils.make_grid(img, normalize=True)
    pixels = grid.numpy()
    plt.figure(figsize = (5, 15))
    # Convert CHW (torch) to HWC (matplotlib) channel order.
    plt.imshow(np.transpose(pixels, (1, 2, 0)))
    plt.title(title)
    plt.axis('off')
    plt.show()
# Preview one training batch.  Uses the builtin next(): DataLoader
# iterators no longer expose a .next() method (Python-2 era API).
dataiter = iter(train_loader)
images, labels = next(dataiter)
imshow(images, [train_data.classes[i] for i in labels])
```
## 20.2 Define Model
```
# Load ImageNet-pretrained Inception v3.
model = models.inception_v3(pretrained=True)
model
# Disable the auxiliary classifier so forward() returns a single output.
model.aux_logits = False
# Freeze all pretrained weights; only the new head below is trained.
for parameter in model.parameters():
parameter.requires_grad = False
# Replace the final fully connected layer with a small 2-class head.
model.fc = nn.Sequential(
nn.Linear(model.fc.in_features, 10),
nn.Linear(10, 2)
)
model = model.cuda()
loss = nn.CrossEntropyLoss()
# Optimize only the parameters that still require gradients (the new head).
optimizer = torch.optim.RMSprop(filter(lambda p: p.requires_grad, model.parameters()), lr=0.001)
```
## 20.3 Train Model
```
# Train the new classification head for 30 epochs.
num_epochs = 30
for epoch in range(num_epochs):
total_batch = len(train_data)//batch_size
for i, (batch_images, batch_labels) in enumerate(train_loader):
X = batch_images.cuda()
Y = batch_labels.cuda()
pre = model(X)
cost = loss(pre, Y)
# Standard step: clear grads, backprop, update.
optimizer.zero_grad()
cost.backward()
optimizer.step()
if (i+1) % 5 == 0:
# NOTE(review): 'lter' in the log string looks like a typo for 'Iter'.
print('Epoch [%d/%d], lter [%d/%d] Loss: %.4f'
%(epoch+1, num_epochs, i+1, total_batch, cost.item()))
```
## 20.4 Test Model
```
# Evaluate on the held-out set.  eval() fixes dropout/batch-norm behavior;
# torch.no_grad() stops autograd bookkeeping during inference (the original
# accumulated graph state for every test batch).
model.eval()
correct = 0
total = 0
with torch.no_grad():
    for images, labels in test_loader:
        images = images.cuda()
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels.cuda()).sum()
print('Accuracy of test images: %f %%' % (100 * float(correct) / total))
classes = ["Squirrel", "Chipmunk"]
# Builtin next(): iterator .next() was removed in Python 3.
images, labels = next(iter(test_loader))
outputs = model(images.cuda())
_, predicted = torch.max(outputs.data, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(5)))
title = (' '.join('%5s' % classes[labels[j]] for j in range(5)))
imshow(torchvision.utils.make_grid(images, normalize=True), title)
```
| github_jupyter |
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Reducer/min_max_reducer.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Reducer/min_max_reducer.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Reducer/min_max_reducer.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Reducer/min_max_reducer.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The magic command `%%capture` can be used to hide output from a specific cell. Uncomment these lines if you are running this notebook for the first time.
```
# %%capture
# !pip install earthengine-api
# !pip install geehydro
```
Import libraries
```
import ee
import folium
import geehydro
```
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()`
if you are running this notebook for the first time or if you are getting an authentication error.
```
# ee.Authenticate()  # uncomment on first run or after credentials expire
ee.Initialize()
```
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
```
# Folium map centered on the continental US; HYBRID basemap.
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
```
## Add Earth Engine Python script
```
# Load and filter the Sentinel-2 image collection.
collection = ee.ImageCollection('COPERNICUS/S2') \
.filterDate('2016-01-01', '2016-12-31') \
.filterBounds(ee.Geometry.Point([-81.31, 29.90]))
# Reduce the collection with a minMax reducer, then pick the min and max
# bands out of the reduced image.
extrema = collection.reduce(ee.Reducer.minMax())
# print(extrema.getInfo())
min_image = extrema.select(0)
max_image = extrema.select(1)
Map.setCenter(-81.31, 29.90, 10)
Map.addLayer(min_image, {}, 'Min image')
Map.addLayer(max_image, {}, 'Max image')
```
## Display Earth Engine data layers
```
# Enable layer control, fullscreen button and lat/lng popup, then render.
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
```
| github_jupyter |
```
import numpy as np
from pycalphad import Model, Database, calculate, equilibrium
from pycalphad.core.compiled_model import CompiledModel
import pycalphad.variables as v
from pycalphad.tests.datasets import ISSUE43_TDB
dbf = Database(ISSUE43_TDB)
# Debug-mode compiled models for every phase in the database.
models = {key: CompiledModel(dbf, ['AL', 'NI', 'CR', 'VA'], key, _debug=True) for key in dbf.phases.keys()}
# Two-phase Al-Ni-Cr equilibrium at fixed composition, 1273 K, 1 atm.
eq = equilibrium(dbf, ['AL', 'NI', 'CR', 'VA'], ['FCC_A1', 'GAMMA_PRIME'],
{v.X('AL'): .1246, v.X('CR'): 0.6, v.T: 1273, v.P: 101325},
verbose=True, model=models)
eq
eq.GM.values
# Phase-fraction-weighted overall composition.
(eq.X * eq.NP).sum(axis=-2)
# Cross-check: sum of mu_i * x_i -- presumably should match the total
# Gibbs energy above; confirm.
sum(np.squeeze(eq.MU.values) * [0.1246, 0.6, 1-(0.6+0.1246)])
eq.Y
```
Hessian mismatch
Not equal to tolerance rtol=1e-07, atol=0
(mismatch 53.125%)
x: array([[ -0.000000e+00, -0.000000e+00, 0.000000e+00, 0.000000e+00,
0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00],
[ -0.000000e+00, -5.476986e-02, -8.690206e+01, -5.472656e+01,...
y: array([[ 0.000000e+00, -6.635584e+01, -1.515135e+05, 0.000000e+00,
-2.664257e+05, 0.000000e+00, 0.000000e+00, 0.000000e+00],
[ 0.000000e+00, -6.980498e+01, -2.664257e+05, -7.424881e+01,...
[[ 0.00000000e+00 -6.63558420e+01 -1.51513479e+05 0.00000000e+00
-2.66425667e+05 0.00000000e+00 0.00000000e+00 0.00000000e+00]
[ 0.00000000e+00 -6.97502143e+01 -2.66338765e+05 -1.95222520e+01
-1.02480385e+05 1.25225654e+02 0.00000000e+00 0.00000000e+00]
[ 0.00000000e+00 1.26532444e+01 -7.03144582e+04 -1.22393038e+05
-4.33049259e+04 2.66505973e+05 0.00000000e+00 0.00000000e+00]
[ 0.00000000e+00 -8.49995534e+01 2.90506364e+04 -3.67187926e+04
-2.68563738e+05 1.02556891e+05 0.00000000e+00 0.00000000e+00]
[ 0.00000000e+00 7.53462656e+01 1.22406719e+05 2.21757348e+04
-1.97110052e+04 1.94854455e+05 0.00000000e+00 0.00000000e+00]
[ 0.00000000e+00 5.88698116e+01 3.00750789e+05 -9.22638253e+04
1.94854455e+05 3.52309661e+05 0.00000000e+00 0.00000000e+00]
[ 0.00000000e+00 7.03350099e+04 -2.48465473e+03 0.00000000e+00
0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00]
[ -2.72785265e-02 -2.91090718e+04 -1.02555731e+05 -1.39726116e+02
0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00]]
('DOF:', array([ 1.01325000e+05, 1.27300000e+03, 1.91466366e-01,
4.29294845e-01, 3.79238789e-01, 1.00000000e+00]))
[['AL', 'CR', 'NI'], ['VA']]
```
mod = Model(dbf, ['AL', 'CR', 'NI', 'VA'], 'FCC_A1')
# Mixed second derivative of the molar Gibbs energy w.r.t. T and P.
mod.GM.diff(v.T, v.P)
```
| github_jupyter |
# FloPy
### SWI2 Example 1. Rotating Interface
This example problem is the first example problem in the SWI2 documentation (http://pubs.usgs.gov/tm/6a46/) and simulates transient movement of a freshwater\seawater interface separating two density zones in a two-dimensional vertical plane. The problem domain is 250 m long, 40 m high, and 1 m wide. The aquifer is confined, storage changes are not considered (all MODFLOW stress periods are steady-state), and the top and bottom of the aquifer are horizontal and impermeable.
The domain is discretized into 50 columns, 1 row, and 1 layer, with respective cell dimensions of 5 m (`DELR`), 1 m (`DELC`), and 40 m. A constant head of 0 m is specified for column 50. The hydraulic conductivity is 2 m/d and the effective porosity (`SSZ`) is 0.2. A flow of 1 m$^3$/d of seawater is specified in column 1 and causes groundwater to flow from left to right
in the model domain.
The domain contains one freshwater zone and one seawater zone, separated by an active `ZETA` surface between the zones (`NSRF=1`) that approximates the 50-percent seawater salinity contour. A 400-day period is simulated using a constant time step of 2 days. Fluid density is represented using the stratified option (`ISTRAT=1`) and the elevation of the interface is output every 100 days (every 50 time steps). The densities, $\rho$, of the freshwater and saltwater are 1,000 and 1,025 kg/m$^3$, respectively. Hence, the dimensionless densities, $\nu$, of the freshwater and saltwater are 0.0 and 0.025, respectively. The maximum slope of the toe and tip is specified as 0.2 (`TOESLOPE=TIPSLOPE=0.2`), and default tip/toe parameters are used (`ALPHA=BETA=0.1`). Initially, the interface is at a 45$^{\circ}$ angle from (x,z) = (80,0) to (x,z) = (120,-40). The source/sink terms (`ISOURCE`) are specified to be freshwater everywhere (`ISOURCE=1`) except in cell 1 where saltwater enters the model and `ISOURCE` equals 2. A comparison of results for SWI2 and the exact Dupuit solution of Wilson and Sa Da Costa (1982) are presented below. The constant flow from left to right results in an average velocity of 0.125 m/d. The exact Dupuit solution is a rotating straight interface of which the center moves to the right with this velocity
Import `numpy` and `matplotlib`, set all figures to be inline, import `flopy.modflow` and `flopy.utils`.
```
import os
import sys
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# run installed version of flopy or add local path
try:
    import flopy
except ImportError:
    # Narrowed from a bare `except:` which would also hide unrelated errors
    # (e.g. KeyboardInterrupt or syntax errors inside flopy itself).
    # Fall back to a local source checkout two directories up.
    fpth = os.path.abspath(os.path.join('..', '..'))
    sys.path.append(fpth)
    import flopy
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('flopy version: {}'.format(flopy.__version__))
```
Define model name of your model and the location of MODFLOW executable. All MODFLOW files and output will be stored in the subdirectory defined by the workspace. Create a model named `ml` and specify that this is a MODFLOW-2005 model.
```
# Base name used for all MODFLOW input/output files of this model
modelname = 'swiex1'
#Set name of MODFLOW exe
# assumes executable is in users path statement
exe_name = 'mf2005'
if platform.system() == 'Windows':
exe_name = 'mf2005.exe'
# All model files are written into this workspace subdirectory
workspace = os.path.join('data')
#make sure workspace directory exists
if not os.path.exists(workspace):
os.makedirs(workspace)
# Create the MODFLOW-2005 model object that subsequent packages attach to
ml = flopy.modflow.Modflow(modelname, version='mf2005', exe_name=exe_name, model_ws=workspace)
```
Define the number of layers, rows and columns, and the cell size along the rows (`delr`) and along the columns (`delc`). Then create a discretization file. Specify the top and bottom of the aquifer. The heads are computed quasi-steady state (hence a steady MODFLOW run) while the interface will move. There is one stress period with a length of 400 days and 200 steps (so one step is 2 days).
```
# Grid dimensions: 1 layer x 1 row x 50 columns
nlay = 1
nrow = 1
ncol = 50
# Cell size: 5 m along the row (delr), 1 m along the column (delc)
delr = 5.
delc = 1.
# One steady stress period of 400 days split into 200 time steps (2 d each)
nper, perlen, nstp = 1, 400., 200
# Discretization package: aquifer top at 0 m, bottom at -40 m
discret = flopy.modflow.ModflowDis(ml, nlay=nlay, nrow=nrow, ncol=ncol,
delr=delr, delc=delc,
top=0, botm=-40.0,
steady=True, nper=nper, perlen=perlen, nstp=nstp)
```
All cells are active (`ibound=1`), while the last cell is fixed head (`ibound=-1`). The starting values of the head are not important, as the heads are computed every time with a steady run.
```
# ibound: 1 = active cell, -1 = constant (fixed) head
ibound = np.ones((nrow, ncol))
# Fix the head in the last column (right-hand boundary) at the starting value
ibound[0, -1] = -1
bas = flopy.modflow.ModflowBas(ml, ibound=ibound, strt=0.0)
```
Define the hydraulic conductivity. The aquifer is confined (`laytyp=0`) and the intercell hydraulic conductivity is the harmonic mean (`layavg=0`).
```
# LPF package: hk = 2 m/d, confined layer (laytyp=0), harmonic-mean interblock K (layavg=0)
lpf = flopy.modflow.ModflowLpf(ml, hk=2., laytyp=0, layavg=0)
```
Inflow on the left side of the model is 1 m$^3$/d (layer 0, row 0, column 0, discharge 1)
```
# Well package: inject 1 m^3/d in layer 0, row 0, column 0 (the left-most cell)
wel = flopy.modflow.ModflowWel(ml, stress_period_data = {0:[[0, 0, 0, 1]]} )
```
Define the output control to save heads and interface every 50 steps, and define the pcg solver with default arguments.
```
# Output control: save heads / print budget on every 50th time step
spd = {}
# istp is zero-based, so 49 is the 50th step (steps 50, 100, 150, 200)
for istp in range(49, nstp+1, 50):
spd[(0, istp)] = ['save head', 'print budget']
# reset the action list for the following step so output is not repeated
spd[(0, istp+1)] = []
oc = flopy.modflow.ModflowOc(ml, stress_period_data=spd)
# PCG solver with default settings
pcg = flopy.modflow.ModflowPcg(ml)
```
The initial interface is straight. The isource is one everywhere (inflow and outflow are fresh (zone 1)) except for the first cell (index=0), which has saltwater inflow (zone 2).
```
# Initial interface elevation: 0 m up to column 16, a 45-degree slope from
# (x,z)=(80,0) down to (120,-40) across columns 16-23, then -40 m beyond
z = np.zeros((nrow, ncol), float)
z[0, 16:24] = np.arange(-2.5, -40, -5)
z[0, 24:] = -40
z = [z] # zeta needs to be a list of arrays, one per interface surface
# Source type: 1 = freshwater everywhere, except saltwater inflow (2) in cell 0
isource = np.ones((nrow, ncol), int)
isource[0, 0] = 2
#
# SWI2 package: one interface (nsrf=1), stratified density option (istrat=1),
# dimensionless densities nu=[0, 0.025], zeta written to unit 55
swi = flopy.modflow.ModflowSwi2(ml, nsrf=1, istrat=1,
toeslope=0.2, tipslope=0.2, nu=[0, 0.025],
zeta=z, ssz=0.2, isource=isource,
nsolver=1, iswizt=55)
```
Write the MODFLOW input files and run the model
```
# Write all package files to the workspace, then run MODFLOW without console output
ml.write_input()
ml.run_model(silent=True)
```
Load the head and zeta data from the file
```
# read model heads
hfile = flopy.utils.HeadFile(os.path.join(ml.model_ws, modelname+'.hds'))
head = hfile.get_alldata()
# read model zeta (interface elevations) from the SWI2 budget-style file
zfile = flopy.utils.CellBudgetFile(os.path.join(ml.model_ws, modelname+'.zta'))
kstpkper = zfile.get_kstpkper()
zeta = []
# one ZETASRF 1 record per saved (time step, stress period) pair
for kk in kstpkper:
zeta.append(zfile.get_data(kstpkper=kk, text='ZETASRF 1')[0])
zeta = np.array(zeta)
```
Make a graph and add the solution of Wilson and Sa da Costa
```
plt.figure(figsize=(16,6))
# define x-values at cell centers and plot the interface at each saved time
x = np.arange(0, ncol*delr, delr) + delr/2.
label = ['SWI2','_','_','_'] # labels with an underscore are not added to legend
for i in range(4):
# mask cells where the interface is pinned at the aquifer top/bottom
zt = np.ma.masked_outside(zeta[i,0,0,:], -39.99999, -0.00001)
plt.plot(x, zt, 'r-', lw=1,
zorder=10, label=label[i])
# Data for the Wilson - Sa da Costa solution
# k: hydraulic conductivity, n: porosity, nu: dimensionless density
# difference, H: aquifer thickness (matching the model inputs above)
k = 2.0
n = 0.2
nu = 0.025
H = 40.0
tzero = H * n / (k * nu) / 4.0
Ltoe = np.zeros(4)
v = 0.125
t = np.arange(100, 500, 100)
label = ['Wilson and Sa Da Costa (1982)','_','_','_'] # labels with an underscore are not added to legend
# straight rotating interface, center moving right at velocity v
for i in range(4):
Ltoe[i] = H * np.sqrt(k * nu * (t[i] + tzero) / n / H)
plt.plot([100 - Ltoe[i] + v * t[i], 100 + Ltoe[i] + v * t[i]], [0, -40], '0.75',
lw=8, zorder=0, label=label[i])
# Scale figure and add legend
plt.axis('scaled')
plt.xlim(0, 250)
plt.ylim(-40, 0)
plt.legend(loc='best');
```
Use the PlotCrossSection plotting class (formerly ModelCrossSection) and its plot_surface() method to plot zeta surfaces.
```
fig = plt.figure(figsize=(16, 3))
ax = fig.add_subplot(1, 1, 1)
# Cross-section along the model's single row
modelxsect = flopy.plot.PlotCrossSection(model=ml, line={'Row': 0})
label = ['SWI2','_','_','_']
for k in range(zeta.shape[0]):
# mask interface values pinned at the top (0) or bottom (-40) of the aquifer
modelxsect.plot_surface(zeta[k, :, :, :], masked_values=[0, -40.],
color='red', lw=1, label=label[k])
linecollection = modelxsect.plot_grid()
ax.set_title('ModelCrossSection.plot_surface()')
# Data for the Wilson - Sa da Costa solution
# (same parameters as in the previous figure)
k = 2.0
n = 0.2
nu = 0.025
H = 40.0
tzero = H * n / (k * nu) / 4.0
Ltoe = np.zeros(4)
v = 0.125
t = np.arange(100, 500, 100)
label = ['Wilson and Sa Da Costa (1982)','_','_','_'] # labels with an underscore are not added to legend
for i in range(4):
Ltoe[i] = H * np.sqrt(k * nu * (t[i] + tzero) / n / H)
ax.plot([100 - Ltoe[i] + v * t[i], 100 + Ltoe[i] + v * t[i]], [0, -40], 'blue',
lw=1, zorder=0, label=label[i])
# Scale figure and add legend
ax.axis('scaled')
ax.set_xlim(0, 250)
ax.set_ylim(-40, 0)
ax.legend(loc='best');
```
Use the PlotCrossSection plotting class (formerly ModelCrossSection) and its plot_fill_between() method to fill between zeta surfaces.
```
fig = plt.figure(figsize=(16, 3))
ax = fig.add_subplot(1, 1, 1)
modelxsect = flopy.plot.PlotCrossSection(model=ml, line={'Row': 0})
# Fill the freshwater/seawater zones for the final saved time (index 3 = 400 d)
modelxsect.plot_fill_between(zeta[3, :, :, :])
linecollection = modelxsect.plot_grid()
ax.set_title('ModelCrossSection.plot_fill_between()');
```
Convert zeta surfaces to relative seawater concentrations
```
X, Y = np.meshgrid(x, [0, -40])
zc = flopy.plot.SwiConcentration(model=ml)
# divide by nu of seawater (0.025) to express conc as fraction seawater
conc = zc.calc_conc(zeta={0:zeta[3,:,:,:]}) / 0.025
print(conc[0, 0, :])
# duplicate the single-row profile so imshow has a 2-row image to draw
v = np.vstack((conc[0, 0, :], conc[0, 0, :]))
plt.imshow(v, extent=[0, 250, -40, 0], cmap='Reds')
cb = plt.colorbar(orientation='horizontal')
cb.set_label('percent seawater');
plt.contour(X, Y, v, [0.25, 0.5, 0.75], linewidths=[2, 1.5, 1], colors='black');
```
| github_jupyter |
```
def bingo(board, targetNum):
    # Mark every occurrence of targetNum on the board as -1, mutating the
    # board in place, and return the same board object.
    for row in board:
        while targetNum in row:
            row[row.index(targetNum)] = -1
    return board
def isWin(board):
for i in range(5): #rows
if board[i][0]!=-1:
continue
else:
allEqual=True
for j in range(5):
if board[i][j]!=-1:
allEqual=False
break
if allEqual:
return True
for j in range(5): #cols
if board[0][j]!=-1:
continue
else:
allEqual=True
for i in range(5):
if board[i][j]!=-1:
allEqual=False
break
if allEqual:
return True
return False
def sumOfNonTarget(board):
    # Sum of all board entries that have not been marked (-1).
    return sum(num for line in board for num in line if num != -1)
# Part 1: find the first board to win and print sum(unmarked) * drawn number
with open('1.txt') as f:
# first line of the input: comma-separated draw order
stream=list(map(int,f.readline().strip().split(',')))
print(stream)
boards=[]
# each 5x5 board is preceded by a blank separator line
while f.readline()!='':
board=[]
for lineId in range(5):
line=list(map(int,f.readline().strip().split()))
board.append(line)
boards.append(board)
# print(len(boards))
# print(boards[0])
# StopIteration is (ab)used here to break out of both nested loops at once
try:
for num in stream:
for board in boards:
bingo(board,num)
if isWin(board):
print(sumOfNonTarget(board)*num)
raise StopIteration
except StopIteration:
pass
def bingo(board, targetNum):
    # Replace every occurrence of targetNum with the marker value -1.
    # Slice assignment keeps the mutation in place, so callers holding a
    # reference to `board` (or its rows) observe the change.
    for row in board:
        row[:] = [-1 if value == targetNum else value for value in row]
    return board
def isWin(board):
    # True when some row or some column of the 5x5 board is fully marked
    # (every entry equals -1).
    rows_done = any(all(cell == -1 for cell in board[i]) for i in range(5))
    cols_done = any(all(board[i][j] == -1 for i in range(5)) for j in range(5))
    return rows_done or cols_done
def sumOfNonTarget(board):
    # Total of the still-unmarked numbers (everything except the -1 markers).
    total = 0
    for line in board:
        total += sum(filter(lambda value: value != -1, line))
    return total
# Part 2: find the LAST board to win and print sum(unmarked) * drawn number
with open('1.txt') as f:
stream=list(map(int,f.readline().strip().split(',')))
print(stream)
boards=[]
while f.readline()!='':
board=[]
for lineId in range(5):
line=list(map(int,f.readline().strip().split()))
board.append(line)
boards.append(board)
# print(len(boards))
# print(boards[0])
# winned[i] == 1 once board i has already won, so each board scores only once
winned=[0]*len(boards)
out=[]
for num in stream:
for i, board in enumerate(boards):
bingo(board,num)
if isWin(board) and winned[i]==0:
# print(num,i)
# print(sumOfNonTarget(board)*num)
# keep overwriting: the final assignment belongs to the last board to win
out=sumOfNonTarget(board)*num
winned[i]=1
# stop as soon as every board has won
if sum(winned)==len(boards):
break
print(out)
```
| github_jupyter |
# Deconvolution Validation
Parameterized notebook to analyze a single dataset in a comparison between Flowdec and DeconvolutionLab2
```
import tempfile, os, warnings, timeit, math
import pandas as pd
import numpy as np
import papermill as pm
import matplotlib.pyplot as plt
import plotnine as pn
import io as pyio
from skimage import io
from flowdec import data as fd_data
from flowdec import psf as fd_psf
from flowdec.nb import utils as fd_nb_utils
from flowdec import restoration as fd_restoration
from skimage.exposure import rescale_intensity
from skimage.measure import compare_ssim, compare_psnr, compare_nrmse
from scipy.stats import describe
from collections import OrderedDict
```
<hr>
### Parameters
```
# Default Parameters
n_iter = 25
# Path to a locally built DeconvolutionLab2 jar (filename typo is in the build)
dl2_path = os.path.join(
os.path.expanduser('~'), 'repos', 'misc', 'DeconvolutionLab2',
'target', 'DeconcolutionLab2_-0.1.0-SNAPSHOT.jar')
downsample_factor = None
crop_slice = None
# Required Parameters
dataset_name = None
# Parameters
# (this cell is the papermill parameters cell, overwritten at execution time)
crop_slice = "(slice(48, 80), slice(96, 160), slice(192, 320))"
dataset_name = "microtubules"
n_iter = 25
# # Debugging
# dataset_name = 'bars'
# downsample_factor = .25
# crop_slice = '(slice(None), slice(None), slice(None))'
# dataset_name = 'celegans-dapi'
# downsample_factor = None
# crop_slice = '(slice(39, 65), slice(300, 600), slice(300, 600))'
# dataset_name = 'microtubules'
# downsample_factor = None
# crop_slice = '(slice(48, 80), slice(96, 160), slice(192, 320))'
assert dataset_name, 'Must set "dataset_name" parameter'
if crop_slice:
# NOTE(review): eval on a parameter string is unsafe for untrusted input;
# acceptable here only because parameters come from a trusted papermill caller
crop_slice = eval(crop_slice)
```
<hr>
### Dataset Prep
```
def prep(acq):
    # Convert the raw acquisition to uint16, first cropping and optionally
    # downsampling as configured by the notebook-level globals
    # `crop_slice` and `downsample_factor`.
    if crop_slice:
        print('Applying crop slices {}'.format(crop_slice))
        acq = acq.apply(lambda v: v[crop_slice])
    if downsample_factor:
        print('Downsampling dataset (factor = {})'.format(downsample_factor))
        # rescale to [-1, 1] floats before downsampling
        acq = acq.apply(
            lambda v: rescale_intensity(v.astype(np.float32), out_range=(-1., 1.)))
        acq = fd_data.downsample_acquisition(acq, downsample_factor)

    def as_uint16(v):
        # stretch back to the full uint16 range for deconvolution
        return rescale_intensity(v.astype(np.float32), out_range=np.uint16).astype(np.uint16)

    return acq.apply(as_uint16)
# Load dataset and run prep function to convert to uint16 and crop/downsample as configured
if dataset_name.startswith('celegans'):
# names like 'celegans-dapi' select a channel of the C. elegans dataset
acq = fd_data.load_celegans_channel(dataset_name.split('-')[1].upper())
else:
# resolve the loader by name, e.g. 'bars' -> fd_data.load_bars()
# NOTE(review): eval on dataset_name is trusted-notebook-only
acq = eval('fd_data.load_' + dataset_name + '()')
print('Preparing raw dataset with shapes/types:')
print(acq.shape())
print(acq.dtype())
print()
acq = prep(acq)
print('\nPrepared dataset shapes/types:')
print(acq.shape())
print(acq.dtype())
print('\nDataset stats:')
print('\n'.join(map(str, acq.stats().items())))
# Visualize various projections/rotations of the volume to be deconvolved
fd_nb_utils.plot_rotations(acq.data)
```
<hr>
### Run Deconvolution
```
def to_uint(img):
    # Clip to the representable uint16 range before casting so that
    # out-of-range values saturate instead of wrapping around.
    lo, hi = np.iinfo(np.uint16).min, np.iinfo(np.uint16).max
    return np.clip(img, lo, hi).astype(np.uint16)
# Deconvolve `data` with PSF `kernel` using flowdec Richardson-Lucy and
# return the result as uint16; iteration count comes from the global n_iter.
# Extra kwargs are forwarded to RichardsonLucyDeconvolver (padding modes etc.).
def run_flowdec(data, kernel, **kwargs):
algo = fd_restoration.RichardsonLucyDeconvolver(data.ndim, **kwargs)
acq = fd_data.Acquisition(data=data, kernel=kernel)
img = algo.initialize().run(acq, niter=n_iter).data
return to_uint(img)
def run_dl2(data, kernel, algo='RL {}'.format(n_iter), exargs='-constraint nonnegativity'):
# Generate temporary files to store image data within
data_file = tempfile.mktemp('.tif', 'data-')
kernel_file = tempfile.mktemp('.tif', 'kernel-')
output_file = tempfile.mktemp('', 'output-')
# Ignore low-contrast image warnings from skimage
with warnings.catch_warnings():
warnings.simplefilter("ignore")
io.imsave(data_file, acq.data)
io.imsave(kernel_file, acq.kernel)
# Setup system call to execute DL2 CLI
dl2_cmd = "java -Xmx32G -cp {jar} DeconvolutionLab2 Run -image file {data}"\
" -psf file {psf} -algorithm {algo} {exargs} -out stack {output_file} -path {output_path}"\
.format(
jar=dl2_path, data=data_file, psf=kernel_file, algo=algo, exargs=exargs,
output_file=os.path.basename(output_file), output_path=os.path.dirname(output_file)
)
!$dl2_cmd
img = io.imread(output_file + '.tif')
return to_uint(img)
# Results keyed by (library, algorithm); %%capture is the IPython cell magic
# that suppresses the verbose CLI/solver output of each run.
acq_decon = OrderedDict()
%%capture
# Non-negative constraint
acq_decon[('dl2', 'rl-npad')] = run_dl2(acq.data, acq.kernel)
%%capture
# Power of 2 padding, non-negative constraint
acq_decon[('dl2', 'rl-wpad')] = run_dl2(acq.data, acq.kernel, exargs='-pad E2 E2 1 1 -constraint nonnegativity')
%%capture
# Naive-inverse Filtering
acq_decon[('dl2', 'nif')] = run_dl2(acq.data, acq.kernel, algo='NIF')
%%capture
# Regularized Richardson Lucy
acq_decon[('dl2', 'rltv')] = run_dl2(acq.data, acq.kernel, algo='RLTV 10 0.1')
%%capture
# Regularized Inverse Filtering
acq_decon[('dl2', 'rif')] = run_dl2(acq.data, acq.kernel, algo='RIF 0.001')
%%capture
# Landweber
acq_decon[('dl2', 'lw')] = run_dl2(acq.data, acq.kernel, algo='LW 25 1.0')
# Emulate DeconvolutionLab2 behavior with no padding
acq_decon[('flowdec', 'rl-npad')] = run_flowdec(acq.data, acq.kernel, start_mode='input', pad_mode='none')
# Emulate DeconvolutionLab2 behavior w/ padding noting that paddings must be with 0s rather
# than reflection of images (DL2 doesn't seem to support anything other than 0 padding)
acq_decon[('flowdec', 'rl-wpad')] = run_flowdec(acq.data, acq.kernel, start_mode='input', pad_mode='log2',
pad_min=[1,1,1], pad_fill='constant')
# Also include default flowdec settings w/ small minimum padding
acq_decon[('flowdec', 'rl-default')] = run_flowdec(acq.data, acq.kernel, pad_min=[1,1,1])
# Ensure that all results are of the same type
unique_types = {k: img.dtype for k, img in acq_decon.items()}
# Bug fix: np.unique on a dict view wraps it in a 0-d object array, which
# always has length 1, so the old assert could never fail. Compare the set
# of distinct dtypes directly instead.
assert len(set(unique_types.values())) == 1, \
    'Results have differing data types; Data type by result: {}'.format(unique_types)
print('Data type of all results:', list(unique_types.values())[0])
```
<hr>
### Visualize Results
```
# Show the raw volume, the ground truth (if available), and every
# deconvolution result as max-intensity projections along z.
imgs = [('original', acq.data)]
if acq.actual is not None:
imgs += [('actual', acq.actual)]
imgs += [(k, img) for k, img in acq_decon.items()]
ncols = 2
nrows = math.ceil(len(imgs) / ncols)
fig, axs = plt.subplots(nrows, ncols)
fig.set_size_inches(ncols * 6, nrows * 4)
axs = axs.ravel()
# hide all axes first; only the ones that receive an image are re-enabled
for ax in axs:
ax.axis('off')
for i, (k, img) in enumerate(imgs):
axs[i].imshow(img.max(axis=0))
axs[i].set_title(k)
axs[i].axis('on')
plt.tight_layout()
# Score each result against the reference image; all metrics are oriented
# so that larger is better.
# NOTE(review): compare_ssim/compare_psnr/compare_nrmse were removed in
# skimage >= 0.18 in favor of skimage.metrics.* — confirm the pinned version.
score_fns = dict(
# Increase ssim window size to avoid obvious corner case w/ nearly completely saturated results
ssim=lambda it, ip: compare_ssim(it, ip, win_size=31),
# Use negative nrmse so that larger scores are better
nrmse=lambda it, ip: -compare_nrmse(it, ip),
psnr=compare_psnr
)
# Prefer ground truth as the reference when the dataset provides it
if acq.actual is not None:
comp_type = 'ground_truth'
img_true = acq.actual
else:
comp_type = 'original'
img_true = acq.data
def get_scores(img_pred):
return {k:fn(img_true, img_pred) for k, fn in score_fns.items()}
# One row per (lib, algo) result, one column per metric
scores = pd.DataFrame(
{k: get_scores(img) for k, img in acq_decon.items()}
).T
scores.index.names = ['lib', 'algo']
scores = scores.reset_index().assign(comp_type=comp_type)
scores
# Bar chart of every metric, faceted by metric, colored by library
(
pn.ggplot(
scores.melt(id_vars=['lib', 'algo', 'comp_type']),
pn.aes(x='algo', y='value', fill='lib')
) +
pn.geom_bar(stat='identity', position='dodge') +
pn.facet_wrap('~variable', ncol=1, scales='free') +
pn.ggtitle('Restoration Score Comparisons') +
pn.theme(figure_size=(12, 8))
)
```
<hr>
### Export
```
# Serialize the scores table and record it on the notebook for the
# aggregating analysis to pick up.
# NOTE(review): pm.record was deprecated in papermill (moved to scrapbook) —
# confirm against the pinned papermill version.
buf = pyio.StringIO()
scores.to_json(buf, orient='records')
pm.record('scores', buf.getvalue())
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.