code stringlengths 2.5k 150k | kind stringclasses 1 value |
|---|---|
```
# Build the H2 molecular Hamiltonian with PySCF and map it to a qubit operator.
from quchem.Hamiltonian_Generator_Functions import *
from quchem.Graph import *
### HAMILTONIAN start
Molecule = 'H2'
# H-H bond length of 0.74 Angstrom (near the equilibrium geometry)
geometry = [('H', (0., 0., 0.)), ('H', (0., 0., 0.74))]
basis = 'sto-3g'
### Get Hamiltonian
Hamilt = Hamiltonian_PySCF(Molecule,
                           run_scf=1, run_mp2=1, run_cisd=1, run_ccsd=1, run_fci=1,
                           basis=basis,
                           multiplicity=1,
                           geometry=geometry) # normally None!
# Jordan-Wigner transformation; threshold=None keeps every Pauli term
QubitHamiltonian = Hamilt.Get_Qubit_Hamiltonian(threshold=None, transformation='JW')
### HAMILTONIAN end
#####################################
print(QubitHamiltonian)
from quchem.Ansatz_Generator_Functions import *
n_electrons=Hamilt.molecule.n_electrons
n_qubits=Hamilt.molecule.n_qubits
ansatz_obj = Ansatz(n_electrons,n_qubits)
# Hartree-Fock reference states in the occupation-number basis
print('JW ground state = ', ansatz_obj.Get_JW_HF_state_in_OCC_basis())
print('BK ground state = ', ansatz_obj.Get_BK_HF_state_in_OCC_basis())
# Build single (ia) and double (ijab) excitation operators for UCCSD
ansatz_obj.Get_ia_and_ijab_terms()
print('ia')
print(ansatz_obj.Sec_Quant_CC_ia_Fermi_ops)
print(ansatz_obj.theta_ia)
print('######')
print('ijab')
print(ansatz_obj.Sec_Quant_CC_ijab_Fermi_ops)
print(ansatz_obj.theta_ijab)
transformation='JW'
# Single Trotter step of the UCCSD unitary under the chosen transformation
ansatz_obj.UCCSD_single_trotter_step(transformation,
                                     List_FermiOps_ia=ansatz_obj.Sec_Quant_CC_ia_Fermi_ops,
                                     List_FermiOps_ijab=ansatz_obj.Sec_Quant_CC_ijab_Fermi_ops)
ansatz_obj.Second_Quant_CC_single_Trot_list_ia
ansatz_obj.Second_Quant_CC_single_Trot_list_ijab
# None Simplified
print('input state', ansatz_obj.Get_JW_HF_state_in_OCC_basis())
# Assemble the full HF + UCCSD ansatz circuit (symbolic theta parameters)
UCCSD_ansatz_Q_Circ_obj = Ansatz_Circuit(ansatz_obj.Get_JW_HF_state_in_OCC_basis(),
                                         ansatz_obj.Second_Quant_CC_single_Trot_list_ia,
                                         ansatz_obj.Second_Quant_CC_single_Trot_list_ijab)
UCCSD_ansatz_Q_Circ =UCCSD_ansatz_Q_Circ_obj.Get_Full_HF_UCCSD_QC(Theta_param_list_ia=ansatz_obj.theta_ia,
                                                                  Theta_param_list_ijab=ansatz_obj.theta_ijab,
                                                                  ia_first=True)
UCCSD_ansatz_Q_Circ
```
From Helgaker, T., P. Jorgensen, and J. Olsen (2014), *Molecular Electronic-Structure Theory* (John Wiley & Sons),
we know
$$H_{2}^{ground} = 0.9939| 1100\rangle - 0.1106| 0011\rangle$$
LOOK at:
PHYS. REV. X, **8**, 031022 (2018)
```
# Partition the qubit Hamiltonian into anti-commuting cliques for the LCU
# (linear combination of unitaries) unitary-partitioning method.
from quchem.Unitary_partitioning_LCU_method import *
Hamiltonian_graph_obj = Openfermion_Hamiltonian_Graph(QubitHamiltonian)
commutativity_flag = 'AC' ## <- defines relationship between sets!!!
plot_graph = False
Graph_colouring_strategy='largest_first'
anti_commuting_sets = Hamiltonian_graph_obj.Get_Clique_Cover_as_QubitOp(commutativity_flag, Graph_colouring_strategy=Graph_colouring_strategy, plot_graph=plot_graph)
anti_commuting_sets
import random
# Random initial angles in [0, 2*pi) for each single/double excitation term
theta_ia_random_input = [random.uniform(0, 2*np.pi) for _ in range(len(ansatz_obj.Second_Quant_CC_single_Trot_list_ia))]
theta_ijab_random_input = [random.uniform(0, 2*np.pi) for _ in range(len(ansatz_obj.Second_Quant_CC_single_Trot_list_ijab))]
input_state = ansatz_obj.Get_JW_HF_state_in_OCC_basis()
print(input_state)
# Rebuild the ansatz circuit with concrete (random) angles
UCCSD_ansatz_Q_Circ_obj = Ansatz_Circuit(ansatz_obj.Get_JW_HF_state_in_OCC_basis(),
                                         ansatz_obj.Second_Quant_CC_single_Trot_list_ia,
                                         ansatz_obj.Second_Quant_CC_single_Trot_list_ijab)
UCCSD_ansatz_Q_Circ =UCCSD_ansatz_Q_Circ_obj.Get_Full_HF_UCCSD_QC(
    Theta_param_list_ia=theta_ia_random_input,
    Theta_param_list_ijab=theta_ijab_random_input,
    ia_first=True)
UCCSD_ansatz_Q_Circ
SET_index = 7
N_index = 1
# Build the R rotation operator for one chosen anti-commuting set, then
# absorb complex phases so the ancilla amplitudes are real
R_uncorrected, Pn, gamma_l = Get_R_op_list(anti_commuting_sets[SET_index], N_index)
R_corrected_Op_list, R_corr_list, ancilla_amplitudes, l1 = absorb_complex_phases(R_uncorrected)
###
# Combine the ansatz circuit with the quantum R circuit (LCU ancillae included)
full_Q_circuit = Full_Ansatz_and_Quantum_R_circuit(Pn,
                                                   R_corrected_Op_list,
                                                   R_corr_list,
                                                   ancilla_amplitudes,
                                                   Hamilt.molecule.n_qubits ,
                                                   UCCSD_ansatz_Q_Circ)
full_Q_circuit
N_QUBITS = Hamilt.molecule.n_qubits
def GIVE_ENERGY_lin_alg(theta_ia_ijab):
    """Objective function for the classical optimizer.

    Splits the flat parameter vector into single- (ia) and double- (ijab)
    excitation angles, rebuilds the UCCSD ansatz circuit, and evaluates the
    LCU unitary-partitioning energy by linear algebra.
    """
    n_singles = len(ansatz_obj.Second_Quant_CC_single_Trot_list_ia)
    singles = theta_ia_ijab[:n_singles]
    doubles = theta_ia_ijab[n_singles:]
    circuit = UCCSD_ansatz_Q_Circ_obj.Get_Full_HF_UCCSD_QC(
        Theta_param_list_ia=singles,
        Theta_param_list_ijab=doubles,
        ia_first=True)
    experiment = VQE_Experiment_LCU_UP_lin_alg(anti_commuting_sets,
                                               circuit,
                                               N_QUBITS,  # <--- NOTE THIS
                                               N_indices_dict={7: 0, 8: 1, 9: 0, 10: 1})
    # Energy should be real up to numerical noise; keep only the real part.
    return np.array(experiment.Calc_Energy()).real
# Fresh random starting point for the classical optimisation below
theta_ia_random_input = [random.uniform(0, 2*np.pi) for _ in range(len(ansatz_obj.Second_Quant_CC_single_Trot_list_ia))]
theta_ijab_random_input = [random.uniform(0, 2*np.pi) for _ in range(len(ansatz_obj.Second_Quant_CC_single_Trot_list_ijab))]
combined_ia_ijab_random_input=[*theta_ia_random_input, *theta_ijab_random_input]
GIVE_ENERGY_lin_alg(combined_ia_ijab_random_input)  # sanity-check a single evaluation
```
## Optimizing
```
# Minimise the ansatz energy with SciPy's gradient-free Nelder-Mead method.
from quchem.Scipy_Optimizer import *
GG = Optimizer(GIVE_ENERGY_lin_alg,
               combined_ia_ijab_random_input,
               args=(),
               method='Nelder-Mead',
               jac=None,
               hess=None,
               hessp=None,
               bounds=None,
               constraints=None,
               tol=1e-8,
               display_convergence_message=True,
               display_steps=True)
GG.get_env(50)  # run up to 50 optimisation iterations
GG.plot_convergence()
plt.show()
Hamilt.molecule.fci_energy  # exact (FCI) reference energy for comparison
### optimizer
def calc_gradient_ADAM(theta_ijab_list):
    """Estimate the gradient of the energy with respect to every angle.

    For each parameter the objective is evaluated at theta +/- pi/4 and
    the difference of the two energies is taken as that gradient component.
    """
    gradients = []
    for position in range(len(theta_ijab_list)):
        centre = theta_ijab_list[position]
        shifted = list(theta_ijab_list)  # work on a copy, never the input
        shifted[position] = centre + np.pi / 4
        energy_plus = GIVE_ENERGY_lin_alg(shifted)
        shifted[position] = centre - np.pi / 4
        energy_minus = GIVE_ENERGY_lin_alg(shifted)
        gradients.append(energy_plus - energy_minus)
    return np.array(gradients)
# Adam hyper-parameters (standard beta/epsilon defaults, 0.1 learning rate)
custom_optimizer_DICT = {'learning_rate': 0.1, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-8,
                         'delta': 1e-8, 'maxfev': 15000}
GG = Optimizer(GIVE_ENERGY_lin_alg,
               combined_ia_ijab_random_input,
               args=(),
               method=_minimize_Adam,   # custom Adam minimiser from quchem
               jac=calc_gradient_ADAM,  # finite-shift gradient defined above
               hess=None,
               hessp=None,
               bounds=None,
               constraints=None,
               tol=1e-20,
               display_convergence_message=True,
               display_steps=True,
               custom_optimizer_DICT=custom_optimizer_DICT)
GG.get_env(50)  # run up to 50 optimisation iterations
GG.plot_convergence()
plt.show()
```
# Reduced Ansatz
From PHYS. REV. X, **8**, 031022 (2018):
$$U = e^{-i \theta Y_{0} X_{1} X_{2} X_{3}}$$
- when acting on $| 1100 \rangle_{HF-JW-STATE}$
$$U | \psi_{HF}\rangle = | \psi_{UCCSD}\rangle$$
to do this in Q.C the following circuit is employed:
```
######### Ansatz circuit
from quchem.Simulating_Quantum_Circuit import *
from quchem.Ansatz_Generator_Functions import *
from openfermion.ops import QubitOperator
def H2_ansatz(theta):
    """Reduced H2 ansatz: prepare |1100> then apply exp(-i*theta*Y0 X1 X2 X3)."""
    hf_ops = [cirq.X.on(cirq.LineQubit(q)) for q in (0, 1)]
    exp_gate = full_exponentiated_PauliWord_circuit(QubitOperator('Y0 X1 X2 X3', -1j), theta)
    qubits = cirq.LineQubit.range(exp_gate.num_qubits())
    ucc_circuit = cirq.Circuit(cirq.decompose_once(exp_gate(*qubits)))
    return cirq.Circuit([*hf_ops, *ucc_circuit.all_operations()])
H2_ansatz(np.pi)
ciruict = H2_ansatz(np.pi)
len(list(ciruict.all_operations()))  # gate count of the reduced ansatz
SET_index = 7
N_index = 1
ansatz = H2_ansatz(np.pi)
# LCU reduction operator for the chosen anti-commuting set; absorb complex
# phases so the ancilla amplitudes are real
R_uncorrected, Pn, gamma_l = Get_R_op_list(anti_commuting_sets[SET_index], N_index)
R_corrected_Op_list, R_corr_list, ancilla_amplitudes, l1 = absorb_complex_phases(R_uncorrected)
###
# Combine the reduced ansatz with the quantum R circuit
Q_circuit = Full_Q_Circuit(Pn,
                           R_corrected_Op_list,
                           R_corr_list,
                           ancilla_amplitudes,
                           Hamilt.molecule.n_qubits ,
                           ansatz)
Q_circuit
# print(Q_circuit.to_qasm())/
list(Q_circuit.all_operations())
list(Q_circuit.all_operations())[-3].qubits
Q_circuit.final_wavefunction().shape  # includes system + ancilla qubits
######### Ansatz circuit
from quchem.Simulating_Quantum_Circuit import *
from quchem.Ansatz_Generator_Functions import *
def H2_ansatz(theta):
    """Return the HF + single-excitation circuit for H2 at angle ``theta``.

    Prepares |1100> with two X gates, then appends the decomposition of
    exp(-i*theta*Y0 X1 X2 X3).
    """
    circuit = cirq.Circuit()
    circuit.append([cirq.X.on(cirq.LineQubit(0)), cirq.X.on(cirq.LineQubit(1))])
    gate_obj = full_exponentiated_PauliWord_circuit(QubitOperator('Y0 X1 X2 X3', -1j), theta)
    decomposed = cirq.decompose_once(gate_obj(*cirq.LineQubit.range(gate_obj.num_qubits())))
    circuit.append(cirq.Circuit(decomposed).all_operations())
    return circuit
cirq_circuit = H2_ansatz(0.12)  # example circuit at a small fixed angle
print(cirq_circuit)
def Calc_E(H, ansatz_circuit):
    """Return the energy expectation <psi|H|psi> of the ansatz state.

    Parameters
    ----------
    H : openfermion QubitOperator
        Qubit Hamiltonian whose expectation value is evaluated.
    ansatz_circuit : cirq.Circuit
        Circuit preparing |psi> when applied to the all-zero basis state.
    """
    H_mat = get_sparse_operator(H).todense()
    ansatz_unitary = ansatz_circuit.unitary()
    # Derive the qubit count from the circuit unitary instead of hard-coding 4,
    # so the helper works for any system size.
    n_qubits = int(np.log2(ansatz_unitary.shape[0]))
    # |00...0> as a column vector built by Kronecker products of |0>
    input_state = reduce(np.kron, [np.array([[1], [0]]) for _ in range(n_qubits)])
    ansatz_ket = ansatz_unitary.dot(input_state)
    ansatz_bra = ansatz_ket.conj().T
    # <psi|H|psi> is a 1x1 matrix; .item(0) extracts the scalar
    return np.dot(ansatz_bra, H_mat.dot(ansatz_ket)).item(0)
# Scan theta over [0, 2*pi) and record the ansatz energy at each angle
E_list=[]
theta_list = np.arange(0,np.pi*2, 0.1)
for theta in theta_list:
    Energy = Calc_E(QubitHamiltonian, H2_ansatz(theta))
    E_list.append(Energy)
import matplotlib.pyplot as plt
%matplotlib notebook
fig, ax = plt.subplots()
ax.plot(theta_list, E_list, color='k', label='standard VQE', linestyle='-')
ax.set(xlabel='theta', ylabel='E| / Ha')
# ,title='Scaling of methods')
ax.grid()
plt.legend()
plt.show()
```
| github_jupyter |
# Model Training (Elasticsearch LTR)
We train a LambdaMart model using [RankLib](https://sourceforge.net/p/lemur/wiki/RankLib%20How%20to%20use/) and upload the trained model to Elasticsearch.
```
import json
import os
import requests
# Location of the RankLib-trained model and the local Elasticsearch endpoint
DATA_DIR = "../../data"
MODEL_FILE = os.path.join(DATA_DIR, "es_lambdamart_model.txt")
ES_URL = "http://localhost:9200/"
```
## Train Model with RankLib
Command is as follows:
java -jar RankLib-2.1-patched.jar \
-train ../data/es_features_train.txt \
-test ../data/es_features_test.txt \
-validate ../data/es_features_val.txt \
-ranker 6 \
-metric2t NDCG@10 \
-metric2T ERR@10 \
    -save ../data/es_lambdamart_model.txt
Console output is shown below:
[+] General Parameters:
Training data: ../data/es_features_train.txt
Test data: ../data/es_features_test.txt
Validation data: ../data/es_features_val.txt
Feature vector representation: Dense.
Ranking method: LambdaMART
Feature description file: Unspecified. All features will be used.
Train metric: NDCG@10
Test metric: ERR@10
Highest relevance label (to compute ERR): 4
Feature normalization: No
Model file: ../data/es_lambdamart_model.txt
[+] LambdaMART's Parameters:
No. of trees: 1000
No. of leaves: 10
No. of threshold candidates: 256
Learning rate: 0.1
Stop early: 100 rounds without performance gain on validation data
Reading feature file [../data/es_features_train.txt]... [Done.]
(12 ranked lists, 1200 entries read)
Reading feature file [../data/es_features_val.txt]... [Done.]
(3 ranked lists, 300 entries read)
Reading feature file [../data/es_features_test.txt]... [Done.]
(5 ranked lists, 480 entries read)
Initializing... [Done]
---------------------------------
Training starts...
---------------------------------
#iter | NDCG@10-T | NDCG@10-V |
---------------------------------
1 | 0.844 | 0.844 |
2 | 0.8652 | 0.8652 |
3 | 0.8652 | 0.8652 |
4 | 0.8652 | 0.8652 |
5 | 0.8652 | 0.8652 |
6 | 0.8652 | 0.8652 |
7 | 0.8652 | 0.8652 |
8 | 0.8652 | 0.8652 |
9 | 0.8652 | 0.8652 |
10 | 0.8652 | 0.8652 |
11 | 0.8652 | 0.8652 |
12 | 0.8652 | 0.8652 |
13 | 0.8997 | 0.8997 |
14 | 0.8997 | 0.8997 |
15 | 0.9011 | 0.9011 |
16 | 0.9011 | 0.9011 |
17 | 0.9028 | 0.9028 |
18 | 0.9028 | 0.9028 |
19 | 0.9373 | 0.9373 |
20 | 0.9373 | 0.9373 |
21 | 0.9373 | 0.9373 |
22 | 0.9435 | 0.9435 |
23 | 0.9607 | 0.9607 |
24 | 0.9607 | 0.9607 |
25 | 0.978 | 0.978 |
26 | 0.9801 | 0.9801 |
27 | 0.9865 | 0.9865 |
28 | 0.9917 | 0.9917 |
29 | 0.9917 | 0.9917 |
30 | 0.9917 | 0.9917 |
31 | 0.9917 | 0.9917 |
32 | 0.9917 | 0.9917 |
33 | 1.0 | 1.0 |
34 | 1.0 | 1.0 |
35 | 1.0 | 1.0 |
36 | 1.0 | 1.0 |
37 | 1.0 | 1.0 |
38 | 1.0 | 1.0 |
39 | 1.0 | 1.0 |
40 | 1.0 | 1.0 |
41 | 1.0 | 1.0 |
42 | 1.0 | 1.0 |
43 | 1.0 | 1.0 |
44 | 1.0 | 1.0 |
45 | 1.0 | 1.0 |
46 | 1.0 | 1.0 |
47 | 1.0 | 1.0 |
48 | 1.0 | 1.0 |
49 | 1.0 | 1.0 |
50 | 1.0 | 1.0 |
51 | 1.0 | 1.0 |
52 | 1.0 | 1.0 |
53 | 1.0 | 1.0 |
54 | 1.0 | 1.0 |
55 | 1.0 | 1.0 |
56 | 1.0 | 1.0 |
57 | 1.0 | 1.0 |
58 | 1.0 | 1.0 |
59 | 1.0 | 1.0 |
60 | 1.0 | 1.0 |
61 | 1.0 | 1.0 |
62 | 1.0 | 1.0 |
63 | 1.0 | 1.0 |
64 | 1.0 | 1.0 |
65 | 1.0 | 1.0 |
66 | 1.0 | 1.0 |
67 | 1.0 | 1.0 |
68 | 1.0 | 1.0 |
69 | 1.0 | 1.0 |
70 | 1.0 | 1.0 |
71 | 1.0 | 1.0 |
72 | 1.0 | 1.0 |
73 | 1.0 | 1.0 |
74 | 1.0 | 1.0 |
75 | 1.0 | 1.0 |
76 | 1.0 | 1.0 |
77 | 1.0 | 1.0 |
78 | 1.0 | 1.0 |
79 | 1.0 | 1.0 |
80 | 1.0 | 1.0 |
81 | 1.0 | 1.0 |
82 | 1.0 | 1.0 |
83 | 1.0 | 1.0 |
84 | 1.0 | 1.0 |
85 | 1.0 | 1.0 |
86 | 1.0 | 1.0 |
87 | 1.0 | 1.0 |
88 | 1.0 | 1.0 |
89 | 1.0 | 1.0 |
90 | 1.0 | 1.0 |
91 | 1.0 | 1.0 |
92 | 1.0 | 1.0 |
93 | 1.0 | 1.0 |
94 | 1.0 | 1.0 |
95 | 1.0 | 1.0 |
96 | 1.0 | 1.0 |
97 | 1.0 | 1.0 |
98 | 1.0 | 1.0 |
99 | 1.0 | 1.0 |
100 | 1.0 | 1.0 |
101 | 1.0 | 1.0 |
102 | 1.0 | 1.0 |
103 | 1.0 | 1.0 |
104 | 1.0 | 1.0 |
105 | 1.0 | 1.0 |
106 | 1.0 | 1.0 |
107 | 1.0 | 1.0 |
108 | 1.0 | 1.0 |
109 | 1.0 | 1.0 |
110 | 1.0 | 1.0 |
111 | 1.0 | 1.0 |
112 | 1.0 | 1.0 |
113 | 1.0 | 1.0 |
114 | 1.0 | 1.0 |
115 | 1.0 | 1.0 |
116 | 1.0 | 1.0 |
117 | 1.0 | 1.0 |
118 | 1.0 | 1.0 |
119 | 1.0 | 1.0 |
120 | 1.0 | 1.0 |
121 | 1.0 | 1.0 |
122 | 1.0 | 1.0 |
123 | 1.0 | 1.0 |
124 | 1.0 | 1.0 |
125 | 1.0 | 1.0 |
126 | 1.0 | 1.0 |
127 | 1.0 | 1.0 |
128 | 1.0 | 1.0 |
129 | 1.0 | 1.0 |
130 | 1.0 | 1.0 |
131 | 1.0 | 1.0 |
132 | 1.0 | 1.0 |
133 | 1.0 | 1.0 |
134 | 1.0 | 1.0 |
---------------------------------
Finished sucessfully.
NDCG@10 on training data: 1.0
NDCG@10 on validation data: 1.0
---------------------------------
ERR@10 on test data: 2.1856
Model saved to: ../data/es_lambdamart_model.txt
## Upload Trained Model
```
# Read the RankLib model definition and register it with the Elasticsearch
# LTR plugin under the "myFeatures" feature set.
with open(MODEL_FILE, "r") as model_file:
    model_def = model_file.read()
data = {
    "model": {
        "name": "es_lambdamart_model",
        "model": {
            "type": "model/ranklib",
            "definition": model_def
        }
    }
}
# requests' json= kwarg serialises the payload and sets the
# "Content-Type: application/json" header for us, replacing the manual
# json.dumps(...) + explicit headers dict.
resp = requests.post(ES_URL + "_ltr/_featureset/myFeatures/_createmodel",
                     json=data)
print(resp.text)
```
| github_jupyter |
# Artificial Intelligence Nanodegree
## Convolutional Neural Networks
## Project: Write an Algorithm for a Dog Identification App
---
In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.
The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this IPython notebook.
---
### Why We're Here
In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!).

In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience!
### The Road Ahead
We break the notebook into separate steps. Feel free to use the links below to navigate the notebook.
* [Step 0](#step0): Import Datasets
* [Step 1](#step1): Detect Humans
* [Step 2](#step2): Detect Dogs
* [Step 3](#step3): Create a CNN to Classify Dog Breeds (from Scratch)
* [Step 4](#step4): Use a CNN to Classify Dog Breeds (using Transfer Learning)
* [Step 5](#step5): Create a CNN to Classify Dog Breeds (using Transfer Learning)
* [Step 6](#step6): Write your Algorithm
* [Step 7](#step7): Test Your Algorithm
---
<a id='step0'></a>
## Step 0: Import Datasets
### Import Dog Dataset
In the code cell below, we import a dataset of dog images. We populate a few variables through the use of the `load_files` function from the scikit-learn library:
- `train_files`, `valid_files`, `test_files` - numpy arrays containing file paths to images
- `train_targets`, `valid_targets`, `test_targets` - numpy arrays containing onehot-encoded classification labels
- `dog_names` - list of string-valued dog breed names for translating labels
```
from sklearn.datasets import load_files
from keras.utils import np_utils
import numpy as np
from glob import glob
# define function to load train, test, and validation datasets
def load_dataset(path):
    """Load an image folder and return (file paths, one-hot target matrix)."""
    bunch = load_files(path)
    files = np.array(bunch['filenames'])
    # 133 dog breeds -> one-hot vectors of length 133
    targets = np_utils.to_categorical(np.array(bunch['target']), 133)
    return files, targets
# load train, test, and validation datasets
train_files, train_targets = load_dataset('dogImages/train')
valid_files, valid_targets = load_dataset('dogImages/valid')
test_files, test_targets = load_dataset('dogImages/test')
# load list of dog names (slice strips the 'dogImages/train/' prefix and trailing '/')
dog_names = [item[20:-1] for item in sorted(glob("dogImages/train/*/"))]
# print statistics about the dataset
print('There are %d total dog categories.' % len(dog_names))
print('There are %s total dog images.\n' % len(np.hstack([train_files, valid_files, test_files])))
print('There are %d training dog images.' % len(train_files))
print('There are %d validation dog images.' % len(valid_files))
print('There are %d test dog images.'% len(test_files))
```
### Import Human Dataset
In the code cell below, we import a dataset of human images, where the file paths are stored in the numpy array `human_files`.
```
import random
random.seed(8675309)  # fixed seed so the shuffle is reproducible
# load filenames in shuffled human dataset
human_files = np.array(glob("lfw/*/*"))
random.shuffle(human_files)
# print statistics about the dataset
print('There are %d total human images.' % len(human_files))
```
---
<a id='step1'></a>
## Step 1: Detect Humans
We use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory.
In the next code cell, we demonstrate how to use this detector to find human faces in a sample image.
```
import cv2
import matplotlib.pyplot as plt
%matplotlib inline
# extract pre-trained face detector
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')
# load color (BGR) image
img = cv2.imread(human_files[3])
# convert BGR image to grayscale (the cascade classifier works on grayscale)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# find faces in image
faces = face_cascade.detectMultiScale(gray)
# print number of faces detected in the image
print('Number of faces detected:', len(faces))
# get bounding box for each detected face: (x, y) is the top-left corner,
# (w, h) the width and height
for (x,y,w,h) in faces:
    # add bounding box to color image
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
# convert BGR image to RGB for plotting (matplotlib expects RGB)
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# display the image, along with bounding box
plt.imshow(cv_rgb)
plt.show()
```
Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter.
In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box.
### Write a Human Face Detector
We can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below.
```
# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
    """Return True when the Haar cascade finds at least one face in the image."""
    grayscale = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2GRAY)
    detections = face_cascade.detectMultiScale(grayscale)
    return len(detections) > 0
```
### (IMPLEMENTATION) Assess the Human Face Detector
__Question 1:__ Use the code cell below to test the performance of the `face_detector` function.
- What percentage of the first 100 images in `human_files` have a detected human face?
- What percentage of the first 100 images in `dog_files` have a detected human face?
Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`.
__Answer:__
1. The percentage of the first 100 images in `human_files` with a detected human face is 100%. That's awesome.
2. The percentage of the first 100 images in `dog_files` with a detected human face is 12%. That's quite good but not perfect; I was expecting 0%.
```
human_files_short = human_files[:100]
dog_files_short = train_files[:100]
# Do NOT modify the code above this line.
## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
count_humans_human_images = 0
count_humans_dog_images = 0
# face_detector returns a bool; summing booleans counts the detections,
# and with 100 images the count equals the percentage
for image_path in human_files_short:
    count_humans_human_images += face_detector(image_path)
print(count_humans_human_images) # percentage of the first 100 images in human_files have a detected human face.
for image_path in dog_files_short:
    count_humans_dog_images += face_detector(image_path)
print(count_humans_dog_images) #What percentage of the first 100 images in dog_files have a detected human face.
```
__Question 2:__ This algorithmic choice necessitates that we communicate to the user that we accept human images only when they provide a clear view of a face (otherwise, we risk having unneccessarily frustrated users!). In your opinion, is this a reasonable expectation to pose on the user? If not, can you think of a way to detect humans in images that does not necessitate an image with a clearly presented face?
__Answer:__
Yes, our algorithmic choice necessitates that we communicate to the user that we accept human images only. It is important to notify the user of this; otherwise they might provide a dog face, have our algorithm detect it as a human, and obviously be frustrated. Rather than requiring a clearly presented face, I would use a deep-learning CNN model trained on many human images — covering rotational and translational variations — so that a clearly presented face is not strictly required. A CNN is what I would go for.
---
<a id='step2'></a>
## Step 2: Detect Dogs
In this section, we use a pre-trained [ResNet-50](http://ethereon.github.io/netscope/#/gist/db945b393d40bfa26006) model to detect dogs in images. Our first line of code downloads the ResNet-50 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). Given an image, this pre-trained ResNet-50 model returns a prediction (derived from the available categories in ImageNet) for the object that is contained in the image.
```
from keras.applications.resnet50 import ResNet50
# define ResNet50 model with ImageNet-pretrained weights
ResNet50_model = ResNet50(weights='imagenet')
```
### Pre-process the Data
When using TensorFlow as backend, Keras CNNs require a 4D array (which we'll also refer to as a 4D tensor) as input, with shape
$$
(\text{nb_samples}, \text{rows}, \text{columns}, \text{channels}),
$$
where `nb_samples` corresponds to the total number of images (or samples), and `rows`, `columns`, and `channels` correspond to the number of rows, columns, and channels for each image, respectively.
The `path_to_tensor` function below takes a string-valued file path to a color image as input and returns a 4D tensor suitable for supplying to a Keras CNN. The function first loads the image and resizes it to a square image that is $224 \times 224$ pixels. Next, the image is converted to an array, which is then resized to a 4D tensor. In this case, since we are working with color images, each image has three channels. Likewise, since we are processing a single image (or sample), the returned tensor will always have shape
$$
(1, 224, 224, 3).
$$
The `paths_to_tensor` function takes a numpy array of string-valued image paths as input and returns a 4D tensor with shape
$$
(\text{nb_samples}, 224, 224, 3).
$$
Here, `nb_samples` is the number of samples, or number of images, in the supplied array of image paths. It is best to think of `nb_samples` as the number of 3D tensors (where each 3D tensor corresponds to a different image) in your dataset!
```
from keras.preprocessing import image
from tqdm import tqdm
def path_to_tensor(img_path):
    """Load one image file as a (1, 224, 224, 3) Keras-ready 4D tensor."""
    # loads RGB image as PIL.Image.Image type, resized to 224x224
    pil_img = image.load_img(img_path, target_size=(224, 224))
    # (224, 224, 3) array; prepend a batch axis to get (1, 224, 224, 3)
    arr = image.img_to_array(pil_img)
    return arr[np.newaxis, ...]
def paths_to_tensor(img_paths):
    """Stack per-image 4D tensors into one (n_samples, 224, 224, 3) tensor."""
    return np.vstack([path_to_tensor(p) for p in tqdm(img_paths)])
```
### Making Predictions with ResNet-50
Getting the 4D tensor ready for ResNet-50, and for any other pre-trained model in Keras, requires some additional processing. First, the RGB image is converted to BGR by reordering the channels. All pre-trained models have the additional normalization step that the mean pixel (expressed in RGB as $[103.939, 116.779, 123.68]$ and calculated from all pixels in all images in ImageNet) must be subtracted from every pixel in each image. This is implemented in the imported function `preprocess_input`. If you're curious, you can check the code for `preprocess_input` [here](https://github.com/fchollet/keras/blob/master/keras/applications/imagenet_utils.py).
Now that we have a way to format our image for supplying to ResNet-50, we are now ready to use the model to extract the predictions. This is accomplished with the `predict` method, which returns an array whose $i$-th entry is the model's predicted probability that the image belongs to the $i$-th ImageNet category. This is implemented in the `ResNet50_predict_labels` function below.
By taking the argmax of the predicted probability vector, we obtain an integer corresponding to the model's predicted object class, which we can identify with an object category through the use of this [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
```
from keras.applications.resnet50 import preprocess_input, decode_predictions
def ResNet50_predict_labels(img_path):
    """Return the ImageNet class index ResNet-50 predicts for the image."""
    # preprocess_input handles RGB->BGR reordering and mean-pixel subtraction
    tensor = preprocess_input(path_to_tensor(img_path))
    probabilities = ResNet50_model.predict(tensor)
    return np.argmax(probabilities)
```
### Write a Dog Detector
While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained ResNet-50 model, we need only check if the `ResNet50_predict_labels` function above returns a value between 151 and 268 (inclusive).
We use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
```
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    """Return True when ResNet-50 predicts a dog class for the image.

    ImageNet class indices 151-268 (inclusive) are the dog breeds, from
    'Chihuahua' to 'Mexican hairless'.
    """
    prediction = ResNet50_predict_labels(img_path)
    # chained comparison instead of bitwise '&' on boolean comparisons
    return 151 <= prediction <= 268
```
### (IMPLEMENTATION) Assess the Dog Detector
__Question 3:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?
__Answer:__
1. The percentage of the images in `human_files_short` with a detected dog is 0%.
2. The percentage of the images in `dog_files_short` with a detected dog is 100%.
This is a fantastic job.
```
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
count_dogs_human_images = 0
count_dogs_dog_images = 0
# dog_detector returns a bool, so summing counts detections; with 100
# images per set the count equals the percentage
for image_path in human_files_short:
    count_dogs_human_images += dog_detector(image_path)
print(count_dogs_human_images) # Percentage of the images in human_files_short have a detected dog
for image_path in dog_files_short:
    count_dogs_dog_images += dog_detector(image_path)
print(count_dogs_dog_images) # Percentage of the images in dog_files_short have a detected dog?
```
---
<a id='step3'></a>
## Step 3: Create a CNN to Classify Dog Breeds (from Scratch)
Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 1%. In Step 5 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.
Be careful with adding too many trainable layers! More parameters means longer training, which means you are more likely to need a GPU to accelerate the training process. Thankfully, Keras provides a handy estimate of the time that each epoch is likely to take; you can extrapolate this estimate to figure out how long it will take for your algorithm to train.
We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have great difficulty in distinguishing between a Brittany and a Welsh Springer Spaniel.
Brittany | Welsh Springer Spaniel
- | -
<img src="images/Brittany_02625.jpg" width="100"> | <img src="images/Welsh_springer_spaniel_08203.jpg" width="200">
It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).
Curly-Coated Retriever | American Water Spaniel
- | -
<img src="images/Curly-coated_retriever_03896.jpg" width="200"> | <img src="images/American_water_spaniel_00648.jpg" width="200">
Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.
Yellow Labrador | Chocolate Labrador | Black Labrador
- | -
<img src="images/Labrador_retriever_06457.jpg" width="150"> | <img src="images/Labrador_retriever_06455.jpg" width="240"> | <img src="images/Labrador_retriever_06449.jpg" width="220">
We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imabalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%.
Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!
### Pre-process the Data
We rescale the images by dividing every pixel in every image by 255.
```
from PIL import ImageFile
# Allow PIL to load images with truncated data; some images in the dog
# dataset are incomplete and would otherwise raise an IOError on load.
ImageFile.LOAD_TRUNCATED_IMAGES = True
# pre-process the data for Keras
# paths_to_tensor (defined earlier in the notebook) turns a list of image
# paths into a 4D tensor; dividing by 255 rescales pixels from [0, 255]
# to [0, 1] — presumably (n_images, 224, 224, 3), see the model input below.
train_tensors = paths_to_tensor(train_files).astype('float32')/255
valid_tensors = paths_to_tensor(valid_files).astype('float32')/255
test_tensors = paths_to_tensor(test_files).astype('float32')/255
```
### (IMPLEMENTATION) Model Architecture
Create a CNN to classify dog breed. At the end of your code cell block, summarize the layers of your model by executing the line:
model.summary()
We have imported some Python modules to get you started, but feel free to import as many modules as you need. If you end up getting stuck, here's a hint that specifies a model that trains relatively fast on CPU and attains >1% test accuracy in 5 epochs:

__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. If you chose to use the hinted architecture above, describe why you think that CNN architecture should work well for the image classification task.
__Answer:__
1. First, I started off with a convolutional layer with 16 filters, a window size of 2, and a `relu` activation function. As this is the first layer, I gave it an `input_shape` parameter as well. I assumed that this layer might find small things like `edges`, etc.
2.Next,to reduce number of features, i added a Max-pooling layer of size 2.
3.Next, i added another convolutional layer with 32 filters with same strides and window size as the first convolutional layer.I assumed this layer might find things like shapes,lines,circles etc.
4.Next, i added a Max-pooling layer with the same reason,to decrease number of parameters so that later, my model won't have any time problem while training.
5.Next, i added another convolutional layer with 64 filters with same strides and window size as the first convolutional layer.I assumed this layer might find pretty complex structures.
6.Next,i added a Max-Pooling layer to decrease number of parameters.
7. Finally, I flattened the final layer so I could connect it to a dense layer with 133 nodes — 133 because we have 133 different classes of dogs in our dataset.
8. My last layer has a `softmax` activation function, so I get probabilities for each class, i.e. how likely each class is for the given image.
This is about my CNN architecture. I haven't used the one shown in the image above. CNNs actually perform very well for image classification tasks. One of the reasons is that they take into account the information surrounding a particular part of an image, i.e. they learn how different parts of an image that are close together relate to each other. This is a weakness of an **MLP**.
CNN will learn to recognize components of an image (e.g., lines, curves, etc.) and then learn to combine these components to recognize larger structures
```
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Dropout, Flatten, Dense
from keras.models import Sequential

# CNN trained from scratch: three Conv/MaxPool stages that deepen the
# channels (16 -> 32 -> 64) while shrinking the spatial dimensions,
# followed by a softmax classifier over the 133 dog breeds.
model = Sequential()
# Input images are 224x224 RGB tensors (see the pre-processing cell).
model.add(Conv2D(filters=16,kernel_size=2,strides=1,activation='relu',input_shape=(224,224,3)))
model.add(MaxPooling2D(2))
model.add(Conv2D(filters=32,kernel_size=2,strides=1,activation='relu'))
# NOTE(review): pool size 3 here and below differs from the pool size 2
# described in the answer text above — confirm which was intended.
model.add(MaxPooling2D(3))
model.add(Conv2D(filters=64,kernel_size=2,strides=1,activation='relu'))
model.add(MaxPooling2D(3))
model.add(Flatten())
# One output node per breed; softmax yields class probabilities.
model.add(Dense(133,activation='softmax'))
model.summary()
```
### Compile the Model
```
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
```
### (IMPLEMENTATION) Train the Model
Train your model in the code cell below. Use model checkpointing to save the model that attains the best validation loss.
You are welcome to [augment the training data](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html), but this is not a requirement.
```
from keras.callbacks import ModelCheckpoint
### TODO: specify the number of epochs that you would like to use to train the model.
epochs = 5
### Do NOT modify the code below this line.
# save_best_only=True keeps only the weights with the lowest validation
# loss seen so far, so the file always holds the best epoch.
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.from_scratch.hdf5',
verbose=1, save_best_only=True)
model.fit(train_tensors, train_targets,
validation_data=(valid_tensors, valid_targets),
epochs=epochs, batch_size=20, callbacks=[checkpointer], verbose=1)
```
### Load the Model with the Best Validation Loss
```
model.load_weights('saved_models/weights.best.from_scratch.hdf5')
```
### Test the Model
Try out your model on the test dataset of dog images. Ensure that your test accuracy is greater than 1%.
```
# get index of predicted dog breed for each image in test set
# model.predict expects a batch, so each 3D tensor is expanded to a batch of one.
dog_breed_predictions = [np.argmax(model.predict(np.expand_dims(tensor, axis=0))) for tensor in test_tensors]
# report test accuracy
# Compare predicted class indices against the argmax of the one-hot test targets.
test_accuracy = 100*np.sum(np.array(dog_breed_predictions)==np.argmax(test_targets, axis=1))/len(dog_breed_predictions)
print('Test accuracy: %.4f%%' % test_accuracy)
```
---
<a id='step4'></a>
## Step 4: Use a CNN to Classify Dog Breeds
To reduce training time without sacrificing accuracy, we show you how to train a CNN using transfer learning. In the following step, you will get a chance to use transfer learning to train your own CNN.
### Obtain Bottleneck Features
```
# Pre-computed VGG-16 bottleneck features for the train/validation/test
# splits, saved as a compressed NumPy archive.
bottleneck_features = np.load('bottleneck_features/DogVGG16Data.npz')
train_VGG16 = bottleneck_features['train']
valid_VGG16 = bottleneck_features['valid']
test_VGG16 = bottleneck_features['test']
```
### Model Architecture
The model uses the pre-trained VGG-16 model as a fixed feature extractor, where the last convolutional output of VGG-16 is fed as input to our model. We only add a global average pooling layer and a fully connected layer, where the latter contains one node for each dog category and is equipped with a softmax.
```
# Transfer-learning head: global average pooling collapses each VGG-16
# feature map to one value, then a softmax layer maps to the 133 breeds.
VGG16_model = Sequential()
VGG16_model.add(GlobalAveragePooling2D(input_shape=train_VGG16.shape[1:]))
VGG16_model.add(Dense(133, activation='softmax'))
VGG16_model.summary()
```
### Compile the Model
```
VGG16_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
```
### Train the Model
```
# Checkpoint the VGG-16 head, keeping only the best-validation-loss weights
# over the 20 training epochs.
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.VGG16.hdf5',
verbose=1, save_best_only=True)
VGG16_model.fit(train_VGG16, train_targets,
validation_data=(valid_VGG16, valid_targets),
epochs=20, batch_size=20, callbacks=[checkpointer], verbose=1)
```
### Load the Model with the Best Validation Loss
```
VGG16_model.load_weights('saved_models/weights.best.VGG16.hdf5')
```
### Test the Model
Now, we can use the CNN to test how well it identifies breed within our test dataset of dog images. We print the test accuracy below.
```
# get index of predicted dog breed for each image in test set
# Each bottleneck feature map is expanded to a batch of one before predict().
VGG16_predictions = [np.argmax(VGG16_model.predict(np.expand_dims(feature, axis=0))) for feature in test_VGG16]
# report test accuracy
test_accuracy = 100*np.sum(np.array(VGG16_predictions)==np.argmax(test_targets, axis=1))/len(VGG16_predictions)
print('Test accuracy: %.4f%%' % test_accuracy)
```
### Predict Dog Breed with the Model
```
from extract_bottleneck_features import *
def VGG16_predict_breed(img_path):
    """Predict a dog breed for the image at ``img_path`` with the VGG-16 head.

    Loads the image into a 4D tensor, runs it through the pre-trained VGG-16
    feature extractor, classifies the resulting bottleneck features, and
    returns the name of the most probable breed from ``dog_names``.
    """
    features = extract_VGG16(path_to_tensor(img_path))
    probabilities = VGG16_model.predict(features)
    return dog_names[np.argmax(probabilities)]
```
---
<a id='step5'></a>
## Step 5: Create a CNN to Classify Dog Breeds (using Transfer Learning)
You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set.
In Step 4, we used transfer learning to create a CNN using VGG-16 bottleneck features. In this section, you must use the bottleneck features from a different pre-trained model. To make things easier for you, we have pre-computed the features for all of the networks that are currently available in Keras:
- [VGG-19](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogVGG19Data.npz) bottleneck features
- [ResNet-50](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogResnet50Data.npz) bottleneck features
- [Inception](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogInceptionV3Data.npz) bottleneck features
- [Xception](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogXceptionData.npz) bottleneck features
The files are encoded as such:
Dog{network}Data.npz
where `{network}`, in the above filename, can be one of `VGG19`, `Resnet50`, `InceptionV3`, or `Xception`. Pick one of the above architectures, download the corresponding bottleneck features, and store the downloaded file in the `bottleneck_features/` folder in the repository.
### (IMPLEMENTATION) Obtain Bottleneck Features
In the code block below, extract the bottleneck features corresponding to the train, test, and validation sets by running the following:
bottleneck_features = np.load('bottleneck_features/Dog{network}Data.npz')
train_{network} = bottleneck_features['train']
valid_{network} = bottleneck_features['valid']
test_{network} = bottleneck_features['test']
```
# Pre-computed ResNet-50 bottleneck features for the train/valid/test splits.
bottleneck_features = np.load('bottleneck_features/DogResnet50Data.npz')
train_ResNet50 = bottleneck_features['train']
valid_ResNet50 = bottleneck_features['valid']
test_ResNet50 = bottleneck_features['test']
```
### (IMPLEMENTATION) Model Architecture
Create a CNN to classify dog breed. At the end of your code cell block, summarize the layers of your model by executing the line:
<your model's name>.summary()
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem.
__Answer:__
1. Here we are using the transfer learning technique. I am using **ResNet-50** bottleneck features as the input to my first layer, which is a **Global Average Pooling** layer. The reason I used **Global Average Pooling** is, as said in the classroom lecture, to decrease the number of parameters.
2.Next i added a dense layer with **133** nodes with **softmax** activation function.I used 133 nodes because we have **133** different breeds of dogs.I used **softmax** activation function so that we get probabilities of each class saying how likely is the dog of a particular breed.
That's it.
```
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Dropout, Flatten, Dense
from keras.models import Sequential

# Same transfer-learning head as the VGG-16 model above, but on top of the
# ResNet-50 bottleneck features: global average pooling + 133-way softmax.
ResNet_model = Sequential()
ResNet_model.add(GlobalAveragePooling2D(input_shape=train_ResNet50.shape[1:]))
ResNet_model.add(Dense(133, activation='softmax'))
ResNet_model.summary()
```
### (IMPLEMENTATION) Compile the Model
```
ResNet_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
```
### (IMPLEMENTATION) Train the Model
Train your model in the code cell below. Use model checkpointing to save the model that attains the best validation loss.
You are welcome to [augment the training data](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html), but this is not a requirement.
```
from keras.callbacks import ModelCheckpoint
# Checkpoint the ResNet-50 head, keeping only the best-validation-loss weights.
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.ResNet50.hdf5',
verbose=1, save_best_only=True)
ResNet_model.fit(train_ResNet50, train_targets,
validation_data=(valid_ResNet50, valid_targets),
epochs=20, batch_size=20, callbacks=[checkpointer], verbose=1)
```
### (IMPLEMENTATION) Load the Model with the Best Validation Loss
```
ResNet_model.load_weights('saved_models/weights.best.ResNet50.hdf5')
```
### (IMPLEMENTATION) Test the Model
Try out your model on the test dataset of dog images. Ensure that your test accuracy is greater than 60%.
```
# get index of predicted dog breed for each image in test set
# Each bottleneck feature map is expanded to a batch of one before predict().
ResNet_predictions = [np.argmax(ResNet_model.predict(np.expand_dims(feature, axis=0))) for feature in test_ResNet50]
# report test accuracy
test_accuracy = 100*np.sum(np.array(ResNet_predictions)==np.argmax(test_targets, axis=1))/len(ResNet_predictions)
print('Test accuracy: %.4f%%' % test_accuracy)
```
### (IMPLEMENTATION) Predict Dog Breed with the Model
Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan_hound`, etc) that is predicted by your model.
Similar to the analogous function in Step 5, your function should have three steps:
1. Extract the bottleneck features corresponding to the chosen CNN model.
2. Supply the bottleneck features as input to the model to return the predicted vector. Note that the argmax of this prediction vector gives the index of the predicted dog breed.
3. Use the `dog_names` array defined in Step 0 of this notebook to return the corresponding breed.
The functions to extract the bottleneck features can be found in `extract_bottleneck_features.py`, and they have been imported in an earlier code cell. To obtain the bottleneck features corresponding to your chosen CNN architecture, you need to use the function
extract_{network}
where `{network}`, in the above filename, should be one of `VGG19`, `Resnet50`, `InceptionV3`, or `Xception`.
```
from extract_bottleneck_features import *
def ResNet50_predict_breed(img_path):
    """Return the breed name predicted by the ResNet-50 transfer model.

    Steps: extract ResNet-50 bottleneck features for the image, run the
    classification head to obtain breed probabilities, and look up the
    highest-probability breed in ``dog_names``.
    """
    features = extract_Resnet50(path_to_tensor(img_path))
    probabilities = ResNet_model.predict(features)
    return dog_names[np.argmax(probabilities)]
```
---
<a id='step6'></a>
## Step 6: Write your Algorithm
Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.
You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 5 to predict dog breed.
Some sample output for our algorithm is provided below, but feel free to design your own user experience!

### (IMPLEMENTATION) Write your Algorithm
```
def predict_dog_or_human(img_path):
    """Greet the subject of the image at ``img_path``.

    Prints the predicted breed if a dog is detected, the resembling breed
    if a human face is detected, and an error message if neither is found.
    """
    # BUG FIX: check for a dog first. The spec above gives the dog branch
    # priority, and the Haar-cascade face detector can fire on dog faces,
    # so the original human-first ordering could greet a dog as a human.
    if dog_detector(img_path):
        print("Hi {}".format(ResNet50_predict_breed(img_path)))
    elif face_detector(img_path):
        # Also fixes the missing space after "!" in the original message.
        print("Hi human! You look like {}".format(ResNet50_predict_breed(img_path)))
    else:
        print("Error")
```
---
<a id='step7'></a>
## Step 7: Test Your Algorithm
In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that __you__ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?
### (IMPLEMENTATION) Test Your Algorithm on Sample Images!
Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.
__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.
__Answer:__
Not really,the model is good at finding if image contains dog or human.It's also good at finding if image contain either of them or not i.e human and dog.
But I think the model is lacking at finding the correct breed for some of the dog breeds that look quite similar. Even we could make a mistake in finding the correct breed, as they look too alike.
One possible point i think as an improvement is adding more dense layers so that the model can find more complex differences between breeds that look very similar.
The other possible thing i think will work is , may be i can add same dog images in the set with different color scales so that it can identify two dogs of same breed but different color.
Another improvement i think is **image Augmentation**.I didn't use that in my model.But may be it would have increased accuracy if i used image Augmentation.
Next improvement might be using some other datasets like **imagenet** which are available online and see how that works etc.
```
# Exercise the algorithm on a mix of dog images, a landscape, and a human
# (the rubric asks for at least two humans and two dogs).
predict_dog_or_human('images/hackquest.jpg')
predict_dog_or_human('images/doberman.jpeg')
predict_dog_or_human('images/boxer.jpeg')
predict_dog_or_human('images/siberan_husky.jpeg')
predict_dog_or_human('images/Mount_Everest.jpg')
predict_dog_or_human('images/sample_human_output.png')
```
| github_jupyter |
<a href="https://colab.research.google.com/github/athenian-ct-projects/Robinson-Crusoe-Day-AW/blob/master/Robinson_Crusoe_Project.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
This is for Robinson Crusoe day.
Written by Alison W.
Press play to start and scroll down to the bottom of the code. You don't need to read the code to understand the game
```
name = input("Enter your nickname: ")
#function that is called when player dies
def death_message():
    """Print the standard game-over message."""
    print("You didn't pick the right choice and now you're dead 😔✋")
#introductory message and first question
def first_function():
    """Start the game: welcome the player and ask the first question.

    Whichever destination the player picks, the ship crashes (that is the
    joke the original comment describes) and play continues in
    second_function().
    """
    # Typo fix in the printed text: "Cruesoe" -> "Crusoe".
    print("Hello "+name+"! Welcome to the Robinson Crusoe Game. You are a voyager in the 1600s. During this game you will be presented with choices. Try to select \nthe answer that will keep you alive. Good Luck!")
    #first question prints a "death message" no matter what, but then the game starts for real
    print("This is your first voyage! Where would you like to go? Your choices are to journey around Cape Horn or to the Bermuda Triangle")
    answertofirstquestion = int(input("Enter the number '1' for Cape Horn and '2' for the Bermuda Triangle: "))
    # Both branches of the original if/else printed the identical message and
    # called second_function(), so the duplicated code path is collapsed here.
    print("Oh no! Your ship crashed. What a shame. You managed to make it into a lifeboat though, and after many days floating around on the ocean you get rescued.")
    second_function()
def second_function():
    """Second voyage: name the ship and decide whether to kidnap the animals."""
    print("When you make it back to England you manage to convince a rich landowner to finance another voyage. He wants you to captain an expedition to colonize foreign lands.")
    shipname = input("Enter your ship name: ")
    print(shipname+ " sets sail. On your way you encounter a swanky island with penguins and seals. Do you want to kidnap them?")
    #no matter what the player gets abandoned
    answertoquestion2 = int(input("Enter the number '1' to kidnap the seals and penguins and '2' to keep going."))
    #mini game if option 2 is selected in which the crew ends up dying no matter what
    if answertoquestion2 > 1:
        # Answer '2' (keep going): the ship crashes anyway and the mini game starts.
        print ("It was nice of you to not kidnap seals and penguins but you still crashed.")
        mini_function()
    else:
        # Answer '1' (kidnap): the crew abandons the player on the island.
        print ("Your vegan crew did not align with the morals of kidnapping animals and they abandoned you on the island. Tragic 😪")
        fourth_function()
#mini function game
def mini_function():
    """Shipwreck mini game: decide whether to kill the crew for food."""
    print("Your ship has been destroyed and your only chance of rescue is if a ship happens to pass by. You might be able to kill your crew during the night for food.")
    answertominigame = int(input("Enter '1' to attempt to kill them or '2' to let them live: "))
    if answertominigame > 1:
        # Answer '2' (let them live): the crew dies of fever anyway.
        print("The entire crew dies of Typhoid fever anyway 🥳")
        fourth_function()
    else:
        # Answer '1' (kill them): continue to the island storyline.
        fifth_function()
def fourth_function():
    """Island survival: name a parrot and choose which seeds to plant."""
    print("Your new goal of the game is to make it back to England. You are getting lonely on the island when you find a parrot.")
    #player is informed that their new goal is to make it back to england
    # input() already returns a str, so the original str() wrappers were redundant.
    parrotname = input("Enter a name for your parrot: ")
    #parrot name is displayed in the next print message
    print("You scavenge the wrecked ship for supplies with " + parrotname + " and find seeds to grow plants. Do you grow tobacco or corn?")
    #character dies if tobacco is planted
    fourthanswer = int(input("Enter '1' to grow corn or '2' to grow tobacco: "))
    if fourthanswer > 1:
        death_message()
    else:
        # BUG FIX: the original had a bare string expression here, so this
        # message was evaluated and silently discarded instead of printed.
        print("Good choice. You have food now.")
        fifth_function()
def fifth_function():
    """Bible discovery: choose between founding a cult and studying Christianity."""
    #both choices lead to similar questions which both meet at eighth_function
    print("You discover a bible on the Island.")
    fifthfunctionanswer = int(input("Enter the number '1' to form a cult or '2' to study Christianity: "))
    if fifthfunctionanswer > 1:
        # Answer '2': study Christianity.
        print ("With the aid of Christianity you are able to find relief in life alone on the island.")
        sixth_function()
    else:
        # Answer '1': form a cult. input() already returns a str, so the
        # redundant str() wrappers from the original are dropped; the names
        # are collected for flavour only and are otherwise unused.
        cultname = input("Enter the name of your cult: ")
        cultgod = input("Enter the god of your cult: ")
        seventh_function()
def sixth_function():
    """Cannibal encounter (Christianity branch)."""
    #player dies if they attempt to kill the cannibals
    print("You encounter cannibals on the island. ")
    sixthfunctionanswer = int(input("Enter '1' to kill the cannibals or '2' to teach them the bible: "))
    if sixthfunctionanswer > 1:
        # Answer '2' (teach them): they convert and the game continues.
        print("The cannibals join your religion and you guys are having a fun time on the island.")
        eighth_function()
    else:
        # Answer '1' (fight them): the player dies.
        print("That was a terrible idea. They were stronger than you. ")
        death_message()
def seventh_function():
    """Cannibal encounter (cult branch) — mirrors sixth_function."""
    #very similar to the sixth function
    # Typo fix in the printed text: "encouunter" -> "encounter".
    print("You encounter cannibals on the island.")
    seventhfunctionanswer = int(input("Enter '1' to kill the cannibals or '2' to convert them into your cult: "))
    if seventhfunctionanswer > 1:
        # Answer '2' (convert them): they join the cult and the game continues.
        print("The cannibals join your cult and you guys are having a fun time on the island.")
        eighth_function()
    #correct option leads to the eighth function like in the sixth function
    else:
        # Answer '1' (fight them): the player dies.
        print("That was a terrible idea. They were stronger than you. ")
        death_message()
def eighth_function():
    """A working ship appears: choose to sail home or stay on the island."""
    print("A working ship passes by and your people manage to imprison the crew. The ship is fully working. ")
    eighthfunctionanswer = int(input("Enter '1' to sail back to England or '2' to stay on the island: "))
    #game ends if player does not choose to go to england
    if eighthfunctionanswer > 1:
        # Answer '2' (stay): the game ends without a win.
        print("You live a long life on the island and die happily at the age of 666. You technically did not win, as you didn't make it back to England.")
    else:
        ninth_function()
def ninth_function():
    """Pirate sighting on the way home: fight them or turn back."""
    print("On your way back to England you encounter pirates.")
    ninthfunctionanswer = int(input("Enter '1' to fight the pirates or '2' to return to the island: "))
    #game ends if player does not make it to england
    if ninthfunctionanswer > 1:
        # Answer '2' (turn back): the game ends without a win.
        print("You live a long life on the island and die happily at the age of 666. You technically did not win, as you didn't make it back to England.")
    else:
        tenth_function()
def tenth_function():
    """Pirate battle: cannons win; boarding is fatal."""
    print("You are now battling the pirates.")
    tenthfunctionanswer = int(input("Enter '1' to fire cannons or '2' to attempt to board the ship: "))
    #character dies if they attempt to board the ship
    if tenthfunctionanswer > 1:
        death_message()
    else:
        eleventh_function()
def eleventh_function():
    """Back in England: pick a pet; options 1-3 carry rabies and end the game."""
    #player dies if options 1-3 are chosen
    print("You successfully make it back to England. You should purchase a pet.")
    x = int(input("Enter '1' to adopt a cat, '2' for a bat, '3' for a fox, '4' for a rabbit, or '5' for a dog: "))
    # BUG FIX: the original used `while x < 4:` with no break, so choosing
    # options 1-3 printed the death message forever. A plain if/else gives
    # the single death that the author's own comment describes.
    if x < 4:
        print("Your pet gave you rabies.")
        death_message()
    else:
        twelfth_function()
def twelfth_function():
    """Print a celebratory row of emojis followed by the victory message."""
    for emoji in ("🖐", "🤯", "😎", "🥵", "🥳"):
        print(emoji)
    print("You won the game!")
#first function is called in order to start the game
first_function()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/BRIJNANDA1979/CNN-Sentinel/blob/master/Understand_band_data_info_using_histogram_and_classifying_pixel_values.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#https://www.earthdatascience.org/courses/use-data-open-source-python/multispectral-remote-sensing/vegetation-indices-in-python/calculate-NDVI-python/
#Sentinel 2 Use Handbook. https://sentinels.copernicus.eu/documents/247904/685211/Sentinel-2_User_Handbook
# NOTE: the `!pip` lines are IPython shell magics and only run inside a notebook.
!pip install rioxarray
!pip install geopandas
import os
import matplotlib.pyplot as plt
import numpy as np
import rioxarray as rxr
import geopandas as gpd
# Single-band (B01) Sentinel-2 GeoTIFF from the BigEarthNet sample on Drive.
path = '/content/drive/MyDrive/Big/S2A_MSIL2A_20170613T101031_0_55/S2A_MSIL2A_20170613T101031_0_55_B01.tif'
#os.chdir(path)
#data_path = os.path.join("/content/drive/MyDrive/Big/S2A_MSIL2A_20170613T101031_0_55/S2A_MSIL2A_20170613T101031_0_55_B01.tif")
data = rxr.open_rasterio(path)
data.shape
!pip install earthpy
import earthpy as et
import earthpy.spatial as es
import earthpy.plot as ep
# Quick-look plot of the opened raster.
ep.plot_bands(data,
title="Bigearthnet Band 1 Raster")
plt.show()
#https://earthpy.readthedocs.io/en/latest/gallery_vignettes/plot_bands_functionality.html
#Stack all bands of BigEarthNet Data sample one band tiff images
import glob
# Collect the per-band tif files (B01..B12) for this Sentinel-2 tile,
# sorted so that indexing by position is deterministic.
files = glob.glob(os.path.join('/content/drive/MyDrive/Big/S2A_MSIL2A_20170613T101031_0_55/S2A_MSIL2A_20170613T101031_0_55_B*.tif'))
files.sort()
print("Number of Bands",len(files))
print(files)
print(files[0]) # Band1
print(files[1]) # Band2
print(files[10]) # Band12
#array_stack, meta_data = es.stack(path, nodata=-9999)
```
# New Section
```
# Inspect Band 1: plot it, then print its CRS, extent, resolution,
# nodata value, shape, and min/max statistics.
print(files[0])
band1= rxr.open_rasterio(files[0])
ep.plot_bands(band1,
title="Bigearthnet Band 1 Raster")
plt.show()
print("The CRS of this data is:", band1.rio.crs)
#Converting EPSG to Proj4 in Python
# Convert to project string using earthpy
proj4 = et.epsg['32634']
print(proj4)
#Spatial Extent
#You can access the spatial extent using the .bounds() attribute in rasterio.
print(band1.rio.bounds())
#Raster Resolution: area covered by 1 pixel on ground e.g 60m * 60m
# What is the x and y resolution for your raster data?
print(band1.rio.resolution())
print("The nodatavalue of your data is:", band1.rio.nodata)
# How many bands / layers does the object have?
print("Number of bands", band1.rio.count)
print("The shape of your data is:", band1.shape)
print('min value:', np.nanmin(band1))
print('max value:', np.nanmax(band1))
# Histogram of Band 1 cell values.
import matplotlib.pyplot as plt
f, ax = plt.subplots()
band1.plot.hist(color="purple")
ax.set(title="Distribution of Raster Cell Values Band 1 Data",
xlabel="",
ylabel="Number of Pixels")
plt.show()
# Inspect Band 2: plot it, then print its CRS, extent, resolution,
# nodata value, shape, and min/max statistics (mirrors the Band 1 cell).
print(files[1])
band2= rxr.open_rasterio(files[1])
ep.plot_bands(band2,
title="Bigearthnet Band 2 Raster")
plt.show()
print("The CRS of this data is:", band2.rio.crs)
#Converting EPSG to Proj4 in Python
# Convert to project string using earthpy
proj4 = et.epsg['32634']
print(proj4)
#Spatial Extent
#You can access the spatial extent using the .bounds() attribute in rasterio.
print(band2.rio.bounds())
#Raster Resolution: area covered by 1 pixel on ground e.g 60m * 60m
# What is the x and y resolution for your raster data?
print(band2.rio.resolution())
print("The nodatavalue of your data is:", band2.rio.nodata)
# How many bands / layers does the object have?
print("Number of bands", band2.rio.count)
print("The shape of your data is:", band2.shape)
print('min value:', np.nanmin(band2))
print('max value:', np.nanmax(band2))
import matplotlib.pyplot as plt
f, ax = plt.subplots()
# BUG FIX: the original plotted band1 here, so the chart titled
# "Band 2 Data" actually showed Band 1 values (copy-paste error).
band2.plot.hist(color="purple")
ax.set(title="Distribution of Raster Cell Values Band 2 Data",
xlabel="",
ylabel="Number of Pixels")
plt.show()
#https://rasterio.readthedocs.io/en/latest/api/rasterio.plot.html
#rasterio.plot.reshape_as_image(arr)
#Returns the source array reshaped into the order expected by image processing and visualization software (matplotlib, scikit-image, etc) by swapping the axes order from (bands, rows, columns) to (rows, columns, bands)
print('min value:', np.nanmin(data))
print('max value:', np.nanmax(data))
#https://www.earthdatascience.org/courses/use-data-open-source-python/intro-raster-data-python/raster-data-processing/classify-plot-raster-data-in-python/
# Histogram of the Band 1 raster opened at the top of the notebook.
import matplotlib.pyplot as plt
f, ax = plt.subplots()
data.plot.hist(color="purple")
ax.set(title="Distribution of Raster Cell Values Data",
xlabel="",
ylabel="Number of Pixels")
plt.show()
# Re-plot the histogram with explicit bin edges.
bins=[0, 100, 200, 250, 275, 300,350]
f, ax = plt.subplots()
data.plot.hist(color="purple",bins=[0, 100, 200, 250, 275, 300,350])
ax.set(title="Distribution of Raster Cell Values Data",
xlabel="",
ylabel="Number of Pixels")
plt.show()
# Classify every pixel into one of 5 classes via np.digitize over the
# open-ended bin edges below (applied lazily through xarray.apply_ufunc).
class_bins = [-np.inf,250,275,300,350,+np.inf]
import xarray as xr
data_class = xr.apply_ufunc(np.digitize,
data,
class_bins)
print(data_class.shape)
#data_class = np.array(data_class[0])
# Histogram of the resulting class labels.
import matplotlib.pyplot as plt
f, ax = plt.subplots()
data_class.plot.hist(color="purple")
ax.set(title="Distribution of Raster Cell Values Data",
xlabel="",
ylabel="Number of Pixels")
plt.show()
#https://www.spatialreference.org/ref/epsg/32634/
#/*EPSG:32634
#WGS 84 / UTM zone 34N (Google it)
#WGS84 Bounds: 18.0000, 0.0000, 24.0000, 84.0000
#Projected Bounds: 166021.4431, 0.0000, 833978.5569, 9329005.1825
#Scope: Large and medium scale topographic mapping and engineering survey.
#Last Revised: June 2, 1995
#Area: World - N hemisphere - 18°E to 24°E - by country*/
#Proj4js.defs["EPSG:32634"] = "+proj=utm +zone=34 +ellps=WGS84 +datum=WGS84 +units=m +no_defs";
# Inspect Band 12: plot it, then print its CRS, extent, resolution,
# nodata value, shape, and min/max statistics (mirrors the Band 1 cell).
print(files[10])
band12= rxr.open_rasterio(files[10])
ep.plot_bands(band12,
title="Bigearthnet Band 12 Raster")
plt.show()
print("The CRS of this data is:", band12.rio.crs)
#Converting EPSG to Proj4 in Python
# Convert to project string using earthpy
proj4 = et.epsg['32634']
print(proj4)
#Spatial Extent
#You can access the spatial extent using the .bounds() attribute in rasterio.
print(band12.rio.bounds())
#Raster Resolution: area covered by 1 pixel on ground e.g 60m * 60m
# What is the x and y resolution for your raster data?
print(band12.rio.resolution())
print("The nodatavalue of your data is:", band12.rio.nodata)
# How many bands / layers does the object have?
print("Number of bands", band12.rio.count)
print("The shape of your data is:", band12.shape)
print('min value:', np.nanmin(band12))
print('max value:', np.nanmax(band12))
import matplotlib.pyplot as plt
f, ax = plt.subplots()
# BUG FIX: the original plotted band1 here, so the chart titled
# "Band 12 Data" actually showed Band 1 values (copy-paste error).
band12.plot.hist(color="purple")
ax.set(title="Distribution of Raster Cell Values Band 12 Data",
xlabel="",
ylabel="Number of Pixels")
plt.show()
```
## New Section : Making Dataframe for min/max values of each bands of 1,2 and 12
```
import pandas as pd
# Dataframe collecting each band file's path with its min and max pixel value.
df = pd.DataFrame(columns= ['filename','min','max'])
df.head()
import glob
import os
# NOTE(review): `min` and `max` shadow the Python builtins for the rest of
# the notebook; consider renaming (e.g. band_mins / band_maxs).
files_batch=[] #batch of same bands
min=[]
max=[]
mean_min =[]
mean_max =[]
path = '/content/drive/MyDrive/Big'
os.chdir(path)
dirs = os.listdir()
dirs.sort()
print(dirs)
print(len(dirs))
# Drop the first sorted entry. NOTE(review): the original comment said
# "remove last element", but del dirs[0] removes the FIRST — confirm intent.
del dirs[0]
print(dirs)
print(len(dirs))
# Number of sample directories; used to slice the per-band batches below.
step_size = len(dirs)
# Add batch of band1 tif files to files list
path = '/content/drive/MyDrive/Big'
for i in dirs:
    s = ""
    s = s + path + '/' + str(i) + '/' +'*01.tif'
    print(s)
    temp = (glob.glob(os.path.join(s)))
    files_batch.append(temp[0])
# Fetch Filenames of band 1
print(files_batch,files_batch[0],len(files_batch)) #Batch of Band 1 files
# Add min/max values of band 1 to min/max list
for i in range(0,step_size):
    band1= rxr.open_rasterio(files_batch[i])
    min.append(np.nanmin(band1))
    max.append(np.nanmax(band1))
print(min)
print(max)
# NOTE(review): `min`/`max` accumulate across ALL bands processed so far,
# so from the second band onward these means mix in earlier bands' values —
# confirm whether per-band slices were intended.
mean_min.append(np.mean(min))
mean_max.append(np.mean(max))
#df['B1_min'] = min
#df['B1_max'] = max
#print(df)
# Add batch of band2 tif files to files list
path = '/content/drive/MyDrive/Big'
for i in dirs:
    s = ""
    s = s + path + '/' + str(i) + '/' +'*02.tif'
    print(s)
    temp = (glob.glob(os.path.join(s)))
    files_batch.append(temp[0])
print(files_batch)
print(files_batch[len(files_batch)-1], len(files_batch))
# Add min/max values of band 2 to min/max list
for i in range(step_size,2*step_size):
    band2= rxr.open_rasterio(files_batch[i])
    min.append(np.nanmin(band2))
    max.append(np.nanmax(band2))
print(min)
print(max)
mean_min.append(np.mean(min))
mean_max.append(np.mean(max))
# Add batch of band 12 tif files to files list
path = '/content/drive/MyDrive/Big'
for i in dirs:
    s = ""
    s = s + path + '/' + str(i) + '/' +'*12.tif'
    print(s)
    temp = (glob.glob(os.path.join(s)))
    files_batch.append(temp[0])
print(files_batch)
print(files_batch[len(files_batch)-1], len(files_batch))
# Add min/max values of band 12 to min/max list
for i in range(2*step_size,3*step_size):
    band2= rxr.open_rasterio(files_batch[i])
    min.append(np.nanmin(band2))
    max.append(np.nanmax(band2))
print(min)
print(max)
mean_min.append(np.mean(min))
mean_max.append(np.mean(max))
```
# Add files and min/max lists to dataframe
```
# Attach the collected filenames and per-file min/max values to the dataframe.
print(files_batch)
df['filename'] = files_batch
df['min'] = min
df['max'] = max
df.head()
#print means of min and max values for each band 1 2 and 12
print(mean_min)
print(mean_max)
# Plot histogram
# NOTE(review): plt.bar draws a bar chart (as the inline comment says),
# not a true histogram; min values go on x, max values on y.
import matplotlib.pyplot as plt
x=np.array(min)
y=np.array(max)
plt.bar(x,y,align='center') # A bar chart
plt.xlabel('Min')
plt.ylabel('Max')
plt.show()
# Plot histogram for mean min and mean max
import matplotlib.pyplot as plt
x=np.array(mean_min)
y=np.array(mean_max)
plt.bar(x,y,align='center') # A bar chart
plt.xlabel('Mean_Min')
plt.ylabel('Mean_Max')
plt.show()
```
### **USE RASTERIO module to open Raster images and read it to Array**
```
# NOTE(review): `band1`, `files`, `band12` and `df` are created in earlier
# cells (presumably rioxarray rasters / filename lists) — confirm upstream.
band1 = np.array(band1)
band1.shape
print(files)
band2= rxr.open_rasterio(files[1])
band2 = np.array(band2)
band2.shape
band12 = np.array(band12)
band12.shape
print(df['filename'])
files_bands = []
files_bands = df['filename']
print(files_bands[0:6])
# Reading raster geotif files
#https://automating-gis-processes.github.io/CSC18/lessons/L6/reading-raster.html
import rasterio
band1_batch = files_bands[0:6]
print(band1_batch[0])
band1_raster = rasterio.open(band1_batch[0])
print(type(band1_raster))
#Projection
print(band1_raster.crs)
#Affine transform (how raster is scaled, rotated, skewed, and/or translated)
band1_raster.transform
band1_raster.meta
#reading raster to array
band1_array = band1_raster.read()
print(band1_array)
# Per-band summary statistics for this one scene
stats = []
for band in band1_array:
    stats.append({
        'mean' : band.mean(),
        'min' : band.min(),
        'max' : band.max(),
        'median': np.median(band)
    })
print(stats)
```
# Read all Band1 files and find mean of all 6 Forest class Band1 data
```
# Per-image statistics for the first six (band-1) scenes listed in the dataframe.
print(df['filename'])
files_bands = df['filename']
print(files_bands[0:6])
# Reading raster geotif files using Rasterio
# https://automating-gis-processes.github.io/CSC18/lessons/L6/reading-raster.html
import rasterio
band1_batch = files_bands[0:6]
print(band1_batch[0])
# One entry per file: the full pixel array read from the raster.
band1_array = [rasterio.open(name).read() for name in band1_batch]
print(len(band1_array))
# Flatten (scene, band) pairs into per-band statistics.
band1_mean = [band.mean() for image in band1_array for band in image]
band1_min = [band.min() for image in band1_array for band in image]
band1_max = [band.max() for image in band1_array for band in image]
print("Band 1 stat for 6 images is :------>")
print(band1_mean)
print(band1_min)
print(band1_max)
# Stat for band 2 images (rows 6..11 of the filename column)
band2_batch = files_bands[6:12]
print(band2_batch)
band2_array = [rasterio.open(name).read() for name in band2_batch]
print(len(band2_array))
band2_mean = [band.mean() for image in band2_array for band in image]
band2_min = [band.min() for image in band2_array for band in image]
band2_max = [band.max() for image in band2_array for band in image]
print("Band 2 stat for 6 images is :------>")
print(band2_mean)
print(band2_min)
print(band2_max)
# Stat for band 12 images (rows 12..17 of the filename column)
band12_batch = files_bands[12:18]
print(band12_batch)
band12_array = [rasterio.open(name).read() for name in band12_batch]
print(len(band12_array))
band12_mean = [band.mean() for image in band12_array for band in image]
band12_min = [band.min() for image in band12_array for band in image]
band12_max = [band.max() for image in band12_array for band in image]
print("Band 12 stat for 6 images is :------>")
print(band12_mean)
print(band12_min)
print(band12_max)
# One bar chart per band: the six per-image means on a fixed 100-600 y-axis.
x = (1, 2, 3, 4, 5, 6)
for means in (band1_mean, band2_mean, band12_mean):
    y = np.array(means)
    plt.bar(x, y, align='center')
    plt.axis([0, 6, 100, 600])
    plt.xlabel('Bands')
    plt.ylabel('Commulative Mean')
    plt.show()
# Collect the per-band means into one dataframe and plot all three series.
df = pd.DataFrame({
    'mean_band1': np.array(band1_mean),
    'mean_band2': np.array(band2_mean),
    'mean_band12': np.array(band12_mean),
})
df
df.plot()
```
| github_jupyter |
# Rank Classification using BERT on Amazon Review dataset
## Introduction
In this tutorial, you learn how to train a rank classification model using [Transfer Learning](https://en.wikipedia.org/wiki/Transfer_learning). We will use a pretrained DistilBert model to train on the Amazon review dataset.
## About the dataset and model
[Amazon Customer Review dataset](https://s3.amazonaws.com/amazon-reviews-pds/readme.html) consists of all different valid reviews from amazon.com. We will use the "Digital_software" category that consists of 102k valid reviews. As for the pre-trained model, use the DistilBERT[[1]](https://arxiv.org/abs/1910.01108) model. It's a light-weight BERT model already trained on [Wikipedia text corpora](https://en.wikipedia.org/wiki/List_of_text_corpora), a much larger dataset consisting of over millions text. The DistilBERT served as a base layer and we will add some more classification layers to output as rankings (1 - 5).
<img src="https://djl-ai.s3.amazonaws.com/resources/images/amazon_review.png" width="500">
<center>Amazon Review example</center>
We will use review body as our data input and ranking as label.
## Pre-requisites
This tutorial assumes you have the following knowledge. Follow the READMEs and tutorials if you are not familiar with:
1. How to setup and run [Java Kernel in Jupyter Notebook](https://github.com/awslabs/djl/blob/master/jupyter/README.md)
2. Basic components of Deep Java Library, and how to [train your first model](https://github.com/awslabs/djl/blob/master/jupyter/tutorial/02_train_your_first_model.ipynb).
## Getting started
Load the Deep Java Library and its dependencies from Maven:
```
%mavenRepo snapshots https://oss.sonatype.org/content/repositories/snapshots/
%maven ai.djl:api:0.9.0-SNAPSHOT
%maven ai.djl:basicdataset:0.9.0-SNAPSHOT
%maven ai.djl.mxnet:mxnet-model-zoo:0.9.0-SNAPSHOT
%maven org.slf4j:slf4j-api:1.7.26
%maven org.slf4j:slf4j-simple:1.7.26
%maven net.java.dev.jna:jna:5.3.0
// See https://github.com/awslabs/djl/blob/master/mxnet/mxnet-engine/README.md
// for more MXNet library selection options
%maven ai.djl.mxnet:mxnet-native-auto:1.7.0-backport
```
Now let's import the necessary modules:
```
import ai.djl.Application;
import ai.djl.Device;
import ai.djl.MalformedModelException;
import ai.djl.Model;
import ai.djl.basicdataset.CsvDataset;
import ai.djl.basicdataset.utils.DynamicBuffer;
import ai.djl.inference.Predictor;
import ai.djl.metric.Metrics;
import ai.djl.modality.Classifications;
import ai.djl.modality.nlp.SimpleVocabulary;
import ai.djl.modality.nlp.bert.BertFullTokenizer;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.Activation;
import ai.djl.nn.Block;
import ai.djl.nn.SequentialBlock;
import ai.djl.nn.core.Linear;
import ai.djl.nn.norm.Dropout;
import ai.djl.repository.zoo.*;
import ai.djl.training.*;
import ai.djl.training.dataset.Batch;
import ai.djl.training.dataset.RandomAccessDataset;
import ai.djl.training.evaluator.Accuracy;
import ai.djl.training.listener.CheckpointsTrainingListener;
import ai.djl.training.listener.TrainingListener;
import ai.djl.training.loss.Loss;
import ai.djl.training.util.ProgressBar;
import ai.djl.translate.*;

import java.io.IOException;
import java.nio.file.Paths;
import java.util.Arrays; // used by MyTranslator (Arrays.asList) but was missing
import java.util.List;

import org.apache.commons.csv.CSVFormat;
```
## Prepare Dataset
The first step is to prepare the dataset for training. Since the original data is in TSV format, we can use CsvDataset as the dataset container. We also need to specify how we want to preprocess the raw data. For a BERT model, the input data must be tokenized and mapped into indices based on the vocabulary. In DJL, we defined an interface called Featurizer; it is designed to let the user customize the operation on each selected row/column of a dataset. In our case, we would like to clean and tokenize our sentences. So let's implement it to deal with customer review sentences.
```
final class BertFeaturizer implements CsvDataset.Featurizer {

    private final BertFullTokenizer tokenizer;
    private final int maxLength; // sentences longer than this are truncated

    public BertFeaturizer(BertFullTokenizer tokenizer, int maxLength) {
        this.tokenizer = tokenizer;
        this.maxLength = maxLength;
    }

    /** {@inheritDoc} */
    @Override
    public void featurize(DynamicBuffer buf, String input) {
        SimpleVocabulary vocab = tokenizer.getVocabulary();
        // convert the sentence to word-piece tokens
        List<String> tokens = tokenizer.tokenize(input);
        if (tokens.size() > maxLength) {
            // keep only the first maxLength tokens
            tokens = tokens.subList(0, maxLength);
        }
        // BERT embedding convention "[CLS] Your Sentence [SEP]"
        buf.put(vocab.getIndex("[CLS]"));
        for (String token : tokens) {
            buf.put(vocab.getIndex(token));
        }
        buf.put(vocab.getIndex("[SEP]"));
    }
}
```
Once we got this part done, we can apply the `BertFeaturizer` into our Dataset. We take `review_body` column and apply the Featurizer. We also pick `star_rating` as our label set. Since we go for batch input, we need to tell the dataset to pad our data if it is less than the `maxLength` we defined. `PaddingStackBatchifier` will do the work for you.
```
// Build a CsvDataset over the Amazon "Digital Software" reviews TSV.
// The "review_body" column is featurized into BERT token indices and
// "star_rating" becomes the numeric label; short rows are padded with [PAD].
CsvDataset getDataset(int batchSize, BertFullTokenizer tokenizer, int maxLength) {
    String amazonReview =
            "https://s3.amazonaws.com/amazon-reviews-pds/tsv/amazon_reviews_us_Digital_Software_v1_00.tsv.gz";
    // index of the "[PAD]" token, used as the fill value when padding batches
    float paddingToken = tokenizer.getVocabulary().getIndex("[PAD]");
    return CsvDataset.builder()
            .optCsvUrl(amazonReview) // load from Url
            .setCsvFormat(CSVFormat.TDF.withQuote(null).withHeader()) // Setting TSV loading format
            .setSampling(batchSize, true) // make sample size and random access
            .addFeature(
                    new CsvDataset.Feature(
                            "review_body", new BertFeaturizer(tokenizer, maxLength)))
            .addNumericLabel("star_rating") // set label
            .optDataBatchifier(
                    PaddingStackBatchifier.builder()
                            .optIncludeValidLengths(false)
                            .addPad(0, 0, (m) -> m.ones(new Shape(1)).mul(paddingToken))
                            .build()) // define how to pad dataset to a fix length
            .build();
}
```
## Construct your model
We will load our pretrained model and prepare the classification. First construct the `criteria` to specify where to load the embedding (DistiledBERT), then call `loadModel` to download that embedding with pre-trained weights. Since this model is built without classification layer, we need to add a classification layer to the end of the model and train it. After you are done modifying the block, set it back to model using `setBlock`.
### Load the word embedding
We will download our word embedding and load it to memory (this may take a while)
```
// Criteria describing the pretrained DistilBERT word-embedding model to fetch.
Criteria<NDList, NDList> criteria = Criteria.builder()
        .optApplication(Application.NLP.WORD_EMBEDDING)
        .setTypes(NDList.class, NDList.class) // raw NDList in and out
        .optModelUrls("https://resources.djl.ai/test-models/distilbert.zip")
        .optProgress(new ProgressBar()) // show download progress
        .build();
// Download (if needed) and load the embedding with its pre-trained weights.
ZooModel<NDList, NDList> embedding = ModelZoo.loadModel(criteria);
```
### Create classification layers
Then let's build a simple MLP layer to classify the ranks. We set the output of last FullyConnected (Linear) layer to 5 to get the predictions for star 1 to 5. Then all we need to do is to load the block into the model. Before applying the classification layer, we also need to add text embedding to the front. In our case, we just create a Lambda function that do the followings:
1. batch_data (batch size, token indices) -> batch_data + max_length (size of the token indices)
2. generate embedding
```
// Predictor that maps token indices to DistilBERT embeddings.
Predictor<NDList, NDList> embedder = embedding.newPredictor();
Block classifier = new SequentialBlock()
        // text embedding layer
        .add(
            ndList -> {
                NDArray data = ndList.singletonOrThrow();
                long batchSize = data.getShape().get(0);
                float maxLength = data.getShape().get(1);
                try {
                    // second input presumably carries the per-row sequence
                    // length (same padded length for every row) — confirm
                    // against the DistilBERT model's expected inputs
                    return embedder.predict(
                            new NDList(data, data.getManager()
                                    .full(new Shape(batchSize), maxLength)));
                } catch (TranslateException e) {
                    throw new IllegalArgumentException("embedding error", e);
                }
            })
        // classification layer
        .add(Linear.builder().setUnits(768).build()) // pre classifier
        .add(Activation::relu)
        .add(Dropout.builder().optRate(0.2f).build())
        .add(Linear.builder().setUnits(5).build()) // 5 star rating
        .addSingleton(nd -> nd.get(":,0")); // Take [CLS] as the head
Model model = Model.newInstance("AmazonReviewRatingClassification");
model.setBlock(classifier);
```
## Start Training
Finally, we can start building our training pipeline to train the model.
### Creating Training and Testing dataset
Firstly, we need to create a voabulary that is used to map token to index such as "hello" to 1121 (1121 is the index of "hello" in dictionary). Then we simply feed the vocabulary to the tokenizer that used to tokenize the sentence. Finally, we just need to split the dataset based on the ratio.
Note: we set the cut-off length to 64 which means only the first 64 tokens from the review will be used. You can increase this value to achieve better accuracy.
```
// Prepare the vocabulary from the word list shipped with the embedding model
SimpleVocabulary vocabulary = SimpleVocabulary.builder()
        .optMinFrequency(1)
        .addFromTextFile(embedding.getArtifact("vocab.txt").getPath())
        .optUnknownToken("[UNK]")
        .build();
// Prepare dataset
int maxTokenLength = 64; // cutoff tokens length
int batchSize = 8;
// NOTE(review): the boolean flag presumably toggles lower-casing — confirm
BertFullTokenizer tokenizer = new BertFullTokenizer(vocabulary, true);
CsvDataset amazonReviewDataset = getDataset(batchSize, tokenizer, maxTokenLength);
// split data with 7:3 train:valid ratio
RandomAccessDataset[] datasets = amazonReviewDataset.randomSplit(7, 3);
RandomAccessDataset trainingSet = datasets[0];
RandomAccessDataset validationSet = datasets[1];
```
### Setup Trainer and training config
Then, we need to set up our trainer. We set up the accuracy and loss function. The model training logs will be saved to `build/model`.
```
// Save checkpoints (plus the Accuracy/Loss properties below) under build/model
CheckpointsTrainingListener listener = new CheckpointsTrainingListener("build/model");
listener.setSaveModelCallback(
        trainer -> {
            TrainingResult result = trainer.getTrainingResult();
            Model model = trainer.getModel();
            // track for accuracy and loss on the validation split
            float accuracy = result.getValidateEvaluation("Accuracy");
            model.setProperty("Accuracy", String.format("%.5f", accuracy));
            model.setProperty("Loss", String.format("%.5f", result.getValidateLoss()));
        });
DefaultTrainingConfig config = new DefaultTrainingConfig(Loss.softmaxCrossEntropyLoss()) // loss type
        .addEvaluator(new Accuracy())
        .optDevices(Device.getDevices(1)) // train using single GPU
        .addTrainingListeners(TrainingListener.Defaults.logging("build/model"))
        .addTrainingListeners(listener);
```
### Start training
We will start our training process. Training on GPU will take approximately 10 mins. For CPU, it will take more than 2 hours to finish.
```
int epoch = 2;
Trainer trainer = model.newTrainer(config);
trainer.setMetrics(new Metrics()); // collect training metrics
Shape encoderInputShape = new Shape(batchSize, maxTokenLength);
// initialize trainer with proper input shape: (batchSize, maxTokenLength)
trainer.initialize(encoderInputShape);
EasyTrain.fit(trainer, epoch, trainingSet, validationSet);
System.out.println(trainer.getTrainingResult());
```
### Save the model
```
// Persist the trained parameters under build/model as "amazon-review.param"
model.save(Paths.get("build/model"), "amazon-review.param");
```
## Verify the model
We can create a predictor from the model to run inference on our customized dataset. Firstly, we can create a `Translator` for the model to do preprocessing and post processing. Similar to what we have done before, we need to tokenize the input sentence and get the output ranking.
```
class MyTranslator implements Translator<String, Classifications> {

    private BertFullTokenizer tokenizer;
    private SimpleVocabulary vocab;
    private List<String> ranks;

    public MyTranslator(BertFullTokenizer tokenizer) {
        this.tokenizer = tokenizer;
        vocab = tokenizer.getVocabulary();
        ranks = Arrays.asList("1", "2", "3", "4", "5");
    }

    /** Stack single inputs into a batch. */
    @Override
    public Batchifier getBatchifier() {
        return new StackBatchifier();
    }

    /** Tokenize the sentence and wrap it as a "[CLS] ... [SEP]" index array. */
    @Override
    public NDList processInput(TranslatorContext ctx, String input) {
        List<String> tokens = tokenizer.tokenize(input);
        float[] indices = new float[tokens.size() + 2];
        int pos = 0;
        indices[pos++] = vocab.getIndex("[CLS]");
        for (String token : tokens) {
            indices[pos++] = vocab.getIndex(token);
        }
        indices[pos] = vocab.getIndex("[SEP]");
        return new NDList(ctx.getNDManager().create(indices));
    }

    /** Softmax the five logits and attach the star-rating labels. */
    @Override
    public Classifications processOutput(TranslatorContext ctx, NDList list) {
        NDArray probabilities = list.singletonOrThrow().softmax(0);
        return new Classifications(ranks, probabilities);
    }
}
```
Finally, we can create a `Predictor` to run the inference. Let's try with a random customer review:
```
// Run inference on one hand-written review; prints the 1-5 star classification.
String review = "It works great, but it takes too long to update itself and slows the system";
Predictor<String, Classifications> predictor = model.newPredictor(new MyTranslator(tokenizer));
System.out.println(predictor.predict(review));
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Hub Authors.
Licensed under the Apache License, Version 2.0 (the "License");
```
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
```
# How to build a simple text classifier with TF-Hub
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/hub/tutorials/text_classification_with_tf_hub"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/hub/blob/master/docs/tutorials/text_classification_with_tf_hub.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/hub/blob/master/docs/tutorials/text_classification_with_tf_hub.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/hub/docs/tutorials/text_classification_with_tf_hub.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
TF-Hub is a platform to share machine learning expertise packaged in reusable resources, notably pre-trained **modules**. This tutorial is organized into two main parts.
** *Introduction:* Training a text classifier with TF-Hub**
We will use a TF-Hub text embedding module to train a simple sentiment classifier with a reasonable baseline accuracy. We will then analyze the predictions to make sure our model is reasonable and propose improvements to increase the accuracy.
** *Advanced:* Transfer learning analysis **
In this section, we will use various TF-Hub modules to compare their effect on the accuracy of the estimator and demonstrate advantages and pitfalls of transfer learning.
## Optional prerequisites
* Basic understanding of Tensorflow [premade estimator framework](https://www.tensorflow.org/get_started/premade_estimators).
* Familiarity with [Pandas](https://pandas.pydata.org/) library.
## Setup
```
# Install seaborn (used below for the confusion-matrix heatmap).
!pip install seaborn
```
More detailed information about installing Tensorflow can be found at [https://www.tensorflow.org/install/](https://www.tensorflow.org/install/).
```
from absl import logging
import tensorflow as tf
import tensorflow_hub as hub
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import seaborn as sns
```
# Getting started
## Data
We will try to solve the [Large Movie Review Dataset v1.0](http://ai.stanford.edu/~amaas/data/sentiment/) task [(Maas et al., 2011)](http://ai.stanford.edu/~amaas/papers/wvSent_acl2011.pdf). The dataset consists of IMDB movie reviews labeled by positivity from 1 to 10. The task is to label the reviews as **negative** or **positive**.
```
# Load all files from a directory in a DataFrame.
def load_directory_data(directory):
  """Read every review file in *directory* into a DataFrame.

  Each file's text becomes one "sentence" row; the sentiment score is parsed
  from the file name, which follows the "<id>_<score>.txt" pattern.
  """
  data = {}
  data["sentence"] = []
  data["sentiment"] = []
  # Raw string avoids the invalid-escape-sequence warning for "\d";
  # compiling once hoists the pattern out of the per-file loop.
  pattern = re.compile(r"\d+_(\d+)\.txt")
  for file_path in os.listdir(directory):
    with tf.io.gfile.GFile(os.path.join(directory, file_path), "r") as f:
      data["sentence"].append(f.read())
      data["sentiment"].append(pattern.match(file_path).group(1))
  return pd.DataFrame.from_dict(data)
# Merge positive and negative examples, add a polarity column and shuffle.
def load_dataset(directory):
  """Combine pos/neg reviews under *directory*, label polarity 1/0, shuffle rows."""
  positive = load_directory_data(os.path.join(directory, "pos"))
  negative = load_directory_data(os.path.join(directory, "neg"))
  positive["polarity"] = 1
  negative["polarity"] = 0
  combined = pd.concat([positive, negative])
  return combined.sample(frac=1).reset_index(drop=True)
# Download and process the dataset files.
def download_and_load_datasets(force_download=False):
  """Fetch the aclImdb archive (cached by Keras) and return (train_df, test_df)."""
  archive = tf.keras.utils.get_file(
      fname="aclImdb.tar.gz",
      origin="http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz",
      extract=True)
  root = os.path.join(os.path.dirname(archive), "aclImdb")
  train_df = load_dataset(os.path.join(root, "train"))
  test_df = load_dataset(os.path.join(root, "test"))
  return train_df, test_df
# Reduce logging output.
logging.set_verbosity(logging.ERROR)

# Download (or reuse cached) IMDB data and preview the training frame.
train_df, test_df = download_and_load_datasets()
train_df.head()
```
## Model
### Input functions
[Estimator framework](https://www.tensorflow.org/get_started/premade_estimators#overview_of_programming_with_estimators) provides [input functions](https://www.tensorflow.org/api_docs/python/tf/estimator/inputs/pandas_input_fn) that wrap Pandas dataframes.
```
# Training input on the whole training set with no limit on training epochs.
train_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
    train_df, train_df["polarity"], num_epochs=None, shuffle=True)

# Prediction on the whole training set; shuffle=False keeps row order.
predict_train_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
    train_df, train_df["polarity"], shuffle=False)

# Prediction on the test set; shuffle=False keeps row order.
predict_test_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
    test_df, test_df["polarity"], shuffle=False)
```
### Feature columns
TF-Hub provides a [feature column](https://www.tensorflow.org/hub/api_docs/python/hub/text_embedding_column.md) that applies a module on the given text feature and passes further the outputs of the module. In this tutorial we will be using the [nnlm-en-dim128 module](https://tfhub.dev/google/nnlm-en-dim128/1). For the purpose of this tutorial, the most important facts are:
* The module takes **a batch of sentences in a 1-D tensor of strings** as input.
* The module is responsible for **preprocessing of sentences** (e.g. removal of punctuation and splitting on spaces).
* The module works with any input (e.g. **nnlm-en-dim128** hashes words not present in vocabulary into ~20.000 buckets).
```
# TF-Hub feature column: embeds the "sentence" string feature with nnlm-en-dim128.
embedded_text_feature_column = hub.text_embedding_column(
    key="sentence",
    module_spec="https://tfhub.dev/google/nnlm-en-dim128/1")
```
### Estimator
For classification we can use a [DNN Classifier](https://www.tensorflow.org/api_docs/python/tf/estimator/DNNClassifier) (note further remarks about different modelling of the label function at the end of the tutorial).
```
# DNN classifier on top of the TF-Hub text-embedding column.
# `learning_rate` replaces the deprecated `lr` alias, matching the usage in
# train_and_evaluate_with_module later in this notebook.
estimator = tf.estimator.DNNClassifier(
    hidden_units=[500, 100],
    feature_columns=[embedded_text_feature_column],
    n_classes=2,
    optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.003))
```
### Training
Train the estimator for a reasonable amount of steps.
```
# Training for 5,000 steps means 640,000 training examples with the default
# batch size. This is roughly equivalent to 25 epochs since the training dataset
# contains 25,000 examples.
# The trailing ";" suppresses the return-value echo in the notebook.
estimator.train(input_fn=train_input_fn, steps=5000);
```
# Prediction
Run predictions for both training and test set.
```
# Evaluate on both splits; each result dict exposes an "accuracy" key.
train_eval_result = estimator.evaluate(input_fn=predict_train_input_fn)
test_eval_result = estimator.evaluate(input_fn=predict_test_input_fn)

print("Training set accuracy: {accuracy}".format(**train_eval_result))
print("Test set accuracy: {accuracy}".format(**test_eval_result))
```
## Confusion matrix
We can visually check the confusion matrix to understand the distribution of misclassifications.
```
def get_predictions(estimator, input_fn):
  """Return the predicted class id for every example yielded by *input_fn*."""
  predictions = estimator.predict(input_fn=input_fn)
  return [prediction["class_ids"][0] for prediction in predictions]
LABELS = [
    "negative", "positive"
]
# Create a confusion matrix on training data.
cm = tf.math.confusion_matrix(train_df["polarity"],
                              get_predictions(estimator, predict_train_input_fn))

# Normalize the confusion matrix so that each row sums to 1.
cm = tf.cast(cm, dtype=tf.float32)
cm = cm / tf.math.reduce_sum(cm, axis=1)[:, np.newaxis]

# Heatmap: rows are true labels, columns are predicted labels.
sns.heatmap(cm, annot=True, xticklabels=LABELS, yticklabels=LABELS);
plt.xlabel("Predicted");
plt.ylabel("True");
```
# Further improvements
1. **Regression on sentiment**: we used a classifier to assign each example into a polarity class. But we actually have another categorical feature at our disposal - sentiment. Here classes actually represent a scale and the underlying value (positive/negative) could be well mapped into a continuous range. We could make use of this property by computing a regression ([DNN Regressor](https://www.tensorflow.org/api_docs/python/tf/contrib/learn/DNNRegressor)) instead of a classification ([DNN Classifier](https://www.tensorflow.org/api_docs/python/tf/contrib/learn/DNNClassifier)).
2. **Larger module**: for the purposes of this tutorial we used a small module to restrict the memory use. There are modules with larger vocabularies and larger embedding space that could give additional accuracy points.
3. **Parameter tuning**: we can improve the accuracy by tuning the meta-parameters like the learning rate or the number of steps, especially if we use a different module. A validation set is very important if we want to get any reasonable results, because it is very easy to set-up a model that learns to predict the training data without generalizing well to the test set.
4. **More complex model**: we used a module that computes a sentence embedding by embedding each individual word and then combining them with average. One could also use a sequential module (e.g. [Universal Sentence Encoder](https://tfhub.dev/google/universal-sentence-encoder/2) module) to better capture the nature of sentences. Or an ensemble of two or more TF-Hub modules.
5. **Regularization**: to prevent overfitting, we could try to use an optimizer that does some sort of regularization, for example [Proximal Adagrad Optimizer](https://www.tensorflow.org/api_docs/python/tf/train/ProximalAdagradOptimizer).
# Advanced: Transfer learning analysis
Transfer learning makes it possible to **save training resources** and to achieve good model generalization even when **training on a small dataset**. In this part, we will demonstrate this by training with two different TF-Hub modules:
* **[nnlm-en-dim128](https://tfhub.dev/google/nnlm-en-dim128/1)** - pretrained text embedding module,
* **[random-nnlm-en-dim128](https://tfhub.dev/google/random-nnlm-en-dim128/1)** - text embedding module that has same vocabulary and network as **nnlm-en-dim128**, but the weights were just randomly initialized and never trained on real data.
And by training in two modes:
* training **only the classifier** (i.e. freezing the module), and
* training the **classifier together with the module**.
Let's run a couple of trainings and evaluations to see how using a various modules can affect the accuracy.
```
def train_and_evaluate_with_module(hub_module, train_module=False):
  """Train a DNN classifier on top of *hub_module* and report both accuracies.

  When train_module is True the embedding weights are fine-tuned together with
  the classifier; otherwise the module stays frozen.
  """
  text_column = hub.text_embedding_column(
      key="sentence", module_spec=hub_module, trainable=train_module)

  classifier = tf.estimator.DNNClassifier(
      hidden_units=[500, 100],
      feature_columns=[text_column],
      n_classes=2,
      optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.003))
  classifier.train(input_fn=train_input_fn, steps=1000)

  train_eval = classifier.evaluate(input_fn=predict_train_input_fn)
  test_eval = classifier.evaluate(input_fn=predict_test_input_fn)
  return {
      "Training accuracy": train_eval["accuracy"],
      "Test accuracy": test_eval["accuracy"],
  }
# Train/evaluate the four configurations: {pretrained, random} x {frozen, fine-tuned}.
results = {}
results["nnlm-en-dim128"] = train_and_evaluate_with_module(
    "https://tfhub.dev/google/nnlm-en-dim128/1")
results["nnlm-en-dim128-with-module-training"] = train_and_evaluate_with_module(
    "https://tfhub.dev/google/nnlm-en-dim128/1", True)
results["random-nnlm-en-dim128"] = train_and_evaluate_with_module(
    "https://tfhub.dev/google/random-nnlm-en-dim128/1")
results["random-nnlm-en-dim128-with-module-training"] = train_and_evaluate_with_module(
    "https://tfhub.dev/google/random-nnlm-en-dim128/1", True)
```
Let's look at the results.
```
# Show per-module accuracies as a table (one row per configuration).
pd.DataFrame.from_dict(results, orient="index")
```
We can already see some patterns, but first we should establish the baseline accuracy of the test set - the lower bound that can be achieved by outputting only the label of the most represented class:
```
# Baseline: the accuracy obtained by always predicting the majority class.
estimator.evaluate(input_fn=predict_test_input_fn)["accuracy_baseline"]
```
Assigning the most represented class will give us accuracy of **50%**. There are a couple of things to notice here:
1. Maybe surprisingly, **a model can still be learned on top of fixed, random embeddings**. The reason is that even if every word in the dictionary is mapped to a random vector, the estimator can separate the space purely using its fully connected layers.
2. Allowing training of the module with **random embeddings** increases both training and test accuracy as opposed to training just the classifier.
3. Training of the module with **pre-trained embeddings** also increases both accuracies. Note however the overfitting on the training set. Training a pre-trained module can be dangerous even with regularization in the sense that the embedding weights no longer represent the language model trained on diverse data, instead they converge to the ideal representation of the new dataset.
| github_jupyter |
```
import tkinter
import tkinter as tk
from tkinter import *
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image,ImageTk
# class box:
# def __init__(self):
# root = tkinter.Tk()
# root.wm_title("Training")
# fig = Figure(figsize=(8, 8), dpi=100)
# canvas = FigureCanvasTkAgg(fig, master=root)
# # fig = figg(fig,canvas)# A tk.DrawingArea.
# canvas.draw()
# canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
# toolbar = NavigationToolbar2Tk(canvas, root)
# toolbar.update()
# canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
# def on_key_press(event):
# print("you pressed {}".format(event.key))
# key_press_handler(event, canvas, toolbar)
# canvas.mpl_connect("key_press_event", on_key_press)
# trainbutton = tkinter.Button(master=root, text="Train")
# quitbutton = tkinter.Button(master=root, text="FORCE QUIT")
# compilebutton = tkinter.Button(master=root, text="compile")
# predictbutton = tkinter.Button(master=root, text="predict")
# # button = tkinter.Button(master=root, text="Train")
# trainbutton.bind("<Button-1>",train)
# quitbutton.bind("<Button-1>",quit)
# trainbutton.pack(side=tkinter.BOTTOM)
# quitbutton.pack(side=tkinter.BOTTOM)
# compilebutton.pack(side=tkinter.BOTTOM)
# predictbutton.pack(side=tkinter.BOTTOM)
# tkinter.mainloop()
# def quit(event):
# root.quit() # stops mainloop
# root.destroy() # this is necessary on Windows to prevent
# # Fatal Python Error: PyEval_RestoreThread: NULL tstate
# def train(event):
# ax1 = fig.add_subplot(2,1,1)
# ax2 =fig.add_subplot(2,1,2)
# lossx = []
# acc = []
# for i in range(1,100):
# loss = np.random.normal(0,1)
# bacc = np.random.normal(0,1)
# y = np.arange(i)
# lossx.append(loss)
# acc.append(bacc)
# ax1.plot(y, lossx)
# ax1.set(xlabel='step no', ylabel='loss')
# ax2.plot(y,acc)
# ax2.set(xlabel='step no', ylabel='accuracy')
# canvas.draw()
# class Train:
# def __init__(self,parent,b):
# self.parent = parent
# self.parent.geometry("600x1000")
# self.parent.title("TRAIN")
# b1,b2,b3,b4 = b
# self.frame0 = Frame(self.parent)
# self.fig = Figure(figsize=(8, 8), dpi=100)
# self.canvas = FigureCanvasTkAgg(self.fig, master=self.frame0)
# self.canvas.draw()
# self.canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
# self.toolbar = NavigationToolbar2Tk(self.canvas, self.frame0)
# self.toolbar.update()
# self.canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
# self.canvas.mpl_connect("key_press_event", self.on_key_press)
# self.frame0.place(relwidth = 1 ,relheight = 0.85,rely = 0)
# self.frame1 = Frame(self.parent,bg = "#423B39")
# self.frame1.place(relwidth = 1 ,relheight = 0.15,rely = 0.85)
# self.trainbutton = tkinter.Button(master=self.frame1, text="Train",image = b1)
# self.compilebutton = tkinter.Button(master=self.frame1, text="compile" , image = b4)
# self.predictbutton = tkinter.Button(master=self.frame1, text="predict" ,image = b2)
# self.quitbutton = tkinter.Button(master=self.frame1, text="FORCE QUIT" ,image = b3)
# # button = tkinter.Button(master=self.frame1, text="Train")
# self.trainbutton.bind("<Button-1>",self.train)
# self.quitbutton.bind("<Button-1>",self.quit)
# self.trainbutton.place(width = 100 ,height = 100,rely = 0.1 ,relx = 0.1)
# self.compilebutton.place(width = 100 ,height = 100,rely = 0.1 , relx = 0.3)
# self.predictbutton.place(width = 100 ,height = 100,rely = 0.1 , relx = 0.5)
# self.quitbutton.place(width = 100 ,height = 100,rely = 0.1 ,relx = 0.7)
# def getbuttons(self):
# return b1,b2,b3,b4
# def on_key_press(self,event):
# print("you pressed {}".format(event.key))
# key_press_handler(event, self.canvas, self.toolbar)
# def quit(self,event):
# self.parent.quit() # stops mainloop
# self.parent.destroy() # this is necessary on Windows to prevent
# # Fatal Python Error: PyEval_RestoreThread: NULL tstate
# def train(self,event):
# ax1 = self.fig.add_subplot(2,1,1)
# ax2 =self.fig.add_subplot(2,1,2)
# lossx = []
# acc = []
# for i in range(1,100):
# loss = np.random.normal(0,1)
# bacc = np.random.normal(0,1)
# y = np.arange(i)
# lossx.append(loss)
# acc.append(bacc)
# ax1.plot(y, lossx)
# ax1.set(xlabel='step no', ylabel='loss')
# ax2.plot(y,acc)
# ax2.set(xlabel='step no', ylabel='accuracy')
# self.canvas.draw()
root = tk.Tk()
# Button icons; note the absolute, machine-specific paths -- these fail on any
# other machine.  TODO: make the asset directory configurable.
b1 = ImageTk.PhotoImage(Image.open("/home/crazy/UI/buttons/trainingbutton.png"))
b2 = ImageTk.PhotoImage(Image.open("/home/crazy/UI/buttons/predbutton.png"))
b3 = ImageTk.PhotoImage(Image.open("/home/crazy/UI/buttons/exitbutton.png"))
b4 = ImageTk.PhotoImage(Image.open("/home/crazy/UI/buttons/compilebutton1.png"))
# NOTE(review): the Train class above is entirely commented out, so this line
# raises NameError unless Train is defined elsewhere -- uncomment the class
# (or import it) before running this cell.
obj = Train(root,[b1,b2,b3,b4])
root.mainloop()
```
| github_jupyter |
# Graph Neural Network (GCN)-based Synthetic Binding Logic Classification with Graph-SafeML
The existing example of GCN-based Synthetic Binding Logic Classification from the Google research team is used to test the idea of SafeML for graph-based classifiers. You can find the source code [here](https://github.com/google-research/graph-attribution) and the related paper for the code is available [here](https://papers.nips.cc/paper/2020/file/417fbbf2e9d5a28a855a11894b2e795a-Paper.pdf) [[1]](https://papers.nips.cc/paper/2020/file/417fbbf2e9d5a28a855a11894b2e795a-Paper.pdf).
Regarding the graph-based distance measure, the theory of "Graph distance for complex networks" provided by Yutaka Shimada et al. is used [[2]](https://www.nature.com/articles/srep34944). The code related to this paper is available [here](https://github.com/msarrias/graph-distance-for-complex-networks).
You can read more about the idea of SafeML in [[3]](https://github.com/ISorokos/SafeML). To read more about "Synthetic Binding Logic Classification" and the related dataset that is used in this notebook, please check [[4]](https://www.pnas.org/content/pnas/116/24/11624.full.pdf).

The SafeML project takes place at the University of Hull in collaboration with Fraunhofer IESE and Nuremberg Institute of Technology
## Table of Content
* [Initialization and Installations](#init)
* [Importing Required Libraries](#lib)
* [Graph Attribution Specific Imports](#glib)
* [Load Experiment Data, Task and Attribution Techniques](#load)
* [Creating a GNN Model](#model)
* [Graph Visualization](#gviz)
* [Graph Distance Measures and SafeML Idea](#SafeML)
* [Discussion](#dis)
### References:
[[1]. Wiltschko, A. B., Sanchez-Lengeling, B., Lee, B., Reif, E., Wei, J., McCloskey, K. J., & Wang, Y. (2020). Evaluating Attribution for Graph Neural Networks.](https://papers.nips.cc/paper/2020/file/417fbbf2e9d5a28a855a11894b2e795a-Paper.pdf)
[[2]. Shimada, Y., Hirata, Y., Ikeguchi, T., & Aihara, K. (2016). Graph distance for complex networks. Scientific reports, 6(1), 1-6.](https://www.nature.com/articles/srep34944)
[[3]. Aslansefat, K., Sorokos, I., Whiting, D., Kolagari, R. T., & Papadopoulos, Y. (2020, September). SafeML: Safety Monitoring of Machine Learning Classifiers Through Statistical Difference Measures. In International Symposium on Model-Based Safety and Assessment (pp. 197-211). Springer, Cham.](https://arxiv.org/pdf/2005.13166.pdf)
[[4]. McCloskey, K., Taly, A., Monti, F., Brenner, M. P., & Colwell, L. J. (2019). Using attribution to decode binding mechanism in neural network models for chemistry. Proceedings of the National Academy of Sciences, 116(24), 11624-11629.](https://www.pnas.org/content/pnas/116/24/11624.full.pdf)
<a id = "init"></a>
## Initialization and Installations
```
import warnings
warnings.filterwarnings('ignore')
%load_ext autoreload
%autoreload 2
import sys
sys.path.append('..')
import sys
IN_COLAB = 'google.colab' in sys.modules
REPO_DIR = '..' if IN_COLAB else '..'
!git clone https://github.com/google-research/graph-attribution.git --quiet
import sys
sys.path.insert(1, '/kaggle/working/graph-attribution')
!pip install tensorflow tensorflow-probability -q
!pip install dm-sonnet -q
!pip install graph_nets "tensorflow>=2.1.0-rc1" "dm-sonnet>=2.0.0b0" tensorflow_probability
!pip install git+https://github.com/google-research/graph-attribution -quiet
!pip install git+https://github.com/google-research/graph-attribution
```
<a id = "lib"></a>
## Importing Required Libraries
```
import os
import itertools
import collections
import tqdm.auto as tqdm
from IPython.display import display
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import tensorflow as tf
import sonnet as snt
import graph_nets
from graph_nets.graphs import GraphsTuple
import graph_attribution as gatt
from tqdm import tqdm
import time
import networkx as nx
# Ignore tf/graph_nets UserWarning:
# Converting sparse IndexedSlices to a dense Tensor of unknown shape
import warnings
warnings.simplefilter("ignore", UserWarning)
# Print versions of the key frameworks for reproducibility.
for mod in [tf, snt, gatt]:
    print(f'{mod.__name__:20s} = {mod.__version__}')
```
<a id = "glib"></a>
## Graph Attribution specific imports
```
from graph_attribution import tasks
from graph_attribution import graphnet_models as gnn_models
from graph_attribution import graphnet_techniques as techniques
from graph_attribution import datasets
from graph_attribution import experiments
from graph_attribution import templates
from graph_attribution import graphs as graph_utils
#datasets.DATA_DIR = os.path.join(REPO_DIR, 'data')
#print(f'Reading data from: {datasets.DATA_DIR}')
# Point the library at the data shipped inside the cloned repository
# (hard-coded relative path; assumes the clone sits next to this notebook).
datasets.DATA_DIR = './graph-attribution/data'
```
<a id = "load"></a>
# Load Experiment Data, Task and Attribution Techniques
```
# List everything the library offers, then pick the task/model used below.
print(f'Available tasks: {[t.name for t in tasks.Task]}')
print(f'Available model types: {[m.name for m in gnn_models.BlockType]}')
print(f'Available ATT techniques: {list(techniques.get_techniques_dict(None,None).keys())}')
task_type = 'logic7'
block_type = 'gcn'
#task_dir = datasets.get_task_dir(task_type)
# Hard-coded path into the cloned repo for the 'logic7' task data.
task_dir = './graph-attribution/data/logic7'
# Experiment data, task definition and the attribution methods to evaluate.
exp, task, methods = experiments.get_experiment_setup(task_type, block_type)
# Task-specific output activation and loss function.
task_act, task_loss = task.get_nn_activation_fn(), task.get_nn_loss_fn()
graph_utils.print_graphs_tuple(exp.x_train)
print(f'Experiment data fields:{list(exp.__dict__.keys())}')
```
<a id = "model"></a>
## Creating a GNN Model
### Defining Hyperparams of the Experiment
```
# Default hyperparameters for this (block_type, task_type) combination.
hp = gatt.hparams.get_hparams({'block_type':block_type, 'task_type':task_type})
hp
```
### Instantiate model
```
# Build the GNN; sizes and layer count come from the hyperparameters, the
# output activation/target type from the task definition.
model = experiments.GNN(node_size = hp.node_size,
                        edge_size = hp.edge_size,
                        global_size = hp.global_size,
                        y_output_size = task.n_outputs,
                        block_type = gnn_models.BlockType(hp.block_type),
                        activation = task_act,
                        target_type = task.target_type,
                        n_layers = hp.n_layers)
# One forward pass builds the Sonnet module's variables before printing.
model(exp.x_train)
gnn_models.print_model(model)
```
<a id ="train"></a>
## Training the GNN Model
```
# Optimiser and a compiled one-epoch training step over minibatches.
optimizer = snt.optimizers.Adam(hp.learning_rate)
opt_one_epoch = gatt.training.make_tf_opt_epoch_fn(exp.x_train, exp.y_train, hp.batch_size, model,
                                                   optimizer, task_loss)
pbar = tqdm(range(hp.epochs))
losses = collections.defaultdict(list)
start_time = time.time()
# Track train and test loss after every epoch.
for _ in pbar:
    train_loss = opt_one_epoch(exp.x_train, exp.y_train).numpy()
    losses['train'].append(train_loss)
    losses['test'].append(task_loss(exp.y_test, model(exp.x_test)).numpy())
    #pbar.set_postfix({key: values[-1] for key, values in losses.items()})
losses = {key: np.array(values) for key, values in losses.items()}
# Plot losses
for key, values in losses.items():
    plt.plot(values, label=key)
plt.ylabel('loss')
plt.xlabel('epochs')
plt.legend()
plt.show()
# Threshold raw model outputs at 0.5 to get hard 0/1 predictions
# (assumes the outputs are probabilities -- confirm via task_act).
y_pred = model(exp.x_test).numpy()
y_pred[y_pred > 0.5] = 1
y_pred[y_pred <= 0.5] = 0
#y_pred
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
print(accuracy_score(exp.y_test, y_pred))
print(confusion_matrix(exp.y_test, y_pred))
print(classification_report(exp.y_test, y_pred))
# Evaluate predictions and attributions
results = []
for method in tqdm(methods.values(), total=len(methods)):
    results.append(experiments.generate_result(model, method, task, exp.x_test, exp.y_test, exp.att_test))
pd.DataFrame(results)
```
<a id = "gviz"></a>
## Graph Visualization
```
# Source: https://notebook.community/deepmind/graph_nets/graph_nets/demos/graph_nets_basics
# Convert the GraphsTuple test set into a list of networkx graphs.
graphs_nx = graph_nets.utils_np.graphs_tuple_to_networkxs(exp.x_test)
def nx_g_plotter(graphs_nx, ColNum=8, node_clr='#ff8080'):
    """Draw up to ColNum graphs side by side in one row of axes."""
    _, axes_row = plt.subplots(ncols=ColNum, nrows=1, figsize=(30, 5))
    for idx, (graph, axis) in enumerate(zip(graphs_nx, axes_row)):
        nx.draw(graph, ax=axis, node_color=node_clr)
        axis.set_title(f"Graph {idx}")
# Partition the test graphs by their true label (1 vs 0) and plot each group.
graphs_nx_1 = []
graphs_nx_0 = []
for ii, g_net_ii in enumerate(graphs_nx):
    if exp.y_test[ii] == 1:
        graphs_nx_1.append(g_net_ii)
    else:
        graphs_nx_0.append(g_net_ii)
nx_g_plotter(graphs_nx_1, ColNum=8, node_clr='#ff8080')
nx_g_plotter(graphs_nx_0, ColNum=8, node_clr='#00bfff')
# Split the test graphs by (prediction correctness, true label), keeping the
# raw (un-thresholded) model output of each graph as a confidence score.
graphs_nx_wrong0 = []
graphs_nx_wrong1 = []
graphs_nx_correct0 = []
graphs_nx_correct1 = []
# Raw model outputs, before the 0.5 thresholding applied to y_pred.
y_pred2 = model(exp.x_test).numpy()
y_wrong0 = []
y_wrong1 = []
y_correct0 = []
y_correct1 = []
for ii, g_net_ii in enumerate(graphs_nx):
    if exp.y_test[ii] != y_pred[ii] and exp.y_test[ii] == 0:
        graphs_nx_wrong0.append(g_net_ii)
        y_wrong0.append(y_pred2[ii])
    elif exp.y_test[ii] != y_pred[ii] and exp.y_test[ii] == 1:
        graphs_nx_wrong1.append(g_net_ii)
        y_wrong1.append(y_pred2[ii])
    elif exp.y_test[ii] == y_pred[ii] and exp.y_test[ii] == 0:
        graphs_nx_correct0.append(g_net_ii)
        y_correct0.append(y_pred2[ii])
    elif exp.y_test[ii] == y_pred[ii] and exp.y_test[ii] == 1:
        graphs_nx_correct1.append(g_net_ii)
        y_correct1.append(y_pred2[ii])
# Fixed: this expression originally appeared *before* y_wrong1 was defined
# (NameError); moved after the loop and guarded against short lists.
if len(y_wrong1) >= 2:
    print(y_wrong1[1] - y_wrong1[0])
nx_g_plotter(graphs_nx_wrong0, ColNum=8, node_clr='#ff8080')
nx_g_plotter(graphs_nx_wrong1, ColNum=8, node_clr='#00bfff')
nx_g_plotter(graphs_nx_correct0, ColNum=8, node_clr='#00e600')
nx_g_plotter(graphs_nx_correct1, ColNum=8, node_clr='#e600ac')
y_yes = exp.y_test[exp.y_test == 1]
y_no = exp.y_test[exp.y_test != 1]
y_yes.shape, y_no.shape
recovered_data_dict_list = graph_nets.utils_np.graphs_tuple_to_data_dicts(exp.x_test)
graphs_tuple_1 = graph_nets.utils_np.data_dicts_to_graphs_tuple(recovered_data_dict_list)
```
<a id = "SafeML"></a>
## Graph Distance Measures and SafeML Idea
```
!git clone https://github.com/msarrias/graph-distance-for-complex-networks --quiet
import sys
sys.path.insert(1, '/kaggle/working/graph-distance-for-complex-networks')
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.ticker import MultipleLocator
import scipy.linalg as la
import networkx as nx
import random, time, math
from collections import Counter
import fun as f
from Graph import Graph
from Watts_Strogatz import watts_strogatz_graph
from Erdos_Renyi import erdos_renyi_graph
def Wasserstein_Dist(cdfX, cdfY):
    """Approximate the 1-Wasserstein distance between two CDFs sampled on a grid.

    cdfX, cdfY -- sequences of CDF values evaluated at the same grid points.
    Returns the sum over grid intervals of |cdfX - cdfY| * width.

    NOTE(review): the interval width is taken from the CDF values themselves
    (cdfX[ii+1] - cdfX[ii]) rather than from the x-grid spacing -- presumably
    intentional in the original SafeML code, but worth confirming.
    """
    Res = 0
    power = 1
    n = len(cdfX)
    # Fixed off-by-one: with n samples there are n-1 intervals, so iterate to
    # n-1 (the original range(0, n-2) silently dropped the last interval).
    for ii in range(0, n - 1):
        height = abs(cdfX[ii] - cdfY[ii])
        width = cdfX[ii + 1] - cdfX[ii]
        Res = Res + (height ** power) * width
    return Res
def r_eigenv(G_i, G_j):
    """Spectral distance between two graphs via Laplacian eigenvectors.

    Eigen-decomposes each graph's Laplacian (D - A), takes the r-th (r = 4)
    eigenvector, normalises it under both sign conventions (eigenvector signs
    are arbitrary) and compares the resulting empirical CDFs.  Returns a tuple
    (max CDF distance, max Wasserstein distance) over the four sign pairings.
    Follows Shimada et al., "Graph distance for complex networks" (2016);
    helper functions come from the cloned repo's `fun` module (imported as f).
    """
    # Eigen-decomposition of G_i (original comment mistakenly said G_j).
    A_Gi = (nx.adjacency_matrix(G_i)).todense()
    D_i = np.diag(np.asarray(sum(A_Gi))[0])
    eigenvalues_Gi, eigenvectors_Gi = la.eig(D_i - A_Gi)
    # Pair eigenvalues with eigenvectors, sorted by ascending eigenvalue.
    r_eigenv_Gi = sorted(zip(eigenvalues_Gi.real, eigenvectors_Gi.T), key=lambda x: x[0])
    #Eigen-decomposition of G_j
    A_Gj = (nx.adjacency_matrix(G_j)).todense()
    D_j = np.diag(np.asarray(sum(A_Gj))[0])
    eigenvalues_Gj, eigenvectors_Gj = la.eig(D_j - A_Gj)
    r_eigenv_Gj = sorted(zip(eigenvalues_Gj.real, eigenvectors_Gj.T), key=lambda x: x[0])
    r = 4
    signs =[-1,1]
    # CDF distance for all four +/- sign combinations of the two eigenvectors.
    temp = []
    for sign_s in signs:
        for sign_l in signs:
            vri = sorted(f.normalize_eigenv(sign_s * r_eigenv_Gi[r][1]))
            vrj = sorted(f.normalize_eigenv(sign_l * r_eigenv_Gj[r][1]))
            cdf_dist = f.cdf_dist(vri, vrj)
            temp.append(cdf_dist)
    #Compute empirical CDF
    step = 0.005
    x=np.arange(0, 1, step)
    cdf_grid_Gip = f.cdf(len(r_eigenv_Gi[r][1]),x,
                         f.normalize_eigenv(sorted(r_eigenv_Gi[r][1], key=lambda x: x)))
    cdf_grid_Gin = f.cdf(len(r_eigenv_Gi[r][1]),x,
                         f.normalize_eigenv(sorted(-r_eigenv_Gi[r][1], key=lambda x: x)))
    cdf_grid_Gjp = f.cdf(len(r_eigenv_Gj[r][1]),x,
                         f.normalize_eigenv(sorted(r_eigenv_Gj[r][1], key=lambda x: x)))
    cdf_grid_Gjn = f.cdf(len(r_eigenv_Gj[r][1]),x,
                         f.normalize_eigenv(sorted(-r_eigenv_Gj[r][1], key=lambda x: x)))
    # Wasserstein distance for all four +/- sign pairings of the grid CDFs.
    WD1 = Wasserstein_Dist(cdf_grid_Gip, cdf_grid_Gjp)
    WD2 = Wasserstein_Dist(cdf_grid_Gip, cdf_grid_Gjn)
    WD3 = Wasserstein_Dist(cdf_grid_Gin, cdf_grid_Gjp)
    WD4 = Wasserstein_Dist(cdf_grid_Gin, cdf_grid_Gjn)
    WD = [WD1, WD2, WD3, WD4]
    return max(temp), max(WD)
# Pairwise distances between every wrongly-classified positive graph and every
# correctly-classified positive graph, plus the confidence gap between the
# corresponding raw predictions.
distt_wrong1_correct1 = np.zeros((len(graphs_nx_wrong1),len(graphs_nx_correct1)))
WDist_wrong1_correct1 = np.zeros((len(graphs_nx_wrong1),len(graphs_nx_correct1)))
Conf_W1_C1 = np.zeros((len(graphs_nx_wrong1),len(graphs_nx_correct1)))
for ii, g_net_ii in enumerate(graphs_nx_wrong1):
    for jj, g_net_jj in enumerate(graphs_nx_correct1):
        distt_wrong1_correct1[ii,jj], WDist_wrong1_correct1[ii,jj] = r_eigenv(g_net_ii, g_net_jj)
        Conf_W1_C1[ii,jj] = y_correct1[jj] - y_wrong1[ii]
import seaborn as sns; sns.set_theme()
#ax = sns.heatmap(distt)
#ax = sns.displot(distt_wrong1_correct1.flatten())
# Scatter the Wasserstein distance against the confidence gap.
df = pd.DataFrame()
df['WDist_W1_C1'] = WDist_wrong1_correct1.flatten()
df['Conf_W1_C1'] = Conf_W1_C1.flatten()
sns.scatterplot(data=df, x="Conf_W1_C1", y="WDist_W1_C1")
# Split the *training* graphs by true label to act as reference distributions.
graphs_nx_train = graph_nets.utils_np.graphs_tuple_to_networkxs(exp.x_train)
graphs_nx_train_1 = []
graphs_nx_train_0 = []
for ii, g_net_ii in enumerate(graphs_nx_train):
    if exp.y_train[ii] == 1:
        graphs_nx_train_1.append(g_net_ii)
    else:
        graphs_nx_train_0.append(g_net_ii)
# Distances from wrongly-classified positives to train positives / negatives.
distt_wrong1_train1 = np.zeros((len(graphs_nx_wrong1),len(graphs_nx_train_1)))
WDist_wrong1_train1 = np.zeros((len(graphs_nx_wrong1),len(graphs_nx_train_1)))
for ii, g_net_ii in enumerate(graphs_nx_wrong1):
    for jj, g_net_jj in enumerate(graphs_nx_train_1):
        distt_wrong1_train1[ii,jj], WDist_wrong1_train1[ii,jj] = r_eigenv(g_net_ii, g_net_jj)
distt_wrong1_train0 = np.zeros((len(graphs_nx_wrong1),len(graphs_nx_train_0)))
WDist_wrong1_train0 = np.zeros((len(graphs_nx_wrong1),len(graphs_nx_train_0)))
for ii, g_net_ii in enumerate(graphs_nx_wrong1):
    for jj, g_net_jj in enumerate(graphs_nx_train_0):
        distt_wrong1_train0[ii,jj], WDist_wrong1_train0[ii,jj] = r_eigenv(g_net_ii, g_net_jj)
#ax = sns.displot(distt_wrong1_train1.flatten())
# KDE plots of the distance distributions for the wrong-positive group.
ax2 = sns.displot(WDist_wrong1_correct1.flatten(), kind = 'kde')
ax2 = sns.displot(WDist_wrong1_train1.flatten(), kind = 'kde')
ax2 = sns.displot(WDist_wrong1_train0.flatten(), kind = 'kde')
# Same analysis for the wrongly-classified negatives.
distt_wrong0_correct0 = np.zeros((len(graphs_nx_wrong0),len(graphs_nx_correct0)))
WDist_wrong0_correct0 = np.zeros((len(graphs_nx_wrong0),len(graphs_nx_correct0)))
for ii, g_net_ii in enumerate(graphs_nx_wrong0):
    for jj, g_net_jj in enumerate(graphs_nx_correct0):
        distt_wrong0_correct0[ii,jj], WDist_wrong0_correct0[ii,jj] = r_eigenv(g_net_ii, g_net_jj)
distt_wrong0_train0 = np.zeros((len(graphs_nx_wrong0),len(graphs_nx_train_0)))
WDist_wrong0_train0 = np.zeros((len(graphs_nx_wrong0),len(graphs_nx_train_0)))
for ii, g_net_ii in enumerate(graphs_nx_wrong0):
    for jj, g_net_jj in enumerate(graphs_nx_train_0):
        distt_wrong0_train0[ii,jj], WDist_wrong0_train0[ii,jj] = r_eigenv(g_net_ii, g_net_jj)
distt_wrong0_train1 = np.zeros((len(graphs_nx_wrong0),len(graphs_nx_train_1)))
WDist_wrong0_train1 = np.zeros((len(graphs_nx_wrong0),len(graphs_nx_train_1)))
for ii, g_net_ii in enumerate(graphs_nx_wrong0):
    for jj, g_net_jj in enumerate(graphs_nx_train_1):
        distt_wrong0_train1[ii,jj], WDist_wrong0_train1[ii,jj] = r_eigenv(g_net_ii, g_net_jj)
ax2 = sns.displot(WDist_wrong0_correct0.flatten(), kind = 'kde')
ax2 = sns.displot(WDist_wrong0_train0.flatten(), kind = 'kde')
ax2 = sns.displot(WDist_wrong0_train1.flatten(), kind = 'kde')
# Correct-negative reference distances.
distt_correct0_train0 = np.zeros((len(graphs_nx_correct0),len(graphs_nx_train_0)))
WDist_correct0_train0 = np.zeros((len(graphs_nx_correct0),len(graphs_nx_train_0)))
for ii, g_net_ii in enumerate(graphs_nx_correct0):
    for jj, g_net_jj in enumerate(graphs_nx_train_0):
        distt_correct0_train0[ii,jj], WDist_correct0_train0[ii,jj] = r_eigenv(g_net_ii, g_net_jj)
distt_correct0_train1 = np.zeros((len(graphs_nx_correct0),len(graphs_nx_train_1)))
WDist_correct0_train1 = np.zeros((len(graphs_nx_correct0),len(graphs_nx_train_1)))
for ii, g_net_ii in enumerate(graphs_nx_correct0):
    for jj, g_net_jj in enumerate(graphs_nx_train_1):
        distt_correct0_train1[ii,jj], WDist_correct0_train1[ii,jj] = r_eigenv(g_net_ii, g_net_jj)
# Dead code: disabled with `if 0:` (correct-positive reference distances);
# flip 0 to 1 to re-enable.
if 0:
    distt_correct1_train0 = np.zeros((len(graphs_nx_correct1),len(graphs_nx_train_0)))
    WDist_correct1_train0 = np.zeros((len(graphs_nx_correct1),len(graphs_nx_train_0)))
    for ii, g_net_ii in enumerate(graphs_nx_correct1):
        for jj, g_net_jj in enumerate(graphs_nx_train_0):
            distt_correct1_train0[ii,jj], WDist_correct1_train0[ii,jj] = r_eigenv(g_net_ii, g_net_jj)
    distt_correct1_train1 = np.zeros((len(graphs_nx_correct1),len(graphs_nx_train_1)))
    WDist_correct1_train1 = np.zeros((len(graphs_nx_correct1),len(graphs_nx_train_1)))
    for ii, g_net_ii in enumerate(graphs_nx_correct1):
        for jj, g_net_jj in enumerate(graphs_nx_train_1):
            distt_correct1_train1[ii,jj], WDist_correct1_train1[ii,jj] = r_eigenv(g_net_ii, g_net_jj)
def Wasserstein_Dist(XX, YY):
    """Empirical 1-Wasserstein distance between two samples XX and YY.

    Merges the samples, sorts the pooled values, accumulates both empirical
    CDFs along the sorted order and integrates |F - E| over the merged
    support.  (Redefines the grid-based Wasserstein_Dist above for the
    p-value computation that follows.)
    """
    import numpy as np
    nx = len(XX)
    ny = len(YY)
    n = nx + ny
    XY = np.concatenate([XX, YY])
    # Per-point CDF increments: 1/nx for points of XX, 1/ny for points of YY.
    X2 = np.concatenate([np.repeat(1/nx, nx), np.repeat(0, ny)])
    Y2 = np.concatenate([np.repeat(0, nx), np.repeat(1/ny, ny)])
    S_Ind = np.argsort(XY)
    XY_Sorted = XY[S_Ind]
    X2_Sorted = X2[S_Ind]
    Y2_Sorted = Y2[S_Ind]
    Res = 0
    E_CDF = 0
    F_CDF = 0
    power = 1
    # Fixed off-by-one: the merged sample has n-1 intervals, so iterate to
    # n-1 (the original range(0, n-2) dropped the last interval and could
    # under-estimate the distance).
    for ii in range(0, n - 1):
        E_CDF = E_CDF + X2_Sorted[ii]
        F_CDF = F_CDF + Y2_Sorted[ii]
        height = abs(F_CDF - E_CDF)
        width = XY_Sorted[ii + 1] - XY_Sorted[ii]
        Res = Res + (height ** power) * width
    return Res
def Wasserstein_Dist_PVal(XX, YY):
    """Resampling p-value for the Wasserstein distance between XX and YY.

    Pools the two samples, repeatedly draws index subsets of the original
    sizes without replacement, and counts how often the resampled distance
    exceeds the observed one.
    About the bootstrap: https://towardsdatascience.com/an-introduction-to-the-bootstrap-method-58bcb51b4d60

    NOTE(review): the two index draws 'e' and 'f' are sampled independently
    and may overlap; a classical permutation test would split one shuffled
    index set into two disjoint halves -- confirm which scheme is intended.
    """
    import random
    nboots = 1000
    WD = Wasserstein_Dist(XX, YY)
    na = len(XX)
    nb = len(YY)
    n = na + nb
    comb = np.concatenate([XX, YY])
    bigger = 0
    # Fixed off-by-one: range(1, nboots) ran only nboots-1 resamples while the
    # p-value is divided by nboots; run exactly nboots iterations.
    for ii in range(nboots):
        e = random.sample(range(n), na)
        f = random.sample(range(n), nb)
        boost_WD = Wasserstein_Dist(comb[e], comb[f])
        if boost_WD > WD:
            bigger = 1 + bigger
    pVal = bigger / nboots
    return pVal, WD
# Compare distance distributions: wrong-negative graphs against the two
# training classes, then wrong-positive graphs likewise; a small p-value
# indicates the two distance distributions differ significantly.
pVal, WD = Wasserstein_Dist_PVal(WDist_wrong0_train0.flatten(), WDist_wrong0_train1.flatten())
print(pVal, WD)
#pVal, WD = Wasserstein_Dist_PVal(WDist_correct0_train0.flatten(), WDist_correct0_train1.flatten())
#print(pVal, WD)
pVal, WD = Wasserstein_Dist_PVal(WDist_wrong1_train1.flatten(), WDist_wrong1_train0.flatten())
print(pVal, WD)
```
<a id = "dis"></a>
## Discussion
It seems that the current idea is not successful and we should investigate further. We can also consider a model-specific variant of SafeML.
| github_jupyter |
# Simple Line Plots
Perhaps the simplest of all plots is the visualization of a single function $y = f(x)$.
Here we will take a first look at creating a simple plot of this type.
As with all the following sections, we'll start by **setting up the notebook for plotting and importing the packages we will use:**
```
%matplotlib inline
import matplotlib.pyplot as plt
# NOTE(review): 'seaborn-whitegrid' was renamed 'seaborn-v0_8-whitegrid' in
# matplotlib >= 3.6 -- confirm against the installed version.
plt.style.use('seaborn-whitegrid')
import numpy as np
```
For all Matplotlib plots, we start by creating a figure and an axes.
In their simplest form, a figure and axes can be created as follows:
```
# An empty figure containing a single axes.
fig = plt.figure()
ax = plt.axes()
```
In Matplotlib, the *figure* (an instance of the class ``plt.Figure``) can be thought of as a single **container that contains all the objects representing axes, graphics, text, and labels.**
The *axes* (an instance of the class ``plt.Axes``) is what we see above: a bounding box with **ticks and labels, which will eventually contain the plot elements that make up our visualization.**
Throughout this book, we'll commonly use the variable name ``fig`` to refer to a figure instance, and ``ax`` to refer to an axes instance or group of axes instances.
Once we have created an axes, we can use the ``ax.plot`` function to plot some data. Let's start with a simple sinusoid:
```
fig = plt.figure()
ax = plt.axes()
# 1000 evenly spaced sample points give a visually smooth curve.
x = np.linspace(0,10,1000)
ax.plot(x, np.sin(x));
```
Alternatively, we can use the pylab interface and let the figure and axes be created for us in the background
(see [Two Interfaces for the Price of One](04.00-Introduction-To-Matplotlib.ipynb#Two-Interfaces-for-the-Price-of-One) for a discussion of these two interfaces):
```
# pylab-style interface: figure and axes are created implicitly.
plt.plot(x, np.sin(x))
```
If we want to create a single figure with multiple lines, we can simply call the ``plot`` function multiple times:
```
# Each plt.plot call adds another line to the current axes.
plt.plot(x, np.sin(x))
plt.plot(x, np.cos(x));
```
That's all there is to plotting simple functions in Matplotlib!
We'll now dive into some more details about how to control the appearance of the axes and lines.
## Adjusting the Plot: Line Colors and Styles
The first adjustment you might wish to make to a plot is to control the line colors and styles.
The ``plt.plot()`` function takes additional arguments that can be used to specify these.
To adjust the color, you can use the ``color`` keyword, which accepts a string argument representing virtually any imaginable color.
The color can be specified in a variety of ways:
```
# Six equivalent ways of specifying a line colour:
plt.plot(x, np.sin(x - 0), color='blue') # specify color by name
plt.plot(x, np.sin(x - 1), color='g') # short color code (rgbcmyk)
plt.plot(x, np.sin(x - 2), color='0.75') # Grayscale between 0 and 1
plt.plot(x, np.sin(x - 3), color='#FFDD44') # Hex code (RRGGBB from 00 to FF)
plt.plot(x, np.sin(x - 4), color=(1.0,0.2,0.3)) # RGB tuple, values 0 to 1
plt.plot(x, np.sin(x - 5), color='chartreuse'); # all HTML color names supported
```
If no color is specified, Matplotlib will automatically cycle through a set of default colors for multiple lines.
Similarly, the line style can be adjusted using the ``linestyle`` keyword:
```
# Named line styles (each line offset vertically so all stay visible):
plt.plot(x, x + 0, linestyle='solid')
plt.plot(x, x + 1, linestyle='dashed')
plt.plot(x, x + 2, linestyle='dashdot')
plt.plot(x, x + 3, linestyle='dotted');
# For short, you can use the following codes:
plt.plot(x, x + 4, linestyle='-') # solid
plt.plot(x, x + 5, linestyle='--') # dashed
plt.plot(x, x + 6, linestyle='-.') # dashdot
plt.plot(x, x + 7, linestyle=':'); # dotted
```
If you would like to be extremely terse, these ``linestyle`` and ``color`` codes can be combined into a single non-keyword argument to the ``plt.plot()`` function:
```
# Combined single-string 'style+color' shorthand:
plt.plot(x, x + 0, '-g') # solid green
plt.plot(x, x + 1, '--c') # dashed cyan
plt.plot(x, x + 2, '-.k') # dashdot black
plt.plot(x, x + 3, ':r'); # dotted red
```
These single-character color codes reflect the standard abbreviations in the RGB (Red/Green/Blue) and CMYK (Cyan/Magenta/Yellow/blacK) color systems, commonly used for digital color graphics.
There are many other keyword arguments that can be used to fine-tune the appearance of the plot; for more details, I'd suggest viewing the docstring of the ``plt.plot()`` function using IPython's help tools.
## Adjusting the Plot: Axes Limits
Matplotlib does a decent job of choosing default axes limits for your plot, but sometimes it's nice to have finer control.
The most basic way to adjust axis limits is to use the ``plt.xlim()`` and ``plt.ylim()`` methods:
```
plt.plot(x, np.sin(x))
# Explicit axis limits.
plt.xlim(-1,11)
plt.ylim(-1.5, 1.5)
```
If for some reason you'd like either axis to be displayed in reverse, you can simply reverse the order of the arguments:
```
plt.plot(x, np.sin(x))
# Passing limits in reverse order flips the axis direction.
plt.xlim(10,0)
plt.ylim(1.2, -1.2)
```
A useful related method is ``plt.axis()`` (note here the potential confusion between *axes* with an *e*, and *axis* with an *i*).
The ``plt.axis()`` method allows you to set the ``x`` and ``y`` limits with a single call, by passing a list which specifies ``[xmin, xmax, ymin, ymax]``:
```
plt.plot(x, np.sin(x))
# plt.axis sets [xmin, xmax, ymin, ymax] in a single call.
plt.axis([-1, 11, -1.5, 1.5]);
```
The ``plt.axis()`` method goes even beyond this, allowing you to do things like automatically tighten the bounds around the current plot:
```
plt.plot(x, np.sin(x))
# 'tight' shrinks the limits to exactly enclose the data.
plt.axis('tight')
```
It allows even higher-level specifications, such as ensuring an equal aspect ratio so that on your screen, one unit in ``x`` is equal to one unit in ``y``:
```
plt.plot(x, np.sin(x))
# 'equal' forces one x unit to equal one y unit on screen.
plt.axis('equal')
```
For more information on axis limits and the other capabilities of the ``plt.axis`` method, refer to the ``plt.axis`` docstring.
## Labeling Plots
As the last piece of this section, we'll briefly look at the labeling of plots: titles, axis labels, and simple legends.
Titles and axis labels are the simplest such labels—there are methods that can be used to quickly set them:
```
plt.plot(x, np.sin(x))
# Title and per-axis labels.
plt.title("A sin curve")
plt.xlabel("x")
plt.ylabel("sin(x)");
```
**The position, size, and style of these labels can be adjusted using optional arguments to the function.**
For more information, see the Matplotlib documentation and the docstrings of each of these functions.
When multiple lines are being shown within a single axes, it can be useful to create a **plot legend** that labels each line type.
Again, **Matplotlib has a built-in way of quickly creating such a legend.**
It is done via the (you guessed it) ``plt.legend()`` method.
Though there are several valid ways of using this, I find it easiest to specify the label of each line using the ``label`` keyword of the plot function:
```
# Label each line so plt.legend can build the legend automatically.
plt.plot(x, np.sin(x), '-g', label = 'sin(x)')
plt.plot(x, np.cos(x), ':b', label = 'cos(x)')
plt.axis('equal')
plt.grid(False)
plt.legend();
```
As you can see, the ``plt.legend()`` function keeps track of the line style and color, and matches these with the correct label.
More information on specifying and formatting plot legends can be found in the ``plt.legend`` docstring; additionally, we will cover some more advanced legend options in [Customizing Plot Legends](04.06-Customizing-Legends.ipynb).
## Aside: Matplotlib Gotchas
While most ``plt`` functions translate directly to ``ax`` methods (such as ``plt.plot()`` → ``ax.plot()``, ``plt.legend()`` → ``ax.legend()``, etc.), this is not the case for all commands.
In particular, functions to set limits, labels, and titles are slightly modified.
**For transitioning between MATLAB-style functions and object-oriented methods, make the following changes:**
- ``plt.xlabel()`` → ``ax.set_xlabel()``
- ``plt.ylabel()`` → ``ax.set_ylabel()``
- ``plt.xlim()`` → ``ax.set_xlim()``
- ``plt.ylim()`` → ``ax.set_ylim()``
- ``plt.title()`` → ``ax.set_title()``
In the object-oriented interface to plotting, rather than calling these functions individually, it is often more convenient to use the ``ax.set()`` method to set all these properties at once:
```
ax = plt.axes()
ax.plot(x, np.sin(x))
# ax.set bundles set_xlim/set_ylim/set_xlabel/... into a single call.
ax.set(xlim = (0,10), ylim = (-2, 2),
       xlabel = 'x', ylabel = 'sin(x)',
       title = 'A simple plot');
```
| github_jupyter |
```
#!/usr/bin/env python
# INSTRUCTIONS
# Your task for this assignment is to combine the principles that you learned
# in unit 3, 4 and 5 and create a fully automated program that can display
# the cause-effect chain automatically.
#
# In problem set 4 you created a program that generated cause chain
# if you provided it the locations (line and iteration number) to look at.
# That is not very useful. If you know the lines to look for changes, you
# already know a lot about the cause. Instead now, with the help of concepts
# introduced in unit 5 (line coverage), improve this program to create
# the locations list automatically, and then use it to print out only the
# failure inducing lines, as before.
# See some hints at the provided functions, and an example output at the end.
import sys
import copy
# the buggy program
def remove_html_markup(s):
    """Strip HTML tags from s, returning only the non-tag text.

    Deliberately buggy -- this function is the subject of the debugging
    exercise: in the quote condition below, `and` binds tighter than `or`,
    so Python reads it as  c == '"' or (c == "'" and tag),  meaning a double
    quote toggles quote mode even *outside* a tag.  Do not fix it here.
    NOTE: the line numbers recorded by the tracing code depend on this
    function's position in the file.
    """
    tag = False
    quote = False
    out = ""
    for c in s:
        if c == '<' and not quote:
            tag = True
        elif c == '>' and not quote:
            tag = False
        elif c == '"' or c == "'" and tag:
            quote = not quote
        elif not tag:
            out = out + c
    return out
# Global variables to communicate between callbacks and drivers
the_line = None       # target line number for trace_fetch_state
the_iteration = None  # target iteration (nth execution of the_line)
the_state = None      # captured copy of local variables at the target point
the_diff = None       # NOTE(review): unused in the visible code
the_input = None      # NOTE(review): unused in the visible code
coverage = []         # executed line numbers, in execution order (traceit)
def traceit(frame, event, arg):
    """Tracing function that records the covered lines during
    the execution of the program, in order of their execution,
    and saves them in the global variable 'coverage':
    [8, 9, 10, 11, 12, 14, 16, 17, 11, 12, ...]
    """
    global coverage
    if event == "line":
        lineno = frame.f_lineno
        # This check is only necessary when running this code in Jupyter notebook
        filename = frame.f_code.co_filename
        if "Anaconda" in filename:
            return traceit
        coverage.append(lineno)
    # Per the sys.settrace protocol, returning the trace function keeps
    # tracing active for this scope.
    return traceit
def make_locations(coverage):
    """Turn an ordered line-coverage trace into (line, iteration) pairs.

    Each covered line is paired with how many times it has been seen so
    far, e.g. [8, 9, 8] -> [(8, 1), (9, 1), (8, 2)].
    """
    seen = {}
    locations = []
    for line in coverage:
        # Count this occurrence of the line and pair it with its count.
        seen[line] = seen.get(line, 0) + 1
        locations.append((line, seen[line]))
    return locations
def trace_fetch_state(frame, event, arg):
    """Tracing function that traces the program execution until the specified
    global variables 'the_line' and 'the_iteration', and then records
    the state of the program in the global variable 'the_state'.
    Complement to the 'get_state' function.
    """
    global the_line
    global the_iteration
    global the_state
    # If function is executed for the first time, create
    # function attribute 'iteration' and set it to zero
    if not hasattr(trace_fetch_state, "iteration"):
        trace_fetch_state.iteration = 0
    if event == "line" and frame.f_lineno == the_line:
        # Count how many times 'the_line' has executed so far.
        trace_fetch_state.iteration += 1
        if trace_fetch_state.iteration == the_iteration:
            # Snapshot the locals; deepcopy so later mutations in the
            # traced frame do not alter the captured state.
            the_state = copy.deepcopy(frame.f_locals)
            # reset 'iteration' attribute for subsequent function calls
            trace_fetch_state.iteration = 0
            # stop tracing
            sys.settrace(None)
    return trace_fetch_state
def get_state(input_string, line, iteration):
    """Returns the state of the program at the specified line and iteration.
    Complement to the 'trace_fetch_state' function.
    """
    global the_line
    global the_iteration
    global the_state
    # Communicate the target location to the trace callback.
    the_line = line
    the_iteration = iteration
    sys.settrace(trace_fetch_state)
    y = remove_html_markup(input_string)
    sys.settrace(None)
    # 'the_state' was filled in by trace_fetch_state during the run.
    return the_state
def test(diffs):
    """Testing function that calls 'remove_html_markup', stops
    at 'the_line' and 'the_iteration' and applies the differences in 'diffs'.
    Finally, it checks the returned value of 'remove_html_markup' and
    returns "PASS" or "FAIL" accordingly.
    Complement to the 'trace_apply_diff' function.
    """
    global the_diff
    global the_input
    global the_line
    global the_iteration
    # Save the target location: trace_apply_diff clears 'the_line',
    # so both globals are restored after the traced run.
    line = the_line
    iteration = the_iteration
    the_diff = diffs
    sys.settrace(trace_apply_diff)
    y = remove_html_markup(the_input)
    sys.settrace(None)
    the_line = line
    the_iteration = iteration
    # The run passes iff no '<' survives in the output.
    if y.find('<') == -1:
        return "PASS"
    else:
        return "FAIL"
def ddmin(s):
    """Delta debugger: minimize the failure-inducing subset of *s*.

    Repeatedly removes chunks of *s* while the global 'test' function
    still reports "FAIL", adjusting the granularity 'n', until no
    smaller failing subset can be found.

    Parameters:
        s: list of (variable name, failing value) difference tuples.
    Returns:
        The minimal subset of *s* for which 'test' still fails.
    """
    n = 2  # initial granularity (number of chunks)
    while len(s) >= 2:
        start = 0
        # BUG FIX: use floor division.  Plain '/' yields a float under
        # Python 3, and float slice indices raise TypeError below.
        subset_length = len(s) // n
        some_complement_is_failing = False
        while start < len(s):
            # Try removing the chunk [start, start + subset_length).
            complement = s[:start] + s[start + subset_length:]
            if test(complement) == "FAIL":
                # The smaller set still fails: keep it, coarsen granularity.
                s = complement
                n = max(n - 1, 2)
                some_complement_is_failing = True
                break
            start += subset_length
        if not some_complement_is_failing:
            if n == len(s):
                break  # already at single-element granularity
            n = min(n * 2, len(s))  # refine granularity and retry
    return s
def trace_apply_diff(frame, event, arg):
    """Tracing function that stops at 'the_line' and 'the_iteration'
    and updates the execution frame's variable dictionary 'frame.f_locals',
    with the values found in global variable 'the_diff'.
    Complement to the 'test' function.
    """
    global the_line
    global the_diff
    global the_iteration
    if frame.f_lineno == the_line:
        # Count down until the requested iteration of 'the_line'.
        the_iteration = the_iteration - 1
        if the_iteration == 0:
            # NOTE(review): writing through frame.f_locals only takes
            # effect because we are inside a trace callback -- confirm
            # this behaves the same outside CPython.
            frame.f_locals.update(the_diff)
            # Clearing 'the_line' prevents any further matches this run.
            the_line = None
            return None # Stop tracing
    return trace_apply_diff
def auto_cause_chain(locations):
    """Iterates over all the (line, iteration) pairs provided.
    - Then, for each line and iteration pair, it compares the various
      variable values between the corresponding failing and passing runs
      and calculates a list of differences of the form:
      [(variable1 name, variable1 value at failing run), ...]
    - This list is then passed to 'ddmin' which does the delta
      debugging and returns the minimum set of failure-inducing
      variables from the list.
    """
    global html_fail
    global html_pass
    global the_input
    global the_line
    global the_iteration
    global the_diff
    print "The program was started with", repr(html_fail)
    """This is the list of causes found while checking each (line, iteration).
    It is used to avoid duplicating the same cause in the output.
    This is necessary because the program variables state
    may not change between two executed lines and thus the
    differences variable 'diffs' will be exactly the same."""
    causes = []
    # Main loop: test for each covered line and iteration
    for (line, iteration) in locations:
        # Get the passing and the failing states
        state_pass = get_state(html_pass, line, iteration)
        state_fail = get_state(html_fail, line, iteration)
        # Compute the differences
        diffs = []
        # First check if the execution frame's variable dictionary is not empty
        if state_fail:
            for var in state_fail:
                if (var not in state_pass) or state_pass[var] != state_fail[var]:
                    diffs.append((var, state_fail[var]))
        # Minimize the failure-inducing set of differences
        # Since this time you have all the covered lines and iterations in
        # locations, you will have to figure out how to automatically detect
        # which lines/iterations are the ones that are part of the
        # failure chain and print out only these.
        # NOTE: 'ddmin'/'test' replay the *passing* input and inject the
        # failing values, so set up the globals they read before calling.
        the_input = html_pass
        the_line = line
        the_iteration = iteration
        if diffs:
            cause = ddmin(diffs)
            if cause not in causes:
                causes.append(cause)
                for variable, value in cause:
                    print 'Then', variable, 'became', repr(value)
    print "Then the program failed."
###### Testing runs
# We will test your function with different strings and on a different function
html_fail = '"<b>foo</b>"'
html_pass = "'<b>foo</b>'"
# MAIN PROGRAM:
# Record line coverage of the failing run in global variable 'coverage'
sys.settrace(traceit)
remove_html_markup(html_fail)
sys.settrace(None)
# Create a list of tuples in the format [(line1, iteration1), ...] using
# the line coverage data from 'coverage'
locations = make_locations(coverage)
# Execute main function 'auto_cause_chain'
# (prints the cause-effect chain from the failing run)
auto_cause_chain(locations)
# The output should look like follows:
"""
The program was started with '"<b>foo</b>"'
Then s became '"<b>foo</b>"'
Then c became '"'
Then quote became True
...
"""
```
| github_jupyter |
```
import os
os.environ['KERAS_BACKEND'] = 'theano'
import gzip
import pandas as pd
import numpy as np
np.random.seed(1000)
from matplotlib import pyplot as plt
import seaborn as sns
plt.tight_layout()
sns.set(style="whitegrid")
sns.set_palette((sns.color_palette('colorblind', 8)))
dims = (11.7, 8.27)
%matplotlib inline
from sklearn.preprocessing import StandardScaler
import time
import math
import random
from keras.utils import np_utils
from numpy import argmax
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, Convolution2D, SpatialDropout2D, MaxPooling3D, SeparableConv2D
def load_mnist(path, kind='train'):
    """Load gzipped Fashion-MNIST images and labels from *path*.

    Adapted from https://github.com/zalandoresearch/fashion-mnist

    Parameters:
        path: directory containing the '*-idx?-ubyte.gz' files.
        kind: dataset split prefix, 'train' or 't10k'.
    Returns:
        (images, labels): images as a (n, 784) uint8 array, labels as
        a (n,) uint8 array.
    """
    labels_file = os.path.join(path, '{0}-labels-idx1-ubyte.gz'.format(kind))
    images_file = os.path.join(path, '{0}-images-idx3-ubyte.gz'.format(kind))

    with gzip.open(labels_file, 'rb') as fh:
        # Skip the 8-byte IDX header, then read raw uint8 labels.
        labels = np.frombuffer(fh.read(), dtype=np.uint8, offset=8)

    with gzip.open(images_file, 'rb') as fh:
        # Skip the 16-byte IDX header; one flattened 28*28 row per label.
        raw = np.frombuffer(fh.read(), dtype=np.uint8, offset=16)
    images = raw.reshape(len(labels), 784)

    return images, labels
def preprocess(train, test):
    """Standardize *train* and *test* using statistics fitted on *train*.

    Returns the pair (transformed train, transformed test).
    """
    scaler = StandardScaler()
    scaler.fit(train)
    return scaler.transform(train), scaler.transform(test)
def refresh_data():
    """Load, standardize and reshape Fashion-MNIST for the CNN.

    Returns:
        (X_train, X_test, Y_train, Y_test): X arrays as float32 with
        shape (n, 1, 28, 28); Y arrays one-hot encoded over 10 classes.
    """
    # NOTE(review): paths are hard-coded to a local checkout -- parameterize.
    X_train, y_train = load_mnist('C:/git/IST718/Lab3/data/fashion', kind='train')
    X_test, y_test = load_mnist('C:/git/IST718/Lab3/data/fashion', kind='t10k')
    ## Transforming the data
    X_train, X_test = preprocess(X_train.astype('float64'), X_test.astype('float64'))
    ## Shaping the data from a 2 dimensional array to 3
    X_train = np.reshape(X_train, (60000, 28, 28))
    X_test = np.reshape(X_test, (10000, 28, 28))
    # Add the single channel axis expected by the Conv2D input_shape.
    X_train = X_train.reshape(X_train.shape[0], 1, 28, 28)
    X_test = X_test.reshape(X_test.shape[0], 1, 28, 28)
    ## Assigning the categories to the Y data
    Y_train = np_utils.to_categorical(y_train, 10)
    Y_test = np_utils.to_categorical(y_test, 10)
    ## Translating the X data to float32 and reducing to a decimal value
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    # NOTE(review): the data was already standardized by StandardScaler in
    # preprocess(), so this extra /255 rescale looks redundant -- confirm.
    X_train /= 255
    X_test /= 255
    return X_train, X_test, Y_train, Y_test
def refresh_model():
    """Build a fresh, uncompiled CNN for (1, 28, 28) Fashion-MNIST input.

    Architecture: two strided 3x3 conv layers (32 then 64 filters),
    dropout, flatten, a 128-unit dense layer, dropout, and a 10-way
    softmax output.
    """
    layer_stack = [
        Conv2D(filters=32, kernel_size=3, strides=3, activation='relu',
               padding='same', input_shape=(1, 28, 28)),
        Conv2D(filters=64, kernel_size=3, strides=3, activation='relu',
               padding='same'),
        Dropout(0.25),
        Flatten(),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(10, activation='softmax'),
    ]
    model = Sequential()
    for layer in layer_stack:
        model.add(layer)
    return model
def int_to_desc(i):
    """Map a Fashion-MNIST class index to its human-readable label.

    Parameters:
        i: class index (0-9).  Any other value yields 'Unknown'.
    Returns:
        The class name string, or 'Unknown' for unrecognized input.
    """
    ## Numeric dict for each value in the dataset
    conv = {0: 'T-shirt/top', 1: 'Trouser', 2: 'Pullover', 3: 'Dress', 4: 'Coat', 5: 'Sandal',
            6: 'Shirt', 7: 'Sneaker', 8: 'Bag', 9: 'Ankle boot'}
    # Narrowed from a bare `except:`: only a missing key (KeyError) or an
    # unhashable key (TypeError) means "unknown"; anything else propagates.
    try:
        return conv[i]
    except (KeyError, TypeError):
        return 'Unknown'
def check_random(n, x, y, p):
    """Plot *n* random test images colored by prediction correctness.

    Parameters:
        n: number of samples to display (5 per row).
        x: image data, one flattenable 28x28 image per row.
        y: true class indices.
        p: predicted class indices.
    """
    ## Takes in integer N, X data, Y data, and predicted Y data and returns a plot with the information displayed
    rows = math.ceil(n/5)
    fig, ax = plt.subplots(nrows=rows, ncols=5, sharex=True, sharey=True,)
    ax = ax.flatten()
    for i in range(n):
        # Pick a random test sample (with replacement, so repeats possible).
        j = random.randint(0,len(p)-1)
        img = x[j].reshape(28, 28)
        # Red colormap for a misclassification, green for a correct one.
        if p[j] != y[j]:
            cmap = 'Reds'
        else:
            cmap = 'Greens'
        ax[i].imshow(img, cmap=cmap, interpolation='nearest')
        predicted = int_to_desc(p[j])
        actual = int_to_desc(y[j])
        ax[i].set_title('P: {}\n A: {}'.format(predicted,actual))
    ax[0].set_xticks([])
    ax[0].set_yticks([])
    plt.tight_layout()
    plt.show()
# Grid-search configuration: every (optimizer, loss) combination is trained
# once and its results collected in 'result_df'.
losses = ['categorical_crossentropy', 'mean_squared_error', 'kullback_leibler_divergence', 'categorical_hinge']
optimizers = ['adam', 'adadelta', 'sgd', 'rmsprop']
metrics = ['categorical_accuracy']
epochs = 25
batchsize = 128
result_columns = ['loss_func', 'optimizer', 'epochs', 'batchsize', 'acc', 'val_acc', 'loss', 'val_loss', 'time', 'total_acc', 'total_loss']
result_df = pd.DataFrame(columns=result_columns)
for opt_func in optimizers:
    for loss_func in losses:
        # Fresh data and a fresh model per run so runs do not share state.
        print('Refreshing data...')
        X_train, X_test, Y_train, Y_test = refresh_data()
        print('Refreshing model...')
        model = refresh_model()
        print('Running {0} optimizer with {1} loss function.'.format(opt_func, loss_func))
        ## Starting timer
        starttime = time.time()
        ## Compiling and fitting model
        model.compile(loss=loss_func, optimizer=opt_func, metrics=metrics)
        fit = model.fit(x=X_train, y=Y_train, validation_data=(X_test, Y_test), batch_size=batchsize, epochs=epochs, verbose=0)
        ## Ending timer
        endtime = time.time()
        ## Total time
        totaltime = endtime - starttime
        ## Evaluating model on test data
        score = model.evaluate(X_test, Y_test, verbose=0)
        # Append one result row for this run (history lists + final scores).
        rownum = len(result_df)
        result_df.at[rownum, 'loss_func'] = loss_func
        result_df.at[rownum, 'optimizer'] = opt_func
        result_df.at[rownum, 'epochs'] = epochs
        result_df.at[rownum, 'batchsize'] = batchsize
        result_df.at[rownum, 'acc'] = fit.history['categorical_accuracy']
        result_df.at[rownum, 'val_acc'] = fit.history['val_categorical_accuracy']
        result_df.at[rownum, 'loss'] = fit.history['loss']
        result_df.at[rownum, 'val_loss'] = fit.history['val_loss']
        result_df.at[rownum, 'time'] = totaltime
        result_df.at[rownum, 'total_loss'] = score[0]
        result_df.at[rownum, 'total_acc'] = score[1]
# Standalone run: categorical_crossentropy + RMSProp for 50 epochs, then
# visualize sample predictions and summary plots.
print('Refreshing data...')
X_train, X_test, Y_train, Y_test = refresh_data()
print('Refreshing model...')
model = refresh_model()
## Starting timer
starttime = time.time()
## Compiling and fitting model
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['categorical_accuracy'])
fit = model.fit(x=X_train, y=Y_train, validation_data=(X_test, Y_test), batch_size=128, epochs=50, verbose=1)
## Ending timer
endtime = time.time()
## Total time
totaltime = endtime - starttime
## Predicting Values and flattening them back into a single int
pred = model.predict(X_test)
pred_flat = [argmax(x) for x in pred]
## Flattening the Y_Test values
ytest_flat = [argmax(x) for x in Y_test]
## Evaluating model on test data
score = model.evaluate(X_test, Y_test, verbose=0)
print(score)
print(totaltime)
check_random(n=15, x=X_test, y=ytest_flat, p=pred_flat)
## Graphing the differences between K values, weight methods, and the algorithm used
fig1, ax1 = plt.subplots(figsize=dims)
plot1 = sns.scatterplot(x=range(0,50), y=fit.history['categorical_accuracy'], ax=ax1)
plot1 = fig1.suptitle('Keras: Accuracy of Each Iteration\nCategorical Crossentropy Loss, RMSProp Optimizer', fontsize=20)
plot1 = ax1.set_ylabel('Accuracy')
plot1 = ax1.set_xlabel('Iteration')
#plot1.get_figure().savefig('Keras_best_run.png')
fig2, ax2 = plt.subplots(figsize=dims)
plot2 = sns.barplot(x='loss_func', y='total_acc', hue='optimizer', data=result_df[['loss_func', 'optimizer', 'total_acc']])
plot2 = fig2.suptitle('Keras: Total Accuracy of Each Run', fontsize=20)
plot2 = ax2.set_ylabel('Accuracy')
plot2 = ax2.set_xlabel('Loss Function')
plot2 = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
#plot2.get_figure().savefig('Keras_all_runs.png')
```
| github_jupyter |
# Simulating and fitting a time varying source
## Prerequisites
- To understand how a single binned simulation works, please refer to [spectrum_simulation](../1D/spectrum_simulation.ipynb) [simulate_3d](../3D/simulate_3d.ipynb) for 1D and 3D simulations respectively.
- For details of light curve extraction using gammapy, refer to the two tutorials [light_curve](light_curve.ipynb) and [light_curve_flare](light_curve_flare.ipynb)
## Context
Frequently, studies of variable sources (eg: decaying GRB light curves, AGN flares, etc) require time variable simulations. For most use cases, generating an event list is an overkill, and it suffices to use binned simulations using a temporal model.
**Objective: Simulate and fit a time decaying light curve of a source with CTA using the CTA 1DC response**
## Proposed approach
We will simulate 10 spectral datasets within given time intervals (Good Time Intervals) following a given spectral (a power law) and temporal profile (an exponential decay, with a decay time of 6 hr ). These are then analysed using the light curve estimator to obtain flux points. Then, we re-fit the simulated datasets to reconstruct back the injected profiles.
In summary, necessary steps are:
- Choose observation parameters including a list of `gammapy.data.GTI`
- Define temporal and spectral models from :ref:model-gallery as per science case
- Perform the simulation (in 1D or 3D)
- Extract the light curve from the reduced dataset as shown in [light curve notebook](light_curve.ipynb)
- Optionally, we show here how to fit the simulated datasets using a source model
## Setup
As usual, we'll start with some general imports...
## Setup
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.time import Time
import logging
log = logging.getLogger(__name__)
```
And some gammapy specific imports
```
from gammapy.data import Observation
from gammapy.irf import load_cta_irfs
from gammapy.datasets import SpectrumDataset, Datasets
from gammapy.modeling.models import (
PowerLawSpectralModel,
ExpDecayTemporalModel,
SkyModel,
)
from gammapy.maps import MapAxis, RegionGeom
from gammapy.estimators import LightCurveEstimator
from gammapy.makers import SpectrumDatasetMaker
from gammapy.modeling import Fit
from gammapy.data import observatory_locations
```
## Simulating a light curve
We will simulate 10 datasets using an `PowerLawSpectralModel` and a `ExpDecayTemporalModel`. The important thing to note here is how to attach a different `GTI` to each dataset.
```
# Loading IRFs
irfs = load_cta_irfs(
    "$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits"
)
# Reconstructed and true energy axis
# (reco: 9 log bins 0.3-10 TeV; true: 30 log bins over a wider range)
energy_axis = MapAxis.from_edges(
    np.logspace(-0.5, 1.0, 10), unit="TeV", name="energy", interp="log"
)
energy_axis_true = MapAxis.from_edges(
    np.logspace(-1.2, 2.0, 31), unit="TeV", name="energy_true", interp="log"
)
# Extraction region: 0.11 deg circle at the Galactic center
geom = RegionGeom.create("galactic;circle(0, 0, 0.11)", axes=[energy_axis])
# Pointing position (offset from the source for wobble-style observations)
pointing = SkyCoord(0.5, 0.5, unit="deg", frame="galactic")
```
Note that observations are usually conducted in Wobble mode, in which the source is not in the center of the camera. This allows to have a symmetrical sky position from which background can be estimated.
```
# Define the source model: A combination of spectral and temporal model
# gti_t0 is the reference time for the exponential decay below.
gti_t0 = Time("2020-03-01")
spectral_model = PowerLawSpectralModel(
    index=3, amplitude="1e-11 cm-2 s-1 TeV-1", reference="1 TeV"
)
# Exponential decay with a 6 h decay time, anchored at gti_t0
temporal_model = ExpDecayTemporalModel(t0="6 h", t_ref=gti_t0.mjd * u.d)
model_simu = SkyModel(
    spectral_model=spectral_model,
    temporal_model=temporal_model,
    name="model-simu",
)
# Look at the model
model_simu.parameters.to_table()
```
Now, define the start and observation livetime wrt to the reference time, `gti_t0`
```
# Ten observations: start times as offsets from gti_t0, with per-observation
# livetimes (minutes).
n_obs = 10
tstart = gti_t0 + [1, 2, 3, 5, 8, 10, 20, 22, 23, 24] * u.h
lvtm = [55, 25, 26, 40, 40, 50, 40, 52, 43, 47] * u.min
```
Now perform the simulations
```
# Simulate one SpectrumDataset per observation, each with its own GTI.
datasets = Datasets()
empty = SpectrumDataset.create(
    geom=geom, energy_axis_true=energy_axis_true, name="empty"
)
maker = SpectrumDatasetMaker(selection=["exposure", "background", "edisp"])
for idx in range(n_obs):
    # One observation per (tstart, livetime) pair defined above.
    obs = Observation.create(
        pointing=pointing,
        livetime=lvtm[idx],
        tstart=tstart[idx],
        irfs=irfs,
        reference_time=gti_t0,
        obs_id=idx,
        location=observatory_locations['cta_south'],
    )
    empty_i = empty.copy(name=f"dataset-{idx}")
    dataset = maker.run(empty_i, obs)
    # Attach the simulated model and draw Poisson-fluctuated counts.
    dataset.models = model_simu
    dataset.fake()
    datasets.append(dataset)
```
The reduced datasets have been successfully simulated. Let's take a quick look into our datasets.
```
datasets.info_table()
```
## Extract the lightcurve
This section uses standard light curve estimation tools for a 1D extraction. Only a spectral model needs to be defined in this case. Since the estimator returns the integrated flux separately for each time bin, the temporal model need not be accounted for at this stage.
```
# Define the model:
# Only a spectral model is needed here; the estimator measures the flux
# per time bin, so the temporal profile is not modeled at this stage.
spectral_model = PowerLawSpectralModel(
    index=3, amplitude="1e-11 cm-2 s-1 TeV-1", reference="1 TeV"
)
model_fit = SkyModel(spectral_model=spectral_model, name="model-fit")
# Attach model to all datasets
datasets.models = model_fit
%%time
lc_maker_1d = LightCurveEstimator(
    energy_edges=[0.3, 10] * u.TeV,
    source="model-fit",
    selection_optional=["ul"],
)
lc_1d = lc_maker_1d.run(datasets)
ax = lc_1d.plot(marker="o", label="3D")
```
We have the reconstructed lightcurve at this point. Further standard analysis might involve modeling the temporal profiles with an analytical or theoretical model. You may do this using your favourite fitting package, one possible option being `curve_fit` inside `scipy.optimize`.
In the next section, we show how to simultaneously fit the all datasets using a given temporal model. This does a joint fitting across the different datasets, while simultaneously minimising across the temporal model parameters as well. We will fit the amplitude, spectral index and the decay time scale. Note that `t_ref` should be fixed by default for the `ExpDecayTemporalModel`.
For modelling and fitting more complex flares, you should attach the relevant model to each group of `datasets`. The parameters of a model in a given group of dataset will be tied. For more details on joint fitting in gammapy, see [here](../2D/modeling_2D.ipynb).
## Fit the datasets
```
# Define the model:
# Start the fit away from the simulated values (index 2.0, t0 10 h) so the
# optimizer has to recover the injected parameters.
spectral_model1 = PowerLawSpectralModel(
    index=2.0, amplitude="1e-12 cm-2 s-1 TeV-1", reference="1 TeV"
)
temporal_model1 = ExpDecayTemporalModel(t0="10 h", t_ref=gti_t0.mjd * u.d)
model = SkyModel(
    spectral_model=spectral_model1,
    temporal_model=temporal_model1,
    name="model-test",
)
model.parameters.to_table()
datasets.models = model
%%time
# Do a joint fit
fit = Fit()
result = fit.run(datasets=datasets)
result.parameters.to_table()
```
We see that the fitted parameters match well with the simulated ones!
## Exercises
1. Re-do the analysis with `MapDataset` instead of `SpectralDataset`
2. Model the flare of PKS 2155-304 which you obtained using the [light curve flare tutorial](light_curve_flare.ipynb). Use a combination of a Gaussian and Exponential flare profiles, and fit using `scipy.optimize.curve_fit`
3. Do a joint fitting of the datasets.
| github_jupyter |
[@LorenaABarba](https://twitter.com/LorenaABarba)
12 steps to Navier–Stokes
=====
***
For a moment, recall the Navier–Stokes equations for an incompressible fluid, where $\vec{v}$ represents the velocity field:
$$
\begin{eqnarray*}
\nabla \cdot\vec{v} &=& 0 \\
\frac{\partial \vec{v}}{\partial t}+(\vec{v}\cdot\nabla)\vec{v} &=& -\frac{1}{\rho}\nabla p + \nu \nabla^2\vec{v}
\end{eqnarray*}
$$
The first equation represents mass conservation at constant density. The second equation is the conservation of momentum. But a problem appears: the continuity equation for incompressble flow does not have a dominant variable and there is no obvious way to couple the velocity and the pressure. In the case of compressible flow, in contrast, mass continuity would provide an evolution equation for the density $\rho$, which is coupled with an equation of state relating $\rho$ and $p$.
In incompressible flow, the continuity equation $\nabla \cdot\vec{v}=0$ provides a *kinematic constraint* that requires the pressure field to evolve so that the rate of expansion $\nabla \cdot\vec{v}$ should vanish everywhere. A way out of this difficulty is to *construct* a pressure field that guarantees continuity is satisfied; such a relation can be obtained by taking the divergence of the momentum equation. In that process, a Poisson equation for the pressure shows up!
Step 10: 2D Poisson Equation
----
***
Poisson's equation is obtained from adding a source term to the right-hand-side of Laplace's equation:
$$\frac{\partial ^2 p}{\partial x^2} + \frac{\partial ^2 p}{\partial y^2} = b$$
So, unlike the Laplace equation, there is some finite value inside the field that affects the solution. Poisson's equation acts to "relax" the initial sources in the field.
In discretized form, this looks almost the same as [Step 9](./12_Step_9.ipynb), except for the source term:
$$\frac{p_{i+1,j}^{n}-2p_{i,j}^{n}+p_{i-1,j}^{n}}{\Delta x^2}+\frac{p_{i,j+1}^{n}-2 p_{i,j}^{n}+p_{i,j-1}^{n}}{\Delta y^2}=b_{i,j}^{n}$$
As before, we rearrange this so that we obtain an equation for $p$ at point $i,j$. Thus, we obtain:
$$p_{i,j}^{n}=\frac{(p_{i+1,j}^{n}+p_{i-1,j}^{n})\Delta y^2+(p_{i,j+1}^{n}+p_{i,j-1}^{n})\Delta x^2-b_{i,j}^{n}\Delta x^2\Delta y^2}{2(\Delta x^2+\Delta y^2)}$$
We will solve this equation by assuming an initial state of $p=0$ everywhere, and applying boundary conditions as follows:
$p=0$ at $x=0, \ 2$ and $y=0, \ 1$
and the source term consists of two initial spikes inside the domain, as follows:
$b_{i,j}=100$ at $i=\frac{1}{4}nx, j=\frac{1}{4}ny$
$b_{i,j}=-100$ at $i=\frac{3}{4}nx, j=\frac{3}{4}ny$
$b_{i,j}=0$ everywhere else.
The iterations will advance in pseudo-time to relax the initial spikes. The relaxation under Poisson's equation gets slower and slower as they progress. *Why?*
Let's look at one possible way to write the code for Poisson's equation. As always, we load our favorite Python libraries. We also want to make some lovely plots in 3D. Let's get our parameters defined and the initialization out of the way. What do you notice of the approach below?
```
import numpy
from matplotlib import pyplot, cm
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
# Parameters
nx = 50
ny = 50
nt = 100
xmin = 0
xmax = 2
ymin = 0
ymax = 1

dx = (xmax - xmin) / (nx - 1)
dy = (ymax - ymin) / (ny - 1)

# Initialization
p = numpy.zeros((ny, nx))   # pressure field
pd = numpy.zeros((ny, nx))  # scratch copy used during each pseudo-time step
b = numpy.zeros((ny, nx))   # source term
x = numpy.linspace(xmin, xmax, nx)
# BUG FIX: the y coordinate must span [ymin, ymax]; the original used
# (xmin, xmax) here, stretching the y axis to the wrong range.
y = numpy.linspace(ymin, ymax, ny)

# Source: two spikes of opposite sign inside the domain
b[int(ny / 4), int(nx / 4)] = 100
b[int(3 * ny / 4), int(3 * nx / 4)] = -100
```
With that, we are ready to advance the initial guess in pseudo-time. How is the code below different from the function used in [Step 9](./12_Step_9.ipynb) to solve Laplace's equation?
```
# Jacobi-style pseudo-time iteration of the discretized Poisson equation.
for it in range(nt):
    # Work from a frozen copy so all updates use values from step n.
    pd = p.copy()

    # Discretized Poisson update for all interior points, including the
    # source term b (this is the difference from the Laplace solver).
    p[1:-1, 1:-1] = (((pd[1:-1, 2:] + pd[1:-1, :-2]) * dy**2 +
                      (pd[2:, 1:-1] + pd[:-2, 1:-1]) * dx**2 -
                      b[1:-1, 1:-1] * dx**2 * dy**2) /
                     (2 * (dx**2 + dy**2)))

    # Dirichlet boundary conditions: p = 0 on all four edges.
    p[0, :] = 0
    p[ny-1, :] = 0
    p[:, 0] = 0
    p[:, nx-1] = 0
```
Maybe we could reuse our plotting function from [Step 9](./12_Step_9.ipynb), don't you think?
```
def plot2D(x, y, p):
    """Render the 2D field *p* over coordinates *x*, *y* as a 3D surface.

    Parameters:
        x, y: 1D coordinate arrays.
        p: 2D field array with shape (len(y), len(x)).
    """
    fig = pyplot.figure(figsize=(11, 7), dpi=100)
    # BUG FIX: `fig.gca(projection='3d')` was deprecated and removed in
    # Matplotlib 3.6; add_subplot is the supported way to get 3D axes.
    ax = fig.add_subplot(projection='3d')
    X, Y = numpy.meshgrid(x, y)
    surf = ax.plot_surface(X, Y, p[:], rstride=1, cstride=1, cmap=cm.viridis,
                           linewidth=0, antialiased=False)
    ax.view_init(30, 225)
    ax.set_xlabel('$x$')
    ax.set_ylabel('$y$')
plot2D(x, y, p)
```
Ah! The wonders of code reuse! Now, you probably think: "Well, if I've written this neat little function that does something so useful, I want to use it over and over again. How can I do this without copying and pasting it each time? —If you are very curious about this, you'll have to learn about *packaging*. But this goes beyond the scope of our CFD lessons. You'll just have to Google it if you really want to know.
***
## Learn More
To learn more about the role of the Poisson equation in CFD, watch **Video Lesson 11** on You Tube:
```
from IPython.display import YouTubeVideo
YouTubeVideo('ZjfxA3qq2Lg')
from IPython.core.display import HTML
def css_styling():
    """Load the notebook's custom CSS file and return it as rich HTML."""
    styles = open("../styles/custom.css", "r").read()
    return HTML(styles)
css_styling()
```
> (The cell above executes the style for this notebook.)
| github_jupyter |
# Fibonacci Series Classifier
*Author: Brianna Gopaul*
The Fibonacci series is a sequence of numbers in which each term is the sum of the two preceding terms. For example, 1, 1, 2, 3 are numbers within the Fibonacci series because 1 + 1 = 2 and 1 + 2 = 3.
Below we create a supervised model that classifies fibonacci sequences from non-fibonacci sequences in Strawberry Fields using The [Quantum Machine Learning Toolbox](https://github.com/XanaduAI/qmlt).

## Supervised Model Tutorial
```
import tensorflow as tf
import strawberryfields as sf
from strawberryfields.ops import *
from qmlt.tf.helpers import make_param
from qmlt.tf import CircuitLearner
```
Here we define the number of iterations we want our model to run through.
```
steps = 100
```
Now we create a circuit that contains trainable parameters. The line proceeding it takes the shape of the input and runs the circuit. The tensorflow backend 'tf' is used and arguments eval, cutoff_dim and batch_size are defined. Different arguments will be required depending on the backend used. The fock backend can alternatively be used.
The output of the circuit is measure using photon counting. If we measure zero photons in the first mode and two photons in the second mode, this output is defined as p0
```
def circuit(X):
    """Variational photonic circuit classifying 4-feature inputs.

    Parameters:
        X: batch tensor of inputs; columns 0 and 1 feed the gates below.
    Returns:
        circuit_output: per-sample probability in [0, 1], the normalized
        probability of measuring the |2,0> Fock outcome vs |0,2>.
    """
    # Trainable circuit parameters (Kerr strength and beamsplitter angle).
    kappa = make_param('kappa', constant=0.9)
    theta = make_param('theta', constant=2.25)
    # Two-mode Strawberry Fields engine.
    eng, q = sf.Engine(2)
    with eng:
        # NOTE(review): only X[:, 0] and X[:, 1] are encoded; columns 2-3
        # of the 4-feature inputs are unused by the gates -- confirm intended.
        Dgate(X[:, 0], X[:, 1]) | q[0]
        BSgate(theta=theta) | (q[0], q[1])
        Sgate(X[:, 0], X[:, 1]) | q[0]
        Sgate(X[:, 0], X[:, 1]) | q[1]
        BSgate(theta=theta) | (q[0], q[1])
        Dgate(X[:, 0], X[:, 1]) | q[0]
        Kgate(kappa=kappa) | q[0]
        Kgate(kappa=kappa) | q[1]
    num_inputs = X.get_shape().as_list()[0]
    # Run on the TensorFlow backend; eval=False keeps the result symbolic.
    state = eng.run('tf', cutoff_dim=10, eval=False, batch_size=num_inputs)
    # Probabilities of the two photon-counting outcomes used as classes.
    p0 = state.fock_prob([0, 2])
    p1 = state.fock_prob([2, 0])
    # Small epsilon avoids division by zero when both probabilities vanish.
    normalisation = p0 + p1 + 1e-10
    circuit_output = p1/normalisation
    return circuit_output
```
In machine learning, the loss function tells us how much error there is between the correct value and the output value.
Mean Squared Error (MSE) minimizes the summation of all errors squared.
```
def myloss(circuit_output, targets):
    """Mean-squared-error loss between circuit outputs and target labels.

    BUG FIX: the ground-truth *targets* belong in `labels` and the model's
    *circuit_output* in `predictions`; the original had them swapped.
    MSE is symmetric so the value is unchanged, but the keywords now match
    the tf.losses API contract.
    """
    return tf.losses.mean_squared_error(labels=targets, predictions=circuit_output)
def outputs_to_predictions(circuit_output):
    """Round the circuit's probability output to a hard 0/1 class label."""
    return tf.round(circuit_output)
#training and testing data
X_train = [[0.1, 0.1, 0.2, 0.3],[0.3, 0.4, 0.5, 0.8], [0.3,0.6,0.9,0.13], [0.5, 0.8, 0.14, 0.21],[0.3, 0.5, 0.8, 0.13],[0.08, 0.13, 0.21, 0.34],[0.21, 0.36, 0.59, 0.99], [1, 1, 2, 3], [0.3, 0.5, 0.8, 0.13],[0.13, 0.21, 0.34, 0.55], [0.10, 0.777, 0.13434334, 0.88809], [0.1, 0.9, 0.13, 0.17],[0.43, 0.675, 0.2, 0.9], [0.98, 0.32, 0.1, 0.3], [0.15, 0.21, 0.34, 0.56], [0.1, 0.1, 0.2, 0.3], [0.1, 0.15, 0.3, 0.5],[0.1, 0.2, 0.4, 0.5],[0.3, 0.4, 0.5, 0.8],[0.3,0.6,0.9,0.13],[0.15, 0.15, 0.25, 0.35],[0.15, 0.25, 0.35, 0.45],[0.46, 0.29, 0.7, 0.57],[0.55,0.89,1.44,2.33],[0.233, 0.377, 0.61, 0.987], [0.987, 1.597, 2.584, 4.181],[0.6, 0.7, 0.13, 0.20],[0.233, 0.377, 0.61, 0.987],[0.0008, 0.013, 0.0021, 0.0034], [0.5, 0.6, 0.11, 0.17], [0.4, 0.5, 0.9, 0.13], [0.3, 0.5, 0.8, 0.18],[0.1, 0.1, 0.2, 0.6], [0.4, 0.5, 0.10, 0.15], [0.2, 0.3, 0.5, 0.10], [0.2, 0.3, 0.6, 0.43], [0.1, 0.3, 0.4, 0.2], [0.3, 0.5, 0.8, 0.787687], [0.5, 0.8, 1.3, 1], [0.08, 0.13, 0.21, 0.4]]
Y_train = [1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
X_test = [[0.5, 0.8, 0.13, 0.21], [0.21, 0.34, 0.55, 0.89], [0.7, 0.1, 0.879, 0.444], [0.20, 0.56, 0.909, 0.11], [0.2, 0.4, 0.6, 0.99],[0.53, 0.66, 0.06, 0.31], [0.24, 0.79, 0.25, 0.69], [0.008, 0.013, 0.021, 0.034], [0.144, 0.233, 0.377, 0.61], [0.61, 0.987, 1.597, 2.584], [0.34, 0.55, 0.89, 1.44], [0.034, 0.055, 0.089, 0.144],[0.2, 0.3, 0.5, 0.8], [0.5, 0.8, 1.3, 2.1], [0.413, 0.875, 0.066, 0.63], [0.3, 0.5, 0.7, 0.9], [0.2, 0.5, 0.14, 0.12], [0.5, 0.6, 0.7, 0.8],[0.5, 0.6, 0.9, 0.7],[0.5, 0.2, 0.9, 0.7],[0.4, 0.6, 0.4, 0.3],[0.9, 0.6, 0.4, 0.9],[0.9, 0.1, 0.6, 0.9],[0.8, 0.8, 0.6, 0.5]]
Y_test = [1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
X_pred = [[0.233, 0.377, 0.61, 0.987], [0.55, 0.89, 1.44, 2.33], [0.0013, 0.0021, 0.0034, 0.0055], [0.5, 0.8, 1.3, 2.1], [0.89, 1.44, 2.33, 3.77], [0.03, 0.05, 0.3, 0.13], [0.40, 0.34, 0.55, 0.89], [0.2, 0.45, 0.5, 0.8], [0.08, 0.13, 0.30, 0.34], [0.13, 0.21, 0.34, 0.80]]
```
Hyperparameters that define the task, optimizer and various other parameters listed in the QMLT docs are defined below.
A learner is then fed the hyperparameters and data.
```
# Hyperparameters for the QMLT CircuitLearner: supervised task, custom MSE
# loss, plain SGD with learning rate 0.1, verbose logging.
hyperparams= {'circuit': circuit,
              'task': 'supervised',
              'loss': myloss,
              'optimizer': 'SGD',
              'init_learning_rate': 0.1,
              'print_log': True}

learner = CircuitLearner(hyperparams=hyperparams)
# Train for 'steps' iterations on the labeled training set.
learner.train_circuit(X=X_train, Y=Y_train, steps=steps)

# Evaluate accuracy on the held-out test set (outputs rounded to 0/1).
test_score = learner.score_circuit(X=X_test, Y=Y_test,outputs_to_predictions=outputs_to_predictions)
print("Accuracy on test set: ", test_score['accuracy'])

# Predict labels for the unseen prediction set.
outcomes = learner.run_circuit(X=X_pred, outputs_to_predictions=outputs_to_predictions)
print("Predictions for new inputs: {}".format(outcomes['predictions']))
```
## Observations
### Small Dataset vs Large Dataset
Here we fix the value of x_pred in each test and feed the model two different datasets in order to see the success rate of using each model. The difficulty of x_pred will vary depending on the model's success rate.
```
X_pred_level1 = [[0.08, 0.13, 0.21, 0.34], [0.2, 0.3, 0.5, 0.8],[0.01, 0.01, 0.02, 0.03],[0.008, 0.013, 0.021, 0.034], [0.3, 0.5, 0.8, 0.13], [0.55, 0.64, 0.77, 0.21], [0.62, 0.93, 0.38, 0.23],[0.9, 0.8, 0.7, 0.6], [0.4, 0.6, 0.78, 0.77],[0.44, 0.96, 0.28, 0.33]]
X_pred_level2 = [[0.34, 0.55, 0.89, 1.44], [0.003, 0.005, 0.008, 0.013], [0.3, 0.5, 0.8, 1.3], [0.08, 0.13, 0.21, 0.34], [0.5, 0.8, 1.3, 2.1], [0.413, 0.875, 0.066, 0.63], [0.3, 0.5, 0.7, 0.4], [0.3, 0.8, 0.12, 0.2], [0.4, 0.5, 0.7, 0.7], [0.7, 0.0, 0.6, 0.5]]
X_pred_level3 = [[0.233, 0.377, 0.61, 0.987], [0.55, 0.89, 1.44, 2.33], [0.0013, 0.0021, 0.0034, 0.0055], [0.5, 0.8, 1.3, 2.1], [0.89, 1.44, 2.33, 3.77], [0.03, 0.05, 0.3, 0.13], [0.40, 0.34, 0.55, 0.89], [0.2, 0.45, 0.5, 0.8], [0.08, 0.13, 0.30, 0.34], [0.13, 0.21, 0.34, 0.80]]
```
### Sparse Dataset
```
X_train = [[0.1, 0.1, 0.2, 0.3],[0.5, 0.8, 0.14, 0.21],[0.3, 0.4, 0.5, 0.8], [0.3, 0.6, 0.9, 0.13]]
Y_train = [1, 1, 0, 0]
X_test = [[0.5, 0.8, 0.13, 0.21], [0.21, 0.34, 0.55, 0.89], [0.7, 0.1, 0.879, 0.444], [0.20, 0.56, 0.909, 0.11]]
Y_test = [1, 1, 0, 0]
```
### Large Dataset
```
X_train = [[0.1, 0.1, 0.2, 0.3],[0.3, 0.4, 0.5, 0.8], [0.3,0.6,0.9,0.13], [0.5, 0.8, 0.14, 0.21],[0.3, 0.5, 0.8, 0.13],[0.08, 0.13, 0.21, 0.34],[0.21, 0.36, 0.59, 0.99], [1, 1, 2, 3], [0.3, 0.5, 0.8, 0.13],[0.13, 0.21, 0.34, 0.55], [0.10, 0.777, 0.13434334, 0.88809], [0.1, 0.9, 0.13, 0.17],[0.43, 0.675, 0.2, 0.9], [0.98, 0.32, 0.1, 0.3], [0.15, 0.21, 0.34, 0.56], [0.1, 0.1, 0.2, 0.3], [0.1, 0.15, 0.3, 0.5],[0.1, 0.2, 0.4, 0.5],[0.3, 0.4, 0.5, 0.8],[0.3,0.6,0.9,0.13],[0.15, 0.15, 0.25, 0.35],[0.15, 0.25, 0.35, 0.45],[0.46, 0.29, 0.7, 0.57],[0.55,0.89,1.44,2.33],[0.233, 0.377, 0.61, 0.987], [0.987, 1.597, 2.584, 4.181],[0.6, 0.7, 0.13, 0.20],[0.233, 0.377, 0.61, 0.987],[0.0008, 0.013, 0.0021, 0.0034], [0.5, 0.6, 0.11, 0.17], [0.4, 0.5, 0.9, 0.13], [0.3, 0.5, 0.8, 0.18],[0.1, 0.1, 0.2, 0.6], [0.4, 0.5, 0.10, 0.15], [0.2, 0.3, 0.5, 0.10], [0.2, 0.3, 0.6, 0.43], [0.1, 0.3, 0.4, 0.2], [0.3, 0.5, 0.8, 0.787687], [0.5, 0.8, 1.3, 1], [0.08, 0.13, 0.21, 0.4]]
Y_train = [1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
X_test = [[0.5, 0.8, 0.13, 0.21], [0.21, 0.34, 0.55, 0.89], [0.7, 0.1, 0.879, 0.444], [0.20, 0.56, 0.909, 0.11], [0.2, 0.4, 0.6, 0.99],[0.53, 0.66, 0.06, 0.31], [0.24, 0.79, 0.25, 0.69], [0.008, 0.013, 0.021, 0.034], [0.144, 0.233, 0.377, 0.61], [0.61, 0.987, 1.597, 2.584], [0.34, 0.55, 0.89, 1.44], [0.034, 0.055, 0.089, 0.144],[0.2, 0.3, 0.5, 0.8], [0.5, 0.8, 1.3, 2.1], [0.413, 0.875, 0.066, 0.63], [0.3, 0.5, 0.7, 0.9], [0.2, 0.5, 0.14, 0.12], [0.5, 0.6, 0.7, 0.8],[0.5, 0.6, 0.9, 0.7],[0.5, 0.2, 0.9, 0.7],[0.4, 0.6, 0.4, 0.3],[0.9, 0.6, 0.4, 0.9],[0.9, 0.1, 0.6, 0.9],[0.8, 0.8, 0.6, 0.5]]
Y_test = [1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
```
### Data Generation
```
import random
import numpy as np
# Draw four random features to eyeball a candidate "non-fibonacci" sample.
x=np.random.random(4)
np.set_printoptions(precision=2)
print(x)
# Build a fibonacci sequence: start from [1, 1] and extend it 30 times.
a = [1,1]
idx = 0
for i in range(30):
    a.append(a[idx] + a[idx+1])
    idx +=1
# Scale the last term down by 100 to match the magnitude of the datasets above.
print(a[-1]/100)
```
## Results
### Level 1
```
X_pred_level1 = [[0.08, 0.13, 0.21, 0.34], [0.2, 0.3, 0.5, 0.8],[0.01, 0.01, 0.02, 0.03],[0.008, 0.013, 0.021, 0.034], [0.3, 0.5, 0.8, 0.13], [0.55, 0.64, 0.77, 0.21], [0.62, 0.93, 0.38, 0.23],[0.9, 0.8, 0.7, 0.6], [0.4, 0.6, 0.78, 0.77],[0.44, 0.96, 0.28, 0.33]]
```
Level 1 is the easiest classification task. To challenge the model, the non-fibonacci sequences are close in value to each other.
__Tasks Classified Correctly:__
* Large Dataset: 20%
* Small Dataset: 80%
### Level 2
```
X_pred_level2 = [[0.34, 0.55, 0.89, 1.44], [0.003, 0.005, 0.008, 0.013], [0.3, 0.5, 0.8, 1.3], [0.08, 0.13, 0.21, 0.34], [0.5, 0.8, 1.3, 2.1], [0.413, 0.875, 0.066, 0.63], [0.3, 0.5, 0.7, 0.4], [0.3, 0.8, 0.12, 0.2], [0.4, 0.5, 0.7, 0.7], [0.7, 0.0, 0.6, 0.5]]
```
Level 2 challenges the model by testing it against unfamiliar fibonacci sequences. The non-fibonacci numbers also become closer in value.
__Tasks Classified Correctly:__
* Large Dataset: 40%
* Small Dataset: 70%
### Level 3
```
X_pred_level3 = [[0.233, 0.377, 0.61, 0.987], [0.55, 0.89, 1.44, 2.33], [0.0013, 0.0021, 0.0034, 0.0055], [0.5, 0.8, 1.3, 2.1], [0.89, 1.44, 2.33, 3.77], [0.03, 0.05, 0.3, 0.13], [0.40, 0.34, 0.55, 0.89], [0.2, 0.45, 0.5, 0.8], [0.08, 0.13, 0.30, 0.34], [0.13, 0.21, 0.34, 0.80]]
```
Level 3 is the most difficult test set.
<br>
It contains number sequences that appear to follow the fibonacci pattern but are off by a small value. e.g. 0.13, 0.21, 0.34, 0.80
**Tasks Classified Correctly:**
* Large Dataset: 70%
* Small Dataset: 30%
The graph below illustrates the performance of the small dataset model and the large dataset model on each test set.
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
N = 3
B = (20, 40, 70)
A = (80, 70, 30)
BB = (1, 1, 1)
AA = (1, 1, 1)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars: can also be len(x) sequence
p1 = plt.bar(ind, B, width, yerr=BB)
p2 = plt.bar(ind, A, width,
bottom=B, yerr=AA)
plt.ylabel('Correct Classifications(%)')
plt.title('Large Dataset vs Small Dataset Performance')
plt.xticks(ind, ('L1', 'L2', 'L3'))
plt.yticks(np.arange(0, 81, 10))
plt.legend((p1[0], p2[0]), ('Large Dataset', 'Small Dataset'))
plt.show()
```
| github_jupyter |
```
from pyhanlp import *
print(HanLP.segment('你好,欢迎在Python中调用HanLP的API'))
import hashlib
def phone_encode(string, method="md5_sha1"):
    """Hash a string: MD5 first, then SHA-1, depending on *method*.

    If 'md5' appears in *method*, the string is replaced by its 32-char MD5
    hex digest; if 'sha1' appears, the (upper-cased, UTF-8 encoded) current
    string is replaced by its upper-cased SHA-1 hex digest. Returns the
    resulting hex string, or None if anything goes wrong (best-effort).
    """
    try:
        if 'md5' in method:
            # md5 hexdigest is already 32 chars; the slice keeps the
            # original's explicit truncation.
            string = hashlib.md5(string.encode(encoding='utf-8')).hexdigest()[0:32]
        if 'sha1' in method:
            string = hashlib.sha1(string.encode(encoding='utf-8').upper()).hexdigest().upper()
        return string
    except Exception as exc:
        print(exc)
        return None
a='河南许昌巨龙游乐设备有限责任公司'
phone_encode(a, method='md5_sha1')
import uuid
uuid.uuid4().hex
28996/180489
def read_dict():
    """Load the character -> stroke-order dictionary from ChineseStrokes.dat.

    Each line of the file is expected to be "<char>{TAB}<strokes>".
    Returns a dict mapping each character to its stroke-order string.
    """
    DIC_PATH = "ChineseStrokes.dat"
    # `with` guarantees the file handle is closed (the original never
    # closed it, leaking the descriptor).
    with open(DIC_PATH) as fh:
        rows = [line.strip().split(u"\t") for line in fh]
    return dict(rows)
WORD_DICT = read_dict()
def word2strokes(ustring):
    # Look up a single character's stroke-order string in the module-level
    # WORD_DICT; unknown characters map to the empty string.
    return WORD_DICT.get(ustring, "")
def words2strokes(ustrings):
    """Convert each character of *ustrings* to its stroke-order code.

    Returns the per-character codes joined by single spaces; characters
    missing from WORD_DICT contribute an empty string (see word2strokes).
    """
    # Idiomatic join over a generator replaces the manual append loop.
    return " ".join(word2strokes(w) for w in ustrings)
def levenshtein_distance(first, second):
    """Compute the Levenshtein (edit) distance between two strings.

    - **Parameters**
        - first : the first string
        - second: the second string
    - **Returns**
        The minimum number of single-character insertions, deletions and
        substitutions needed to turn *first* into *second* (int).
    """
    if len(first) == 0 or len(second) == 0:
        # One side empty: the distance is just the other side's length.
        return len(first) + len(second)
    # Rolling two-row dynamic programme instead of the full matrix.
    previous = list(range(len(second) + 1))
    for row, ch_a in enumerate(first, start=1):
        current = [row]
        for col, ch_b in enumerate(second, start=1):
            cost = 0 if ch_a == ch_b else 1
            current.append(min(
                current[col - 1] + 1,        # insertion
                previous[col] + 1,           # deletion
                previous[col - 1] + cost,    # substitution (or match)
            ))
        previous = current
    return previous[-1]
def find_n_words(s1, li):
    """Collect entries of *li* whose edit distance to *s1* is 1 or 2.

    NOTE(review): the loop iterates `for e in li` and then indexes `li[e]`,
    so `li` is presumably a dict whose keys are stroke strings (compared
    against s1) and whose values are display names -- confirm with callers;
    a plain list would raise TypeError here.
    Returns a comma-joined string of "value_distance" entries.
    """
    T = []
    for e in li:
        dis = levenshtein_distance(s1, e)
        if dis < 3 and dis > 0:
            T.append(li[e] + "_" + str(dis))
    return ",".join(T)
s1 = u"秒贷网"
s2 = u"秒贷金融"
print(s1,s2)
s1 = words2strokes(s1)
s2 = words2strokes(s2)
print(s1,s2)
# levenshtein_distance(s1, s2)
def is_form_same(words1, words2, return_number=False):
    """Decide whether two words look alike by stroke-order edit distance.

    - **Parameters**
        - words1: first word (unicode).
        - words2: second word (unicode).
        - return_number: if True, return the edit distance (int) instead of
          a bool. Default False.
    - **Returns**
        bool -- True when the stroke-order edit distance is below 3 ("similar
        in form"); or the raw int distance when return_number=True.

    example: >> is_form_same(u'习大大', u"习夶", return_number=True)
             1
             >> is_form_same(u'习大大', u"习夶")
             True
    """
    form1 = words2strokes(words1)
    form2 = words2strokes(words2)
    distance = levenshtein_distance(form1, form2)
    if return_number:
        return distance
    # The comparison already yields the boolean we want.
    return distance < 3
print(is_form_same("晴彩眼镜","睛彩眼镜", return_number=True))
print(is_form_same("青彩眼镜","睛彩眼镜", return_number=True))
print(is_form_same("清彩眼镜","睛彩眼镜", return_number=True))
print(is_form_same("睛采眼镜","睛彩眼镜", return_number=True))
print(is_form_same("壹染造型","壹柒造型", return_number=True))
print(is_form_same("海容袜业","睛彩眼镜", return_number=True))
print(is_form_same("多哆基","哆哆基", return_number=True))
class C1():
    """Base class of the method-resolution-order demo."""

    def f(self):
        print('C1.f')
        # self.g() dispatches dynamically, so a subclass override of g()
        # changes what this base f() returns.
        doubled = 2 * self.g()
        return doubled

    def g(self):
        print('C1.g')
        return 2
class C2(C1):
    # Overrides f only; self.g() still resolves to C1.g, so f() returns 3*2.
    def f(self):
        print('C2.f')
        return 3*self.g()
class C3(C1):
    # Overrides g only; the inherited C1.f now returns 2*5 via dynamic dispatch.
    def g(self):
        print('C3.g')
        return 5
class C4(C3):
    # Overrides f; self.g() resolves to C3.g, so f() returns 7*5.
    def f(self):
        print('C4.f')
        return 7*self.g()
# Instantiate one object of each class to trace Python's dynamic dispatch.
obj1 = C1()
obj2 = C2()
obj3 = C3()
obj4 = C4()
# Each f() multiplies the g() it dispatches to: 2*2=4, 3*2=6, 2*5=10, 7*5=35.
print('obj1:',obj1.f())
print('obj2:',obj2.f())
print('obj3:',obj3.f())
print('obj4:',obj4.f())
# Scratch arithmetic left in the notebook (evaluates and displays the sum).
3030 + 2007 + 3381 + 1910 + 2610 + 489 + 723 + 726 + 1449 + 1407 + 167 + 511 + 163 + 813 + 247 + 254 + 500
import sys
sys.version
```
| github_jupyter |
# Predicting Concrete Compressive Strength - Comparison with Linear Models
In this code notebook, we will analyze the statistics pertaining to the various models presented in this project. In the Exploratory Data Analysis notebook, we explored the various relationships that each constituent of concrete has with the cured compressive strength. The materials that held the strongest relationships, regardless of curing time, were cement, cementitious ratio, superplasticizer ratio, and fly ash ratio. We will examine each of the linear ratios independent of age, as well as at the industry-standard 28 day cure time mark.
## Dataset Citation
This dataset was retrieved from the UC Irvine Machine Learning Repository from the following URL: <https://archive.ics.uci.edu/ml/datasets/Concrete+Compressive+Strength>.
The dataset was donated to the UCI Repository by Prof. I-Cheng Yeh of Chung-Huah University, who retains copyright for the following published paper: I-Cheng Yeh, "Modeling of strength of high performance concrete using artificial neural networks," Cement and Concrete Research, Vol. 28, No. 12, pp. 1797-1808 (1998). Additional papers citing this dataset are listed at the reference link above.
## Import the Relevant Libraries
```
# Data Manipulation
import numpy as np
import pandas as pd
# Data Visualization
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
sns.set()
# Data Preprocessing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
# Linear Regresssion Model
from sklearn.linear_model import LinearRegression
# Model Evaluation
from sklearn.metrics import mean_squared_error,mean_absolute_error,explained_variance_score
```
## Import & Check the Data
```
df1 = pd.read_csv('2020_1124_Modeling_Data.csv')
df2 = pd.read_csv('2020_1123_Concrete_Data_Loaded_Transformed.csv')
original_data = df1.copy()
transformed_data = df2.copy()
# The original data contains kg/m^3 values
original_data.head()
# Original data
original_data.describe()
# The transformed data contains ratios to total mass of the concrete mix
transformed_data.head()
# Transformed data
transformed_data.describe()
```
## Cement Modeling - Including All Cure Times
We understand that the ratio of cement to compressive strength is linear. We will model this relationship in Python and evaluate its performance compared to our ANN model.
### Visualization
```
# We will visualize the linear relationship between quantity of cement and compressive strength
cement = original_data['Cement']
strength = original_data['Compressive_Strength']
plt.scatter(cement,strength)
```
### Train the Linear Model
```
# Reshape to the (n_samples, 1) column vectors sklearn expects.
# reshape(-1, 1) infers the row count, so this cell keeps working if the
# dataset size ever changes (the original hard-coded 1030 rows).
X = np.array(cement).reshape(-1, 1)
y = np.array(strength).reshape(-1, 1)
# Hold out 20% of the samples for testing (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Fit an ordinary least-squares model of strength on cement content.
lm = LinearRegression()
lm.fit(X_train, y_train)
```
### Test the Linear Model
```
y_pred = lm.predict(X_test)
```
### Linear Equation
```
# print the intercept
print(lm.intercept_)
coeff = pd.DataFrame(lm.coef_,columns=['Coefficient'])
coeff
```
### Model Evaluation
```
# Plot the linear model preditions as a line superimposed on a scatter plot of the testing data
plt.scatter(X_test,y_test)
plt.plot(X_test,y_pred,'r')
# Evaluation Metrics
MAE_cement = mean_absolute_error(y_test, y_pred)
MSE_cement = mean_squared_error(y_test, y_pred)
RMSE_cement = np.sqrt(mean_squared_error(y_test, y_pred))
cement_stats = [MAE_cement,MSE_cement,RMSE_cement] # storing for model comparison at the end of this notebook
# Print the metrics
print(f"EVALUATION METRICS, LINEAR MODEL FOR CEMENT VS. COMPRESSIVE STRENGTH")
print('-----------------------------')
print(f"Mean Absolute Error (MAE):\t\t{MAE_cement}\nMean Squared Error:\t\t\t{MSE_cement}\nRoot Mean Squared Error (RMSE):\t\t{RMSE_cement}")
print('-----------------------------\n\n')
```
## Cement Modeling - 28 Day Cure Time
We will model the cement vs compressive strength relationship for a constant cure time (28 days).
### Visualization
```
# We will visualize the linear relationship between quantity of cement and compressive strength at 28 days
cement = original_data[original_data['Age']==28]['Cement']
strength = original_data[original_data['Age']==28]['Compressive_Strength']
plt.scatter(cement,strength)
```
### Train the Linear Model
```
# Reshape the data so it complies with the linear model requirements
X = np.array(cement).reshape(425,1)
y = np.array(strength).reshape(425,1)
# Perform a train-test split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=42)
# Train the linear model
lm = LinearRegression()
lm.fit(X_train,y_train)
```
### Test the Linear Model
```
y_pred = lm.predict(X_test)
```
### Linear Equation
```
# print the intercept
print(lm.intercept_)
coeff = pd.DataFrame(lm.coef_,columns=['Coefficient'])
coeff
```
### Model Evaluation
```
# Plot the linear model preditions as a line superimposed on a scatter plot of the testing data
plt.scatter(X_test,y_test)
plt.plot(X_test,y_pred,'r')
# Evaluation Metrics
MAE_cement_28 = mean_absolute_error(y_test, y_pred)
MSE_cement_28 = mean_squared_error(y_test, y_pred)
RMSE_cement_28 = np.sqrt(mean_squared_error(y_test, y_pred))
cement_28_stats = [MAE_cement_28,MSE_cement_28,RMSE_cement_28] # storing for model comparison at the end of this notebook
# Print the metrics
print(f"EVALUATION METRICS, LINEAR MODEL FOR CEMENT VS. COMPRESSIVE STRENGTH")
print('-----------------------------')
print(f"Mean Absolute Error (MAE):\t\t{MAE_cement_28}\nMean Squared Error:\t\t\t{MSE_cement_28}\nRoot Mean Squared Error (RMSE):\t\t{RMSE_cement_28}")
print('-----------------------------\n\n')
```
## Cementitious Ratio Modeling - Including All Cure Times
We know that the relationship between the cementitious ratio — (cement + fly ash)/(total mass) — and compressive strength is linear. We will model this relationship in Python and evaluate its performance.
### Visualization
```
# We will visualize the linear relationship between quantity of cementitious materials and compressive strength
cementitious = transformed_data['Cementitious_Ratio']
strength = transformed_data['Compressive_Strength']
plt.scatter(cementitious,strength)
```
### Train the Linear Model
```
# Reshape the data so it complies with the linear model requirements
X = np.array(cementitious).reshape(1030,1)
y = np.array(strength).reshape(1030,1)
# Perform a train-test split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=42)
# Train the linear model
lm = LinearRegression()
lm.fit(X_train,y_train)
```
### Test the Linear Model
```
y_pred = lm.predict(X_test)
```
### Linear Equation
```
# print the intercept
print(lm.intercept_)
coeff = pd.DataFrame(lm.coef_,columns=['Coefficient'])
coeff
```
### Model Evaluation
```
# Plot the linear model preditions as a line superimposed on a scatter plot of the testing data
plt.scatter(X_test,y_test)
plt.plot(X_test,y_pred,'r')
# Evaluation Metrics
MAE_cementitious = mean_absolute_error(y_test, y_pred)
MSE_cementitious = mean_squared_error(y_test, y_pred)
RMSE_cementitious = np.sqrt(mean_squared_error(y_test, y_pred))
cementitious_stats = [MAE_cementitious,MSE_cementitious,RMSE_cementitious] # storing for model comparison at the end of this notebook
# Print the metrics
print(f"EVALUATION METRICS, LINEAR MODEL FOR CEMENTITIOUS RATIO VS. COMPRESSIVE STRENGTH")
print('-----------------------------')
print(f"Mean Absolute Error (MAE):\t\t{MAE_cementitious}\nMean Squared Error:\t\t\t{MSE_cementitious}\nRoot Mean Squared Error (RMSE):\t\t{RMSE_cementitious}")
print('-----------------------------\n\n')
```
## Cementitious Ratio Modeling - 28 Day Cure Time
### Visualization
```
# Visualize cementitious ratio vs. compressive strength at the 28-day mark.
# Filter on transformed_data's own Age column: masking one DataFrame with a
# boolean Series built from a *different* DataFrame only works by accident
# of shared row order (transformed_data carries Age too -- used as
# transformed_data['Age'] in the later fly-ash cells).
cured_28 = transformed_data[transformed_data['Age'] == 28]
cementitious = cured_28['Cementitious_Ratio']
strength = cured_28['Compressive_Strength']
plt.scatter(cementitious, strength)
```
### Train the Linear Model
```
# Reshape the data so it complies with the linear model requirements
X = np.array(cementitious).reshape(425,1)
y = np.array(strength).reshape(425,1)
# Perform a train-test split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=42)
# Train the linear model
lm = LinearRegression()
lm.fit(X_train,y_train)
```
### Test the Linear Model
```
y_pred = lm.predict(X_test)
```
### Linear Equation
```
# print the intercept
print(lm.intercept_)
coeff = pd.DataFrame(lm.coef_,columns=['Coefficient'])
coeff
```
### Model Evaluation
```
# Plot the linear model preditions as a line superimposed on a scatter plot of the testing data
plt.scatter(X_test,y_test)
plt.plot(X_test,y_pred,'r')
# Evaluation Metrics
MAE_cementitious_28 = mean_absolute_error(y_test, y_pred)
MSE_cementitious_28 = mean_squared_error(y_test, y_pred)
RMSE_cementitious_28 = np.sqrt(mean_squared_error(y_test, y_pred))
cementitious_28_stats = [MAE_cementitious_28,MSE_cementitious_28,RMSE_cementitious_28] # storing for model comparison at the end of this notebook
# Print the metrics
print(f"EVALUATION METRICS, LINEAR MODEL FOR CEMENTITIOUS RATIO VS. COMPRESSIVE STRENGTH AT 28 DAYS")
print('-----------------------------')
print(f"Mean Absolute Error (MAE):\t\t{MAE_cementitious_28}\nMean Squared Error:\t\t\t{MSE_cementitious_28}\nRoot Mean Squared Error (RMSE):\t\t{RMSE_cementitious_28}")
print('-----------------------------\n\n')
```
## Fly Ash Ratio Modeling - Including All Cure Times
The fly ash ratio is interpreted as the percentage of fly ash within the cementitious materials mix, that is, Fly_Ash_Ratio = (fly ash + cement)/(total mass).
### Visualization
```
# We will visualize the linear relationship between fly ash ratio and compressive strength
fly = transformed_data['Fly_Ash_Ratio']
strength = transformed_data['Compressive_Strength']
plt.scatter(fly,strength)
```
### Data Preprocessing
We see from the graph above that there are many instances where there is no fly ash in the mix design. Let us use only nonzero entries for our analysis.
```
fly = transformed_data[transformed_data['Fly_Ash_Ratio']!=0]['Fly_Ash_Ratio']
strength = transformed_data[transformed_data['Fly_Ash_Ratio']!=0]['Compressive_Strength']
plt.scatter(fly,strength)
```
### Train the Linear Model
```
# Reshape the data so it complies with the linear model requirements
X = np.array(fly).reshape(464,1)
y = np.array(strength).reshape(464,1)
# Perform a train-test split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=42)
# Train the linear model
lm = LinearRegression()
lm.fit(X_train,y_train)
```
### Test the Linear Model
```
y_pred = lm.predict(X_test)
```
### Linear Equation
```
# print the intercept
print(lm.intercept_)
coeff = pd.DataFrame(lm.coef_,columns=['Coefficient'])
coeff
```
### Model Evaluation
```
# Plot the linear model preditions as a line superimposed on a scatter plot of the testing data
plt.scatter(X_test,y_test)
plt.plot(X_test,y_pred,'r')
# Evaluation Metrics
MAE_fly = mean_absolute_error(y_test, y_pred)
MSE_fly = mean_squared_error(y_test, y_pred)
RMSE_fly = np.sqrt(mean_squared_error(y_test, y_pred))
fly_stats = [MAE_fly,MSE_fly,RMSE_fly] # storing for model comparison at the end of this notebook
# Print the metrics
print(f"EVALUATION METRICS, LINEAR MODEL FOR FLY ASH RATIO VS. COMPRESSIVE STRENGTH")
print('-----------------------------')
print(f"Mean Absolute Error (MAE):\t\t{MAE_fly}\nMean Squared Error:\t\t\t{MSE_fly}\nRoot Mean Squared Error (RMSE):\t\t{RMSE_fly}")
print('-----------------------------\n\n')
```
## Fly Ash Ratio Modeling - 28 Day Cure Time
The fly ash ratio is interpreted as the percentage of fly ash within the cementitious materials mix, that is, Fly_Ash_Ratio = (fly ash + cement)/(total mass).
```
fly = transformed_data[((transformed_data['Fly_Ash_Ratio']!=0)&(transformed_data['Age']==28))]['Fly_Ash_Ratio']
strength = transformed_data[((transformed_data['Fly_Ash_Ratio']!=0)&(transformed_data['Age']==28))]['Compressive_Strength']
plt.scatter(fly,strength)
```
### Train the Linear Model
```
# Reshape the data so it complies with the linear model requirements
X = np.array(fly).reshape(217,1)
y = np.array(strength).reshape(217,1)
# Perform a train-test split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=42)
# Train the linear model
lm = LinearRegression()
lm.fit(X_train,y_train)
```
### Test the Linear Model
```
y_pred = lm.predict(X_test)
```
### Linear Equation
```
# print the intercept
print(lm.intercept_)
coeff = pd.DataFrame(lm.coef_,columns=['Coefficient'])
coeff
```
### Model Evaluation
```
# Plot the linear model preditions as a line superimposed on a scatter plot of the testing data
plt.scatter(X_test,y_test)
plt.plot(X_test,y_pred,'r')
# Evaluation Metrics
MAE_fly_28 = mean_absolute_error(y_test, y_pred)
MSE_fly_28 = mean_squared_error(y_test, y_pred)
RMSE_fly_28 = np.sqrt(mean_squared_error(y_test, y_pred))
fly_28_stats = [MAE_fly_28,MSE_fly_28,RMSE_fly_28] # storing for model comparison at the end of this notebook
# Print the metrics
print(f"EVALUATION METRICS, LINEAR MODEL FOR FLY ASH RATIO VS. COMPRESSIVE STRENGTH AT 28 DAYS")
print('-----------------------------')
print(f"Mean Absolute Error (MAE):\t\t{MAE_fly_28}\nMean Squared Error:\t\t\t{MSE_fly_28}\nRoot Mean Squared Error (RMSE):\t\t{RMSE_fly_28}")
print('-----------------------------\n\n')
```
## Superplasticizer Ratio Modeling - Including All Cure Times
The superplasticizer ratio is the ratio of superplasticizer contained within the total mix design, by weight.
### Visualization
```
# We will visualize the linear relationship between superplasticizer ratio and compressive strength
superplasticizer = transformed_data['Superplasticizer_Ratio']
strength = transformed_data['Compressive_Strength']
plt.scatter(superplasticizer,strength)
```
### Data Preprocessing
Once again, we see from the graph above that there are many instances where there is no superplasticizer in the mix design. Let us use only nonzero entries for our analysis.
```
superplasticizer = transformed_data[transformed_data['Superplasticizer_Ratio']!=0]['Superplasticizer_Ratio']
strength = transformed_data[transformed_data['Superplasticizer_Ratio']!=0]['Compressive_Strength']
plt.scatter(superplasticizer,strength)
```
This is better, but we see a large spread in the data. Let's remove any outliers first, before training our model.
```
superplasticizer.describe()
# Compute the 3-sigma outlier bounds directly from the data instead of
# hard-coding rounded values transcribed from the describe() output above.
mean = superplasticizer.mean()
three_sigma = 3 * superplasticizer.std()
upper = mean + three_sigma
lower = mean - three_sigma
print(f"The lower bound is:\t{lower}\nThe upper bound is:\t{upper}")
```
Since there are no negative ratios, we only need to remove data points where the superplasticizer ratio is greater than 0.009771.
```
# Keep only nonzero ratios below the upper outlier bound. Combine both
# conditions into a single boolean mask: chained selections like
# df[mask1][mask2] reindex the second full-length mask against the filtered
# frame and trigger pandas' "Boolean Series key will be reindexed" warning.
keep = (transformed_data['Superplasticizer_Ratio'] != 0) & (transformed_data['Superplasticizer_Ratio'] < upper)
superplasticizer = transformed_data[keep]['Superplasticizer_Ratio']
strength = transformed_data[keep]['Compressive_Strength']
plt.scatter(superplasticizer, strength)
```
### Train the Linear Model
```
# We will train and test our model only on the data above, that does not contain outliers
# Reshape the data so it complies with the linear model requirements
X = np.array(superplasticizer).reshape(641,1)
y = np.array(strength).reshape(641,1)
# Perform a train-test split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=42)
# Train the linear model
lm = LinearRegression()
lm.fit(X_train,y_train)
```
### Test the Linear Model
```
y_pred = lm.predict(X_test)
```
### Linear Equation
```
# print the intercept
print(lm.intercept_)
coeff = pd.DataFrame(lm.coef_,columns=['Coefficient'])
coeff
```
### Model Evaluation
```
# Plot the linear model preditions as a line superimposed on a scatter plot of the testing data
plt.scatter(X_test,y_test)
plt.plot(X_test,y_pred,'r')
# Evaluation Metrics
MAE_super = mean_absolute_error(y_test, y_pred)
MSE_super = mean_squared_error(y_test, y_pred)
RMSE_super = np.sqrt(mean_squared_error(y_test, y_pred))
super_stats = [MAE_super,MSE_super,RMSE_super] # storing for model comparison at the end of this notebook
# Print the metrics
print(f"EVALUATION METRICS, LINEAR MODEL FOR SUPERPLASTICIZER RATIO VS. COMPRESSIVE STRENGTH")
print('-----------------------------')
print(f"Mean Absolute Error (MAE):\t\t{MAE_super}\nMean Squared Error:\t\t\t{MSE_super}\nRoot Mean Squared Error (RMSE):\t\t{RMSE_super}")
print('-----------------------------\n\n')
```
## Superplasticizer Ratio Modeling - 28 Day Cure Time
The superplasticizer ratio is the ratio of superplasticizer contained within the total mix design, by weight.
### Visualization
```
superplasticizer = transformed_data[((transformed_data['Superplasticizer_Ratio']!=0)&(transformed_data['Age']==28))]['Superplasticizer_Ratio']
strength = transformed_data[((transformed_data['Superplasticizer_Ratio']!=0)&(transformed_data['Age']==28))]['Compressive_Strength']
plt.scatter(superplasticizer,strength)
```
This is better, but we see a large spread in the data. Let's remove any outliers first, before training our model.
```
superplasticizer.describe()
# Derive the 3-sigma bounds from the 28-day subset itself. The original
# reused the hard-coded mean/std of the *full* dataset (copy-paste from the
# earlier cell), which is the wrong distribution for this subset.
mean = superplasticizer.mean()
three_sigma = 3 * superplasticizer.std()
upper = mean + three_sigma
lower = mean - three_sigma
print(f"The lower bound is:\t{lower}\nThe upper bound is:\t{upper}")
```
Since there are no negative ratios, we only need to remove data points where the superplasticizer ratio is greater than 0.009771.
```
superplasticizer = transformed_data[((transformed_data['Superplasticizer_Ratio']!=0)&(transformed_data['Age']==28)&(transformed_data['Superplasticizer_Ratio']<upper))]['Superplasticizer_Ratio']
strength = transformed_data[((transformed_data['Superplasticizer_Ratio']!=0)&(transformed_data['Age']==28)&(transformed_data['Superplasticizer_Ratio']<upper))]['Compressive_Strength']
plt.scatter(superplasticizer,strength)
```
### Train the Linear Model
```
# We will train and test our model only on the data above, that does not contain outliers
# Reshape the data so it complies with the linear model requirements
X = np.array(superplasticizer).reshape(315,1)
y = np.array(strength).reshape(315,1)
# Perform a train-test split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=42)
# Train the linear model
lm = LinearRegression()
lm.fit(X_train,y_train)
```
### Test the Linear Model
```
y_pred = lm.predict(X_test)
```
### Linear Equation
```
# print the intercept
print(lm.intercept_)
coeff = pd.DataFrame(lm.coef_,columns=['Coefficient'])
coeff
```
### Model Evaluation
```
# Plot the linear model preditions as a line superimposed on a scatter plot of the testing data
plt.scatter(X_test,y_test)
plt.plot(X_test,y_pred,'r')
# Evaluation Metrics
MAE_super_28 = mean_absolute_error(y_test, y_pred)
MSE_super_28 = mean_squared_error(y_test, y_pred)
RMSE_super_28 = np.sqrt(mean_squared_error(y_test, y_pred))
super_stats_28 = [MAE_super_28,MSE_super_28,RMSE_super_28] # storing for model comparison at the end of this notebook
# Print the metrics
print(f"EVALUATION METRICS, LINEAR MODEL FOR SUPERPLASTICIZER RATIO VS. COMPRESSIVE STRENGTH AT 28 DAYS")
print('-----------------------------')
print(f"Mean Absolute Error (MAE):\t\t{MAE_super_28}\nMean Squared Error:\t\t\t{MSE_super_28}\nRoot Mean Squared Error (RMSE):\t\t{RMSE_super_28}")
print('-----------------------------\n\n')
```
## Model Comparisons Analysis
Neither superplasticizer linear model appeared to represent the data well from a visual perspective. The cement, cementitious ratio, and fly ash ratio linear models, however, did. We can display all of the evaluation metrics below and compare them to the artificial neural network's (ANN) performance.
```
ANN_metrics = [5.083552,6.466492**2,6.466492]
metrics = [cement_stats, cementitious_stats, fly_stats, super_stats, ANN_metrics]
metrics_28 = [cement_28_stats, cementitious_28_stats, fly_28_stats, super_stats_28, ANN_metrics]
metrics_df = pd.DataFrame(data=metrics, index=['Cement (Ignoring Cure Time)','Cementitious_Ratio (Ignoring Cure Time)','Fly_Ash_Ratio (Ignoring Cure Time)','Superplasticizer_Ratio (Ignoring Cure Time)','ANN (Function of Time)'], columns=['MAE','MSE','RMSE'])
metrics_28_df = pd.DataFrame(data=metrics_28, index=['Cement (Cure Time = 28 Days)','Cementitious_Ratio (Cure Time = 28 Days)','Fly_Ash_Ratio (Cure Time = 28 Days)','Superplasticizer_Ratio (Cure Time = 28 Days)','ANN (Function of Time)'], columns=['MAE','MSE','RMSE'])
metrics_df
metrics_28_df
```
## Conclusions & Recommendations
By comparing the evaluation metrics for all models, we conclude that the ANN model performed significantly better than all of the linear models. It outperformed the best linear model's RMSE (for Fly_Ash_Ratio at 28 Days) by over 30%! An important note is that the linear models were not scaled, and the ANN model was. We kept the linear models biased in order to maintain coefficient interpretability, whereas that was not relevant to the ANN model.
What is surprising is that the ANN model still outperformed the linear models, even when controlling for cure time at 28 days. Perhaps the most startling insight is that the fly ash ratio was even more accurate at predicting concrete compressive strength than the cement quantity, to the point that it had the lowest errors of all of the linear models. We therefore recommend that engineers give very conservative fly ash ratio specifications when allowing substitutions for Portland cement.
| github_jupyter |
```
# default_exp vr_parser
#hide_input
import pivotpy as pp
pp.nav_links(1)
```
# Xml Parser
> This parser contains functions to extract data from vasprun.xml. All functions in xml parser can work without arguments if working directory contains `vasprun.xml`.
- Almost every object in this module returns a `Dict2Data` object with attributes accessible via dot notation. This object can be transformed to a dictionary with the `to_dict()` method on the object.
```
#export
import re
import os
import json
import pickle
from itertools import islice, chain, product
from collections import namedtuple
import numpy as np
from importlib.machinery import SourceFileLoader
import textwrap
import xml.etree.ElementTree as ET
# Inside-package imports so the module works both as an installed package
# and when run directly from a jupyter notebook.
try:
    from pivotpy import g_utils as gu
    from pivotpy.sio import read_ticks
except ImportError:  # narrowed: a bare except also swallowed SystemExit/KeyboardInterrupt
    import pivotpy.g_utils as gu
    # `import a.b.name as name` only works when `name` is a module;
    # read_ticks is an attribute of pivotpy.sio (see the try branch), so a
    # from-import is required here too.
    from pivotpy.sio import read_ticks
#hide_input
# To run notebook smoothly, not for module export
from nbdev.showdoc import show_doc
from pivotpy.vr_parser import dump_dict,load_export
#export
def dict2tuple(name,d):
    """Recursively convert a dictionary (nested as well) to a namedtuple,
    accessible via index and dot notation as well as by unpacking.
    - **Parameters**
        - name: Name of the tuple.
        - d   : Dictionary, nested works as well.
    """
    # Nested dicts become nested namedtuples whose type name is the
    # upper-cased key they were stored under.
    values = [dict2tuple(key.upper(), value) if isinstance(value, dict) else value
              for key, value in d.items()]
    return namedtuple(name, d.keys())(*values)
#export
class Dict2Data:
    """
    - Returns a Data object with dictionary keys as attributes of Data accessible by dot notation or by key. Once an attribute is created, it can not be changed from outside.
    - **Parameters**
        - dict : Python dictionary (nested as well) containing any python data types.
    - **Methods**
        - to_dict  : Converts a Data object to dictionary if it could be made a dictionary, otherwise throws relevant error.
        - to_json  : Converts to json str or save to file if `outfile` given. Accepts `indent` as parameter.
        - to_pickle: Converts to bytes str or save to file if `outfile` given.
        - to_tuple : Converts to a named tuple.
    - **Example**
        > x = Dict2Data({'A':1,'B':{'C':2}})
        > x
        > Data(
        >   A = 1
        >   B = Data(
        >       C = 2
        >   )
        > )
        > x.B.to_dict()
        > {'C': 2}
    """
    def __init__(self,d):
        # Accept another Dict2Data: flatten it back to a plain dict first.
        if isinstance(d,Dict2Data):
            d = d.to_dict() # if nested Dict2Data objects, must expand here.
        for a,b in d.items():
            if isinstance(b,Dict2Data):
                b = b.to_dict() # expands self instance !must here.
            if isinstance(b,(list,tuple)):
                # Wrap dicts found inside sequences too, element by element.
                setattr(self,a,[Dict2Data(x) if isinstance(x,dict) else x for x in b])
            else:
                setattr(self,a,Dict2Data(b) if isinstance(b,dict) else b)
    def to_dict(self):
        """Converts a `Dict2Data` object (root or nested level) to a dictionary.
        """
        result = {}
        for k,v in self.__dict__.items():
            if isinstance(v,Dict2Data):
                # Recurse so nested Dict2Data values come back as plain dicts.
                result.update({k:Dict2Data.to_dict(v)})
            else:
                result.update({k:v})
        return result
    def to_json(self,outfile=None,indent=1):
        """Dumps a `Dict2Data` object (root or nested level) to json.
        - **Parameters**
            - outfile : Default is None and returns string. If given, writes to file.
            - indent : Json indent. Default is 1.
        """
        return dump_dict(self,dump_to='json',outfile=outfile,indent=indent)
    def to_pickle(self,outfile=None):
        """Dumps a `Dict2Data` object (root or nested level) to pickle.
        - **Parameters**
            - outfile : Default is None and returns string. If given, writes to file.
        """
        return dump_dict(self,dump_to='pickle',outfile=outfile)
    def to_tuple(self):
        """Creates a namedtuple."""
        return dict2tuple('Data',self.to_dict())
    def __repr__(self):
        # Summarize bulky values (arrays, long sequences) instead of dumping
        # their full contents into the repr.
        items= []
        for k,v in self.__dict__.items():
            if type(v) not in (str,float,int,range) and not isinstance(v,Dict2Data):
                if isinstance(v,np.ndarray):
                    v = "<{}:shape={}>".format(v.__class__.__name__,np.shape(v))
                elif type(v) in (list,tuple):
                    v = ("<{}:len={}>".format(v.__class__.__name__,len(v)) if len(v) > 10 else v)
                else:
                    v = v.__class__
            if isinstance(v,Dict2Data):
                # Indent nested Data blocks to reflect the hierarchy.
                v = repr(v).replace("\n","\n ")
            items.append(f" {k} = {v}")
        return "Data(\n{}\n)".format('\n'.join(items))
    def __getstate__(self):
        # NOTE(review): returns None, so pickle stores no instance state --
        # presumably to_pickle/dump_dict is the intended serialization path;
        # confirm before relying on pickle.dumps of this object directly.
        pass #This is for pickling
    def __setattr__(self, name, value):
        # Attributes are write-once: reassigning an existing one from
        # outside raises (set once in __init__, then frozen).
        if name in self.__dict__:
            raise AttributeError(f"Outside assignment is restricted for already present attribute.")
        else:
            self.__dict__[name] = value
    # Dictionary-wise access
    def keys(self):
        """Expose attribute names like dict.keys()."""
        return self.__dict__.keys()
    def __getitem__(self,key):
        # Allows obj['name'] as an alternative to obj.name.
        return self.__dict__[key]
    def items(self):
        """Expose (name, value) pairs like dict.items()."""
        return self.__dict__.items()
# Render docs for the public converters (nbdev `show_doc` helper).
show_doc(Dict2Data.to_dict)
show_doc(Dict2Data.to_json)
show_doc(Dict2Data.to_pickle)
show_doc(Dict2Data.to_tuple)
# Quick smoke test: build a Dict2Data and round-trip through every output format.
x = Dict2Data({'A':1,'B':2})
print('Dict: ',x.to_dict())
print('JSON: ',x.to_json())
print('Pickle: ',x.to_pickle())
print('Tuple: ',x.to_tuple())
x['A']
```
## Parser Functions
```
#export
def read_asxml(path=None):
    """
    - Reads a big vasprun.xml file into memory once and then apply commands. If current folder contains `vasprun.xml` file, it automatically picks it.
    - **Parameters**
        - path : Path/To/vasprun.xml
    - **Returns**
        - xml_data : Root xml Element to use in other functions, or None if the file is missing or misnamed.
    """
    if path is None:
        path = './vasprun.xml'
    if not os.path.isfile(path):
        # Fixed stray doubled quote that previously rendered as '<path>''.
        print("File: '{}' does not exist!".format(path))
        return # This is important to stop further errors.
    elif 'vasprun.xml' not in path:
        print("File name should be '*vasprun.xml'.")
        return # This is important to stop further errors.
    else:
        fsize = gu.get_file_size(path)
        value = float(fsize.split()[0])
        print_str = """
        Memory Consumption Warning!
        ---------------------------
        File: {} is large ({}). It may consume a lot of memory (generally 3 times the file size).
        An alternative way to parse vasprun.xml is by using the `Vasp2Visual` module in Powershell by command `pivotpy.load_export('path/to/vasprun.xml')`, which runs underlying powershell functions to load data with efficient memory management. It works on Windows/Linux/MacOS if you have powershell core and Vasp2Visual installed on it.
        """.format(path,fsize)
        # Warn (but do not block) for files larger than 200 MB or 1 GB.
        if 'MB' in fsize and value > 200:
            print(gu.color.y(textwrap.dedent(print_str)))
        elif 'GB' in fsize and value > 1:
            print(gu.color.y(textwrap.dedent(print_str)))
        tree = ET.parse(path)
        xml_data = tree.getroot()
        return xml_data
#export
def xml2dict(xmlnode_or_filepath):
    """Convert xml node or xml file content to dictionary. All output text is in string format, so further processing is required to convert into data types/split etc.
    - The only parameter `xmlnode_or_filepath` is either a path to an xml file or an `xml.etree.ElementTree.Element` object.
    - Each node has `tag,text,attr,nodes` attributes. Every text element can be accessed via
    `xml2dict()['nodes'][index]['nodes'][index]...` tree which makes it simple.
    """
    # Accept either a file path (parsed via read_asxml) or an already-parsed Element.
    if isinstance(xmlnode_or_filepath, str):
        node = read_asxml(xmlnode_or_filepath)
    else:
        node = xmlnode_or_filepath
    stripped_text = node.text.strip() if node.text else ''
    children = [xml2dict(sub) for sub in node]  # recurse over direct children
    return {'tag': node.tag, 'text': stripped_text, 'attr': node.attrib, 'nodes': children}
#export
def exclude_kpts(xml_data=None):
    """
    - Returns number of kpoints to exclude used from IBZKPT.
    - **Parameters**
        - xml_data : From `read_asxml` function
    - **Returns**
        - int : Number of kpoints to exclude.
    """
    if xml_data is None:
        xml_data = read_asxml()
    if not xml_data:
        return
    # Kpoints whose weight differs from the last weight came from IBZKPT — count them.
    for varr in xml_data.iter('varray'):
        if varr.attrib == {'name': 'weights'}:
            weights = [float(v.text.strip()) for v in varr.iter('v')]
            skipk = sum(1 for w in weights if w != weights[-1])
    return skipk
#export
def get_ispin(xml_data=None):
    """
    - Returns value of ISPIN.
    - **Parameters**
        - xml_data : From `read_asxml` function
    - **Returns**
        - int : Value of ISPIN, or None if the tag is absent.
    """
    if xml_data is None:
        xml_data = read_asxml()
    if not xml_data:
        return
    target_attr = {'type': 'int', 'name': 'ISPIN'}
    for tag_i in xml_data.iter('i'):
        if tag_i.attrib == target_attr:
            return int(tag_i.text)
#export
def get_summary(xml_data=None):
    """
    - Returns overview of system parameters.
    - **Parameters**
        - xml_data : From `read_asxml` function
    - **Returns**
        - Data : pivotpy.Dict2Data with attributes SYSTEM, NION, NELECT, TypeION,
          ElemName, ElemIndex, E_Fermi, ISPIN, fields and incar, accessible via dot notation.
    """
    if(xml_data==None):
        xml_data=read_asxml()
    if not xml_data:
        return
    # INCAR tags as a flat {name: value} dict (last <incar> block wins if repeated).
    for i_car in xml_data.iter('incar'):
        incar={car.attrib['name']:car.text.strip() for car in i_car}
    n_ions=[int(atom.text) for atom in xml_data.iter('atoms')][0]
    type_ions=[int(atom_types.text) for atom_types in xml_data.iter('types')][0]
    # <rc> rows: one element name per ion, followed by `type_ions` per-type counts at the end.
    elem=[info[0].text.strip() for info in xml_data.iter('rc')]
    elem_name=[]; #collect IONS names (order-preserving de-duplication)
    [elem_name.append(item) for item in elem[:-type_ions] if item not in elem_name]
    elem_index=[0]; #start index; cumulative sum of per-type counts
    [elem_index.append((int(entry)+elem_index[-1])) for entry in elem[-type_ions:]];
    ISPIN=get_ispin(xml_data=xml_data)
    # NELECT is stored as a float string; keep only the integer part.
    NELECT = int([i.text.strip().split('.')[0] for i in xml_data.iter('i') if i.attrib['name']=='NELECT'][0])
    # Fields
    # If no <partial> block exists, dos_fields is unbound and the NameError
    # from the filter line is caught, falling back to an empty list.
    try:
        for pro in xml_data.iter('partial'):
            dos_fields=[field.text.strip() for field in pro.iter('field')]
        dos_fields = [field for field in dos_fields if 'energy' not in field]
    except:
        dos_fields = []
    for i in xml_data.iter('i'): #efermi for condition required.
        if(i.attrib=={'name': 'efermi'}):
            efermi=float(i.text)
    #Writing information to a dictionary
    # NOTE(review): `incar` and `efermi` stay unbound if the xml lacks <incar>/efermi
    # entries — confirm vasprun.xml always provides both.
    info_dic={'SYSTEM':incar['SYSTEM'],'NION':n_ions,'NELECT':NELECT,'TypeION':type_ions,
             'ElemName':elem_name,'ElemIndex':elem_index,'E_Fermi': efermi,'ISPIN':ISPIN,
             'fields':dos_fields,'incar':incar}
    return Dict2Data(info_dic)
# Demo: parse the example vasprun.xml once and show the summary as a namedtuple.
import pivotpy.vr_parser as vp
xml_data=vp.read_asxml(path= '../vasprun.xml')
get_summary(xml_data=xml_data).to_tuple()
#export
def join_ksegments(kpath, kseg_inds=None):
    """Joins a broken kpath's next segment to the previous one.

    - **Parameters**
        - kpath : 1D sequence of cumulative kpath values.
        - kseg_inds : List of the first index of each next segment. Default is None
          (no joining). The default was a shared mutable `[]`; `None` avoids the
          mutable-default pitfall while remaining backward compatible (passing `[]` still works).
    - **Returns**
        - list : kpath with each listed segment shifted so it starts where the previous segment ended.
    """
    path_list = np.array(kpath)
    if kseg_inds:  # None and [] both mean nothing to join
        for ind in kseg_inds:
            # Shift the whole tail so point `ind` coincides with point `ind-1`.
            path_list[ind:] -= path_list[ind] - path_list[ind-1]
    return list(path_list)
def get_kpts(xml_data=None,skipk=0,kseg_inds=[]):
    r"""Returns kpoints and the calculated cumulative kpath.
    Parameters:
        xml_data
            From `read_asxml` function.
        skipk : int
            Number of initial kpoints to skip.
        kseg_inds : list
            List of indices of kpoints where path is broken.
    Returns:
        Data : pivotpy.Dict2Data
            with attributes `NKPTS`, `kpoints` and `kpath`.
    """
    if xml_data is None:
        xml_data = read_asxml()
    if not xml_data:
        return
    for varr in xml_data.iter('varray'):
        if varr.attrib == {'name': 'kpointlist'}:
            kpoints = [[float(coord) for coord in v.text.split()] for v in varr.iter('v')]
    kpoints = np.array(kpoints[skipk:])
    # kpath = cumulative Euclidean distance along consecutive kpoints, rounded to 6 digits.
    kpath = [0]
    for prev_pt, next_pt in zip(kpoints[:-1], kpoints[1:]):
        step = np.sqrt(np.sum((prev_pt - next_pt)**2))
        kpath.append(np.round(step + kpath[-1], 6))
    # If broken path, then join points.
    kpath = join_ksegments(kpath, kseg_inds)
    return Dict2Data({'NKPTS': len(kpoints), 'kpoints': kpoints, 'kpath': kpath})
# Demo: kpoints/kpath from the example file, skipping 10 weighted IBZKPT points.
get_kpts(xml_data=xml_data,skipk=10)
#export
def get_tdos(xml_data=None,spin_set=1,elim=[]):
    """
    - Returns total dos for a spin_set (default 1) and energy limit. If spin-polarized calculations, gives SpinUp and SpinDown keys as well.
    - **Parameters**
        - xml_data : From `read_asxml` function
        - spin_set : int, default is 1.
        - elim : List [min,max] of energy, default empty.
    - **Returns**
        - Data : pivotpy.Dict2Data with attributes E_Fermi, ISPIN, tdos (and grid_range when elim is given).
    """
    if(xml_data==None):
        xml_data=read_asxml()
    if not xml_data:
        return
    tdos=[]; #assign for safely exit if wrong spin set entered.
    ISPIN = get_ispin(xml_data=xml_data)
    # <dos> node: child [1] holds the total-dos <set> elements, one per spin.
    for neighbor in xml_data.iter('dos'):
        for item in neighbor[1].iter('set'):
            if(ISPIN==1 and spin_set==1):
                if(item.attrib=={'comment': 'spin 1'}):
                    tdos=np.array([[float(entry) for entry in arr.text.split()] for arr in item])
            if(ISPIN==2 and spin_set==1):
                # NOTE(review): tdos_1/tdos_2 are filled on separate loop passes and the
                # dict is built inside the 'spin 2' branch — relies on 'spin 1' appearing
                # before 'spin 2' in the file; confirm for all vasprun.xml writers.
                if(item.attrib=={'comment': 'spin 1'}):
                    tdos_1=np.array([[float(entry) for entry in arr.text.split()] for arr in item])
                if(item.attrib=={'comment': 'spin 2'}):
                    tdos_2=np.array([[float(entry) for entry in arr.text.split()] for arr in item])
                    tdos = {'SpinUp':tdos_1,'SpinDown':tdos_2}
            if(spin_set!=1): #can get any
                if(item.attrib=={'comment': 'spin {}'.format(spin_set)}):
                    tdos=np.array([[float(entry) for entry in arr.text.split()] for arr in item])
    for i in xml_data.iter('i'): #efermi for condition required.
        if(i.attrib=={'name': 'efermi'}):
            efermi=float(i.text)
    dos_dic= {'E_Fermi':efermi,'ISPIN':ISPIN,'tdos':tdos}
    #Filtering in energy range.
    if elim: #check if elim not empty
        # Column 0 is energy; lo_ind/up_ind bound the elim window relative to E_Fermi.
        if(ISPIN==1 and spin_set==1):
            up_ind=np.max(np.where(tdos[:,0]-efermi<=np.max(elim)))+1
            lo_ind=np.min(np.where(tdos[:,0]-efermi>=np.min(elim)))
            tdos=tdos[lo_ind:up_ind,:]
        if(ISPIN==2 and spin_set==1):
            up_ind=np.max(np.where(tdos['SpinUp'][:,0]-efermi<=np.max(elim)))+1
            lo_ind=np.min(np.where(tdos['SpinUp'][:,0]-efermi>=np.min(elim)))
            tdos = {'SpinUp':tdos_1[lo_ind:up_ind,:],'SpinDown':tdos_2[lo_ind:up_ind,:]}
        if(spin_set!=1):
            up_ind=np.max(np.where(tdos[:,0]-efermi<=np.max(elim)))+1
            lo_ind=np.min(np.where(tdos[:,0]-efermi>=np.min(elim)))
            tdos=tdos[lo_ind:up_ind,:]
        # Rebuilt with the kept grid indices so projected DOS can be aligned later.
        dos_dic= {'E_Fermi':efermi,'ISPIN':ISPIN,'grid_range':range(lo_ind,up_ind),'tdos':tdos}
    return Dict2Data(dos_dic)
# Demo: total DOS over the full energy grid.
get_tdos(xml_data=xml_data,spin_set=1,elim=[])
#export
def get_evals(xml_data=None,skipk=None,elim=[]):
    """
    - Returns eigenvalues as numpy array. If spin-polarized calculations, gives SpinUp and SpinDown keys as well.
    - **Parameters**
        - xml_data : From `read_asxml` function
        - skipk : Number of initial kpoints to skip.
        - elim : List [min,max] of energy, default empty.
    - **Returns**
        - Data : pivotpy.Dict2Data with attributes E_Fermi, ISPIN, NBANDS, evals and indices (band-index range).
    """
    if(xml_data==None):
        xml_data=read_asxml()
    if not xml_data:
        return
    evals=[]; #assign for safely exit if wrong spin set entered.
    ISPIN=get_ispin(xml_data=xml_data)
    if skipk!=None:
        skipk=skipk # no-op: an explicitly given skipk is kept as-is
    else:
        skipk=exclude_kpts(xml_data=xml_data) #that much to skip by default
    # Each <r> row holds "energy occupancy"; only the energy (index 0) is kept.
    for neighbor in xml_data.iter('eigenvalues'):
        for item in neighbor[0].iter('set'):
            if(ISPIN==1):
                if(item.attrib=={'comment': 'spin 1'}):
                    evals=np.array([[float(th.text.split()[0]) for th in thing] for thing in item])[skipk:]
                    NBANDS=len(evals[0])
            if(ISPIN==2):
                # NOTE(review): relies on 'spin 1' preceding 'spin 2' in the file;
                # eval_1/eval_2 are combined on the 'spin 2' pass.
                if(item.attrib=={'comment': 'spin 1'}):
                    eval_1=np.array([[float(th.text.split()[0]) for th in thing] for thing in item])[skipk:]
                if(item.attrib=={'comment': 'spin 2'}):
                    eval_2=np.array([[float(th.text.split()[0]) for th in thing] for thing in item])[skipk:]
                    evals={'SpinUp':eval_1,'SpinDown':eval_2}
                    NBANDS=len(eval_1[0])
    for i in xml_data.iter('i'): #efermi for condition required.
        if(i.attrib=={'name': 'efermi'}):
            efermi=float(i.text)
    evals_dic={'E_Fermi':efermi,'ISPIN':ISPIN,'NBANDS':NBANDS,'evals':evals,'indices': range(NBANDS)}
    if elim: #check if elim not empty
        # Keep only band columns that intersect the energy window around E_Fermi.
        if(ISPIN==1):
            up_ind=np.max(np.where(evals[:,:]-efermi<=np.max(elim))[1])+1
            lo_ind=np.min(np.where(evals[:,:]-efermi>=np.min(elim))[1])
            evals=evals[:,lo_ind:up_ind]
        if(ISPIN==2):
            # Window is derived from SpinUp only and applied to both spins.
            up_ind=np.max(np.where(eval_1[:,:]-efermi<=np.max(elim))[1])+1
            lo_ind=np.min(np.where(eval_1[:,:]-efermi>=np.min(elim))[1])
            evals={'SpinUp':eval_1[:,lo_ind:up_ind],'SpinDown':eval_2[:,lo_ind:up_ind]}
        NBANDS = int(up_ind - lo_ind) #update Bands
        evals_dic['NBANDS'] = NBANDS
        evals_dic['indices'] = range(lo_ind,up_ind)
        evals_dic['evals'] = evals
    return Dict2Data(evals_dic)
# Demo: eigenvalues within 5 eV of E_Fermi.
get_evals(xml_data=xml_data,skipk=10,elim=[-5,5])
#export
def get_bands_pro_set(xml_data=None,
                      spin_set=1,
                      skipk=0,
                      bands_range=None,
                      set_path=None):
    """
    - Returns bands projection of a spin_set(default 1). If spin-polarized calculations, gives SpinUp and SpinDown keys as well.
    - **Parameters**
        - xml_data : From `read_asxml` function
        - skipk : Number of initial kpoints to skip (Default 0).
        - spin_set : Spin set to get, default is 1.
        - bands_range : If elim used in `get_evals`,that will return bands_range to use here. Note that range(0,2) will give 2 bands 0,1 but tuple (0,2) will give 3 bands 0,1,2.
        - set_path : path/to/_set[1,2,3,4].txt, works if `split_vasprun` is used before.
    - **Returns**
        - Data : pivotpy.Dict2Data with attributes `labels` and `pros` of shape (NION,NKPTS,NBANDS,NORBS).
    """
    if(bands_range!=None):
        check_list = list(bands_range)
        if check_list==[]:
            return print(gu.color.r("No bands prjections found in given energy range."))
    # Fast path: read a pre-split _set*.txt file instead of the xml tree.
    # Try to read _set.txt first. instance check is important.
    if isinstance(set_path,str) and os.path.isfile(set_path):
        # Header line encodes "...=NKPTS,NBANDS,NIONS,NORBS".
        _header = islice2array(set_path,nlines=1,raw=True,exclude=None)
        _shape = [int(v) for v in _header.split('=')[1].strip().split(',')]
        NKPTS, NBANDS, NIONS, NORBS = _shape
        if NORBS == 3:
            fields = ['s','p','d']
        elif NORBS == 9:
            fields = ['s','py','pz','px','dxy','dyz','dz2','dxz','x2-y2']
        else:
            fields = [str(i) for i in range(NORBS)] #orbital indices as fallback labels.
        COUNT = NIONS*NBANDS*(NKPTS-skipk)*NORBS
        start = NBANDS*NIONS*skipk
        nlines = None # Read till end.
        if bands_range:
            _b_r = list(bands_range)
            # First line is comment but it is taken out by exclude in islice2array.
            # One start offset per (kpoint, band) pair; each slice is NIONS lines long.
            start = [[NIONS*NBANDS*k + NIONS*b for b in _b_r] for k in range(skipk,NKPTS)]
            start = [s for ss in start for s in ss] #flatten
            nlines = NIONS # 1 band has nions
            NBANDS = _b_r[-1]-_b_r[0]+1 # update after start
            NKPTS = NKPTS-skipk # Update after start, and bands_range.
            COUNT = NIONS*NBANDS*NKPTS*NORBS
        data = islice2array(set_path,start=start,nlines=nlines,count=COUNT)
        # (NKPTS,NBANDS,NIONS,NORBS) -> (NIONS,NKPTS,NBANDS,NORBS)
        data = data.reshape((NKPTS,NBANDS,NIONS,NORBS)).transpose([2,0,1,3])
        return Dict2Data({'labels':fields,'pros':data})
    # if above not worked, read from main vasprun.xml file.
    if(xml_data==None):
        xml_data=read_asxml()
    if not xml_data:
        return
    #Collect Projection fields
    fields=[];
    for pro in xml_data.iter('projected'):
        for arr in pro.iter('field'):
            if('eig' not in arr.text and 'occ' not in arr.text):
                fields.append(arr.text.strip())
    NORBS = len(fields)
    #Get NIONS for reshaping data
    NIONS=[int(atom.text) for atom in xml_data.iter('atoms')][0]
    # NOTE: projected sets use 'spin1' (no space), unlike 'spin 1' in dos/eigenvalue sets.
    for spin in xml_data.iter('set'):
        if spin.attrib=={'comment': 'spin{}'.format(spin_set)}:
            k_sets = [kp for kp in spin.iter('set') if 'kpoint' in kp.attrib['comment']]
            k_sets = k_sets[skipk:]
            NKPTS = len(k_sets)
            band_sets = []
            for k_s in k_sets:
                b_set = [b for b in k_s.iter('set') if 'band' in b.attrib['comment']]
                if bands_range == None:
                    band_sets.extend(b_set)
                else:
                    b_r = list(bands_range)
                    band_sets.extend(b_set[b_r[0]:b_r[-1]+1])
            NBANDS = int(len(band_sets)/len(k_sets))
    try:
        # Error prone solution but 5 times faster than list comprehension.
        bands_pro = (float(t) for band in band_sets for l in band.iter('r') for t in l.text.split())
        COUNT = NKPTS*NBANDS*NORBS*NIONS # Must be counted for performance.
        data = np.fromiter(bands_pro,dtype=float,count=COUNT)
    except:
        # Alternate slow solution
        print("Error using `np.fromiter`.\nFalling back to (slow) list comprehension...",end=' ')
        bands_pro = (l.text for band in band_sets for l in band.iter('r'))
        bands_pro = [[float(t) for t in text.split()] for text in bands_pro]
        data = np.array(bands_pro)
        del bands_pro # Release memory
        print("Done.")
    # (NKPTS,NBANDS,NIONS,NORBS) -> (NIONS,NKPTS,NBANDS,NORBS)
    data = data.reshape((NKPTS,NBANDS,NIONS,NORBS)).transpose((2,0,1,3))
    return Dict2Data({'labels':fields,'pros':data})
# Demo: band projections for the first band only.
get_bands_pro_set(xml_data,skipk=0,spin_set=1,bands_range=range(0, 1))
#export
def get_dos_pro_set(xml_data=None,spin_set=1,dos_range=None):
    """
    - Returns dos projection of a spin_set(default 1) as numpy array. If spin-polarized calculations, gives SpinUp and SpinDown keys as well.
    - **Parameters**
        - xml_data : From `read_asxml` function
        - spin_set : Spin set to get, default 1.
        - dos_range : If elim used in `get_tdos`, that will return dos_range to use here.
    - **Returns**
        - Data : pivotpy.Dict2Data with attributes `labels` (field names) and `pros` of shape (NION, e_grid, pro_fields).
    """
    if(dos_range!=None):
        check_list=list(dos_range)
        if(check_list==[]):
            return print(gu.color.r("No DOS prjections found in given energy range."))
    if(xml_data==None):
        xml_data=read_asxml()
    if not xml_data:
        return
    n_ions=get_summary(xml_data=xml_data).NION
    for pro in xml_data.iter('partial'):
        dos_fields=[field.text.strip()for field in pro.iter('field')]
        #Collecting projections.
        dos_pro=[]; set_pro=[]; #set_pro=[] in case spin set does not exists
        # For each ion: locate its <set comment="ion i">, then the requested spin inside it.
        # NOTE(review): if the spin set is missing, the previous ion's `set_pro` is
        # re-appended (stale value) — confirm this fallback is intended.
        for ion in range(n_ions):
            for node in pro.iter('set'):
                if(node.attrib=={'comment': 'ion {}'.format(ion+1)}):
                    for spin in node.iter('set'):
                        if(spin.attrib=={'comment': 'spin {}'.format(spin_set)}):
                            set_pro=[[float(entry) for entry in r.text.split()] for r in spin.iter('r')]
            dos_pro.append(set_pro)
    if dos_range==None: #full grid computed.
        dos_pro=np.array(dos_pro) #shape(NION,e_grid,pro_fields)
    else:
        # Trim the energy grid to the indices selected by get_tdos(elim=...).
        dos_range=list(dos_range)
        min_ind=dos_range[0]
        max_ind=dos_range[-1]+1
        dos_pro=np.array(dos_pro)[:,min_ind:max_ind,:]
    final_data=np.array(dos_pro) #shape(NION,e_grid,pro_fields)
    return Dict2Data({'labels':dos_fields,'pros':final_data})
#export
def get_structure(xml_data=None):
    """
    - Returns structure's volume,basis,positions and rec-basis.
    - **Parameters**
        - xml_data : From `read_asxml` function.
    - **Returns**
        - Data : pivotpy.Dict2Data with attributes SYSTEM, volume, basis, rec_basis, positions, labels and unique.
    """
    if(xml_data==None):
        xml_data=read_asxml()
    if not xml_data:
        return
    SYSTEM = [i.text for i in xml_data.iter('i') if i.attrib['name'] == 'SYSTEM'][0]
    # Only the final relaxed structure ('finalpos') is read.
    for final in xml_data.iter('structure'):
        if(final.attrib=={'name': 'finalpos'}):
            for i in final.iter('i'):
                volume=float(i.text)
            for arr in final.iter('varray'):
                if(arr.attrib=={'name': 'basis'}):
                    basis=[[float(a) for a in v.text.split()] for v in arr.iter('v')]
                if(arr.attrib=={'name': 'rec_basis'}):
                    rec_basis=[[float(a) for a in v.text.split()] for v in arr.iter('v')]
                if(arr.attrib=={'name': 'positions'}):
                    positions=[[float(a) for a in v.text.split()] for v in arr.iter('v')]
    # element labels
    # <rc> rows: per-ion element names first, then `types` per-type counts at the end.
    types = [int(_type.text) for _type in xml_data.iter('types')][0]
    elems = [info[0].text.strip() for info in xml_data.iter('rc')]
    _inds = np.array([int(a) for a in elems[-types:]])
    _nums = [k + 1 for i in _inds for k in range(i)]
    # zip truncates to len(_nums) == NIONS, dropping the trailing count rows of `elems`.
    labels = [f"{e} {i}" for i, e in zip(_nums,elems)]
    INDS = np.cumsum([0,*_inds]).astype(int)
    # NOTE(review): np.unique sorts names alphabetically while INDS follows file
    # order — `unique` ranges look wrong if element order in POSCAR is not
    # alphabetical (get_summary preserves file order instead). Confirm.
    Names = list(np.unique(elems[:-types]))
    unique_d = {e:range(INDS[i],INDS[i+1]) for i,e in enumerate(Names)}
    st_dic={'SYSTEM':SYSTEM,'volume': volume,'basis': np.array(basis),'rec_basis': np.array(rec_basis),'positions': np.array(positions),'labels':labels,'unique': unique_d}
    return Dict2Data(st_dic)
# Demo: final structure (volume, basis, positions, labels).
get_structure(xml_data=xml_data)
```
## Quick Export for Bandstructure
A fully comprehensive command that uses all functions and returns data for spin set 1 (set 1 and 2 if spin-polarized calculations) could be constructed for immediate usage. It is `export_vasprun()`.
```
#export
def export_vasprun(path=None,
                   skipk=None,
                   elim=[],
                   kseg_inds=[],
                   shift_kpath=0,
                   try_pwsh = True
                   ):
    """
    - Returns a full dictionary of all objects from `vasprun.xml` file. It first try to load the data exported by powershell's `Export-VR(Vasprun)`, which is very fast for large files. It is recommended to export large files in powershell first.
    - **Parameters**
        - path : Path to `vasprun.xml` file. Default is `'./vasprun.xml'`.
        - skipk : Default is None. Automatically detects kpoints to skip.
        - elim : List [min,max] of energy interval. Default is [], covers all bands.
        - kseg_inds : List of indices of kpoints where path is broken.
        - shift_kpath: Default 0. Can be used to merge multiple calculations on single axes side by side.
        - try_pwsh : Default is True and tries to load data exported by `Vasp2Visual` in Powershell.
    - **Returns**
        - Data : Data accessible via dot notation containing nested Data objects:
            - sys_info  : System Information
            - dim_info  : Contains information about dimensions of returned objects.
            - kpoints   : numpy array of kpoints with excluded IBZKPT points
            - kpath     : 1D numpy array directly accessible for plot.
            - bands     : Data containing bands.
            - tdos      : Data containing total dos.
            - pro_bands : Data containing bands projections.
            - pro_dos   : Data containing dos projections.
            - poscar    : Data containing basis,positions, rec_basis and volume.
    """
    # Try to get files if exported data in PowerShell.
    if try_pwsh:
        req_files = ['Bands.txt','tDOS.txt','pDOS.txt','Projection.txt','SysInfo.py']
        if path and os.path.isfile(path):
            req_files = [os.path.join(
                        os.path.dirname(os.path.abspath(path)),f) for f in req_files]
        logic = [os.path.isfile(f) for f in req_files]
        if all(logic): # all five exported files must be present
            print('Loading from PowerShell Exported Data...')
            return load_export(path=(path if path else './vasprun.xml'))
    # Proceed if not files from PWSH
    if path is None:
        path='./vasprun.xml'
    try:
        xml_data = read_asxml(path=path)
    except:
        return
    base_dir = os.path.split(os.path.abspath(path))[0]
    set_paths = [os.path.join(base_dir,"_set{}.txt".format(i)) for i in (1,2)]
    #First exclude unnecessary kpoints. Includes only same weight points
    if skipk is None:
        skipk = exclude_kpts(xml_data=xml_data) #that much to skip by default
    info_dic = get_summary(xml_data=xml_data) #Reads important information of system.
    #KPOINTS
    kpts = get_kpts(xml_data=xml_data,skipk=skipk,kseg_inds=kseg_inds)
    #EIGENVALS
    eigenvals = get_evals(xml_data=xml_data,skipk=skipk,elim=elim)
    #TDOS
    tot_dos = get_tdos(xml_data=xml_data,spin_set=1,elim=elim)
    #Bands and DOS Projection
    if elim:
        bands_range = eigenvals.indices #indices in range form.
        grid_range=tot_dos.grid_range
    else:
        bands_range=None #projection function will read itself.
        grid_range=None
    if(info_dic.ISPIN==1):
        pro_bands = get_bands_pro_set(xml_data=xml_data,spin_set=1,skipk=skipk,bands_range=bands_range,set_path=set_paths[0])
        pro_dos = get_dos_pro_set(xml_data=xml_data,spin_set=1,dos_range=grid_range)
    if(info_dic.ISPIN==2):
        pro_1 = get_bands_pro_set(xml_data=xml_data,spin_set=1,skipk=skipk,bands_range=bands_range,set_path=set_paths[0])
        pro_2 = get_bands_pro_set(xml_data=xml_data,spin_set=2,skipk=skipk,bands_range=bands_range,set_path=set_paths[1])
        pros={'SpinUp': pro_1.pros,'SpinDown': pro_2.pros}#accessing spins in dictionary after .pro.
        pro_bands={'labels':pro_1.labels,'pros': pros}
        pdos_1 = get_dos_pro_set(xml_data=xml_data,spin_set=1,dos_range=grid_range)
        # BUGFIX: previously read spin_set=1 here too, so SpinDown DOS
        # projections silently duplicated the SpinUp data.
        pdos_2 = get_dos_pro_set(xml_data=xml_data,spin_set=2,dos_range=grid_range)
        pdos={'SpinUp': pdos_1.pros,'SpinDown': pdos_2.pros}#accessing spins in dictionary after .pro.
        pro_dos={'labels':pdos_1.labels,'pros': pdos}
    #Structure
    poscar = get_structure(xml_data=xml_data)
    poscar = {'SYSTEM':info_dic.SYSTEM,**poscar.to_dict()}
    #Dimensions dictionary.
    dim_dic={'kpoints':'(NKPTS,3)','kpath':'(NKPTS,1)','bands':'⇅(NKPTS,NBANDS)','dos':'⇅(grid_size,3)','pro_dos':'⇅(NION,grid_size,en+pro_fields)','pro_bands':'⇅(NION,NKPTS,NBANDS,pro_fields)'}
    #Writing everything to be accessible via dot notation
    kpath=[k+shift_kpath for k in kpts.kpath] # shift kpath for side by side calculations.
    full_dic={'sys_info':info_dic,'dim_info':dim_dic,'kpoints':kpts.kpoints,'kpath':kpath,'bands':eigenvals,
              'tdos':tot_dos,'pro_bands':pro_bands,'pro_dos':pro_dos,'poscar': poscar}
    return Dict2Data(full_dic)
# Demo: full export with an energy window; falls back to xml parsing if no PWSH files exist.
export_vasprun(path='E:/Research/graphene_example/ISPIN_1/bands/vasprun.xml',elim=[-1,0],try_pwsh=True)
#export
def _validate_evr(path_evr=None,**kwargs):
    """Validates data given for plotting functions. Returns a tuple of (Boolean,data).

    - **Parameters**
        - path_evr : An already-exported `Dict2Data`, a path to a vasprun.xml file, or None (defaults to './vasprun.xml').
        - kwargs   : Passed through to `export_vasprun` (e.g. skipk, elim, kseg_inds).
    - **Returns**
        - (True, Data) when the data exposes `bands` and `kpath`; otherwise (False, path_evr).
    """
    vr = None  # previously left unbound on bad input, relying on a NameError being swallowed
    if type(path_evr) == Dict2Data:
        vr = path_evr
    elif path_evr is None:
        path_evr = './vasprun.xml'
    if isinstance(path_evr,str) and os.path.isfile(path_evr):
        # kwargs -> skipk=skipk,elim=elim,kseg_inds=kseg_inds
        vr = export_vasprun(path=path_evr,**kwargs)
    # Apply a robust final check: anything without bands/kpath is rejected.
    try:
        vr.bands; vr.kpath
        return (True,vr)
    except AttributeError:
        return (False,path_evr)
```
## Joining Multiple Calculations
- Sometimes one may need to compare two or more bandstructures in same figure, for that reason, it is easy to export two calculations and plot on same axis.
- There is another situation: if you have a large supercell and split the calculation into multiple ones, joining those calculations works the same way — the last kpath value of the first calculation is added to all values of the next kpath, and so on. Just use `shift_kpath` in `export_vasprun` and plot each export on the same axis; this aligns the bandstructures side by side.
## Load Exported Vasprun from PowerShell
On Windows, it will work automatically. On Linux/Mac it may require path to powershell executable.
```
#export
def load_export(path= './vasprun.xml',
                kseg_inds =[],
                shift_kpath = 0,
                path_to_ps='pwsh',
                skipk = None,
                max_filled = 10,
                max_empty = 10,
                keep_files = True
                ):
    """
    - Returns a full dictionary of all objects from `vasprun.xml` file exported using powershell.
    - **Parameters**
        - path       : Path to `vasprun.xml` file. Default is `'./vasprun.xml'`.
        - skipk      : Default is None. Automatically detects kpoints to skip.
        - path_to_ps : Path to `powershell.exe`. Automatically picks on Windows and Linux if added to PATH.
        - kseg_inds  : List of indices of kpoints where path is broken.
        - shift_kpath: Default 0. Can be used to merge multiple calculations side by side.
        - keep_files : Can be used to clean exported text files. Default is True.
        - max_filled : Number of filled bands below and including VBM. Default is 10.
        - max_empty  : Number of empty bands above VBM. Default is 10.
    - **Returns**
        - Data : Data accessible via dot notation containing nested Data objects:
            - sys_info  : System Information
            - dim_info  : Contains information about dimensions of returned objects.
            - kpoints   : numpy array of kpoints with excluded IBZKPT points
            - kpath     : 1D numpy array directly accessible for plot.
            - bands     : Data containing bands.
            - tdos      : Data containing total dos.
            - pro_bands : Data containing bands projections.
            - pro_dos   : Data containing dos projections.
            - poscar    : Data containing basis,positions, rec_basis and volume.
    """
    that_loc, file_name = os.path.split(os.path.abspath(path)) # abspath is important to split.
    with gu.set_dir(that_loc):
        # Goes there and work
        # Count how many of the exported files already exist; run PowerShell only if any is missing.
        i = 0
        required_files = ['Bands.txt','tDOS.txt','pDOS.txt','Projection.txt','SysInfo.py']
        for _file in required_files:
            if os.path.isfile(_file):
                i = i + 1
        if i < 5:
            if skipk != None:
                gu.ps2std(path_to_ps=path_to_ps,ps_command='Import-Module Vasp2Visual; Export-VR -InputFile {} -MaxFilled {} -MaxEmpty {} -SkipK {}'.format(path,max_filled,max_empty,skipk))
            else:
                gu.ps2std(path_to_ps=path_to_ps,ps_command='Import-Module Vasp2Visual; Export-VR -InputFile {} -MaxFilled {} -MaxEmpty {}'.format(path,max_filled,max_empty))
        # Enable loading SysInfo.py file as source.
        _vars = SourceFileLoader("SysInfo", "./SysInfo.py").load_module()
        SYSTEM = _vars.SYSTEM
        NKPTS = _vars.NKPTS
        NBANDS = _vars.NBANDS
        NFILLED = _vars.NFILLED
        TypeION = _vars.TypeION
        NION = _vars.NION
        NELECT = _vars.NELECT
        nField_Projection = _vars.nField_Projection
        E_Fermi = _vars.E_Fermi
        ISPIN = _vars.ISPIN
        ElemIndex = _vars.ElemIndex
        ElemName = _vars.ElemName
        poscar = {'SYSTEM': SYSTEM,
                  'volume':_vars.volume,
                  'basis' : np.array(_vars.basis),
                  'rec_basis': np.array(_vars.rec_basis),
                  'positions': np.array(_vars.positions)
                  }
        fields = _vars.fields
        incar = _vars.INCAR
        # Elements Labels, e.g. "Ga 1", "Ga 2", "As 1", ...
        elem_labels = []
        for i, name in enumerate(ElemName):
            for ind in range(ElemIndex[i],ElemIndex[i+1]):
                elem_labels.append(f"{name} {str(ind - ElemIndex[i] + 1)}")
        poscar.update({'labels': elem_labels})
        # Unique Elements Ranges
        unique_d = {}
        for i,e in enumerate(ElemName):
            unique_d.update({e:range(ElemIndex[i],ElemIndex[i+1])})
        poscar.update({'unique': unique_d})
        # Load Data
        bands= np.loadtxt('Bands.txt').reshape((-1,NBANDS+4)) #Must be read in 2D even if one row only.
        # Header token #5 looks like 'b<idx>'; its digits give the first exported band index.
        # BUGFIX: the file handle from open('Bands.txt') was never closed (resource leak).
        with open('Bands.txt') as _bands_file:
            start = int(_bands_file.readline().split()[4][1:])
        pro_bands= np.loadtxt('Projection.txt').reshape((-1,NBANDS*nField_Projection))
        pro_dos = np.loadtxt('pDOS.txt')
        dos= np.loadtxt('tDOS.txt')
        # Keep or delete only if python generates files (i < 5 case.)
        # NOTE(review): condition checks i==5 (all files pre-existed) although the
        # comment above says the i < 5 case — confirm which set should be removed.
        if(keep_files==False and i==5):
            for file in required_files:
                os.remove(file)
        # Returns back
        # Work now!
        sys_info = {'SYSTEM': SYSTEM,'NION': NION,'NELECT':NELECT,'TypeION': TypeION,'ElemName': ElemName,
                    'E_Fermi': E_Fermi,'fields':fields, 'incar': incar,'ElemIndex': ElemIndex,'ISPIN': ISPIN}
        dim_info = {'kpoints': '(NKPTS,3)','kpath': '(NKPTS,1)','bands': '⇅(NKPTS,NBANDS)','dos': '⇅(grid_size,3)',
                    'pro_dos': '⇅(NION,grid_size,en+pro_fields)','pro_bands': '⇅(NION,NKPTS,NBANDS,pro_fields)'}
        bands_dic,tdos_dic,pdos_dic,pro_dic,kpath={},{},{},{},[]
        if(ISPIN==1):
            # Bands.txt columns: kx,ky,kz,kpath,then NBANDS eigenvalues.
            kpath = bands[:,3]
            kpoints = bands[:,:3]
            evals = bands[:,4:]
            bands_dic = {'E_Fermi': E_Fermi, 'ISPIN': ISPIN, 'NBANDS': NBANDS, 'evals': evals, 'indices': range(start,start+NBANDS)}
            tdos_dic = {'E_Fermi': E_Fermi, 'ISPIN': ISPIN,'tdos': dos}
            pdos = pro_dos.reshape(NION,-1,nField_Projection+1)
            pdos_dic = {'labels': fields,'pros': pdos}
            pros = pro_bands.reshape(NION,NKPTS,NBANDS,-1)
            pro_dic = {'labels': fields,'pros': pros}
        if(ISPIN==2):
            # Spin-polarized layout: first NKPTS rows are SpinUp, the rest SpinDown.
            # Bands
            kpath = bands[:NKPTS,3]
            kpoints = bands[:NKPTS,:3]
            SpinUp = bands[:NKPTS,4:]
            SpinDown= bands[NKPTS:,4:]
            evals = {'SpinUp':SpinUp,'SpinDown': SpinDown}
            bands_dic = {'E_Fermi': E_Fermi, 'ISPIN': ISPIN, 'NBANDS': NBANDS, 'evals': evals,'indices': range(start,start+NBANDS)}
            # tDOS
            dlen = int(np.shape(dos)[0]/2)
            SpinUp = dos[:dlen,:]
            SpinDown= dos[dlen:,:]
            tdos = {'SpinUp':SpinUp,'SpinDown': SpinDown}
            tdos_dic= {'E_Fermi': E_Fermi, 'ISPIN': ISPIN,'tdos': tdos}
            # pDOS
            plen = int(np.shape(pro_dos)[0]/2)
            SpinUp = pro_dos[:plen,:].reshape(NION,-1,nField_Projection+1)
            SpinDown= pro_dos[plen:,:].reshape(NION,-1,nField_Projection+1)
            pdos = {'SpinUp':SpinUp,'SpinDown': SpinDown}
            pdos_dic= {'labels': fields,'pros': pdos}
            # projections
            pblen = int(np.shape(pro_bands)[0]/2)
            SpinUp = pro_bands[:pblen,:].reshape(NION,NKPTS,NBANDS,-1)
            SpinDown= pro_bands[pblen:,:].reshape(NION,NKPTS,NBANDS,-1)
            pros = {'SpinUp': SpinUp,'SpinDown': SpinDown}
            pro_dic = {'labels': fields,'pros': pros}
        # If broken path, then join points.
        kpath = join_ksegments(kpath,kseg_inds)
        kpath=[k+shift_kpath for k in kpath.copy()] # Shift kpath
        full_dic = {'sys_info': sys_info,'dim_info': dim_info,'kpoints': kpoints,'kpath':kpath, 'bands':bands_dic,'tdos':tdos_dic,'pro_bands': pro_dic ,'pro_dos': pdos_dic,
                    'poscar':poscar}
        return Dict2Data(full_dic)
```
This back and forth data transport is required in the [pivotpy-dash](https://github.com/massgh/pivotpy-dash) app, where data is stored in the browser in json format but needs to be Python objects for figures.
## Write Clean data to JSON or Pickle file
Use `dump_vasprun` to write output of `export_vasprun` or `load_export` to pickle/json file. Pickle is useful for quick load in python while json is useful to transfer data into any language.
```
#export
def dump_dict(dict_data = None, dump_to = 'pickle',outfile = None,indent=1):
    """
    - Dump an `export_vasprun` or `load_export`'s `Data` object or any dictionary to json or pickle string/file. It convert `Dict2Data` to dictionary before serializing to json/pickle, so json/pickle.loads() of converted Data would be a simple dictionary, pass that to `Dict2Data` to again make accessible via dot notation.
    - **Parameters**
        - dict_data : Any dictionary/Dict2Data object containg numpy arrays, including `export_vasprun` or `load_export` output.
        - dump_to : Default is `pickle`; the other option is `json`.
        - outfile : Default is None and returns a string/bytes. The file extension is set automatically.
        - indent : Default is 1. Only works for json.
    """
    if dump_to not in ['pickle','json']:
        return print("`dump_to` expects 'pickle' or 'json', got '{}'".format(dump_to))
    # Flatten a Data object to a plain dict; plain dicts pass through untouched.
    # (hasattr replaces a bare try/except that silently swallowed every error.)
    dict_obj = dict_data.to_dict() if hasattr(dict_data,'to_dict') else dict_data
    if dump_to == 'pickle':
        if outfile is None:
            return pickle.dumps(dict_obj)
        # BUGFIX: outfile.split('.')[0] broke any path containing a dot in a
        # directory component (e.g. './out' became '.pickle'); splitext only
        # strips the final extension and keeps the directory parts intact.
        outfile = os.path.splitext(outfile)[0] + '.pickle'
        with open(outfile,'wb') as f:
            pickle.dump(dict_obj,f)
    if dump_to == 'json':
        if outfile is None:
            return json.dumps(dict_obj,cls = gu.EncodeFromNumpy,indent=indent)
        outfile = os.path.splitext(outfile)[0] + '.json'
        with open(outfile,'w') as f:
            json.dump(dict_obj,f,cls = gu.EncodeFromNumpy,indent=indent)
    return None
#export
def load_from_dump(file_or_str,keep_as_dict=False):
    """
    - Loads a json/pickle dumped file or string by auto detecting it.
    - **Parameters**
        - file_or_str : Filename of pickle/json, or their string/bytes content.
        - keep_as_dict: Default is False and returns `Data` object. If True, returns dictionary.
    """
    out = {}
    # bytes input can only be pickle content; str input may be a filename or json text.
    # NOTE(review): pickle.loads on untrusted input can execute arbitrary code —
    # only load dumps produced by this package.
    if not isinstance(file_or_str,bytes):
        try: #must try, else fails due to path length issue
            if os.path.isfile(file_or_str):
                if '.pickle' in file_or_str:
                    with open(file_or_str,'rb') as f:
                        out = pickle.load(f)
                elif '.json' in file_or_str:
                    with open(file_or_str,'r') as f:
                        out = json.load(f,cls = gu.DecodeToNumpy)
            else: out = json.loads(file_or_str,cls = gu.DecodeToNumpy)
        # json.loads required in else and except both as long str > 260 causes issue in start of try block
        except: out = json.loads(file_or_str,cls = gu.DecodeToNumpy)
    elif isinstance(file_or_str,bytes):
        out = pickle.loads(file_or_str)
    # Wrap a plain dict back into dot-notation Data unless the caller opted out.
    if type(out) is dict and keep_as_dict == False:
        return Dict2Data(out)
    return out
import pivotpy as pp
# Round-trip demo: export a vasprun, dump its POSCAR data to a pickle string,
# then load it back into an accessible object.
evr = pp.export_vasprun('../vasprun.xml')
s = dump_dict(evr.poscar,dump_to='pickle')
#print(s)
load_from_dump(s)
```
## Parse Text Files with Flexibility
- The function `islice2array` is used to read text files which have patterns of text and numbers inline, such as EIGENVAL and PROCAR. With all the options of this function, reading and parsing of such files should take a few lines of code only. It can be used to read txt, csv and tsv files as well with efficient speed.
- It reads a file without fully loading into memory and you can still access slices of data in the file. That partial data fetching from file is very handy.
```
#export
def islice2array(path_or_islice,dtype=float,delimiter='\s+',
                 include=None,exclude='#',raw=False,fix_format = True,
                 start=0,nlines=None,count=-1,cols=None,new_shape=None
                 ):
    """
    - Reads a sliced array from txt/csv type files and returns an array. Also manages the case where column lengths are not equal and returns a 1D array. It is faster than loading the whole file into memory. This single function can be used to parse EIGENVAL, PROCAR, DOSCAR and similar files with just a combination of `exclude, include, start, nlines, cols` arguments.
    - **Parameters**
        - path_or_islice: Path/to/file or `itertools.islice(file_object)`. islice is interesting when you want to read different slices of an opened file and do not want to open it again and again. For reference on how to use it, execute `pivotpy.export_potential??` in a notebook cell or ipython terminal to see how islice is used extensively.
        - dtype: float by default. Data type of the output array; it is a required argument.
        - start,nlines: The index of the line to start reading from and the number of lines after start, respectively. Only work if `path_or_islice` is a file path. Both can be None or int, while start can also be a list of indices (provided that nlines is int) to read several slices from the file. The spacing between adjacent indices in start should be equal to or greater than nlines, as the pointer in the file does not go back on its own. These parameters are in the output of `slice_data`.
            > Note: `start` should count comment lines too if `exclude` is None. You can use the `slice_data` function to get a dictionary of `start, nlines, count, cols, new_shape` and unpack it in the arguments instead of thinking too much.
        - count: `np.size(output_array) = nrows x ncols`; if it is known before execution, performance is increased. This parameter is in the output of `slice_data`.
        - delimiter: Default is `\s+`. Could be any kind of delimiter valid in numpy and present in the file.
        - cols: List of indices of columns to pick. Useful when reading a file like PROCAR which e.g. has text and numbers inline. This parameter is in the output of `slice_data`.
        - include: Default is None and includes everything. String of patterns separated by | to keep; can be a regular expression.
        - exclude: Default is '#' to remove comments. String of patterns separated by | to drop; can be a regular expression.
        - raw : Default is False; if True, returns the selected lines as one raw string. Useful to eyeball which `cols` to pick.
        - fix_format: Default is True; it separates numbers with poor formatting, like 1.000-2.000, into 1.000 -2.000, which is useful in PROCAR. Keep it False if you want to read strings literally.
        - new_shape : Tuple of shape. Default is None. Will try to reshape into this shape; if that fails it falls back to 2D or 1D. This parameter is in the output of `slice_data`.
    - **Examples**
        > `islice2array('path/to/PROCAR',start=3,include='k-point',cols=[3,4,5])[:2]`
        > array([[ 0.125,  0.125,  0.125],
        >        [ 0.375,  0.125,  0.125]])
        > `islice2array('path/to/EIGENVAL',start=7,exclude='E',cols=[1,2])[:2]`
        > array([[-11.476913,   1.      ],
        >        [  0.283532,   1.      ]])
    > Note: Slicing a dimension to 100% of its data is faster than, say, 80% for inner dimensions; so if you have to slice more than 50% of an inner dimension, just load the full data and slice afterwards.
    """
    # A list of start indices without a chunk length is ambiguous; bail out early.
    if nlines is None and isinstance(start,(list,np.ndarray)):
        print("`nlines = None` with `start = array/list` is useless combination.")
        return np.array([]) # return empty array.
    def _fixing(_islice,include=include, exclude=exclude,fix_format=fix_format,nlines=nlines,start=start):
        # Filter lines by include/exclude regexes, then slice the requested
        # line ranges, then (optionally) repair glued negative numbers.
        if include:
            _islice = (l for l in _islice if re.search(include,l))
        if exclude:
            _islice = (l for l in _islice if not re.search(exclude,l))
        # Make slices here after comment excluding.
        if isinstance(nlines,int) and isinstance(start,(list,np.ndarray)):
            # As islice moves the pointer forward while it reads, each later
            # start index must be rebased: spacing between two indices in
            # start must be >= nlines for this to be valid.
            start = [start[0],*[s2-s1-nlines for s1,s2 in zip(start,start[1:])]]
            _islice = chain(*(islice(_islice,s,s+nlines) for s in start))
        elif isinstance(nlines,int) and isinstance(start,int):
            _islice = islice(_islice,start,start+nlines)
        elif nlines is None and isinstance(start,int):
            _islice = islice(_islice,start,None)
        # Negative connected digits to avoid, especially in PROCAR (e.g. 1.000-2.000)
        if fix_format:
            _islice = (re.sub(r"(\d)-(\d)",r"\1 -\2",l) for l in _islice)
        return _islice
    def _gen(_islice,cols=cols):
        # Yield one converted scalar at a time; np.fromiter consumes this lazily.
        # NOTE(review): `delimiter` is used with str.replace here, not as a regex,
        # so the default '\s+' is never matched literally; whitespace splitting is
        # actually done by .split(). Confirm before relying on regex delimiters.
        for line in _islice:
            line = line.strip().replace(delimiter,' ').split()
            if line and cols is not None: # `if line` is a must here (skip blanks).
                line = [line[i] for i in cols]
            for chars in line:
                yield dtype(chars)
    # Process now
    if isinstance(path_or_islice,str) and os.path.isfile(path_or_islice):
        with open(path_or_islice,'r') as f:
            _islice = islice(f,0,None) # Read full; _fixing slices it further.
            _islice = _fixing(_islice)
            if raw:
                return ''.join(_islice)
            # Must consume the islice while the file is still open
            data = np.fromiter(_gen(_islice),dtype=dtype,count=count)
    else:
        # path_or_islice is already an iterator over lines.
        _islice = _fixing(path_or_islice)
        if raw:
            return ''.join(_islice)
        data = np.fromiter(_gen(_islice),dtype=dtype,count=count)
    if new_shape:
        try: data = data.reshape(new_shape)
        except: pass
    elif cols: # Otherwise fall back to a 2D array with len(cols) columns.
        try: data = data.reshape((-1,len(cols)))
        except: pass
    return data
#export
def slice_data(dim_inds,old_shape):
    """
    - Returns a dictionary that can be unpacked in the arguments of the `islice2array` function. This function works only for regular txt/csv/tsv data files which have rectangular data written.
    - **Parameters**
        - dim_inds : List of index lists/arrays/ranges to pick from each dimension. Inner dimensions are more towards the right. The last item in dim_inds is considered to be the columns. If you want to include all values in a dimension, you can put -1 in that dimension. Note that negative indexing does not work in file reading; -1 is a special case to fetch all items.
        - old_shape: Shape of the data set including the columns length in the right-most place.
    - **Example**
        - You have data as a 3D array where the third dimension is along the column.
            > 0 0
            > 0 2
            > 1 0
            > 1 2
        - To pick [[0,2], [1,2]], you need to give
            > slice_data(dim_inds = [[0,1],[1],-1], old_shape=(2,2,2))
            > {'start': array([1, 3]), 'nlines': 1, 'count': 2}
        - Unpack the above dictionary in `islice2array` and you will get the output array.
        - Note that dimensions are packed from right to left, like 0,2 is repeating in the 2nd column.
    """
    # Columns are treated differently from the row dimensions: they become `cols`.
    if dim_inds[-1] == -1:
        cols = None
    else:
        cols = list(dim_inds[-1])
    r_shape = old_shape[:-1] # row dimensions only
    dim_inds = dim_inds[:-1]
    # Expand the -1 shorthand into the full range of that dimension.
    for i,ind in enumerate(dim_inds.copy()):
        if ind == -1:
            dim_inds[i] = range(r_shape[i])
    nlines = 1
    # Flat line index of every selected row: dot product of the multi-index
    # with the row-major strides of the row dimensions.
    _prod_ = product(*dim_inds)
    # np.prod (np.product was deprecated and removed in NumPy 2.0)
    _mult_ = [np.prod(r_shape[i+1:]) for i in range(len(r_shape))]
    _out_ = np.array([np.dot(p,_mult_) for p in _prod_]).astype(int)
    # Check if innermost dimensions can be chunked into contiguous line blocks.
    step = 1
    for i in range(-1,-len(dim_inds),-1):
        _inds = np.array(dim_inds[i]) # innermost dimension first
        # A single index cannot prove contiguity (and the diff below would be
        # empty, crashing np.max); treat it as unchunkable.
        if len(_inds) > 1 and np.max(_inds[1:] - _inds[:-1]) == 1: # consecutive indices
            step = len(_inds)
            _out_ = _out_[::step] # Keep only the first index of each chunk
            nlines = step*nlines
        # Only chunk an outer dimension if the inner one is fully (100%) picked.
        if step != r_shape[i]:
            break # Stop more chunking
    new_shape = [len(inds) for inds in dim_inds] # dim_inds are only the row dims.
    new_shape.append(old_shape[-1])
    return {'start':_out_,'nlines':nlines,'count': nlines*len(_out_),'cols':cols,'new_shape':tuple(new_shape)}
slice_data([list(range(1,7)),-1,-1,range(2)],old_shape=[52,768,64,9])
```
## Process Large `vasprun.xml` Files
You can split a large vasprun.xml file into a small `_vasprun.xml` file which does not contain projected data, and `_set[1,2,3,4].txt` file(s) which contain the projected data of each spin set. These spin set text files can be processed efficiently by the `islice2array` function.
```
#export
def split_vasprun(path=None):
    """
    - Splits a given vasprun.xml file into a smaller _vasprun.xml file plus _set[1,2,3,4].txt files which contain the projected data for each spin set.
    - **Parameters**
        - path: path/to/vasprun.xml file. Default is './vasprun.xml'.
    - **Output**
        - _vasprun.xml file without the projected data.
        - _set1.txt for the projected data of a colinear calculation.
        - _set1.txt for spin-up and _set2.txt for spin-down data in a spin-polarized case.
        - _set[1,2,3,4].txt for each spin set of non-colinear calculations.
    """
    if not path:
        path = './vasprun.xml'
    if not os.path.isfile(path):
        return print("{!r} does not exist!".format(path))
    base_dir = os.path.split(os.path.abspath(path))[0]
    out_file = os.path.join(base_dir,'_vasprun.xml')
    out_sets = [os.path.join(base_dir,'_set{}.txt'.format(i)) for i in range(1,5)]
    # process
    with open(path,'r') as f:
        # First pass: record line numbers of the markers that bracket the
        # projected-data section; enumerate consumes f, so rewind afterwards.
        lines = islice(f,None)
        indices = [i for i,l in enumerate(lines) if re.search('projected|/eigenvalues',l)]
        f.seek(0)
        print("Writing {!r} ...".format(out_file),end=' ')
        with open(out_file,'w') as outf:
            # Everything before the projected block ...
            outf.write(''.join(islice(f,0,indices[1])))
            f.seek(0)
            # ... plus everything after it (islice advanced f, so rewind first).
            outf.write(''.join(islice(f,indices[-1]+1,None)))
        print('Done')
        f.seek(0)
        middle = islice(f,indices[-2]+1,indices[-1]) #projected words excluded
        spin_inds = [i for i,l in enumerate(middle) if re.search('spin',l)][1:] #first one useless.
        if len(spin_inds)>1:
            set_length = spin_inds[1]-spin_inds[0] # Must define
        else:
            set_length = indices[-1]-indices[-2] #It is technically more than the set length, but fine for 1 set
        f.seek(0) # Must be at zero
        N_sets = len(spin_inds)
        # Let's read the shape information back from out_file as well.
        xml_data = read_asxml(out_file)
        _summary = get_summary(xml_data)
        NIONS = _summary.NION
        NORBS = len(_summary.fields)
        NBANDS = get_evals(xml_data).NBANDS
        NKPTS = get_kpts(xml_data).NKPTS
        del xml_data # free memory now.
        for i in range(N_sets): #Reads every set
            print("Writing {!r} ...".format(out_sets[i]),end=' ')
            # After the first set, the file pointer is already positioned at the
            # start of the next set, hence start=0 for i > 0.
            start = (indices[-2]+1+spin_inds[0] if i==0 else 0)
            stop_ = start + set_length # Should move up to the set length only.
            with open(out_sets[i],'w') as setf:
                setf.write(" # Set: {} Shape: (NKPTS[NBANDS[NIONS]],NORBS) = {},{},{},{}\n".format(i+1,NKPTS,NBANDS,NIONS,NORBS))
                middle = islice(f,start,stop_)
                # Strip the XML tags so only whitespace-separated numbers remain.
                setf.write(''.join(l.lstrip().replace('/','').replace('<r>','') for l in middle if '</r>' in l))
            print('Done')
#hide_input
# Render pivotpy's notebook navigation links (documentation helper).
import pivotpy as pp
pp.nav_links(1)
```
| github_jupyter |
**Question1.** Create a function that takes three arguments a, b, c and returns the sum of the
numbers that are evenly divided by c from the range a, b inclusive.
<br>**Examples**
<br>evenly_divisible(1, 10, 20) ➞ 0
<br># No number between 1 and 10 can be evenly divided by 20.
<br>
<br>evenly_divisible(1, 10, 2) ➞ 30
<br># 2 + 4 + 6 + 8 + 10 = 30
<br>
<br>evenly_divisible(1, 10, 3) ➞ 18
<br># 3 + 6 + 9 = 18
**Answer:**
```
def evenly_divisible(a, b, c):
    """Return the sum of all numbers in the inclusive range [a, b] that are evenly divisible by c."""
    # A generator expression avoids shadowing the builtin `sum` with a local name,
    # as the original accumulator variable did.
    return sum(i for i in range(a, b + 1) if i % c == 0)
for a, b, c in [(1, 10, 20), (1, 10, 2), (1, 10, 3)]:
    print(evenly_divisible(a, b, c))
```
**Question2.** Create a function that returns True if a given inequality expression is correct and
False otherwise.
<br>**Examples**
<br>correct_signs("3 < 7 < 11") ➞ True
<br>
<br>correct_signs("13 > 44 > 33 > 1") ➞ False
<br>
<br>correct_signs("1 < 2 < 6 < 9 > 3") ➞ True
**Answer:**
```
import re
def correct_signs(expression):
    """Return True if every adjacent '<' / '>' comparison in the expression holds numerically.

    Fixes two defects of the original regex approach:
    - re.findall only returns non-overlapping matches, so overlapping adjacent
      pairs (e.g. the '7 < 11' in '3 < 7 < 11') were silently skipped;
    - operands were compared as strings, so e.g. '9 > 10' was evaluated
      lexicographically instead of numerically.
    """
    tokens = expression.split()
    numbers = [int(t) for t in tokens[::2]]  # operands sit at even positions
    operators = tokens[1::2]                 # '<' or '>' at odd positions
    # Check every adjacent (left op right) triple.
    for left, op, right in zip(numbers, operators, numbers[1:]):
        if op == '>' and not left > right:
            return False
        if op == '<' and not left < right:
            return False
    return True
for text in ["3 < 7 < 11", "13 > 44 > 33 > 1", "1 < 2 < 6 < 9 > 3"]:
    print(correct_signs(text))
```
**Question3.** Create a function that replaces all the vowels in a string with a specified character.
<br>**Examples**
<br>replace_vowels("the aardvark", "#") ➞ "th# ##rdv#rk"
<br>
<br>replace_vowels("minnie mouse", "?") ➞ "m?nn?? m??s?"
<br>
<br>replace_vowels("shakespeare", "*") ➞ "sh\*k\*sp\*\*r\*"
**Answer:**
```
def replace_vowels(text, char):
    """Return *text* with every lowercase vowel replaced by *char*."""
    # Replace one vowel at a time; iterating the string 'aeiou' directly.
    for letter in 'aeiou':
        text = text.replace(letter, char)
    return text
for word, mark in [("the aardvark", "#"), ("minnie mouse", "?"), ("shakespeare", "*")]:
    print(replace_vowels(text=word, char=mark))
```
**Question4.** Write a function that calculates the factorial of a number recursively.
<br>**Examples**
<br>factorial(5) ➞ 120
<br>factorial(3) ➞ 6
<br>factorial(1) ➞ 1
<br>factorial(0) ➞ 1
**Answer:**
```
def factorial(x):
    """Return x! computed recursively; factorial(0) == 1 is the base case."""
    return 1 if x == 0 else x * factorial(x - 1)
for number in [5, 3, 1, 0]:
    print(factorial(number))
```
**Question 5** Hamming distance is the number of characters that differ between two strings.
<br>To illustrate:
<br>String1: "abcbba"
<br>String2: "abcbda"
<br>Hamming Distance: 1 - "b" vs. "d" is the only difference.
<br>Create a function that computes the hamming distance between two strings.
<br>**Examples**
<br>hamming_distance("abcde", "bcdef") ➞ 5
<br>hamming_distance("abcde", "abcde") ➞ 0
<br>hamming_distance("strong", "strung") ➞ 1
**Answer:**
```
def hamming_distance(text1, text2):
    """Return the number of positions at which text1 and text2 differ.

    Raises ValueError when the strings have different lengths: the original
    crashed with an opaque IndexError when text2 was shorter than text1, and
    silently ignored trailing characters when text2 was longer.
    """
    if len(text1) != len(text2):
        raise ValueError("hamming_distance requires strings of equal length")
    # zip pairs characters position by position; each mismatch counts as 1.
    return sum(a != b for a, b in zip(text1, text2))
for elem in [("abcde", "bcdef"), ("abcde", "abcde"), ("strong", "strung")]:
    print(hamming_distance(text1=elem[0], text2=elem[1]))
```
| github_jupyter |
### Se pide, usando regresión lineal:
Dibujar con una línea la relación que hay entre la altura y la edad de los alumnos de la clase:
- ¿Es una buena técnica para este tipo de problemas? ¿Por qué?
- ¿Qué error se comete? Calcula los errores que está cometiendo tu modelo uno a uno (lo tienes que calcular tú con python). Aparte, usa el MSE y el RMSE. ¿Alguno es mejor para este problema?
- Representa la matriz de correlación, ¿los datos están correlacionados?
- ¿Qué ocurre si la altura se multiplica por dos? ¿Y si se multiplica solo la edad?
```
# (name, age in years, height in metres) for each student.
lista_edad_altura = [("Clara Piniella", 30, 1.66),("Daniel Walker", 34, 1.87), ("Leonardo Frazzetto", 31, 1.78),("Xinru Yang", 24, 1.60), ("Jorge Garcia", 28, 1.79), ("Jonathan Suárez", 27, 1.86),("Marina Serrano", 25, 1.54), ("Karina Inche", 30, 1.61), ("Mary Meza", 32, 1.52),("Borja Puig", 37, 1.82), ("Mauro Garcia-Oliva", 45, 1.77), ("José Carlos Batista", 28, 1.70),("Isabel Palomares",24,1.78),("Gina Garrido",57,1.63), ("Sonia Cobo", 29, 1.73), ("Miguel Barquero", 35, 1.85),("Nacho Astorga",47,1.78), ("Adrià Gallardo", 24, 1.70), ("Juan Bayon", 38, 1.64), ("Nacho Fontal", 35, 1.82)]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
# Split the (name, age, height) tuples into age (x) and height (y) arrays.
x=[]
y=[]
for value in lista_edad_altura:
    x.append(value[1])
    y.append(value[2])
x=np.array(x)
y=np.array(y)
# Scatter plot of age vs height to eyeball any linear relation.
plt.scatter(x,y, edgecolor='black')
```
No es un buen modelo porque los puntos no siguen ninguna relación.
```
# sklearn expects a 2D feature matrix of shape (n_samples, 1).
x = x.reshape(-1, 1)
x.shape
# Hold out 20% of the samples for validation.
x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.2)
lin_reg = LinearRegression()
lin_reg.fit(x_train,y_train)
x_val = x_val.reshape(-1,1)
y_val = y_val.reshape(-1,1)
y_val_predict = lin_reg.predict(x_val)
y_val_predict
y_val
# Per-sample errors (prediction - true value), computed manually one by one.
lista_dif=[]
for i,value in enumerate(y_val_predict):
    lista_dif.append(value-y_val[i])
lista_dif
# Aggregate error metrics on the validation split.
print('MAE:', metrics.mean_absolute_error(y_val, y_val_predict))
print('MSE:', metrics.mean_squared_error(y_val, y_val_predict))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_val, y_val_predict)))
y
# Build a DataFrame with height as the index, then move it into a column
# so that df.corr() yields the age/height correlation matrix.
df=pd.DataFrame(x,y)
df.head()
df.reset_index(inplace=True)
d={"index":"Altura",0:"Edad"}
df.rename(columns = d, inplace = True)
sns.heatmap(df.corr(), annot = True)
```
No están correlacionados
Las dispersiones serían dos veces mayores. Multiplicar el valor por 2 no implica un cambio con respecto
| github_jupyter |
# Deep Convolutional GANs
In this notebook, you'll build a GAN using convolutional layers in the generator and discriminator. This is called a Deep Convolutional GAN, or DCGAN for short. The DCGAN architecture was first explored in 2016 and has seen impressive results in generating new images; you can read the [original paper, here](https://arxiv.org/pdf/1511.06434.pdf).
You'll be training DCGAN on the [Street View House Numbers](http://ufldl.stanford.edu/housenumbers/) (SVHN) dataset. These are color images of house numbers collected from Google street view. SVHN images are in color and much more variable than MNIST.
<img src='assets/svhn_dcgan.png' width=80% />
So, our goal is to create a DCGAN that can generate new, realistic-looking images of house numbers. We'll go through the following steps to do this:
* Load in and pre-process the house numbers dataset
* Define discriminator and generator networks
* Train these adversarial networks
* Visualize the loss over time and some sample, generated images
#### Deeper Convolutional Networks
Since this dataset is more complex than our MNIST data, we'll need a deeper network to accurately identify patterns in these images and be able to generate new ones. Specifically, we'll use a series of convolutional or transpose convolutional layers in the discriminator and generator. It's also necessary to use batch normalization to get these convolutional networks to train.
Besides these changes in network structure, training the discriminator and generator networks should be the same as before. That is, the discriminator will alternate training on real and fake (generated) images, and the generator will aim to trick the discriminator into thinking that its generated images are real!
```
# import libraries
import matplotlib.pyplot as plt
import numpy as np
import pickle as pkl
%matplotlib inline
```
## Getting the data
Here you can download the SVHN dataset. It's a dataset built-in to the PyTorch datasets library. We can load in training data, transform it into Tensor datatypes, then create dataloaders to batch our data into a desired size.
```
import torch
from torchvision import datasets
from torchvision import transforms

# Tensor transform (converts PIL images to FloatTensors in [0, 1])
transform = transforms.ToTensor()

# SVHN training dataset (downloaded to data/ on first run)
svhn_train = datasets.SVHN(root='data/', split='train', download=True, transform=transform)

batch_size = 128
num_workers = 0  # 0 = load batches in the main process

# build DataLoaders for SVHN dataset
train_loader = torch.utils.data.DataLoader(dataset=svhn_train,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           num_workers=num_workers)
```
### Visualize the Data
Here I'm showing a small sample of the images. Each of these is 32x32 with 3 color channels (RGB). These are the real, training images that we'll pass to the discriminator. Notice that each image has _one_ associated, numerical label.
```
# obtain one batch of training images
dataiter = iter(train_loader)
# `.next()` on DataLoader iterators was removed; use the builtin next()
images, labels = next(dataiter)

# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
plot_size = 20
for idx in np.arange(plot_size):
    # integer division: add_subplot requires integer grid dimensions
    ax = fig.add_subplot(2, plot_size // 2, idx + 1, xticks=[], yticks=[])
    # channels-first (C, H, W) tensor -> channels-last (H, W, C) image
    ax.imshow(np.transpose(images[idx], (1, 2, 0)))
    # print out the correct label for each image
    # .item() gets the value contained in a Tensor
    ax.set_title(str(labels[idx].item()))
```
### Pre-processing: scaling from -1 to 1
We need to do a bit of pre-processing; we know that the output of our `tanh` activated generator will contain pixel values in a range from -1 to 1, and so, we need to rescale our training images to a range of -1 to 1. (Right now, they are in a range from 0-1.)
```
# current range
img = images[0]
print('Min: ', img.min())
print('Max: ', img.max())
# helper scale function
def scale(x, feature_range=(-1, 1)):
    ''' Scale takes in an image x and returns that image, scaled
       with a feature_range of pixel values from -1 to 1.
       This function assumes that the input x is already scaled from 0-1.
       - x : image tensor/array with values in [0, 1].
       - feature_range : (low, high) target range, default (-1, 1).
    '''
    # Map [0, 1] linearly onto [low, high].
    # BUG FIX: the original unpacked `Max, Min = feature_range`, which read the
    # low end into Max and the high end into Min, inverting the image
    # (0 -> 1 and 1 -> -1). Unpack in (low, high) order instead.
    low, high = feature_range
    x = x * (high - low) + low
    return x
# scaled range
scaled_img = scale(img)
print('Scaled min: ', scaled_img.min())
print('Scaled max: ', scaled_img.max())
```
---
# Define the Model
A GAN is comprised of two adversarial networks, a discriminator and a generator.
## Discriminator
Here you'll build the discriminator. This is a convolutional classifier like you've built before, only without any maxpooling layers.
* The inputs to the discriminator are 32x32x3 tensor images
* You'll want a few convolutional, hidden layers
* Then a fully connected layer for the output; as before, we want a sigmoid output, but we'll add that in the loss function, [BCEWithLogitsLoss](https://pytorch.org/docs/stable/nn.html#bcewithlogitsloss), later
<img src='assets/conv_discriminator.png' width=80%/>
For the depths of the convolutional layers I suggest starting with 32 filters in the first layer, then double that depth as you add layers (to 64, 128, etc.). Note that in the DCGAN paper, they did all the downsampling using only strided convolutional layers with no maxpooling layers.
You'll also want to use batch normalization with [nn.BatchNorm2d](https://pytorch.org/docs/stable/nn.html#batchnorm2d) on each layer **except** the first convolutional layer and final, linear output layer.
#### Helper `conv` function
In general, each layer should look something like convolution > batch norm > leaky ReLU, and so we'll define a function to put these layers together. This function will create a sequential series of a convolutional + an optional batch norm layer. We'll create these using PyTorch's [Sequential container](https://pytorch.org/docs/stable/nn.html#sequential), which takes in a list of layers and creates layers according to the order that they are passed in to the Sequential constructor.
Note: It is also suggested that you use a **kernel_size of 4** and a **stride of 2** for strided convolutions.
```
import torch.nn as nn
import torch.nn.functional as F
# helper conv function
def conv(in_channels, out_channels, kernel_size, stride=2, padding=1, batch_norm=True):
    """Build a strided convolution block: Conv2d optionally followed by BatchNorm2d.

    Returned as an nn.Sequential so the block can be used as a single layer.
    The conv carries no bias because batch norm (when present) has its own shift.
    """
    modules = [nn.Conv2d(in_channels, out_channels,
                         kernel_size, stride, padding, bias=False)]
    if batch_norm:
        modules.append(nn.BatchNorm2d(out_channels))
    return nn.Sequential(*modules)
class Discriminator(nn.Module):
    """Convolutional discriminator for 32x32x3 images.

    Three strided conv blocks (32x32 -> 16x16 -> 8x8 -> 4x4) followed by a
    single linear logit; the sigmoid is applied later by BCEWithLogitsLoss.
    """

    def __init__(self, conv_dim=32):
        super(Discriminator, self).__init__()
        self.conv_dim = conv_dim
        # first block has no batch norm; the deeper ones do
        self.conv1 = conv(3, conv_dim, 4, batch_norm=False)
        self.conv2 = conv(conv_dim, conv_dim * 2, 4, batch_norm=True)
        self.conv3 = conv(conv_dim * 2, conv_dim * 4, 4, batch_norm=True)
        # final 4x4 feature map flattened into one real/fake logit
        self.fc1 = nn.Linear(4 * 4 * conv_dim * 4, 1)
        self.lrelu = nn.LeakyReLU(0.2)

    def forward(self, x):
        # each conv block is followed by the shared leaky ReLU
        for block in (self.conv1, self.conv2, self.conv3):
            x = self.lrelu(block(x))
        # flatten and project to a single logit per image
        x = x.view(-1, 4 * 4 * self.conv_dim * 4)
        return self.fc1(x)
```
## Generator
Next, you'll build the generator network. The input will be our noise vector `z`, as before. And, the output will be a $tanh$ output, but this time with size 32x32 which is the size of our SVHN images.
<img src='assets/conv_generator.png' width=80% />
What's new here is we'll use transpose convolutional layers to create our new images.
* The first layer is a fully connected layer which is reshaped into a deep and narrow layer, something like 4x4x512.
* Then, we use batch normalization and a leaky ReLU activation.
* Next is a series of [transpose convolutional layers](https://pytorch.org/docs/stable/nn.html#convtranspose2d), where you typically halve the depth and double the width and height of the previous layer.
* And, we'll apply batch normalization and ReLU to all but the last of these hidden layers. Where we will just apply a `tanh` activation.
#### Helper `deconv` function
For each of these layers, the general scheme is transpose convolution > batch norm > ReLU, and so we'll define a function to put these layers together. This function will create a sequential series of a transpose convolutional + an optional batch norm layer. We'll create these using PyTorch's Sequential container, which takes in a list of layers and creates layers according to the order that they are passed in to the Sequential constructor.
Note: It is also suggested that you use a **kernel_size of 4** and a **stride of 2** for transpose convolutions.
```
# helper deconv function
def deconv(in_channels, out_channels, kernel_size, stride=2, padding=1, batch_norm=True):
    """Build an upsampling block: ConvTranspose2d optionally followed by BatchNorm2d.

    Returned as an nn.Sequential so the block can be used as a single layer.
    The transpose conv carries no bias because batch norm (when present) has its own shift.
    """
    modules = [nn.ConvTranspose2d(in_channels, out_channels,
                                  kernel_size, stride, padding, bias=False)]
    if batch_norm:
        modules.append(nn.BatchNorm2d(out_channels))
    return nn.Sequential(*modules)
class Generator(nn.Module):
    """DCGAN generator: latent vector z -> 32x32x3 image with tanh outputs in [-1, 1]."""

    def __init__(self, z_size, conv_dim=32):
        super(Generator, self).__init__()
        self.conv_dim = conv_dim
        # project z into a deep, narrow 4x4 feature map
        self.fc1 = nn.Linear(z_size, 4 * 4 * conv_dim * 4)
        self.deconv1 = deconv(conv_dim * 4, conv_dim * 2, 4, batch_norm=True)
        self.deconv2 = deconv(conv_dim * 2, conv_dim, 4, batch_norm=True)
        # last layer: no batch norm, 3 output channels (RGB)
        self.deconv3 = deconv(conv_dim, 3, 4, batch_norm=False)
        self.lrelu = nn.LeakyReLU(0.2)
        self.tanh = nn.Tanh()

    def forward(self, x):
        # fully connected projection, reshaped to (batch, 4*conv_dim, 4, 4)
        x = self.fc1(x)
        x = x.view(-1, self.conv_dim * 4, 4, 4)
        # two upsampling blocks, each followed by the shared leaky ReLU
        for block in (self.deconv1, self.deconv2):
            x = self.lrelu(block(x))
        # final upsample squashed to [-1, 1] by tanh
        return self.tanh(self.deconv3(x))
```
## Build complete network
Define your models' hyperparameters and instantiate the discriminator and generator from the classes defined above. Make sure you've passed in the correct input arguments.
```
# define hyperparams
conv_dim = 32  # base number of filters in D's first / G's last conv block
z_size = 100   # dimension of the latent vector z

# define discriminator and generator
D = Discriminator(conv_dim)
G = Generator(z_size=z_size, conv_dim=conv_dim)

print(D)
print()
print(G)
```
### Training on GPU
Check if you can train on GPU. If you can, set this as a variable and move your models to GPU.
> Later, we'll also move any inputs our models and loss functions see (real_images, z, and ground truth labels) to GPU as well.
```
# True when a CUDA device is visible to PyTorch
train_on_gpu = torch.cuda.is_available()

if train_on_gpu:
    # move models to GPU
    G.cuda()
    D.cuda()
    print('GPU available for training. Models moved to GPU')
else:
    print('Training on CPU.')
```
---
## Discriminator and Generator Losses
Now we need to calculate the losses. And this will be exactly the same as before.
### Discriminator Losses
> * For the discriminator, the total loss is the sum of the losses for real and fake images, `d_loss = d_real_loss + d_fake_loss`.
* Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.
The losses will be binary cross entropy loss with logits, which we can get with [BCEWithLogitsLoss](https://pytorch.org/docs/stable/nn.html#bcewithlogitsloss). This combines a `sigmoid` activation function **and** binary cross entropy loss in one function.
For the real images, we want `D(real_images) = 1`. That is, we want the discriminator to classify the real images with a label = 1, indicating that these are real. The discriminator loss for the fake data is similar. We want `D(fake_images) = 0`, where the fake images are the _generator output_, `fake_images = G(z)`.
### Generator Loss
The generator loss will look similar only with flipped labels. The generator's goal is to get `D(fake_images) = 1`. In this case, the labels are **flipped** to represent that the generator is trying to fool the discriminator into thinking that the images it generates (fakes) are real!
```
def real_loss(D_out, smooth=False):
    """BCE-with-logits loss of discriminator outputs against 'real' labels.

    With smooth=True the real labels are softened from 1 to 0.9
    (one-sided label smoothing).
    """
    batch_size = D_out.size(0)
    # real labels = 1, softened to 0.9 when smoothing is requested
    target_value = 0.9 if smooth else 1.0
    labels = torch.ones(batch_size) * target_value
    # move labels to GPU if available
    if train_on_gpu:
        labels = labels.cuda()
    # sigmoid + binary cross entropy in one numerically stable op
    criterion = nn.BCEWithLogitsLoss()
    return criterion(D_out.squeeze(), labels)
def fake_loss(D_out):
    """BCE-with-logits loss of discriminator outputs against 'fake' (0) labels."""
    batch_size = D_out.size(0)
    # fake labels = 0
    labels = torch.zeros(batch_size)
    # move labels to GPU if available
    if train_on_gpu:
        labels = labels.cuda()
    # sigmoid + binary cross entropy in one numerically stable op
    criterion = nn.BCEWithLogitsLoss()
    return criterion(D_out.squeeze(), labels)
```
## Optimizers
Not much new here, but notice how I am using a small learning rate and custom parameters for the Adam optimizers. This is based on some research into DCGAN model convergence.
### Hyperparameters
GANs are very sensitive to hyperparameters. A lot of experimentation goes into finding the best hyperparameters such that the generator and discriminator don't overpower each other. Try out your own hyperparameters or read [the DCGAN paper](https://arxiv.org/pdf/1511.06434.pdf) to see what worked for them.
```
import torch.optim as optim

# params
lr = 0.0002   # small learning rate, as recommended in the DCGAN paper
beta1=0.5     # lower beta1 than Adam's default stabilizes GAN training
beta2=0.999

# Create optimizers for the discriminator and generator
d_optimizer = optim.Adam(D.parameters(), lr, [beta1, beta2])
g_optimizer = optim.Adam(G.parameters(), lr, [beta1, beta2])
```
---
## Training
Training will involve alternating between training the discriminator and the generator. We'll use our functions `real_loss` and `fake_loss` to help us calculate the discriminator losses in all of the following cases.
### Discriminator training
1. Compute the discriminator loss on real, training images
2. Generate fake images
3. Compute the discriminator loss on fake, generated images
4. Add up real and fake loss
5. Perform backpropagation + an optimization step to update the discriminator's weights
### Generator training
1. Generate fake images
2. Compute the discriminator loss on fake images, using **flipped** labels!
3. Perform backpropagation + an optimization step to update the generator's weights
#### Saving Samples
As we train, we'll also print out some loss statistics and save some generated "fake" samples.
**Evaluation mode**
Notice that, when we call our generator to create the samples to display, we set our model to evaluation mode: `G.eval()`. That's so the batch normalization layers will use the population statistics rather than the batch statistics (as they do during training), *and* so dropout layers will operate in eval() mode; not turning off any nodes for generating samples.
```
import pickle as pkl
# training hyperparams
num_epochs = 30
# keep track of loss and generated, "fake" samples
samples = []
losses = []
print_every = 300
# Get some fixed data for sampling. These are images that are held
# constant throughout training, and allow us to inspect the model's performance
sample_size=16
fixed_z = np.random.uniform(-1, 1, size=(sample_size, z_size))
fixed_z = torch.from_numpy(fixed_z).float()
# train the network
# One full pass over the dataset per epoch; D and G are updated in
# alternation on every mini-batch.
for epoch in range(num_epochs):
    for batch_i, (real_images, _) in enumerate(train_loader):
        batch_size = real_images.size(0)
        # important rescaling step
        # (the generator's tanh output lives in [-1, 1], so real inputs must too)
        real_images = scale(real_images)
        # ============================================
        #            TRAIN THE DISCRIMINATOR
        # ============================================
        d_optimizer.zero_grad()
        # 1. Train with real images
        # Compute the discriminator losses on real images
        if train_on_gpu:
            real_images = real_images.cuda()
        D_real = D(real_images)
        d_real_loss = real_loss(D_real)
        # 2. Train with fake images
        # Generate fake images from latent vectors drawn uniformly in [-1, 1]
        z = np.random.uniform(-1, 1, size=(batch_size, z_size))
        z = torch.from_numpy(z).float()
        # move x to GPU, if available
        if train_on_gpu:
            z = z.cuda()
        fake_images = G(z)
        # Compute the discriminator losses on fake images
        D_fake = D(fake_images)
        d_fake_loss = fake_loss(D_fake)
        # add up loss and perform backprop
        d_loss = d_real_loss + d_fake_loss
        d_loss.backward()
        d_optimizer.step()
        # =========================================
        #            TRAIN THE GENERATOR
        # =========================================
        g_optimizer.zero_grad()
        # 1. Train with fake images and flipped labels
        # Generate fake images (a fresh z; the earlier batch was used to update D)
        z = np.random.uniform(-1, 1, size=(batch_size, z_size))
        z = torch.from_numpy(z).float()
        if train_on_gpu:
            z = z.cuda()
        fake_images = G(z)
        # Compute the discriminator losses on fake images
        # using flipped labels!
        D_fake = D(fake_images)
        g_loss = real_loss(D_fake) # use real loss to flip labels
        # perform backprop
        g_loss.backward()
        g_optimizer.step()
        # Print some loss stats
        if batch_i % print_every == 0:
            # append discriminator loss and generator loss
            losses.append((d_loss.item(), g_loss.item()))
            # print discriminator and generator loss
            print('Epoch [{:5d}/{:5d}] | d_loss: {:6.4f} | g_loss: {:6.4f}'.format(
                    epoch+1, num_epochs, d_loss.item(), g_loss.item()))
    ## AFTER EACH EPOCH##
    # generate and save sample, fake images
    G.eval() # for generating samples
    if train_on_gpu:
        fixed_z = fixed_z.cuda()
    samples_z = G(fixed_z)
    samples.append(samples_z)
    G.train() # back to training mode
# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
    pkl.dump(samples, f)
```
## Training loss
Here we'll plot the training losses for the generator and discriminator, recorded every `print_every` batches during training.
```
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator', alpha=0.5)
plt.plot(losses.T[1], label='Generator', alpha=0.5)
plt.title("Training Losses")
plt.legend()
```
## Generator samples from training
Here we can view samples of images from the generator. We'll look at the images we saved during training.
```
# helper function for viewing a list of passed in sample images
def view_samples(epoch, samples):
    """Display the generator samples saved at a given epoch as a 2x8 grid.

    epoch   -- index into `samples` (e.g. -1 for the most recent epoch)
    samples -- list with one batch of generated image tensors per epoch
    """
    fig, axes = plt.subplots(figsize=(16,4), nrows=2, ncols=8, sharey=True, sharex=True)
    for ax, img in zip(axes.flatten(), samples[epoch]):
        # detach from the autograd graph and move to CPU before converting to numpy
        img = img.detach().cpu().numpy()
        # channels-first (C, H, W) -> channels-last (H, W, C) for imshow
        img = np.transpose(img, (1, 2, 0))
        # map from the generator's [-1, 1] range into displayable bytes
        img = ((img +1)*255 / (2)).astype(np.uint8) # rescale to pixel range (0-255)
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        # assumes the generator emits 32x32 RGB images -- TODO confirm against G
        im = ax.imshow(img.reshape((32,32,3)))
_ = view_samples(-1, samples)
```
| github_jupyter |
```
import numpy as np
import pandas as pd
from itertools import product
from sklearn import preprocessing
from itemrep_inference import topk_distance
data_path = '../data/logs_test_300421_1.p'
data = pd.read_pickle(data_path)
data.head()
data.info()
data.describe(include='all')
data.created_at = pd.to_datetime(data.created_at)
data.created_at.plot();
```
### Observations -> Actions
- zero variance in pk -> drop this variable
- during data collection, events were simulated at multiple timepoints, which produces the step-like lines in the time plot. We will focus only on the last simulation -> select records that happened after 13:30 on 30-Apr-21
- client_id and cookie_id can both be treated as a user identifier. Let's go with cookie_id -> drop client_id
- map cookie_id to dash_basic_auth login profiles. this action is specific to this POC's MVP (minimum viable product) requirements
- remove view events - focus is only on click events
- drop url
- drop duplicates
```
def label_encoding(df, colname):
    """Fit a LabelEncoder on one dataframe column and return its {label: code} mapping."""
    encoder = preprocessing.LabelEncoder()
    encoder.fit(df[colname])
    return dict(zip(encoder.classes_, encoder.transform(encoder.classes_)))
def preprocess(df):
    """Clean the raw click log and anonymise users.

    Steps (matching the markdown notes above):
      * drop duplicate rows
      * keep only records from the last simulation run (after 13:30 on 30-Apr-21)
      * replace cookie_id values with anonymous 'user<N>' names
      * build label-encoded mappings for users and items
      * keep click events only, and just the columns the recommender needs

    Returns (clean_df, [user_mapping, item_mapping]).
    """
    df = df.drop_duplicates()
    df = df[df.created_at > '2021-04-30 13:30:00']
    # BUG FIX: build the anonymisation map from the dataframe being processed,
    # not from the module-level `data` global the original code referenced.
    umap = {x: 'user' + str(i) for i, x in enumerate(df.cookie_id.unique())}
    df = df.replace({"cookie_id": umap})
    user_mapping = label_encoding(df, 'cookie_id')
    item_mapping = label_encoding(df, 'item_id')
    df = df[df.event_name == 'click']
    df.reset_index(drop=True, inplace=True)
    df = df[['cookie_id', 'item_id', 'created_at']]
    df.columns = ['userid', 'itemid', 'timestamp']
    return df, [user_mapping, item_mapping]
dataw, maps = preprocess(data)
dataw.head()
print('User Mappings: {}'.format(maps[0]))
print('Item Mappings: {}'.format(maps[1]))
umap_inverse = {v: k for k, v in maps[0].items()}
imap_inverse = {v: k for k, v in maps[1].items()}
print('Inverse User Mappings: {}'.format(umap_inverse))
print('Inverse Item Mappings: {}'.format(imap_inverse))
dataw = dataw.replace({"userid": maps[0], "itemid": maps[1]})
dataw.head()
def create_pivot(df, mappings=None):
    """Build the dense user x item interaction-count matrix.

    df       -- frame with encoded 'userid' and 'itemid' columns (one row per click)
    mappings -- [user_mapping, item_mapping] dicts; defaults to the module-level
                `maps` for backward compatibility with existing callers

    A zero-count row for every (user, item) pair is concatenated in first, so
    the pivot contains every known user and item even when they never interacted.
    """
    if mappings is None:
        mappings = maps
    df = df.copy()  # avoid mutating the caller's frame with the helper column
    df['count'] = 1
    schemadf = pd.DataFrame(list(product(list(mappings[0].values()), list(mappings[1].values()))), columns=['userid', 'itemid'])
    schemadf['count'] = 0
    # pd.concat replaces DataFrame.append, which was removed in pandas 2.0
    df = pd.concat([df, schemadf], ignore_index=True)
    dfp = pd.pivot_table(df, values='count', index='userid',
                         columns='itemid', aggfunc='sum')
    # fillna + astype replaces the deprecated fillna(..., downcast='infer')
    dfp = dfp.fillna(0)
    dfp = dfp.astype('int')
    return dfp
interactions = create_pivot(dataw)
interactions.style.background_gradient(cmap='Greens',high=interactions.max().max()/2)
```
https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html
```
def recommend(userid, topk=2):
    """Recommend `topk` item ids for a user (anonymised name, e.g. 'user1').

    For every item the user has clicked, fetch that item's nearest neighbours
    via `topk_distance`, weight each neighbour by the user's click count, and
    score it as sqrt(weight) / log(distance + 1).  Scores are averaged per
    item and the ids of the best-scoring items are returned.

    Relies on the module-level `maps`, `imap_inverse` and `interactions`.
    """
    uid = maps[0][userid]
    clicked = interactions.iloc[uid]
    clicked = clicked[clicked != 0]
    frames = []
    # Series.items() replaces iteritems(), which was removed in pandas 2.0
    for item_code, click_count in clicked.items():
        neighbours = topk_distance(imap_inverse[item_code])
        ndf = pd.DataFrame(list(neighbours.items()), columns=['itemid', 'distance'])
        ndf['weight'] = click_count
        frames.append(ndf)
    # pd.concat replaces DataFrame.append, which was removed in pandas 2.0
    scored = pd.concat(frames) if frames else pd.DataFrame(columns=['itemid', 'distance', 'weight'])
    scored = scored[scored['distance'] != 0]  # drop zero-distance matches (presumably the item itself)
    scored['distance'] += 1  # shift so log(distance) is finite and positive
    scored['score'] = np.sqrt(scored['weight']) / np.log(scored['distance'])
    scored = scored.set_index('itemid')
    scored = scored[['score']].groupby(['itemid']).mean()
    scored = scored.sort_values(by='score', ascending=False)
    return scored.index.values[:topk]
recommend('user1')
import pickle
import os
artifact_path = 'artifacts'
pickle.dump(maps[0], open(os.path.join(artifact_path,"usermap.p"), "wb"))
pickle.dump(maps[1], open(os.path.join(artifact_path,"itemmap.p"), "wb"))
pickle.dump(umap_inverse, open(os.path.join(artifact_path,"usermap_inv.p"), "wb"))
pickle.dump(imap_inverse, open(os.path.join(artifact_path,"itemmap_inv.p"), "wb"))
pickle.dump(interactions, open(os.path.join(artifact_path,"interactions.p"), "wb"))
```
| github_jupyter |
# [Numba](https://lectures.quantecon.org/py/numba.html)
## 1. Overview
In our lecture on [NumPy](https://lectures.quantecon.org/py/numpy.html) we learned one method to **improve speed and efficiency in numerical work**
That method, called *vectorization*, involved sending array processing operations in batch to efficient low level code
In the last few years, a new Python library called [Numba](http://numba.pydata.org/) has appeared that solves many of these problems
It does so through something called **just in time (JIT) compilation**
## 2. Where are the Bottlenecks?
### Dynamic Typing
```
a, b = 10, 10
a + b
a, b = 'foo', 'bar'
a + b
a, b = ['foo'], ['bar']
a + b
```
#### Static Types
For example, consider the following C code, which sums the integers from 1 to 10
The variables `i` and `sum` are explicitly declared to be integers
```
#include <stdio.h>
int main(void) {
int i;
int sum = 0;
for (i = 1; i <= 10; i++) {
sum = sum + i;
}
printf("sum = %d\n", sum);
return 0;
}
```
### Data Access
#### Summing with Compiled Code
#### Summing in Pure Python
Python在速度方面或有欠缺
## 3. Vectorization
### Operations on Arrays
```
import random
import numpy as np
import quantecon as qe # v 0.3.8 会报错,v 0.3.7 可以
```
Now let’s try this non-vectorized code
```
qe.tic() # Start timing
n = 100_000
sum = 0
for i in range(n):
x = random.uniform(0, 1)
sum += x**2
qe.toc() # End timing
```
Now compare this vectorized code
```
qe.tic()
n = 100_000
x = np.random.uniform(0, 1, n)
np.sum(x**2)
qe.toc()
```
Vectorized code is typically fast and efficient
It is also surprisingly flexible, in the sense that many operations can be vectorized
### Universal Functions
Many functions provided by NumPy are so-called universal functions — also called `ufuncs`
For example, `np.cos` is a `ufunc`:
```
np.cos(1.0)
np.cos(np.linspace(0, 1, 3))
```
For example, consider the problem of maximizing a function f of two variables `(x,y)` over the square `[−a,a]×[−a,a]`
For `f` and `a` let’s choose
<center>$ f(x,y) = \frac{cos(x^2+y^2)}{1+x^2+y^2}\ and \ a=3$</center>
Here’s a plot of `f`
```
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib import cm
def f(x, y):
    """Surface cos(x^2 + y^2) / (1 + x^2 + y^2), evaluated elementwise."""
    r_squared = x ** 2 + y ** 2
    return np.cos(r_squared) / (1 + r_squared)
xgrid = np.linspace(-3, 3, 50)
ygrid = xgrid
x, y = np.meshgrid(xgrid, ygrid)
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(x,
y,
f(x, y),
rstride=2, cstride=2,
cmap=cm.jet,
alpha=0.7,
linewidth=0.25)
ax.set_zlim(-0.5, 1.0)
plt.show()
```
To **maximize** it, we’re going to use a naive grid search:
1. Evaluate `f` for all `(x,y)` in a grid on the square
2. Return the maximum of observed values
Here’s a non-vectorized version that uses Python loops
```
def f(x, y):
return np.cos(x**2 + y**2) / (1 + x**2 + y**2)
grid = np.linspace(-3, 3, 1000)
m = -np.inf # inf 无穷
qe.tic()
for x in grid:
for y in grid:
z = f(x, y)
if z > m:
m = z
qe.toc()
def f(x, y):
return np.cos(x**2 + y**2) / (1 + x**2 + y**2)
grid = np.linspace(-3, 3, 1000)
x, y = np.meshgrid(grid, grid)
qe.tic()
np.max(f(x, y))
qe.toc()
```
### Pros and Cons of Vectorization
At its best, vectorization yields fast, simple code
However, it’s not without disadvantages
One issue is that it can be highly memory intensive
Another issue is that not all algorithms can be vectorized
## 4. Numba
### Prerequisites
`conda install numba`
`conda update anaconda`
### An Example
Let’s take the difference equation to be the quadratic map
<center>$x_{t+1}=4x_t(1-x_t)$</center>
Here’s the plot of a typical trajectory, starting from $x_0=0.1$, with $t$ on the x-axis
```
def qm(x0, n):
    """Simulate the quadratic map x_{t+1} = 4 x_t (1 - x_t).

    Returns the trajectory of length n + 1 starting from x0.
    """
    trajectory = np.empty(n + 1)
    trajectory[0] = x0
    for t in range(n):
        current = trajectory[t]
        trajectory[t + 1] = 4 * current * (1 - current)
    return trajectory
x = qm(0.1, 250)
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(x, 'b-', lw=2, alpha=0.8)
ax.set_xlabel('time', fontsize=16)
plt.show()
```
To speed this up using Numba is trivial using Numba’s `jit` function
```
from numba import jit
qm_numba = jit(qm) # qm_numba is now a 'compiled' version of qm
qe.util.tic()
qm(0.1, int(10**6))
time1 = qe.util.toc()
qe.util.tic()
qm_numba(0.1, int(10**6))
time2 = qe.util.toc()
```
The first execution is relatively slow because of JIT compilation (see below)
Next time and all subsequent times it runs much faster:
```
qe.util.tic()
qm_numba(0.1, int(10**6))
time2 = qe.util.toc()
time2
```
```
time1 / time2 # Calculate speed gain
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-17-50fee0c0fd4a> in <module>()
----> 1 time1 / time2 # Calculate speed gain
TypeError: unsupported operand type(s) for /: 'str' and 'str'
```
```
from functools import reduce
def str2float(s):
    """Convert a decimal string such as '123.456' to a float without float().

    Digit characters are folded into integers with functools.reduce; the
    fractional part is then scaled down by the appropriate power of ten.
    Assumes `s` contains exactly one '.' and digits otherwise.
    """
    digits = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4,
              '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}

    def fold(acc, d):
        # shift the accumulated value left one decimal place and add the digit
        return acc * 10 + d

    dot = s.find('.')
    whole = reduce(fold, (digits[c] for c in s[:dot]))
    frac = reduce(fold, (digits[c] for c in s[dot + 1:])) / 10 ** (len(s) - dot - 1)
    return whole + frac
print('str2float(\'123.456\') =', str2float('123.456'))
str2float(time1)/str2float(time2)
```
#### Decorator Notation
If you don’t need a separate name for the “numbafied” version of qm, you can just put `@jit` before the function
```
@jit
def qm(x0, n):
x = np.empty(n+1)
x[0] = x0
for t in range(n):
x[t+1] = 4 * x[t] * (1 - x[t])
return x
```
### How and When it Works
#### A Gotcha: Global Variables
```
a = 1
@jit
def add_x(x):
return a + x
print(add_x(10))
a = 2
print(add_x(10))
```
Notice that changing the global had no effect on the value returned by the function
When Numba compiles machine code for functions, it treats global variables as constants to ensure type stability
#### Numba for vectorization
```
from numba import vectorize
@vectorize
def f_vec(x, y):
return np.cos(x**2 + y**2) / (1 + x**2 + y**2)
grid = np.linspace(-3, 3, 1000)
x, y = np.meshgrid(grid, grid)
np.max(f_vec(x, y)) # Run once to compile
qe.tic()
np.max(f_vec(x, y))
qe.toc()
```
For example, when NumPy computes `np.cos(x**2 + y**2)` it first creates the intermediate arrays `x**2` and `y**2`, then it creates the array `np.cos(x**2 + y**2)`
In our `@vectorize` version using Numba, the entire operator is reduced to a single vectorized process and none of these intermediate arrays are created
We can gain further speed improvements using Numba’s automatic parallelization feature by specifying `target=’parallel’`
```
@vectorize('float64(float64, float64)', target='parallel')
def f_vec(x, y):
return np.cos(x**2 + y**2) / (1 + x**2 + y**2)
np.max(f_vec(x, y)) # Run once to compile
qe.tic()
np.max(f_vec(x, y))
qe.toc()
```
| github_jupyter |
# Experiments with kernel machines
In this notebook we will use simple two-dimensional data sets to illustrate the behavior of the support vector machine and the Perceptron, when used with quadratic and RBF kernels.
## 1. Basic training procedure
```
%matplotlib inline
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.svm import SVC
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
import pandas as pd
```
The directory containing this notebook should also contain two-dimensional data files, `data1.txt` through `data5.txt`. These files contain one data point per line, along with a label (either -1 or 1), like:
* `3 8 -1` (meaning that point `x=(3,8)` has label `y=-1`)
The next procedure, **learn_and_display_SVM**, loads one of these data sets, invokes `sklearn.SVC` to learn a classifier, and then displays the data as well as the boundary. It is invoked as follows:
* `learn_and_display_SVM(datafile, kernel_type, C_value, s_value)`
where
* `datafile` is one of `'data1.txt'` through `'data5.txt'` (or another file in the same format)
* `kernel_type` is either `'quadratic'` or `'rbf'`
* `C_value` is the setting of the soft-margin parameter `C` (default: 1.0)
* `s_value` (for the RBF kernel) is the scaling parameter `s` (default: 1.0)
```
np.zeros(3,dtype=bool)
def learn_and_display_SVM(datafile, kernel_type='rbf', C_value=1.0, s_value=1.0):
    """Train an SVC on a 2-d data file and plot its decision boundary.

    datafile    -- whitespace-separated file: x1 x2 label, label in {-1, 1}
    kernel_type -- 'rbf' or 'quadratic' (degree-2 polynomial kernel)
    C_value     -- soft-margin parameter C
    s_value     -- RBF scale s (gamma = 1/s^2); ignored for 'quadratic'
    """
    data = np.loadtxt(datafile)
    n,d = data.shape
    # Create training set x and labels y
    x = data[:,0:2]
    y = data[:,2]
    # Now train a support vector machine and identify the support vectors
    if kernel_type == 'rbf':
        clf = SVC(kernel='rbf', C=C_value, gamma=1.0/(s_value*s_value))
    if kernel_type == 'quadratic':
        clf = SVC(kernel='poly', degree=2, C=C_value, coef0=1.0)
    clf.fit(x,y)
    # Boolean mask marking which training points are support vectors
    sv = np.zeros(n,dtype=bool)
    sv[clf.support_] = True
    notsv = np.logical_not(sv)
    # Determine the x1- and x2- limits of the plot
    x1min = min(x[:,0]) - 1
    x1max = max(x[:,0]) + 1
    x2min = min(x[:,1]) - 1
    x2max = max(x[:,1]) + 1
    plt.xlim(x1min,x1max)
    plt.ylim(x2min,x2max)
    # Plot the data points, enlarging those that are support vectors
    plt.plot(x[(y==1)*notsv,0], x[(y==1)*notsv,1], 'ro')
    plt.plot(x[(y==1)*sv,0], x[(y==1)*sv,1], 'ro', markersize=10)
    plt.plot(x[(y==-1)*notsv,0], x[(y==-1)*notsv,1], 'k^')
    plt.plot(x[(y==-1)*sv,0], x[(y==-1)*sv,1], 'k^', markersize=10)
    # Construct a grid of points and evaluate classifier at each grid points
    grid_spacing = 0.05
    xx1, xx2 = np.meshgrid(np.arange(x1min, x1max, grid_spacing), np.arange(x2min, x2max, grid_spacing))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    Z = clf.decision_function(grid)
    # Quantize the values to -1, -0.5, 0, 0.5, 1 for display purposes
    # (clamp to [-1, 1], then snap interior values to +/-0.5 to show the margin band)
    for i in range(len(Z)):
        Z[i] = min(Z[i],1.0)
        Z[i] = max(Z[i],-1.0)
        if (Z[i] > 0.0) and (Z[i] < 1.0):
            Z[i] = 0.5
        if (Z[i] < 0.0) and (Z[i] > -1.0):
            Z[i] = -0.5
    # Show boundary and margin using a color plot
    Z = Z.reshape(xx1.shape)
    plt.pcolormesh(xx1, xx2, Z, cmap=plt.cm.PRGn, vmin=-2, vmax=2)
    plt.show()
```
## 2. Experiments with the quadratic kernel
Let's try out SVM on some examples, starting with the quadratic kernel.
```
learn_and_display_SVM('data1.txt', 'quadratic', 1)
```
Also try `data2.txt` through `data5.txt`. Also try changing the value of `C` (the third parameter) to see how that affects the boundary and margin.
```
learn_and_display_SVM('data2.txt', 'quadratic', 1)
```
## 3. Experiments with the RBF kernel
Now experiment with the RBF kernel, on the same five data sets. This time there are two parameters to play with: `C` and `sigma`.
```
learn_and_display_SVM('data1.txt', 'rbf', 10.0, 10.0)
learn_and_display_SVM('data2.txt', 'rbf', 10.0, 10.0)
```
## 4. The kernel Perceptron
<font color="magenta">**For you to do:**</font> Implement the kernel Perceptron algorithm as specified in lecture. Your algorithm should allow both the quadratic and RBF kernel, and should follow roughly the same signature as the SVM routine above:
* `learn_and_display_Perceptron(datafile, kernel_type, s_value)`
Recall that the Perceptron algorithm does not always converge; you will need to explicitly check for this.
- http://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics.pairwise
```
from sklearn.metrics.pairwise import rbf_kernel, polynomial_kernel
from numpy import linalg as LA
a = np.arange(8).reshape(4, 2)
b = np.array([3, 0])
a[0,]-b
LA.norm(a[0,]-b, 2) # l2 norm distance
a
a[1,:].reshape(1, 2)
rbf_kernel(a, b.reshape(1, 2), gamma=.5)
np.exp(-1/2 * np.power(LA.norm(a[3,]-b,2), 2)) # rbf kernel
aa = np.zeros((4,))
aa[3] = 3
aa
def kernel_f(x, z, kernel_type='rbf', s=1.0):
    """Evaluate the chosen kernel between every row of x and the single point z.

    x -- (n, 2) array of training points; z -- (1, 2) array with one point.
    Returns an (n, 1) kernel matrix.
    """
    if kernel_type == 'quadratic':
        gram = polynomial_kernel(x, z, degree=2, gamma=s)
    elif kernel_type == 'rbf':
        gram = rbf_kernel(x, z, gamma=s)
    return gram
def evaluate_classifier(X, y, alpha, b, z, kernel_type='rbf', s=1.0):
    """Kernel-perceptron prediction at a single 2-d point z.

    Returns sign( sum_i alpha_i * y_i * K(x_i, z) + b ), i.e. -1.0, 0.0 or +1.0.
    """
    K = kernel_f(X, z.reshape(1, 2), kernel_type=kernel_type, s=s)
    contributions = alpha.reshape(-1, 1) * y.reshape(-1, 1) * K.reshape(-1, 1)
    return np.sign(contributions.sum() + b)
```
**converged means all data points have been classified correctly**
```
def train_perceptron(x,y,n_iters=100, kernel_type='rbf', s=1.0):
    """Kernel Perceptron training loop.

    x, y        -- training points (n x 2) and labels in {-1, +1}
    n_iters     -- cap on the number of mistake-driven updates before giving up
    kernel_type -- 'rbf' or 'quadratic', forwarded to the kernel
    s           -- kernel scale parameter

    Returns (alpha, b, converged): per-point mistake counts, the bias, and
    whether a full pass over the data produced no mistakes.  The Perceptron
    is not guaranteed to converge on non-separable data, hence the explicit
    iteration cap.
    """
    n, d = x.shape
    alpha = np.zeros(n)
    b = 0
    iters = 0
    done = False
    converged = True
    while not(done):
        done = True
        # visit the points in a fresh random order on every pass
        I = np.random.permutation(n)
        for i in range(n):
            j = I[i]
            z = x[j, :].reshape(1, 2)
            y_j = y[j]
            # K = kernel_f(x, z)
            y_hat_j = evaluate_classifier(X=x, y=y.reshape(n, 1), alpha=alpha, b=b, z=z,
                                          kernel_type=kernel_type, s=s)
            if y_j*y_hat_j <= 0:
                # mistake (or exactly on the boundary): bump alpha and the bias
                alpha[j] += 1
                b += y_j
                done = False
                iters += 1
                if iters > n_iters:
                    # hit the update cap: stop and report non-convergence
                    done = True
                    converged = False
    if converged:
        print("Perceptron algorithm: iterations until convergence: ", iters)
    else:
        print("Perceptron algorithm: did not converge within the specified number of iterations")
    return alpha, b, converged
###
### Any auxiliary functions that you need
###
def learn_and_display_Perceptron(datafile, kernel_type='rbf', s_value=1.0, max_iter=100):
    """Train the kernel Perceptron on a 2-d data file and plot the boundary.

    Mirrors learn_and_display_SVM: points with a non-zero alpha (the ones the
    Perceptron made mistakes on) are enlarged, and the decision regions are
    drawn only when training converged.
    """
    data = np.loadtxt(datafile)
    n,d = data.shape
    # max_iter = 100
    # Create training set x and labels y
    x = data[:,0:2]
    y = data[:,2]
    alpha,b,converged = train_perceptron(x,y,n_iters=max_iter,kernel_type=kernel_type,s=s_value)
    print(alpha, b)
    # points with a non-zero alpha play the role of support vectors here
    sv = alpha.astype(bool)
    # sv[clf.support_] = True
    notsv = np.logical_not(sv)
    # Determine the x1- and x2- limits of the plot
    x1min = min(x[:,0]) - 1
    x1max = max(x[:,0]) + 1
    x2min = min(x[:,1]) - 1
    x2max = max(x[:,1]) + 1
    plt.xlim(x1min,x1max)
    plt.ylim(x2min,x2max)
    # Plot the data points
    plt.plot(x[(y==1)*notsv,0], x[(y==1)*notsv,1], 'ro')
    plt.plot(x[(y==1)*sv,0], x[(y==1)*sv,1], 'ro', markersize=10)
    plt.plot(x[(y==-1)*notsv,0], x[(y==-1)*notsv,1], 'k^')
    plt.plot(x[(y==-1)*sv,0], x[(y==-1)*sv,1], 'k^', markersize=10)
    # Construct a grid of points at which to evaluate the classifier
    if converged:
        grid_spacing = 0.05
        # classify all points in grid, each grid's area is 0.05*0.05
        xx1, xx2 = np.meshgrid(np.arange(x1min, x1max, grid_spacing), np.arange(x2min, x2max, grid_spacing))
        grid = np.c_[xx1.ravel(), xx2.ravel()]
        Z = np.array([evaluate_classifier(x,y,alpha,b,pt,kernel_type=kernel_type,s=s_value) for pt in grid])
        # Show the classifier's boundary using a color plot
        Z = Z.reshape(xx1.shape)
        plt.pcolormesh(xx1, xx2, Z, cmap=plt.cm.PRGn, vmin=-3, vmax=3)
        plt.show()
```
<font color="magenta">Experiment with your routine, on the same five data sets.</font>
```
learn_and_display_Perceptron('data5.txt', kernel_type='quadratic', max_iter=10000)
learn_and_display_Perceptron('data2.txt', kernel_type='rbf', max_iter=10000)
```
#### some operations of numpy
```
alpha = np.zeros(10)
alpha[0] = 1
print(alpha)
y = np.ones(10)
K = np.random.rand(10).reshape(10,1)
K
np.multiply(alpha.reshape(-1,1), y.reshape(-1,1))
np.multiply(K, np.multiply(alpha, y)) # need to be very careful about the shape of each element
# 2^2 elements in each dimension, great way to do Cartesian product
xx1, xx2 = np.meshgrid(np.arange(1, 2, 0.5), np.arange(3, 4, 0.5))
grid = np.c_[xx1.ravel(), xx2.ravel()]
grid
[print(i) for i in grid]
sv = np.zeros(4, dtype=bool) # convert 0 to False, non-zero to True
sv
alpha = np.array([0, 2, 3, 0])
alpha
alpha.astype(bool)
np.sign(-1), np.sign(2), np.sign(0)
```
| github_jupyter |
# Programming_Assignment20
### Question1
Create a function that takes a list of strings and integers, and filters out the list so that it
returns a list of integers only.
Examples
filter_list([1, 2, 3, 'a', 'b', 4]) ➞ [1, 2, 3, 4]
filter_list(['A', 0, 'Edabit', 1729, 'Python', '1729']) ➞ [0, 1729]
filter_list(['Nothing', 'here']) ➞ []
```
lst = [1, 2, 3, 'a', 'b', 4]
def filter_list(lst):
    """Return only the integer elements of lst, preserving their order.

    Booleans are excluded because their exact type is bool, not int.
    """
    return [item for item in lst if type(item) is int]
filter_list([1, 2, 3, 'a', 'b', 4])
filter_list(['A', 0, 'Edabit', 1729, 'Python', '1729'])
filter_list(['Nothing', 'here'])
```
### Question2
Given a list of numbers, create a function which returns the list but with each element's
index in the list added to itself. This means you add 0 to the number at index 0, add 1 to the
number at index 1, etc...
Examples
add_indexes([0, 0, 0, 0, 0]) ➞ [0, 1, 2, 3, 4]
add_indexes([1, 2, 3, 4, 5]) ➞ [1, 3, 5, 7, 9]
add_indexes([5, 4, 3, 2, 1]) ➞ [5, 5, 5, 5, 5]
```
def add_indexes(lst):
    """Return a new list with each element's index added to the element.

    The original implementation looked each value up again with list.index(),
    an O(n^2) scan that is also fragile around duplicate values; enumerate()
    provides the index directly in O(n) with identical results.
    """
    return [i + value for i, value in enumerate(lst)]
add_indexes([0, 0, 0, 0, 0])
add_indexes([1, 2, 3, 4, 5])
add_indexes([5, 4, 3, 2, 1])
```
### Question3
Create a function that takes the height and radius of a cone as arguments and returns the
volume of the cone rounded to the nearest hundredth. See the resources tab for the formula.
Examples
cone_volume(3, 2) ➞ 12.57
cone_volume(15, 6) ➞ 565.49
cone_volume(18, 0) ➞ 0
```
import math

pi = math.pi

# Function to calculate Volume of Cone.
# The exercise specifies cone_volume(height, radius) rounded to the nearest
# hundredth (e.g. cone_volume(3, 2) -> 12.57), so the parameters are taken
# height-first and round() is given 2 decimal places; the original took
# (r, h) and rounded to a whole number, contradicting all three examples.
def cone_volume(h, r):
    """Return the cone volume (1/3 * pi * r^2 * h) rounded to two decimals."""
    return round((1 / 3) * pi * r * r * h, 2)

# Driver Code
radius = float(5)
height = float(12)
print( "Volume Of Cone : ", cone_volume(height, radius) )
cone_volume(3, 2)
cone_volume(15, 6)
cone_volume(18, 0)
```
### Question4
This Triangular Number Sequence is generated from a pattern of dots that form a triangle.
The first 5 numbers of the sequence, or dots, are:
1, 3, 6, 10, 15
This means that the first triangle has just one dot, the second one has three dots, the third one
has 6 dots and so on.
Write a function that gives the number of dots with its corresponding triangle number of the
sequence.
Examples
triangle(1) ➞ 1
triangle(6) ➞ 21
triangle(215) ➞ 23220
A Rule
We can make a "Rule" so we can calculate any triangular number.
First, rearrange the dots like this:
triangular numbers 1 to 5
Then double the number of dots, and form them into a rectangle:
triangular numbers when doubled become n by n+1 rectangles
Now it is easy to work out how many dots: just multiply n by n+1
Dots in rectangle = n(n+1)
But remember we doubled the number of dots, so
Dots in triangle = n(n+1)/2
We can use xn to mean "dots in triangle n", so we get the rule:
Rule: xn = n(n+1)/2
```
def triangle(n):
    """Return the n-th triangular number, n*(n+1)/2.

    Uses integer arithmetic so triangle(1) == 1 and triangle(215) == 23220
    exactly as the examples require; the original returned floats (1.0).
    n*(n+1) is always even, so floor division is exact.
    """
    return n * (n + 1) // 2
n = int(input('Enter the trinalge number :'))
print("The {}th triangle has {} dots ".format(n,int(triangle(n))))
triangle(215)
triangle(1)
```
### Question5
Create a function that takes a list of numbers between 1 and 10 (excluding one number) and
returns the missing number.
Examples
missing_num([1, 2, 3, 4, 6, 7, 8, 9, 10]) ➞ 5
missing_num([7, 2, 3, 6, 5, 9, 1, 4, 8]) ➞ 10
missing_num([10, 5, 1, 2, 4, 6, 8, 3, 9]) ➞ 7
```
def missing_num(lst):
    """Return the single number between 1 and 10 that is absent from lst."""
    expected_total = sum(range(11))  # 55, the sum of 1..10
    return expected_total - sum(lst)
print(missing_num([1, 2, 3, 4, 6, 7, 8, 9, 10]))
missing_num([7, 2, 3, 6, 5, 9, 1, 4, 8])
missing_num([10, 5, 1, 2, 4, 6, 8, 3, 9])
```
| github_jupyter |
Importing dependencies
```
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import svm
from sklearn.metrics import accuracy_score
```
Data collection and Analysis
```
# load data from CSV to pandas dataframe
parkinsons_data = pd.read_csv('/Users/ajay_krsna/Documents/MyStuff/Code/Projects/parkinsons-diesease-detection/Data/parkinsons_data.csv')
# print first 5 rows of dataframe
parkinsons_data.head()
# number of rows and columns in the dataframe
parkinsons_data.shape
# get more information about the dataset
parkinsons_data.info()
# check for missing values
parkinsons_data.isnull().sum()
# get statistical information
parkinsons_data.describe()
# distribution of target variable --> column 'status'
parkinsons_data['status'].value_counts()
```
1 --> Parkinson's positive
0 --> Healthy
```
# group data based on the target variable
parkinsons_data.groupby('status').mean()
```
Data Pre-Processing
```
# separating the features and target
X = parkinsons_data.drop(columns=['name', 'status'], axis=1)
Y = parkinsons_data['status']
print(X)
print(Y)
# split into training and test data
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=2)
print(X.shape, X_train.shape, X_test.shape)
```
Data Standardization
```
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
```
Model Training: Support Vector Machine
```
model = svm.SVC(kernel='linear')
# train SVM with training data
model.fit(X_train, Y_train)
```
Model Evaluation:
```
# accuracy score on training data
X_train_prediction = model.predict(X_train)
training_data_accuracy = accuracy_score(Y_train, X_train_prediction)
print('Accuracy score of training data: ', training_data_accuracy)
# accuracy score on test data
X_test_prediction = model.predict(X_test)
test_data_accuracy = accuracy_score(Y_test, X_test_prediction)
# BUG FIX: the original message said 'training data' here (copy-paste error)
print('Accuracy score of test data: ', test_data_accuracy)
```
Parkinson's Predictive system
```
input_data = (214.28900,260.27700,77.97300,0.00567,0.00003,0.00295,0.00317,0.00885,0.01884,0.19000,0.01026,0.01161,0.01373,0.03078,0.04398,21.20900,0.462803,0.664357,-5.724056,0.190667,2.555477,0.148569)
# change input data into numpy array
input_data_as_np_array = np.asarray(input_data)
# reshape the numpy array
input_reshaped = input_data_as_np_array.reshape(1,-1)
# standardize the input data
standard_data = scaler.transform(input_reshaped)
prediction = model.predict(standard_data)
print(prediction)
if prediction[0] == 0:
print('The person is healthy')
else:
print("The person has Parkinson's disease")
```
| github_jupyter |
```
from gensim.models import KeyedVectors
from gensim.models import Word2Vec
from tqdm import tqdm, tqdm_notebook
from nltk.stem import LancasterStemmer, PorterStemmer
import preprocess
questions = {
}
questions[1]=preprocess.string_to_analogy("land:excavate::shoal:_,sound salvage dredge survey sidle,dredge")
#questions[2]=preprocess.string_to_analogy("sword:brandish::_:_,")
questions[3]=preprocess.string_to_analogy("volcano:quiescent::talent:_,imperious hyperbolical oblique latent pliant,latent")
questions[4]=preprocess.string_to_analogy("style:flamboyant::behavior:_,brazen lofty volatile insolent sassy,brazen")
questions[5]=preprocess.string_to_analogy("direct:confront::oblique:_,unsettle incite sidle stymie flourish,sidle")
questions[6]=preprocess.string_to_analogy("beforehand:trepidation::afterwards:_,bravado decadence hyperbole foolhardy rue,rue")
questions[7]=preprocess.string_to_analogy("person:odious::action:_,unsettling imperious heinous lofty haughty,heinous")
questions[8]=preprocess.string_to_analogy("naughtiness:permit::misbehavior:_,dredge confront pique countenance stymie,countenance")
questions[9]=preprocess.string_to_analogy("speech:hyperbole::behavior:_,trepidation quiescence bravado pique parsimony,bravado")
questions[10]=preprocess.string_to_analogy("emotional:resistance::physical:_,cadaver survey conflagration decadence friction,friction")
questions[11]=preprocess.string_to_analogy("interest:rouse::curiosity:_,assuage nettle dredge rue pique,pique")
questions[12]=preprocess.string_to_analogy("mien:haughty::countenance:_, flamboyant imperious befuddled canny brazen,imperious")
questions[13]=preprocess.string_to_analogy("threateningly:brandish::flamboyantly:_,confront flaunt fan incite garner,flaunt")
#questions[14]=preprocess.string_to_analogy
questions[15]=preprocess.string_to_analogy("latent:possibility::hidden:_,cache ire hovel visage urchin,cache")
#questions[16]=preprocess.string_to_analogy
questions[17]=preprocess.string_to_analogy("travel:blocked::attempt:_,rued incited salvage stymied unsettled,stymied")
questions[18]=preprocess.string_to_analogy("salesman:canny::businesswoman:_,entrepreneurial prolific shrewd haughty frugal,shrewd")
questions[19]=preprocess.string_to_analogy("brave:reckless::frugal:_,prolific audacious foolhardy poor parsimonious,parsimonious")
questions[20]=preprocess.string_to_analogy("personality:pliant::behavior:_,lofty insolent cooperative volatile popular,cooperative")
from gensim.parsing.porter import PorterStemmer
import binascii
file = open('./GoogleNews-vectors-negative300.bin','rb')
binascii.b2a_base64(file.read())
#p = PorterStemmer()
#p.stem_documents(file)
file = './GoogleNews-vectors-negative300.bin'
#print(f"Training from {file}")
print("Training model...")
model = KeyedVectors.load_word2vec_format(file, binary=True)
model.init_sims(replace=True)
print("Model trained")
model.vocab
import sys
import numpy as np
def test(classifiers=["simple"]):
    """Score each classifier strategy on every analogy question.

    Returns {classifier: array of 1/0 outcomes, one entry per question}.
    """
    outcome = {}
    for clf in tqdm(classifiers):
        outcome[clf] = np.empty((1, 0), dtype=int)
        for question_number in tqdm(questions):
            first, second, third, answers, correct = questions[question_number]
            hit = 1 if solve(first, second, third, answers, classifier=clf) == correct else 0
            outcome[clf] = np.append(outcome[clf], [hit])
    return outcome
def solve(first, second, third, answers, classifier="simple", verbose=False):
    """Answer one analogy question first:second :: third:? using the global
    word2vec ``model``.

    Parameters
    ----------
    first, second, third : str
        The three given words of the analogy.
    answers : list of str
        Candidate answers; words missing from the model vocabulary are
        removed from this list in place (as in the original).
    classifier : str
        One of "simple", "minus_third", "deep", "deep_uniq", "magnitude".
    verbose : bool
        Unused; kept for interface compatibility.

    Returns
    -------
    str — the chosen answer word.

    Raises
    ------
    ValueError if ``classifier`` is not a known strategy.
    """
    # Iterate over a copy: removing from `answers` while iterating it skips
    # the element that follows each removal (bug in the original).
    for answer in list(answers):
        if answer not in model.vocab:
            # The original passed `answer` as a second print() argument, so the
            # %s placeholder was never substituted; format explicitly.
            print("%s is not in vocab!" % answer)
            answers.remove(answer)
    if classifier == "simple":
        prediction = model.most_similar_cosmul(positive=[second, third], negative=[first], topn=1)[0][0]
        prediction = model.most_similar_to_given(prediction, answers)
    elif classifier == "minus_third":
        prediction = model.most_similar_cosmul(positive=[second, first], negative=[third], topn=1)[0][0]
        prediction = model.most_similar_to_given(prediction, answers)
    elif classifier == "deep":
        # Use the actual question words; the original hard-coded the
        # king/woman/man demo triple here, ignoring its arguments.
        top_predictions = list(zip(*model.most_similar_cosmul(positive=[second, third], negative=[first], topn=5)))[0]
        best = -1
        for pred in top_predictions:
            for answer in answers:
                similarity = model.similarity(answer, pred)
                if similarity > best:
                    best = similarity
                    prediction = answer
    elif classifier == "deep_uniq":
        # Same hard-coded-triple fix as in "deep"; majority vote over the
        # nearest answer for each of the top-5 predictions.
        top_predictions = list(zip(*model.most_similar_cosmul(positive=[second, third], negative=[first], topn=5)))[0]
        best = []
        for pred in top_predictions:
            best.append(model.most_similar_to_given(pred, answers))
        prediction = max(best, key=best.count)
    elif classifier == "magnitude":
        # Pick the answer minimizing |second + third - first - answer|^2.
        best = sys.maxsize
        prediction = ""
        word_sum = model[second] + model[third] - model[first]
        for answer in answers:
            answer_sum = word_sum - model[answer]
            magnitude = answer_sum.dot(answer_sum)
            if magnitude < best:
                best = magnitude
                prediction = answer
    else:
        # Fail loudly instead of the UnboundLocalError the original raised.
        raise ValueError("unknown classifier: %s" % classifier)
    return prediction
# Evaluate every strategy and print the per-question outcome vector plus the
# total number of correct answers per strategy.
results = test(["deep","deep_uniq","simple","magnitude"])
for agent in results:
    print(str(results[agent]) + " " + agent + " " + str(np.sum(results[agent])))
print(results)
# Element-wise sum of two outcome vectors (both are numpy int arrays).
results["simple"] + results["magnitude"]
# Scratch cell: demonstrates that np.append flattens the (1, 0) array.
e = np.empty((1,0), dtype=int)
e = np.append(e, [0])
e = np.append(e, [0])
e
from sklearn.neighbors import KDTree
class ANNSearch:
    """Nearest-neighbour search over a word-embedding model via a KDTree.

    The original stored ``word2idx``/``idx2word``/``data`` as class-level
    attributes, so every instance shared (and kept appending to) the same
    containers; constructing a second instance would double the index.
    They are per-instance attributes here.
    """

    def __init__(self, model):
        # model: gensim KeyedVectors-like object (has .vocab and model[word]).
        self.word2idx = {}
        self.idx2word = {}
        vectors = []
        for counter, key in enumerate(model.vocab.keys()):
            vectors.append(model[key])
            self.word2idx[key] = counter
            self.idx2word[counter] = key
        self.data = np.array(vectors)
        # leaf_size is a hyperparameter
        self.tree = KDTree(self.data, leaf_size=100)

    def search_by_vector(self, v, k=10):
        """Return an iterator of (distance, word) for the k nearest words to vector v."""
        dists, inds = self.tree.query([v], k)
        return zip(dists[0], [self.idx2word[idx] for idx in inds[0]])

    def search(self, query, k=10):
        """Return the k nearest (distance, word) pairs for an in-vocabulary word."""
        vector = self.data[self.word2idx[query]]
        return self.search_by_vector(vector, k)
```
| github_jupyter |
```
# Setup for the dividend-analysis notebook: plotly, colab upload, Chart Studio.
import pandas as pd
import plotly.graph_objects as go
import numpy as np
import requests
import json
from time import sleep
from google.colab import files
# Interactive file-upload widget (colab only).
uploaded = files.upload()
!pip install chart_studio
import chart_studio.plotly as py
import chart_studio.tools as tls
# SECURITY NOTE(review): credentials are hard-coded and committed with the
# notebook; this API key should be revoked and read from a secret instead.
username="ssbyrne89"
api_key='oVZU9m2Nq00N1QppHpC8'
tls.set_credentials_file(username=username, api_key=api_key)
# NOTE(review): `cnames` is referenced here but only defined in a later cell —
# the notebook cells were evidently executed out of order.
cnames
# Tickers whose 2021-03-31 closing price is fetched from Alpha Vantage.
tlist = ["MO", "DRE", "WELL", "VTR", "LUMN", "PEAK", "O", "T", "MAA",
         "KIM", "PLD", "UDR", "LB", "KMI", "LYB", "AEE", "CNP", "PM",
         "VZ", "ED"]
# Sanity-check request (placeholder API key) parsing one closing price.
test=f'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=MO&apikey=abc123'
parsed_test = json.loads(requests.get(test).text)
parsed_test = float(parsed_test["Time Series (Daily)"]["2021-03-31"]["4. close"])
# Fetch each ticker's close; sleep after every 5th request, presumably to stay
# under Alpha Vantage's free-tier limit of 5 requests/minute — TODO confirm.
yesterday_close_price = []
for ticker in tlist:
  divs=f'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={ticker}&apikey=******'
  parsed_divs = json.loads(requests.get(divs).text)
  parsed_divs = float(parsed_divs["Time Series (Daily)"]["2021-03-31"]["4. close"])
  yesterday_close_price.append(parsed_divs)
  x = len(yesterday_close_price) % 5
  if x == 0:
    sleep(65)
yesterday_close_price
# Company names and 20-year average dividend yields; row 0 of the CSV is a
# header row, hence the [1:] slice.
Company_Name_and_twenty_yearAVG = pd.read_csv("/content/get_top_20_div_yields.csv", names=['CName', 'TwentyYearAVG'])
Company_Name_and_twenty_yearAVG = Company_Name_and_twenty_yearAVG[1:]
Company_Name_and_twenty_yearAVG
Company_Name_and_twenty_yearAVG["TwentyYearAVG"]
# Copy the yield column into a plain list for plotting.
TwentyYearAVG=[]
for avg in Company_Name_and_twenty_yearAVG['TwentyYearAVG']:
  TwentyYearAVG.append(avg)
TwentyYearAVG
print(type(Company_Name_and_twenty_yearAVG))
# Copy the company-name column into a plain list (used as x-axis labels).
cnames=[]
for names in Company_Name_and_twenty_yearAVG["CName"]:
  cnames.append(names)
print(type(cnames))
cnames
# Full 20-year dividend-yield history table.
TwentyYearDivYieldAVG = pd.read_csv('/content/the20_companies_w_the_highest_20_Year_div_yield_average.csv')
TwentyYearDivYieldAVG
TwentyYearDivYieldAVG.columns[1:]
cnames
# NOTE(review): row index 21 is assumed to hold the 2020 yields — confirm
# against the CSV layout.
year2020div_yield = []
for dyield in TwentyYearDivYieldAVG[cnames].iloc[21]:
  year2020div_yield.append(dyield)
year2020div_yield
# Bar chart of yesterday's closing prices, published to Chart Studio.
fig = go.Figure()
fig.add_trace(go.Bar(x=cnames, y=yesterday_close_price, marker_color="forestgreen"))
fig.update_layout(title_text="Yesterday's Closing Prices as of April 1, 2021",
                  xaxis=dict(title='Company'),
                  yaxis=dict(title="Closing Price from March 31, 2021 in USD"))
py.plot(figure_or_data=fig, filename="Yesterday_Closing_Prices_as_of_4_1_2021", auto_open=True)
fig.show()
# Grouped bars: 2020 dividend yield vs. the 20-year average, per company.
fig = go.Figure()
fig.add_trace(go.Bar(x=cnames, y=TwentyYearAVG, name="Twenty Year Div Yield AVG"))
fig.add_trace(go.Bar(x=cnames, y=year2020div_yield, name="Dividend Yield for 2020"))
fig.update_layout(title_text="2020 DivYields Juxtaposed to 20Year AVG DivYield",
                  xaxis=dict(title='Company'),
                  yaxis=dict(title='Dividend Yield '),
                  barmode="group")
py.plot(figure_or_data=fig, filename="2020_DivYields_Juxt_to_20Year_AVG", auto_open=True)
```
| github_jupyter |
# Сравнение метрик качества бинарной классификации
## Programming Assignment
В этом задании мы разберемся, в чем состоит разница между разными метриками качества. Мы остановимся на задаче бинарной классификации (с откликами 0 и 1), но рассмотрим ее как задачу предсказания вероятности того, что объект принадлежит классу 1. Таким образом, мы будем работать с вещественной, а не бинарной целевой переменной.
Задание оформлено в стиле демонстрации с элементами Programming Assignment. Вам нужно запустить уже написанный код и рассмотреть предложенные графики, а также реализовать несколько своих функций. Для проверки запишите в отдельные файлы результаты работы этих функций на указанных наборах входных данных, это можно сделать с помощью предложенных в заданиях функций write_answer_N, N - номер задачи. Загрузите эти файлы в систему.
Для построения графиков нужно импортировать соответствующие модули.
Библиотека seaborn позволяет сделать графики красивее. Если вы не хотите ее использовать, закомментируйте третью строку.
Более того, для выполнения Programming Assignment модули matplotlib и seaborn не нужны (вы можете не запускать ячейки с построением графиков и смотреть на уже построенные картинки).
```
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
%matplotlib inline
```
### Что предсказывают алгоритмы
Для вычисления метрик качества в обучении с учителем нужно знать только два вектора: вектор правильных ответов и вектор предсказанных величин; будем обозначать их actual и predicted. Вектор actual известен из обучающей выборки, вектор predicted возвращается алгоритмом предсказания. Сегодня мы не будем использовать какие-то алгоритмы классификации, а просто рассмотрим разные векторы предсказаний.
В нашей формулировке actual состоит из нулей и единиц, а predicted - из величин из интервала [0, 1] (вероятности класса 1). Такие векторы удобно показывать на scatter plot.
Чтобы сделать финальное предсказание (уже бинарное), нужно установить порог T: все объекты, имеющие предсказание выше порога, относят к классу 1, остальные - к классу 0.
```
# Draw one scatter plot of predicted probabilities against true labels,
# with a horizontal line marking the binarization threshold T.
def scatter(actual, predicted, T):
    plt.scatter(actual, predicted)
    plt.xlabel("Labels")
    plt.ylabel("Predicted probabilities")
    threshold_line_x = [-0.2, 1.2]
    plt.plot(threshold_line_x, [T, T])
    plt.axis([-0.1, 1.1, -0.1, 1.1])
# Draw several scatter plots laid out in a grid of the given (rows, cols) shape.
def many_scatters(actuals, predicteds, Ts, titles, shape):
    rows, cols = shape
    plt.figure(figsize=(cols * 5, rows * 5))
    for plot_index, (actual, predicted, T, title) in enumerate(
            zip(actuals, predicteds, Ts, titles), start=1):
        ax = plt.subplot(rows, cols, plot_index)
        ax.set_title(title)
        scatter(actual, predicted, T)
```
Идеальная ситуация: существует порог T, верно разделяющий вероятности, соответствующие двум классам. Пример такой ситуации:
```
# Perfectly separable case: all class-0 probabilities sit below 0.5 and all
# class-1 probabilities above it.
actual_0 = np.array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
predicted_0 = np.array([ 0.19015288, 0.23872404, 0.42707312, 0.15308362, 0.2951875 ,
                         0.23475641, 0.17882447, 0.36320878, 0.33505476, 0.202608 ,
                         0.82044786, 0.69750253, 0.60272784, 0.9032949 , 0.86949819,
                         0.97368264, 0.97289232, 0.75356512, 0.65189193, 0.95237033,
                         0.91529693, 0.8458463 ])
plt.figure(figsize=(5, 5))
scatter(actual_0, predicted_0, 0.5)
```
Интервалы вероятностей для двух классов прекрасно разделяются порогом T = 0.5.
Чаще всего интервалы накладываются - тогда нужно аккуратно подбирать порог.
Самый неправильный алгоритм делает все наоборот: поднимает вероятности класса 0 выше вероятностей класса 1. Если так произошло, стоит посмотреть, не перепутались ли метки 0 и 1 при создании целевого вектора из сырых данных.
Примеры:
```
# Typical case: the class probability intervals overlap around the threshold.
actual_1 = np.array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
                      0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1.])
predicted_1 = np.array([ 0.41310733, 0.43739138, 0.22346525, 0.46746017, 0.58251177,
                         0.38989541, 0.43634826, 0.32329726, 0.01114812, 0.41623557,
                         0.54875741, 0.48526472, 0.21747683, 0.05069586, 0.16438548,
                         0.68721238, 0.72062154, 0.90268312, 0.46486043, 0.99656541,
                         0.59919345, 0.53818659, 0.8037637 , 0.272277 , 0.87428626,
                         0.79721372, 0.62506539, 0.63010277, 0.35276217, 0.56775664])
# "Awful" case: the labels are inverted relative to the predicted probabilities.
actual_2 = np.array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0.,
                      0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
predicted_2 = np.array([ 0.07058193, 0.57877375, 0.42453249, 0.56562439, 0.13372737,
                         0.18696826, 0.09037209, 0.12609756, 0.14047683, 0.06210359,
                         0.36812596, 0.22277266, 0.79974381, 0.94843878, 0.4742684 ,
                         0.80825366, 0.83569563, 0.45621915, 0.79364286, 0.82181152,
                         0.44531285, 0.65245348, 0.69884206, 0.69455127])
many_scatters([actual_0, actual_1, actual_2], [predicted_0, predicted_1, predicted_2],
              [0.5, 0.5, 0.5], ["Perfect", "Typical", "Awful algorithm"], (1, 3))
```
Алгоритм может быть осторожным и стремиться сильно не отклонять вероятности от 0.5, а может рисковать - делать предсказания близкими к нулю или единице.
```
# Risky perfect algorithm: predictions pushed close to 0 and 1.
actual_0r = np.array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1.,
                       1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
predicted_0r = np.array([ 0.23563765, 0.16685597, 0.13718058, 0.35905335, 0.18498365,
                          0.20730027, 0.14833803, 0.18841647, 0.01205882, 0.0101424 ,
                          0.10170538, 0.94552901, 0.72007506, 0.75186747, 0.85893269,
                          0.90517219, 0.97667347, 0.86346504, 0.72267683, 0.9130444 ,
                          0.8319242 , 0.9578879 , 0.89448939, 0.76379055])
# Risky good (typical) algorithm: mostly extreme predictions, a few mistakes.
actual_1r = np.array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.,
                       1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
predicted_1r = np.array([ 0.13832748, 0.0814398 , 0.16136633, 0.11766141, 0.31784942,
                          0.14886991, 0.22664977, 0.07735617, 0.07071879, 0.92146468,
                          0.87579938, 0.97561838, 0.75638872, 0.89900957, 0.93760969,
                          0.92708013, 0.82003675, 0.85833438, 0.67371118, 0.82115125,
                          0.87560984, 0.77832734, 0.7593189, 0.81615662, 0.11906964,
                          0.18857729])
many_scatters([actual_0, actual_1, actual_0r, actual_1r],
              [predicted_0, predicted_1, predicted_0r, predicted_1r],
              [0.5, 0.5, 0.5, 0.5],
              ["Perfect careful", "Typical careful", "Perfect risky", "Typical risky"],
              (2, 2))
```
Также интервалы могут смещаться. Если алгоритм боится ошибок false positive, то он будет чаще делать предсказания, близкие к нулю.
Аналогично, чтобы избежать ошибок false negative, логично чаще предсказывать большие вероятности.
```
# Algorithm that avoids false positives: probabilities shifted toward 0.
actual_10 = np.array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
                       0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                       1., 1., 1.])
predicted_10 = np.array([ 0.29340574, 0.47340035, 0.1580356 , 0.29996772, 0.24115457, 0.16177793,
                          0.35552878, 0.18867804, 0.38141962, 0.20367392, 0.26418924, 0.16289102,
                          0.27774892, 0.32013135, 0.13453541, 0.39478755, 0.96625033, 0.47683139,
                          0.51221325, 0.48938235, 0.57092593, 0.21856972, 0.62773859, 0.90454639, 0.19406537,
                          0.32063043, 0.4545493 , 0.57574841, 0.55847795 ])
# Algorithm that avoids false negatives: probabilities shifted toward 1.
actual_11 = np.array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
                       0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
predicted_11 = np.array([ 0.35929566, 0.61562123, 0.71974688, 0.24893298, 0.19056711, 0.89308488,
                          0.71155538, 0.00903258, 0.51950535, 0.72153302, 0.45936068, 0.20197229, 0.67092724,
                          0.81111343, 0.65359427, 0.70044585, 0.61983513, 0.84716577, 0.8512387 ,
                          0.86023125, 0.7659328 , 0.70362246, 0.70127618, 0.8578749 , 0.83641841,
                          0.62959491, 0.90445368])
many_scatters([actual_1, actual_10, actual_11], [predicted_1, predicted_10, predicted_11],
              [0.5, 0.5, 0.5], ["Typical", "Avoids FP", "Avoids FN"], (1, 3))
```
Мы описали разные характеры векторов вероятностей. Далее мы будем смотреть, как метрики оценивают разные векторы предсказаний, поэтому обязательно выполните ячейки, создающие векторы для визуализации.
### Метрики, оценивающие бинарные векторы предсказаний
Есть две типичные ситуации, когда специалисты по машинному обучению начинают изучать характеристики метрик качества:
1. при участии в соревновании или решении прикладной задачи, когда вектор предсказаний оценивается по конкретной метрике, и нужно построить алгоритм, максимизирующий эту метрику.
1. на этапе формализации задачи машинного обучения, когда есть требования прикладной области, и нужно предложить математическую метрику, которая будет соответствовать этим требованиям.
Далее мы вкратце рассмотрим каждую метрику с этих двух позиций.
#### Precision и recall; accuracy
Для начала разберемся с метриками, оценивающими качество уже после бинаризации по порогу T, то есть сравнивающими два бинарных вектора: actual и predicted.
Две популярные метрики - precision и recall. Первая показывает, как часто алгоритм предсказывает класс 1 и оказывается правым, а вторая - как много объектов класса 1 алгоритм нашел.
Также рассмотрим самую простую и известную метрику - accuracy; она показывает долю правильных ответов.
Выясним преимущества и недостатки этих метрик, попробовав их на разных векторах вероятностей.
```
from sklearn.metrics import precision_score, recall_score, accuracy_score
# Binarize each probability vector at T = 0.5 and compare precision, recall
# and accuracy across the three scenario groups built above.
T = 0.5
print("Алгоритмы, разные по качеству:")
for actual, predicted, descr in zip([actual_0, actual_1, actual_2],
                                    [predicted_0 > T, predicted_1 > T, predicted_2 > T],
                                    ["Perfect:", "Typical:", "Awful:"]):
    print(descr, "precision =", precision_score(actual, predicted), "recall =", \
          recall_score(actual, predicted), ";",\
          "accuracy =", accuracy_score(actual, predicted))
print()
print("Осторожный и рискующий алгоритмы:")
for actual, predicted, descr in zip([actual_1, actual_1r],
                                    [predicted_1 > T, predicted_1r > T],
                                    ["Typical careful:", "Typical risky:"]):
    print(descr, "precision =", precision_score(actual, predicted), "recall =", \
          recall_score(actual, predicted), ";",\
          "accuracy =", accuracy_score(actual, predicted))
print()
print("Разные склонности алгоритмов к ошибкам FP и FN:")
for actual, predicted, descr in zip([actual_10, actual_11],
                                    [predicted_10 > T, predicted_11 > T],
                                    ["Avoids FP:", "Avoids FN:"]):
    print(descr, "precision =", precision_score(actual, predicted), "recall =", \
          recall_score(actual, predicted), ";",\
          "accuracy =", accuracy_score(actual, predicted))
```
Все три метрики легко различают простые случаи хороших и плохих алгоритмов. Обратим внимание, что метрики имеют область значений [0, 1], и потому их легко интерпретировать.
Метрикам не важны величины вероятностей, им важно только то, сколько объектов неправильно зашли за установленную границу (в данном случае T = 0.5).
Метрика accuracy дает одинаковый вес ошибкам false positive и false negative, зато пара метрик precision и recall однозначно идентифицирует это различие. Собственно, их для того и используют, чтобы контролировать ошибки FP и FN.
Мы измерили три метрики, фиксировав порог T = 0.5, потому что для почти всех картинок он кажется оптимальным. Давайте посмотрим на последней (самой интересной для этих метрик) группе векторов, как меняются precision и recall при увеличении порога.
```
from sklearn.metrics import precision_recall_curve
# Precision and recall as functions of the binarization threshold for the
# Typical / Avoids-FP / Avoids-FN vectors.
precs = []
recs = []
threshs = []
labels = ["Typical", "Avoids FP", "Avoids FN"]
for actual, predicted in zip([actual_1, actual_10, actual_11],
                             [predicted_1, predicted_10, predicted_11]):
    prec, rec, thresh = precision_recall_curve(actual, predicted)
    precs.append(prec)
    recs.append(rec)
    threshs.append(thresh)
plt.figure(figsize=(15, 5))
for i in range(3):
    ax = plt.subplot(1, 3, i+1)
    # precision/recall arrays have one more entry than thresholds, hence [:-1].
    plt.plot(threshs[i], precs[i][:-1], label="precision")
    plt.plot(threshs[i], recs[i][:-1], label="recall")
    plt.xlabel("threshold")
    ax.set_title(labels[i])
    plt.legend()
```
При увеличении порога мы делаем меньше ошибок FP и больше ошибок FN, поэтому одна из кривых растет, а вторая - падает. По такому графику можно подобрать оптимальное значение порога, при котором precision и recall будут приемлемы. Если такого порога не нашлось, нужно обучать другой алгоритм.
Оговоримся, что приемлемые значения precision и recall определяются предметной областью. Например, в задаче определения, болен ли пациент определенной болезнью (0 - здоров, 1 - болен), ошибок false negative стараются избегать, требуя recall около 0.9. Можно сказать человеку, что он болен, и при дальнейшей диагностике выявить ошибку; гораздо хуже пропустить наличие болезни.
__<font color="green" size=5>Programming assignment: problem 1. </font>__ Фиксируем порог T = 0.65; по графикам можно примерно узнать, чему равны метрики на трех выбранных парах векторов (actual, predicted). Вычислите точные precision и recall для этих трех пар векторов.
6 полученных чисел запишите в текстовый файл в таком порядке:
precision_1 recall_1 precision_10 recall_10 precision_11 recall_11
Цифры XXX после пробела соответствуют таким же цифрам в названиях переменных actual_XXX и predicted_XXX.
Передайте ответ в функцию write_answer_1. Полученный файл загрузите в форму.
```
############### Programming assignment: problem 1 ###############
# Exact precision/recall at threshold T = 0.65 for the three vector pairs.
T = 0.65
precision_1 = precision_score(actual_1, predicted_1 > T)
recall_1 = recall_score(actual_1, predicted_1 > T)
precision_10 = precision_score(actual_10, predicted_10 > T)
recall_10 = recall_score(actual_10, predicted_10 > T)
precision_11 = precision_score(actual_11, predicted_11 > T)
recall_11 = recall_score(actual_11, predicted_11 > T)
def write_answer_1(precision_1, recall_1, precision_10, recall_10, precision_11, recall_11):
    """Write the six problem-1 metric values, space-separated, to the grader file."""
    values = (precision_1, recall_1, precision_10, recall_10, precision_11, recall_11)
    with open("pa_metrics_problem1.txt", "w") as out_file:
        out_file.write(" ".join(str(value) for value in values))
write_answer_1(precision_1, recall_1, precision_10, recall_10, precision_11, recall_11)
# IPython shell escape: display the produced answer file.
!cat pa_metrics_problem1.txt
```
#### F1-score
Очевидный недостаток пары метрик precision-recall - в том, что их две: непонятно, как ранжировать алгоритмы. Чтобы этого избежать, используют F1-метрику, которая равна среднему гармоническому precision и recall.
F1-метрика будет равна 1, если и только если precision = 1 и recall = 1 (идеальный алгоритм).
Обмануть F1 сложно: если одна из величин маленькая, а другая близка к 1 (по графикам видно, что такое соотношение иногда легко получить), F1 будет далека от 1. F1-метрику сложно оптимизировать, потому что для этого нужно добиваться высокой полноты и точности одновременно.
Например, посчитаем F1 для того же набора векторов, для которого мы строили графики (мы помним, что там одна из кривых быстро выходит в единицу).
```
from sklearn.metrics import f1_score
# F1 at threshold T = 0.7 for the Typical / Avoids-FP / Avoids-FN scenarios.
T = 0.7
print("Разные склонности алгоритмов к ошибкам FP и FN:")
for actual, predicted, descr in zip([actual_1, actual_10, actual_11],
                                    [predicted_1 > T, predicted_10 > T, predicted_11 > T],
                                    ["Typical:", "Avoids FP:", "Avoids FN:"]):
    print(descr, "f1 =", f1_score(actual, predicted))
```
F1-метрика в двух последних случаях, когда одна из парных метрик равна 1, значительно меньше, чем в первом, сбалансированном случае.
__<font color="green" size=5>Programming assignment: problem 2. </font>__ На precision и recall влияют и характер вектора вероятностей, и установленный порог.
Для тех же пар (actual, predicted), что и в предыдущей задаче, найдите оптимальные пороги, максимизирующие F1_score. Будем рассматривать только пороги вида T = 0.1 * k, k - целое; соответственно, нужно найти три значения k. Если f1 максимизируется при нескольких значениях k, укажите наименьшее из них.
Запишите найденные числа k в следующем порядке:
k_1, k_10, k_11
Цифры XXX после пробела соответствуют таким же цифрам в названиях переменных actual_XXX и predicted_XXX.
Передайте ответ в функцию write_answer_2. Загрузите файл в форму.
Если вы запишите список из трех найденных k в том же порядке в переменную ks, то с помощью кода ниже можно визуализировать найденные пороги:
```
############### Programming assignment: problem 2 ###############
T = 0.1
# k values (threshold = 0.1 * k) that maximize F1 for each vector pair.
ks = [5, 3, 6]
many_scatters([actual_1, actual_10, actual_11], [predicted_1, predicted_10, predicted_11],
              np.array(ks)*0.1, ["Typical", "Avoids FP", "Avoids FN"], (1, 3))
def write_answer_2(k_1, k_10, k_11):
    """Write the three optimal-threshold multipliers, space-separated, to the grader file."""
    values = (k_1, k_10, k_11)
    with open("pa_metrics_problem2.txt", "w") as out_file:
        out_file.write(" ".join(str(value) for value in values))
# Submit the k values found above.
write_answer_2(ks[0], ks[1], ks[2])
```
### Метрики, оценивающие векторы вероятностей класса 1
Рассмотренные метрики удобно интерпретировать, но при их использовании мы не учитываем большую часть информации, полученной от алгоритма. В некоторых задачах вероятности нужны в чистом виде, например, если мы предсказываем, выиграет ли команда в футбольном матче, и величина вероятности влияет на размер ставки за эту команду. Даже если в конце концов мы все равно бинаризуем предсказание, хочется следить за характером вектора вероятности.
#### Log_loss
Log_loss вычисляет правдоподобие меток в actual с вероятностями из predicted, взятое с противоположным знаком:
$log\_loss(actual, predicted) = - \frac 1 n \sum_{i=1}^n (actual_i \cdot \log (predicted_i) + (1-actual_i) \cdot \log (1-predicted_i))$, $n$ - длина векторов.
Соответственно, эту метрику нужно минимизировать.
Вычислим ее на наших векторах:
```
from sklearn.metrics import log_loss
# Compare plain log-loss across the scenario groups.
print("Алгоритмы, разные по качеству:")
for actual, predicted, descr in zip([actual_0, actual_1, actual_2],
                                    [predicted_0, predicted_1, predicted_2],
                                    ["Perfect:", "Typical:", "Awful:"]):
    print(descr, log_loss(actual, predicted))
# Fix: the original had a bare `print` here — a Python-2 leftover that is a
# no-op expression in Python 3 — so the intended blank line never appeared.
print()
print("Осторожный и рискующий алгоритмы:")
for actual, predicted, descr in zip([actual_0, actual_0r, actual_1, actual_1r],
                                    [predicted_0, predicted_0r, predicted_1, predicted_1r],
                                    ["Ideal careful", "Ideal risky", "Typical careful:", "Typical risky:"]):
    print(descr, log_loss(actual, predicted))
print()
print("Разные склонности алгоритмов к ошибкам FP и FN:")
for actual, predicted, descr in zip([actual_10, actual_11],
                                    [predicted_10, predicted_11],
                                    ["Avoids FP:", "Avoids FN:"]):
    print(descr, log_loss(actual, predicted))
```
Как и предыдущие метрики, log_loss хорошо различает идеальный, типичный и плохой случаи. Но обратите внимание, что интерпретировать величину достаточно сложно: метрика не достигает нуля никогда и не имеет верхней границы. Поэтому даже для идеального алгоритма, если смотреть только на одно значение log_loss, невозможно понять, что он идеальный.
Но зато эта метрика различает осторожный и рискующий алгоритмы. Как мы видели выше, в случаях Typical careful и Typical risky количество ошибок при бинаризации по T = 0.5 примерно одинаковое, в случаях Ideal ошибок вообще нет. Однако за неудачно угаданные классы в Typical рискующему алгоритму приходится платить большим увеличением log_loss, чем осторожному алгоритму. С другой стороны, за удачно угаданные классы рискованный идеальный алгоритм получает меньший log_loss, чем осторожный идеальный алгоритм.
Таким образом, log_loss чувствителен и к вероятностям, близким к 0 и 1, и к вероятностям, близким к 0.5.
Ошибки FP и FN обычный Log_loss различать не умеет.
Однако нетрудно сделать обобщение log_loss на случай, когда нужно больше штрафовать FP или FN: для этого достаточно добавить выпуклую (то есть неотрицательную и суммирующуюся к единице) комбинацию из двух коэффициентов к слагаемым правдоподобия. Например, давайте штрафовать false positive:
$weighted\_log\_loss(actual, predicted) = -\frac 1 n \sum_{i=1}^n (0.3\, \cdot actual_i \cdot \log (predicted_i) + 0.7\,\cdot (1-actual_i)\cdot \log (1-predicted_i))$
Если алгоритм неверно предсказывает большую вероятность первому классу, то есть объект на самом деле принадлежит классу 0, то первое слагаемое в скобках равно нулю, а второе учитывается с большим весом.
__<font color="green" size=5>Programming assignment: problem 3. </font>__ Напишите функцию, которая берет на вход векторы actual и predicted и возвращает модифицированный Log-Loss, вычисленный по формуле выше. Вычислите ее значение (обозначим его wll) на тех же векторах, на которых мы вычисляли обычный log_loss, и запишите в файл в следующем порядке:
wll_0 wll_1 wll_2 wll_0r wll_1r wll_10 wll_11
Цифры XXX после пробела соответствуют таким же цифрам в названиях переменных actual_XXX и predicted_XXX.
Передайте ответ в функцию write_answer3. Загрузите файл в форму.
```
############### Programming assignment: problem 3 ###############
def log_loss_2(actual, predicted, alpha=0.7):
    """Weighted log-loss averaged over the sample and negated: the
    (1 - actual) * log(1 - predicted) term (penalizing false positives) is
    weighted by alpha, the actual * log(predicted) term by 1 - alpha."""
    sample_count = predicted.shape[0]
    positive_term = (1 - alpha) * actual * np.log(predicted)
    negative_term = alpha * (1 - actual) * np.log(1 - predicted)
    return -(np.sum(positive_term + negative_term).astype('float64') / sample_count)
def write_answer_3(wll_0, wll_1, wll_2, wll_0r, wll_1r, wll_10, wll_11):
    """Write the seven weighted log-loss values, space-separated, to the grader file."""
    values = (wll_0, wll_1, wll_2, wll_0r, wll_1r, wll_10, wll_11)
    with open("pa_metrics_problem3.txt", "w") as out_file:
        out_file.write(" ".join(str(value) for value in values))
# Evaluate the weighted log-loss on all scenario vectors and submit.
print("Алгоритмы, разные по качеству:")
for actual, predicted, descr in zip([actual_0, actual_1, actual_2],
                                    [predicted_0, predicted_1, predicted_2],
                                    ["Perfect:", "Typical:", "Awful:"]):
    print(descr, log_loss_2(actual, predicted))
# Fix: the original had a bare `print` here (a no-op expression in Python 3).
print()
print("Осторожный и рискующий алгоритмы:")
# Fix: the original labels ("Ideal careful", "Ideal risky", "Typical careful:",
# "Typical risky:") did not match the zipped data, which are the risky-perfect,
# risky-typical, avoids-FP and avoids-FN vectors.
for actual, predicted, descr in zip([actual_0r, actual_1r, actual_10, actual_11],
                                    [predicted_0r, predicted_1r, predicted_10, predicted_11],
                                    ["Perfect risky:", "Typical risky:", "Avoids FP:", "Avoids FN:"]):
    print(descr, log_loss_2(actual, predicted))
wll_0 = log_loss_2(actual_0, predicted_0)
wll_1 = log_loss_2(actual_1, predicted_1)
wll_2 = log_loss_2(actual_2, predicted_2)
wll_0r = log_loss_2(actual_0r, predicted_0r)
wll_1r = log_loss_2(actual_1r, predicted_1r)
wll_10 = log_loss_2(actual_10, predicted_10)
wll_11 = log_loss_2(actual_11, predicted_11)
write_answer_3(wll_0, wll_1, wll_2, wll_0r, wll_1r, wll_10, wll_11)
```
Обратите внимание на разницу weighted_log_loss между случаями Avoids FP и Avoids FN.
#### ROC и AUC
При построении ROC-кривой (receiver operating characteristic) происходит варьирование порога бинаризации вектора вероятностей, и вычисляются величины, зависящие от числа ошибок FP и FN. Эти величины задаются так, чтобы в случае, когда существует порог для идеального разделения классов, ROC-кривая проходила через определенную точку - верхний левый угол квадрата [0, 1] x [0, 1]. Кроме того, она всегда проходит через левый нижний и правый верхний углы. Получается наглядная визуализация качества алгоритма. С целью охарактеризовать эту визуализацию численно, ввели понятие AUC - площадь под ROC-кривой.
Есть несложный и эффективный алгоритм, который за один проход по выборке вычисляет ROC-кривую и AUC, но мы не будем вдаваться в детали.
Построим ROC-кривые для наших задач:
```
from sklearn.metrics import roc_curve, roc_auc_score
# ROC curves for the three scenario groups; `aucs` accumulates one printable
# summary string of AUC values across all three subplots.
plt.figure(figsize=(15, 5))
plt.subplot(1, 3, 1)
aucs = ""
for actual, predicted, descr in zip([actual_0, actual_1, actual_2],
                                    [predicted_0, predicted_1, predicted_2],
                                    ["Perfect", "Typical", "Awful"]):
    fpr, tpr, thr = roc_curve(actual, predicted)
    plt.plot(fpr, tpr, label=descr)
    aucs += descr + ":%3f"%roc_auc_score(actual, predicted) + " "
plt.xlabel("false positive rate")
plt.ylabel("true positive rate")
plt.legend(loc=4)
plt.axis([-0.1, 1.1, -0.1, 1.1])
plt.subplot(1, 3, 2)
for actual, predicted, descr in zip([actual_0, actual_0r, actual_1, actual_1r],
                                    [predicted_0, predicted_0r, predicted_1, predicted_1r],
                                    ["Ideal careful", "Ideal Risky", "Typical careful", "Typical risky"]):
    fpr, tpr, thr = roc_curve(actual, predicted)
    aucs += descr + ":%3f"%roc_auc_score(actual, predicted) + " "
    plt.plot(fpr, tpr, label=descr)
plt.xlabel("false positive rate")
plt.ylabel("true positive rate")
plt.legend(loc=4)
plt.axis([-0.1, 1.1, -0.1, 1.1])
plt.subplot(1, 3, 3)
for actual, predicted, descr in zip([actual_1, actual_10, actual_11],
                                    [predicted_1, predicted_10, predicted_11],
                                    ["Typical", "Avoids FP", "Avoids FN"]):
    fpr, tpr, thr = roc_curve(actual, predicted)
    aucs += descr + ":%3f"%roc_auc_score(actual, predicted) + " "
    plt.plot(fpr, tpr, label=descr)
plt.xlabel("false positive rate")
plt.ylabel("true positive rate")
plt.legend(loc=4)
plt.axis([-0.1, 1.1, -0.1, 1.1])
print(aucs)
```
Чем больше объектов в выборке, тем более гладкой выглядит кривая (хотя на самом деле она все равно ступенчатая).
Как и ожидалось, кривые всех идеальных алгоритмов проходят через левый верхний угол. На первом графике также показана типичная ROC-кривая (обычно на практике они не доходят до "идеального" угла).
AUC рискующего алгоритма значительно меньше, чем у осторожного, хотя осторожный и рискующий идеальные алгоритмы не различаются по ROC или AUC. Поэтому стремиться увеличить зазор между интервалами вероятностей классов смысла не имеет.
Наблюдается перекос кривой в случае, когда алгоритму свойственны ошибки FP или FN. Однако по величине AUC это отследить невозможно (кривые могут быть симметричны относительно диагонали (0, 1)-(1, 0)).
После того, как кривая построена, удобно выбирать порог бинаризации, в котором будет достигнут компромисс между FP или FN. Порог соответствует точке на кривой. Если мы хотим избежать ошибок FP, нужно выбирать точку на левой стороне квадрата (как можно выше), если FN - точку на верхней стороне квадрата (как можно левее). Все промежуточные точки будут соответствовать разным пропорциям FP и FN.
__<font color="green" size=5>Programming assignment: problem 4. </font>__ На каждой кривой найдите точку, которая ближе всего к левому верхнему углу (ближе в смысле обычного евклидова расстояния), этой точке соответствует некоторый порог бинаризации. Запишите в выходной файл пороги в следующем порядке:
T_0 T_1 T_2 T_0r T_1r T_10 T_11
Цифры XXX после пробела соответствуют таким же цифрам в названиях переменных actual_XXX и predicted_XXX.
Если порогов, минимизирующих расстояние, несколько, выберите __наибольший__.
Передайте ответ в функцию write_answer_4. Загрузите файл в форму.
Пояснение: функция roc_curve возвращает три значения: FPR (массив абсцисс точек ROC-кривой), TPR (массив ординат точек ROC-кривой) и thresholds (массив порогов, соответствующих точкам).
Рекомендуем отрисовывать найденную точку на графике с помощью функции plt.scatter.
```
############### Programming assignment: problem 4 ###############
def best_thr(actual, predicted):
    """Return the ROC threshold whose (FPR, TPR) point lies closest, in
    Euclidean distance, to the ideal top-left corner (0, 1)."""
    fpr, tpr, thr = roc_curve(actual, predicted)
    print(thr)
    squared_distances = (1.0 - tpr) ** 2 + fpr ** 2
    # np.argmin returns the first minimum; roc_curve thresholds come in
    # decreasing order, so ties resolve to the largest threshold, as required.
    return thr[np.argmin(squared_distances)]
def write_answer_4(T_0, T_1, T_2, T_0r, T_1r, T_10, T_11):
    """Write the seven optimal ROC thresholds, space-separated, to the grader file."""
    values = (T_0, T_1, T_2, T_0r, T_1r, T_10, T_11)
    with open("pa_metrics_problem4.txt", "w") as out_file:
        out_file.write(" ".join(str(value) for value in values))
# Closest-to-corner thresholds for all seven (actual, predicted) pairs.
T_0 = best_thr(actual_0, predicted_0)
T_1 = best_thr(actual_1, predicted_1)
T_2 = best_thr(actual_2, predicted_2)
T_0r = best_thr(actual_0r, predicted_0r)
T_1r = best_thr(actual_1r, predicted_1r)
T_10 = best_thr(actual_10, predicted_10)
T_11 = best_thr(actual_11, predicted_11)
write_answer_4(T_0, T_1, T_2, T_0r, T_1r, T_10, T_11)
# IPython shell escape: display the produced answer file.
!cat pa_metrics_problem4.txt
```
Наподобие roc_curve, строят также precision-recall curve и ищут площадь под ней.
### Заключение
Мы рассмотрели несколько метрик бинарной классификации. Некоторые из них, например, log_loss, обобщаются на многоклассовый случай. Если метрику сложно обобщить в виде формулы, задачу многоклассовой классификации рассматривают как совокупность задач бинарной классификации и затем особыми способами усредняют метрику (например, micro и macro averaging).
На практике всегда полезно визуализировать векторы, которые выдает ваш алгоритм, чтобы понимать, какие он делает ошибки при разных порогах и как метрика реагирует на выдаваемые векторы предсказаний.
| github_jupyter |
#LOAD LIBRARIES
```
from google.colab import drive
drive.mount('/content/drive')
# Load Libraries
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import pandas as pd
import chardet
import multiprocessing
import random
from sklearn.preprocessing import scale
from sklearn.preprocessing import OneHotEncoder
from sklearn import preprocessing
import timeit
```
#DATASET
## Read Dataset
```
df = pd.read_csv('/content/drive/My Drive/Copia de final_severity_dataset.csv')
df
```
#Split in Train/Validation/Test
We will use the usually recommended ratios as an example:
Train: 70%.
Validation: 15%.
Test: 15%.
```
# Inspect the available columns before selecting features.
df.columns
X=pd.DataFrame()
# Target: patient state. Features: everything except the target and two
# columns excluded from modeling ('Fumador_No', 'Prob_lethality').
y = df['State']
X = df.loc[:, df.columns != 'State']
X = X.loc[:, X.columns != 'Fumador_No']
X = X.loc[:, X.columns != 'Prob_lethality']
# Coerce all remaining feature columns to numeric dtypes.
X = X.apply(pd.to_numeric)
X
# Split proportions: 70% train, 15% validation, 15% test.
perc_values = [0.7, 0.15, 0.15];
from sklearn.model_selection import train_test_split
# First split off train (70%) vs. the combined validation+test pool (30%).
X_train_rand, X_valtest_rand, y_train_rand, y_valtest_rand = train_test_split(X, y, test_size=perc_values[1] + perc_values[2], random_state=1);
# Then split the pool in half: 0.15 / (0.15 + 0.15) = 50% of it goes to test.
X_val_rand, X_test_rand, y_val_rand, y_test_rand = train_test_split(X_valtest_rand, y_valtest_rand, test_size= perc_values[2] / (perc_values[1] + perc_values[2]), random_state=1)
# Sanity-check the resulting split sizes.
print('Train data size = ' + str(X_train_rand.shape))
print('Train target size = ' + str(y_train_rand.shape))
print('Validation data size = ' + str(X_val_rand.shape))
print('Validation target size = ' + str(y_val_rand.shape))
print('Test data size = ' + str(X_test_rand.shape))
print('Test target size = ' + str(y_test_rand.shape))
```
#RANDOM FOREST
1) Import model.
In this case we import the Random Forest Classifier model from scikit-learn.
```
from sklearn.ensemble import RandomForestClassifier
```
2) Import metric
```
from sklearn.metrics import roc_auc_score as auc;
from sklearn.metrics import confusion_matrix as confusion_matrix;
from sklearn.metrics import accuracy_score as acc;
```
3) Define the method
```
model = RandomForestClassifier(n_estimators = 10, random_state = 1)
model
```
4) Call the fit method to train the model
```
start = timeit.default_timer()
model.fit(X_train_rand, np.array(y_train_rand))
stop = timeit.default_timer()
print('Time: ', stop - start)
```
5) Call the predict method to generate the predictions.
```
# Time the prediction step over all three splits.
start = timeit.default_timer()
pred_train = model.predict(X_train_rand)
pred_val = model.predict(X_val_rand)
pred_test = model.predict(X_test_rand)
stop = timeit.default_timer()
print('Time: ', stop - start)
# Binarize (one-hot) the class labels so the multiclass AUC can be computed.
# NOTE(review): the same LabelBinarizer is re-fit on each split below; if a
# class is missing from a split the column layouts will differ — confirm all
# classes appear in every split, or fit once on the full y instead.
# Label test
lb = preprocessing.LabelBinarizer()
lb.fit(y_test_rand)
y_test_lb = lb.transform(y_test_rand)
val_lb = lb.transform(pred_test)
#Label train
lb.fit(y_train_rand)
y_train_lb = lb.transform(y_train_rand)
val_train_lb = lb.transform(pred_train)
#Label validation
lb.fit(y_val_rand)
y_val_lb = lb.transform(y_val_rand)
val_val_lb = lb.transform(pred_val)
```
6) Calculate metrics using the predictions obtained in the previous step.
```
# Macro-averaged AUC on the binarized labels for each split.
# NOTE(review): these AUCs are computed from hard class predictions rather
# than probabilities (model.predict_proba); AUC on hard labels is a coarser
# estimate — confirm this is intended.
auc_train = auc(y_train_lb, val_train_lb, average='macro')
auc_val = auc(y_val_lb, val_val_lb, average='macro')
auc_test = auc(y_test_lb, val_lb, average='macro')
results = pd.DataFrame()
# DataFrame.append was removed in pandas 2.0 — build the row and concat it.
results = pd.concat(
    [results,
     pd.DataFrame(data={'model': ['Random Forest (Default)'],
                        'auc_train': [auc_train],
                        'auc_val': [auc_val],
                        'auc_test': [auc_test]},
                  columns=['model', 'auc_train', 'auc_val', 'auc_test'])],
    ignore_index=True)
results
# Accuracy on the same splits.
acc_train = acc(y_train_lb, val_train_lb)
acc_val = acc(y_val_lb, val_val_lb)
acc_test = acc(y_test_lb, val_lb)
results2 = pd.DataFrame()
results2 = pd.concat(
    [results2,
     pd.DataFrame(data={'model': ['Random Forest (Default)'],
                        'acc_train': [acc_train],
                        'acc_val': [acc_val],
                        'acc_test': [acc_test]},
                  columns=['model', 'acc_train', 'acc_val', 'acc_test'])],
    ignore_index=True)
results2
```
## Importance of features
```
# Feature importances reported by the trained random forest.
importances = list(model.feature_importances_)
# Pair each feature name (iterating X yields its column names) with its
# importance, rounded to 3 decimals.
feature_importances = [(feature, round(importance, 3)) for feature, importance in zip(X, importances)]
# Sort features by decreasing importance.
feature_importances = sorted(feature_importances, key = lambda x: x[1], reverse = True)
for pair in feature_importances:
    print(pair)
import seaborn as sns
# Bar positions: one slot per feature. Generalized from a hard-coded
# 0..19 list so the plot does not break if the feature count changes.
r4 = list(range(len(feature_importances)))
variables = [0]*len(feature_importances)
pesos = [0]*len(feature_importances)
# Unzip the (name, weight) pairs into parallel lists for plotting.
for x in range(len(feature_importances)):
    variables[x], pesos[x] = feature_importances[x]
plt.figure(figsize=(25,10))
#Plot the data:
my_colors = sns.color_palette("Blues")
plt.bar(r4, pesos, width = 0.8, edgecolor='black',color = my_colors)
plt.xticks([r for r in range(len(r4))],variables, rotation=90,fontsize=15)
label = pesos
# Annotate each bar with its importance value just above the bar top.
for i in range(len(r4)):
    plt.text(x = r4[i]-0.2 , y = pesos[i]+0.005, s = label[i], size = 10)
```
| github_jupyter |
# `logictools` WaveDrom Tutorial
[WaveDrom](http://wavedrom.com) is a tool for rendering digital timing waveforms. The waveforms are defined in a simple textual format.
This notebook will show how to render digital waveforms using the pynq library.
The __`logictools`__ overlay uses the same format as WaveDrom to specify and generate real signals on the board.
A full tutorial of WaveDrom can be found [here](http://wavedrom.com/tutorial.html)
### Step 1: Import the `draw_wavedrom()` method from the pynq library
```
from pynq.lib.logictools.waveform import draw_wavedrom
```
A simple function to add wavedrom diagrams into a jupyter notebook. It utilizes the wavedrom java script library.
<font color="DodgerBlue">**Example usage:**</font>
```python
from pynq.lib.logictools.waveform import draw_wavedrom
clock = {'signal': [{'name': 'clk', 'wave': 'h....l...'}]}
draw_wavedrom(clock)
```
<font color="DodgerBlue">**Method:**</font>
```python
def draw_wavedrom(data, width=None):
# Note the optional argument width forces the width in pixels
```
### Step 2: Specify and render a waveform
```
from pynq.lib.logictools.waveform import draw_wavedrom
clock = {'signal': [{'name': 'clock_0', 'wave': 'hlhlhlhlhlhlhlhl'}],
'foot': {'tock': 1},
'head': {'text': 'Clock Signal'}}
draw_wavedrom(clock)
```
### Step 3: Adding more signals to the waveform
```
from pynq.lib.logictools.waveform import draw_wavedrom
pattern = {'signal': [{'name': 'clk', 'wave': 'hl' * 8},
{'name': 'clkn', 'wave': 'lh' * 8},
{'name': 'data0', 'wave': 'l.......h.......'},
{'name': 'data1', 'wave': 'h.l...h...l.....'}],
'foot': {'tock': 1},
'head': {'text': 'Pattern'}}
draw_wavedrom(pattern)
```
__Adding multiple wave groups and spaces__
```
from pynq.lib.logictools.waveform import draw_wavedrom
pattern_group = {'signal': [['Group1',
{'name': 'clk', 'wave': 'hl' * 8},
{'name': 'clkn', 'wave': 'lh' * 8},
{'name': 'data0', 'wave': 'l.......h.......'},
{'name': 'data1', 'wave': 'h.l...h...l.....'}],
{},
['Group2',
{'name': 'data2', 'wave': 'l...h..l.h......'},
{'name': 'data3', 'wave': 'l.h.' * 4}]],
'foot': {'tock': 1},
'head': {'text': 'Pattern'}}
draw_wavedrom(pattern_group)
```
# WaveDrom for real-time pattern generation and trace analysis
### The __`logictools`__ overlay uses WaveJSON format to specify and generate real signals on the board.

* As shown in the figure above, the Pattern Generator is an output-only block that specifies a sequence of logic values (patterns) which appear on the output pins of the ARDUINO interface. The logictools API for Pattern Generator accepts **WaveDrom** specification syntax with some enhancements.
* The Trace Analyzer is an input-only block that captures and records all the IO signals. These signals may be outputs driven by the generators or inputs to the PL that are driven by external circuits. The Trace Analyzer allows us to verify that the output signals we have specified from the generators are being applied correctly. It also allows us to debug and analyze the operation of the external interface.
* The signals generated or captured by both the blocks can be displayed in the notebook by populating the WaveJSON dictionary that we have seen in this notebook. Users can access this dictionary through the provided API to extend or modify the waveform with special annotations.
* We use a subset of the wave tokens that are allowed by WaveDrom to specify the waveforms for the Pattern Generator. However, users can call the `draw_waveform()` method on the dictionary populated by the Trace Analyzer to extend and modify the dictionary with annotations.
__In the example below, we are going to generate 3 signals on the Arduino interface pins D0, D1 and D2 using the Pattern Generator. Since all IOs are accessible to the Trace Analyzer, we will capture the data on the pins as well. This operation will serve as an internal loopback.__
### Step 1: Download the `logictools` overlay and specify the pattern
The pattern to be generated is specified in the WaveJSON format. The Waveform class is used to display the specified waveform.
```
from pynq.lib.logictools import Waveform
from pynq.overlays.logictools import LogicToolsOverlay
from pynq.lib.logictools import PatternGenerator
# Download the logictools overlay bitstream to the programmable logic.
logictools_olay = LogicToolsOverlay('logictools.bit')
# WaveJSON specification for the loopback: the 'stimulus' group drives pins
# D0-D2 from the Pattern Generator; the 'analysis' group (no 'wave' keys yet)
# tells the Trace Analyzer which pins to capture.
loopback_test = {'signal': [
['stimulus',
{'name': 'output0', 'pin': 'D0', 'wave': 'lh' * 8},
{'name': 'output1', 'pin': 'D1', 'wave': 'l.h.' * 4},
{'name': 'output2', 'pin': 'D2', 'wave': 'l...h...' * 2}],
{},
['analysis',
{'name': 'input0', 'pin': 'D0'},
{'name': 'input1', 'pin': 'D1'},
{'name': 'input2', 'pin': 'D2'}]],
'foot': {'tock': 1},
'head': {'text': 'loopback_test'}}
# Render the specified (not yet captured) waveform.
waveform = Waveform(loopback_test)
waveform.display()
```
**Note:** Since there are no captured samples at this moment, the analysis group will be empty.
### Step 2: Run the pattern generator and trace the loopback signals.
This step populates the WaveJSON dict with the captured trace analyzer samples. The dict can now serve as an output that we can further modify. It is shown in the next step.
```
pattern_generator = logictools_olay.pattern_generator
pattern_generator.trace(num_analyzer_samples=16)
pattern_generator.setup(loopback_test,
stimulus_group_name='stimulus',
analysis_group_name='analysis')
pattern_generator.run()
pattern_generator.show_waveform()
```
### Step 3: View the output waveJSON dict.
```
import pprint
output_wavejson = pattern_generator.waveform.waveform_dict
pprint.pprint(output_wavejson)
```

### Step 4: Extending the output waveJSON dict with state annotation
```
state_list = ['S0', 'S1', 'S2', 'S3', 'S4', 'S5', 'S6', 'S7',
'S0', 'S1', 'S2', 'S3', 'S4', 'S5', 'S6', 'S7']
color_dict = {'white': '2', 'yellow': '3', 'orange': '4', 'blue': '5'}
output_wavejson['signal'].extend([{}, ['Annotation',
{'name': 'state',
'wave': color_dict['yellow'] * 8 +
color_dict['blue'] * 8,
'data': state_list}]])
```
__Note:__ The color_dict is a color code map as defined by WaveDrom
```
draw_wavedrom(output_wavejson)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/SMSinclair/DS-Unit-1-Sprint-4-Statistical-Tests-and-Experiments/blob/master/Stephen_Sinclair_DS_Unit_1_Sprint_Challenge_3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Data Science Unit 1 Sprint Challenge 4
## Exploring Data, Testing Hypotheses
In this sprint challenge you will look at a dataset of people being approved or rejected for credit.
https://archive.ics.uci.edu/ml/datasets/Credit+Approval
Data Set Information: This file concerns credit card applications. All attribute names and values have been changed to meaningless symbols to protect confidentiality of the data. This dataset is interesting because there is a good mix of attributes -- continuous, nominal with small numbers of values, and nominal with larger numbers of values. There are also a few missing values.
Attribute Information:
- A1: b, a.
- A2: continuous.
- A3: continuous.
- A4: u, y, l, t.
- A5: g, p, gg.
- A6: c, d, cc, i, j, k, m, r, q, w, x, e, aa, ff.
- A7: v, h, bb, j, n, z, dd, ff, o.
- A8: continuous.
- A9: t, f.
- A10: t, f.
- A11: continuous.
- A12: t, f.
- A13: g, p, s.
- A14: continuous.
- A15: continuous.
- A16: +,- (class attribute)
Yes, most of that doesn't mean anything. A16 (the class attribute) is the most interesting, as it separates the 307 approved cases from the 383 rejected cases. The remaining variables have been obfuscated for privacy - a challenge you may have to deal with in your data science career.
Sprint challenges are evaluated based on satisfactory completion of each part. It is suggested you work through it in order, getting each aspect reasonably working, before trying to deeply explore, iterate, or refine any given step. Once you get to the end, if you want to go back and improve things, go for it!
## Part 1 - Load and validate the data
- Load the data as a `pandas` data frame.
- Validate that it has the appropriate number of observations (you can check the raw file, and also read the dataset description from UCI).
- UCI says there should be missing data - check, and if necessary change the data so pandas recognizes it as na
- Make sure that the loaded features are of the types described above (continuous values should be treated as float), and correct as necessary
This is review, but skills that you'll use at the start of any data exploration. Further, you may have to do some investigation to figure out which file to load from - that is part of the puzzle.
```
# imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import style
from scipy.stats import ttest_1samp, ttest_ind, chisquare, chi2_contingency
from pandas.plotting import scatter_matrix
columns = ['A1','A2','A3','A4','A5','A6','A7','A8','A9','A10','A11','A12','A13',
'A14','A15','A16']
# Load the data as a pandas dataframe
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/credit-screening/crx.data', header=None, names=columns)
df.head()
df.tail()
# Replace '?' with np.nan, change to class attribute to 0 or 1
df = df.replace({'?':np.nan, '-':0, '+':1})
df.head()
# Validate that it has the appropriate number of observations:
# crx.names specifies 690 instances, and 16 attributes(15 + a class attribute)
# (690,16) would be the appropriate number of observations
df.shape
df.describe()
df.describe(exclude='number')
# Check null values. They match those listed in crx.names:
# Missing Attribute Values:
# 37 cases (5%) have one or more missing values. The missing
# values from particular attributes are:
# A1: 12
# A2: 12
# A4: 6
# A5: 6
# A6: 9
# A7: 9
# A14: 13
df.isnull().sum()
# Make sure that the loaded features are of the types described above
# (continuous values should be treated as float), and correct as necessary
# Attribute Information:
# A1: b, a.
# A2: continuous.
# A3: continuous.
# A4: u, y, l, t.
# A5: g, p, gg.
# A6: c, d, cc, i, j, k, m, r, q, w, x, e, aa, ff.
# A7: v, h, bb, j, n, z, dd, ff, o.
# A8: continuous.
# A9: t, f.
# A10: t, f.
# A11: continuous.
# A12: t, f.
# A13: g, p, s.
# A14: continuous.
# A15: continuous.
# A16: +,- (class attribute)
# A2 is object and should be float
# A2 has NaN values which are causing this
# A11 is int64 and should be float
# A14 is object and should be float
# A14 has NaN values that are causing this
# A15 is int64 and should be float
df.dtypes
# Do the ints first: cast the integer columns to float64.
df['A11'] = df['A11'].astype('float64')
df['A15'] = df['A15'].astype('float64')
# A11 and A15 are type float64
df.dtypes
# Now the objects: after '?' was replaced with NaN, these cast cleanly.
df['A2'] = df['A2'].astype('float64')
df['A14'] = df['A14'].astype('float64')
# All the continuous variables are type float64
df.dtypes
```
## Part 2 - Exploring data, Testing hypotheses
The only thing we really know about this data is that A16 is the class label. Besides that, we have 6 continuous (float) features and 9 categorical features.
Explore the data: you can use whatever approach (tables, utility functions, visualizations) to get an impression of the distributions and relationships of the variables. In general, your goal is to understand how the features are different when grouped by the two class labels (`+` and `-`).
For the 6 continuous features, how are they different when split between the two class labels? Choose two features to run t-tests (again split by class label) - specifically, select one feature that is *extremely* different between the classes, and another feature that is notably less different (though perhaps still "statistically significantly" different). You may have to explore more than two features to do this.
For the categorical features, explore by creating "cross tabs" (aka [contingency tables](https://en.wikipedia.org/wiki/Contingency_table)) between them and the class label, and apply the Chi-squared test to them. [pandas.crosstab](http://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.crosstab.html) can create contingency tables, and [scipy.stats.chi2_contingency](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.chi2_contingency.html) can calculate the Chi-squared statistic for them.
There are 9 categorical features - as with the t-test, try to find one where the Chi-squared test returns an extreme result (rejecting the null that the data are independent), and one where it is less extreme.
**NOTE** - "less extreme" just means smaller test statistic/larger p-value. Even the least extreme differences may be strongly statistically significant.
Your *main* goal is the hypothesis tests, so don't spend too much time on the exploration/visualization piece. That is just a means to an end - use simple visualizations, such as boxplots or a scatter matrix (both built in to pandas), to get a feel for the overall distribution of the variables.
This is challenging, so manage your time and aim for a baseline of at least running two t-tests and two Chi-squared tests before polishing. And don't forget to answer the questions in part 3, even if your results in this part aren't what you want them to be.
```
accept = df[df['A16']==1]
reject = df[df['A16']==0]
scatter_matrix(df, alpha=0.2, figsize=(12,12))
boxplot = df.boxplot('A2', by='A16', figsize =(12,8))
boxplot
boxplot = df.boxplot('A3', by='A16', figsize =(12,8))
boxplot
boxplot = df.boxplot('A8', by='A16', figsize =(12,8))
boxplot
boxplot = df.boxplot('A11', by='A16', figsize =(12,8))
boxplot
boxplot = df.boxplot('A14', by='A16', figsize =(12,8))
boxplot
boxplot = df.boxplot('A15', by='A16', figsize =(12,8))
boxplot
# Check means on continuous features split by class label
pivot = df.pivot_table(df, index = 'A16')
# A15 looks extremely different between the class labels
ttest_ind(df[df['A16']==0]['A15'], df[df['A16']==1]['A15'])
# And the t-test confirms we can reject:
# H0: the difference between the means for A15 split by class label is 0.
statistic, pvalue = ttest_ind(df[df['A16']==0]['A15'], df[df['A16']==1]['A15'])
pvalue < .01
# A2 seems notably less different between the class labels
ttest_ind(df[df['A16']==0]['A2'], df[df['A16']==1]['A2'], nan_policy='omit')
# Nonetheless, the t-test shows we can reject:
# H0: the difference between the means for A2 split by class label is 0.
# (fixed: this previously re-ran the A15 test instead of A2)
statistic, pvalue = ttest_ind(df[df['A16']==0]['A2'], df[df['A16']==1]['A2'], nan_policy='omit')
pvalue < .01
# For loop to print t-test results for all the numeric features.
# It turns out A14 is the "least extreme" but the difference is still
# statistically significant
for col in pivot.columns:
    stat, p = ttest_ind(df[df['A16']==0][col], df[df['A16']==1][col], nan_policy='omit')
    print(f"Feature: {col}")
    print(f"Statistic: {stat}")
    print(f"P-value: {p}")
    print("H0 rejected (95% confidence): " + str(p<.05))
    print('---'*10)
# For the categorical features, explore by creating "cross tabs" (aka
# contingency tables) between them and the class label
contingencyA1 = pd.crosstab(df['A16'], df['A1'])
contingencyA1
# chi2_contingency returns:
# chi2: float
# p: float
# dof: int
# expected: ndarray, same shape as observed
chi2_contingency(contingencyA1)
chi2, p, dof, expected = chi2_contingency(contingencyA1)
p < .05
contingencyA4 = pd.crosstab(df['A16'], df['A4'])
contingencyA4
contingencyA5 = pd.crosstab(df['A16'], df['A5'])
contingencyA5
contingencyA6 = pd.crosstab(df['A16'], df['A6'])
contingencyA6
contingencyA7 = pd.crosstab(df['A16'], df['A7'])
contingencyA7
contingencyA9 = pd.crosstab(df['A16'], df['A9'])
contingencyA9
contingencyA10 = pd.crosstab(df['A16'], df['A10'])
contingencyA10
contingencyA12 = pd.crosstab(df['A16'], df['A12'])
contingencyA12
contingencyA13 = pd.crosstab(df['A16'], df['A13'])
contingencyA13
contingencyA13.columns.name
clist = [contingencyA1, contingencyA4, contingencyA5, contingencyA6,
contingencyA7, contingencyA9, contingencyA10, contingencyA12,
contingencyA13,]
# Take list of contingency tables for each categorical feature and print
# returned values from chi2_contingency
for cont in clist:
chi2, p, dof, expected = chi2_contingency(cont)
print(f"Feature: {cont.columns.name}")
print(f"Chi-Squared: {chi2}")
print(f"P-value: {p}")
print(f"Degrees of Freedom: {dof}")
print("Expected: \n", np.array(expected))
print("H0 rejected (95% confidence): " + str(p<.05))
print('---'*10)
# Only in A1 and A12 were we unable to reject the null hypothesis.
# The most extreme difference between the observed and expected values was in
# A9.
```
## Part 3 - Analysis and Interpretation
Now that you've looked at the data, answer the following questions:
- Interpret and explain the two t-tests you ran - what do they tell you about the relationships between the continuous features you selected and the class labels?
- Interpret and explain the two Chi-squared tests you ran - what do they tell you about the relationships between the categorical features you selected and the class labels?
- What was the most challenging part of this sprint challenge?
Answer with text, but feel free to intersperse example code/results or refer to it from earlier.
##Interpret and explain the two t-tests you ran - what do they tell you about the relationships between the continuous features you selected and the class labels?
```
# I ran t-tests on all six continuous values, but will comment on the the tests
# for feature A11 and feature A14
for col in pivot.columns:
stat, p = ttest_ind(df[df['A16']==0][col], df[df['A16']==1][col], nan_policy='omit')
print(f"Feature: {col}")
print(f"Statistic: {stat}")
print(f"P-value: {p}")
print("H0 rejected (95% confidence): " + str(p<.05))
print('---'*10)
```
###Feature A11
* Null hypothesis: the difference between the mean of the values in A11 with class label 0 and the mean of those with class label 1 is equal to zero
* The p-value is incredibly small (7.957718568079967e-29), so we can confidently reject the null hypothesis. It is very unlikely that the differences in values was the result of chance.
* This tells us that the mean of the values for A11 are different for class label 0 and class label 1
###Feature A14
* Null hypothesis: the difference between the mean of the values in A14 with class label 0 and the mean of those with class label 1 is equal to zero
* The p-value is not as small as the one for the previous t-test, however it is still small enough to reject the null hypothesis at the 0.01 level
* This tells us that while the difference between the means of the values (grouped by class label) in A14 appears less extreme than the difference in A11, it is still very unlikely that the difference is merely due to chance.
##Interpret and explain the two Chi-squared tests you ran - what do they tell you about the relationships between the categorical features you selected and the class labels?
```
# I ran Chi-squared tests for all categorical variables, but will analyze A1 and
# A9.
for cont in clist:
chi2, p, dof, expected = chi2_contingency(cont)
print(f"Feature: {cont.columns.name}")
print(f"Chi-Squared: {chi2}")
print(f"P-value: {p}")
print(f"Degrees of Freedom: {dof}")
print("Expected: \n", np.array(expected))
print("H0 rejected (95% confidence): " + str(p<.05))
print('---'*10)
```
### Chi-Square Test for Independence for A16 and A1
* Here we have two categorical variables and are trying to figure out if there is a significant association between them.
* Null Hypothesis: A16 and A1 are independent.
* After running the Chi-Squared test we get a p-value of 0.5768937883001118. This is much higher than the traditional 0.05 level, so we cannot reject the null hypothesis. Our test has provided no reason to doubt that A16 and A1 are independent.
* This result is reflected in the values below. The expected values and the observed values are incredibly close, which lends some intuitive credence to the null hypothesis
```
A1 expected
[[115.84070796 258.15929204]
[94.15929204 209.84070796]]
A1 observed
[[112 262]
[98 206]]
```
###Chi-Square Test for Independence for A16 and A9
* Null Hypothesis: A16 and A9 are independent.
* After running a Chi-Squared test we get a p-value of 3.1185900878457007e-79, a very small p-value that is well below the 0.01 level. Therefore, we can confidently reject the null hypothesis. It is incredibly unlikely that A16 and A9 are independent.
* If you look below, you can see an obvious difference between the expected and observed values for A9.
* Since we concluded that the two categorical variables are not independent, we can also conclude that there is some kind of statistical relationship between them.
```
A9 Expected
[[182.61884058 200.38115942]
[146.38115942 160.61884058]]
A9 Observed
[[306 77]
[23 284]]
```
##What was the most challenging part of this sprint challenge?
* At first I thought the most challenging part would be parsing the text from the prompt for part 2 and confirming to myself what the minimum viable product for that part was. Most of the coding went smoothly.
* What turned out to be the most challenging part was switching gears between parts 2 and 3. I assumed part 2 was the most difficult, and when I finished it I thought I could easily just type out a few results to adequately answer part 3. But when I started working on it I realized that doing it well it would take a fair amount of effort and require a different type of thinking that still demanded precision. So I had to stay focused even though I was a bit tired from the rest of the challenge.
| github_jupyter |
```
# selenium for web driving
import selenium
from selenium import webdriver
# time for pausing between navigation
import time
# Datetime for recording time of submission
import datetime
# os for file management
import os

# Find the assignment file to turn in: scan each class folder and keep a
# (folder, filename) tuple for each file found.
# NOTE(review): file_tup ends up holding only the LAST non-empty folder,
# so only one assignment is submitted per run — confirm that is the intent.
submission_dir = 'C:/Users/Will Koehrsen/Desktop/completed_assignments'
dir_list = list(os.listdir(submission_dir))
for directory in dir_list:
    file_list = list(os.listdir(os.path.join(submission_dir, directory)))
    if file_list:
        file_tup = (directory, file_list[0])
        print(file_tup)

# Using Chrome to access web
driver = webdriver.Chrome()

# Open the website
# driver.get('https://canvas.case.edu')
# NOTE(review): this URL differs from the commented-out Canvas address that
# the login steps below were written for — confirm which site is intended.
driver.get('https://worlddata.ai/FilterAttributes/apidata?s=WIVKPLAMQXBK')

# Password for Canvas is kept in a local file rather than in the script
with open('C:/Users/Will Koehrsen/Desktop/cp.txt', 'r') as f:
    cp = f.read()

# Locate id and password fields
id_box = driver.find_element_by_name('username')
pass_box = driver.find_element_by_name('password')

# Send login information
id_box.send_keys('wjk68')
pass_box.send_keys(cp)

# Click login
login_button = driver.find_element_by_name('submit')
login_button.click()

# Find and click on list of courses
courses_button = driver.find_element_by_id('global_nav_courses_link')
courses_button.click()
time.sleep(2)  # wait for the page to load

# Pick the Canvas course that matches the assignment's folder name
folder = file_tup[0]
if folder == 'DSCI451':
    class_select = driver.find_element_by_link_text('Applied Data Science Research (100/5047)')
elif folder == 'DCSI453':
    class_select = driver.find_element_by_link_text('Data Science: Statistical Learning, Modeling and Prediction (100/5046)')
elif folder == 'EECS491':
    class_select = driver.find_element_by_link_text('Artificial Intelligence: Probabilistic Graphical Models (100/10039)')
elif folder == 'EECS531':
    class_select = driver.find_element_by_link_text('Computer Vision (100/10040)')

# Click on the specific class, then open its Assignments page
class_select.click()
assignment_button = driver.find_element_by_link_text('Assignments')
assignment_button.click()
time.sleep(2)  # wait for the page to load

# Locate the specific assignment (link text is the file name minus extension)
file_name = file_tup[1]
file_locator = file_name.split('.')[0]
specific_assigment = driver.find_element_by_link_text(file_locator)
specific_assigment.click()

# Click on the button to submit an assignment, falling back to re-submit
# when the assignment was already turned in once
try:
    submit_assignment_button = driver.find_element_by_link_text('Submit Assignment')
except Exception:
    print('Assignment already submitted, re-submitting')
    submit_assignment_button = driver.find_element_by_link_text('Re-submit Assignment')
submit_assignment_button.click()
time.sleep(2)  # wait for the page to load

# Choose file button: send the file path to the upload input
choose_file = driver.find_element_by_name('attachments[0][uploaded_data]')
file_location = os.path.join(submission_dir, folder, file_name)
choose_file.send_keys(file_location)
time.sleep(2)  # wait for the upload

# Move the file to the submitted folder so it is not submitted again
submitted_dir = 'C:/Users/Will Koehrsen/Desktop/submitted_assignments'
submitted_dir = os.path.join(submitted_dir, folder)
submitted_file_name = 'Submitted ' + file_name
submitted_file_location = os.path.join(submitted_dir, submitted_file_name)
os.rename(file_location, submitted_file_location)
print('{} Assignment for Class {} successfully submitted at {}.'.format(file_name,
    folder, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
print('Submitted assignment available at {}.'.format(submitted_file_location))

# Finalize the submission.
# (fixed: the click below previously used a misspelled name,
# 'submit_assignent', which raised NameError before submission completed)
submit_assignment = driver.find_element_by_id('submit_file_button')
submit_assignment.click()
```
| github_jupyter |
# Математическая оптимизация
Многие математические модели задач сводятся к нахождению максимумов или минимумов. Например, в экономических задачах необходимо минимизировать затраты и максимизировать прибыль. Большинство задач машинного обучения также сводятся к нахождению минимума или максимума. Чаще всего задача формулируется как нахождение таких параметров модели, при которых разница между фактическими и предсказанными значениями данных должна быть минимальной.
Например, мы хотим создать модель, которая должна предсказывать цену квартиры по её характеристикам. При этом необходимо минимизировать разницу между фактическими и предсказанными ценами. Или если модель должна предсказать вероятность какого либо заболевания по данным симптомам, то нужно максимизировать вероятность предсказания болезни у реальных больных и минимизровать вероятность у здоровых людей.
Задачи, в которых необходимо найти минимум или максимум называются задачами оптимизации. Как только задача сформулирована в виде задачи оптимизации, то появляется возможность решить её готовым набором методов.
Если у нас есть функция $f(x)$, то значения $x$, в которых эта функция достигает минимума или максимума называется точкой экстремума. Например, парабола $f(x)=x^2$ достигает своего минимума в точке $x=0$.
```
%matplotlib inline
%run code/math_examples.py
draw_parabola()
```
Часто на практике минимум нужно найти для многомерной функции. Например, параболоид описывается функцией $f(x, y) = x^2 + y^2$ и достигает своего минимума в точке $x=0, y=0$
```
draw_paraboloid()
```
Однако найти точку экстремума для произвольной функции крайне сложно, так как у функции может быть множество локальных минимумов или максимумов. Например, ниже приведен график функции, у которой несколько локальных точек экстремума.
```
draw_mishra_bird()
```
У некоторых функций вообще может не быть ни максимумов ни минимумов. Ниже приведен график такой функции
```
draw_hyperbolic_paraboloid()
```
Так как в общем виде для произвольной функции решить задачу оптимизации крайне сложно, то на практике задачи оптимизации сводятся к таким функциям, для которых относительно легко можно решить эту задачу.
Одним из способов нахождения экстремумов является нахождение таких точек функции, при которых производная этой функции равна нулю: $f'(x) = 0$. Интуитивно это можно объяснить тем, что функция, если у нее существует производная, ведет себя как касательная для небольшого интервала. Если касательная в заданной точке имеет острый угол с осью x, то это значит, что функция растет вблизи этой точки. Если касательная имеет тупой угол с осью x, то это значит, что функция убывает вблизи этой точки. Для простых функций можно напрямую решить уравнение $f'(x) = 0$ и найти точку экстремума в аналитическом виде (т.е. в виде формулы). Например, для параболы производная равна $f'(x) = 2x$ и составив уравнение $2x=0$ можно найти, что $x=0$. Для более сложных функций, можно следовать направлению касательной и найти ближайшую точку экстремума. Как было сказано выше, у функции может быть несколько экстремумов и в таком случае необходимо найти точку, где функция достигает наименьшего или наибольшего значения среди всех локальных экстремумов. Такая точка называется глобальной точкой экстремума.
Ниже приведен пример ломаной линии, которая аппроксимирует параболу. В точке $x=0$, где линия стала горизонтальной, функция достигает наименьшего значения.
```
draw_parabola(8)
```
В случае трехмерного пространства вместо касательной линии используется касательная плоскость. Для небольшого интервала функцию можно аппроксимировать этой касательной плоскостью.
Ниже приведен пример аппроксимации параболоида касательными плоскостями. В точке $x=0, y=0$, где плоскость стала горизонтальной, функция достигает наименьшего значения.
```
draw_paraboloid(8)
```
| github_jupyter |
# Design of Experiments
Prepared by Ric Alindayu (Chromewell Innovative Solutions, Inc.)
The Design of Experiments methodology is one of the methods used for optimizing experiments given a limited amount of resources. It has found its niche in the manufacturing industry because of increased costs from upscaled production.
To make the discussion simpler, we shall be using the pyDOE2 package in Python for doing simple calculations.
### Download the pyDOE2 package to your Jupyter notebook
Install the package to the notebook using the code below.
```
pip install pyDOE2
```
Next, import the following modules since we will be needing them in our exercise.
```
import pyDOE2 as doe # this gives us design matrices
import pandas as pd # it functions like Excel
```
### Problem: Two-Level Full Factorial Design
A $2^{3}$ factorial design was used to develop a nitride etch process on a single-wafer plasma etching tool. The design factors are the gap between the electrodes, the gas flow ($C_{2}F_{6}$ is used as the reactant gas), and the RF power applied to the cathode. Each factor is run at two levels, and the design is replicated twice.
The response variable is the etch rate for silicon nitride (Å/min).
The factor levels are provided below:
<img src="factortable.png">
#### Question 1: How many runs are required for a full factorial design of experiments with two levels and three factors?
We can use the pyDOE2 module to build a design matrix for us.
```
dmat = doe.ff2n(3)
print(dmat)
```
Let's make the array more friendly-looking.
```
# Creating the design matrix for a 2-level, n-factor design
# Creating the design matrix for a 2-level, n-factor design
def des_mat(n, fact):
    """Return the 2**n full-factorial design matrix as a pandas DataFrame.

    Rows are indexed by the standard Yates run labels: '(1)' for the run
    with every factor at its low (-1) level, then the lowercase letters of
    the factors that sit at their high (+1) level (e.g. 'a', 'ab', 'abc').

    Parameters
    ----------
    n : int
        Number of factors (must equal len(fact)).
    fact : list of str
        Factor names used as column headers, e.g. ['A', 'B', 'C'].
    """
    design_matrix = pd.DataFrame(doe.ff2n(n), columns=fact)
    # One lowercase letter per factor; these letters build the run labels.
    lowercaps = [col.lower() for col in design_matrix.columns]
    # For each run (row), collect the letters of the factors at +1.
    row_matrix = []
    run_matrix = []
    for i in design_matrix.index:
        for j, letter in enumerate(lowercaps):
            if design_matrix.iloc[i, j] == 1.0:
                row_matrix.append(i)
                run_matrix.append(letter)
    df = pd.DataFrame({'row': row_matrix, 'run': run_matrix})
    # Combine the letters that belong to the same run into one label.
    df = df.groupby(['row'])['run'].apply(''.join).reset_index()
    # The all-low run produced no letters; by convention it is labelled '(1)'.
    # NOTE: DataFrame.append was removed in pandas 2.0 -> use pd.concat.
    df = pd.concat([df, pd.DataFrame([{'row': 0, 'run': '(1)'}])],
                   ignore_index=True)
    df = df.sort_values('row').reset_index(drop=True)
    return design_matrix.set_index([pd.Index(df['run'])])
dm = des_mat(3,['A','B','C'])
pd.DataFrame(dm)
```
Let's append the results of the two replicated experiments to the design matrix you generated.
```
run = list(dm.index)
raw_results = pd.DataFrame([[run[0],550,604],[run[1],669,650],[run[2],633,601],[run[3],642,635],[run[4],1037,1052],[run[5],749,868],[run[6],1075,1063],[run[7],729,860]],columns=['run','Replicate 1','Replicate 2'])
raw_results.set_index('run')
dm_merged = pd.merge(dm, raw_results, on='run')
pd.DataFrame(dm_merged)
```
Next, we shall be getting the average of the two replicates, and getting the levels of the two-factor and three factor interactions.
```
total_results = dm_merged["Replicate 1"] + dm_merged["Replicate 2"]
dm_merged["Total Sum"] = total_results
dm_merged["Average"] = total_results/2
dm_merged["AB"] = dm_merged["A"]*dm_merged["B"]
dm_merged["AC"] = dm_merged["A"]*dm_merged["C"]
dm_merged["BC"] = dm_merged["B"]*dm_merged["C"]
dm_merged["ABC"] = dm_merged["A"]*dm_merged["B"]*dm_merged["C"]
pd.DataFrame(dm_merged)
```
Next, let's see the effects of the main factors and their interactions.
```
# Effect estimates and sums of squares for each main factor and interaction
# of the 2^3 design with 2 replicates (8 runs, divisors below are hard-coded
# for exactly that design size).
effects = ['A','B','C','AB','AC','BC','ABC']
effects_num = []
sum_of_squares = []
for i in range(len(effects)):
    # Split the per-run averages by the +/-1 level of this effect's column.
    upper_level = []
    lower_level = []
    for j in dm_merged.index:
        if dm_merged[effects[i]][j] == 1:
            upper_level.append(dm_merged["Average"][j])
        elif dm_merged[effects[i]][j] == -1:
            lower_level.append(dm_merged["Average"][j])
    # Effect = (mean at high) - (mean at low); each level has 4 runs here.
    effects_num.append((sum(upper_level) - sum(lower_level))/4)
    # SS = contrast^2 / (n * 2^k): the *2 turns sums of averages into sums of
    # replicate totals (n=2), and 16 = n * 2^k with k=3 factors.
    sum_of_squares.append(((sum(upper_level) - sum(lower_level))*2)**2/16)
answer = {'Factor': effects, 'Effect Estimate': effects_num, 'Sum of Squares': sum_of_squares}
pd.DataFrame(answer)
```
You still have to calculate the error sum of squares, but for simplicity, the effects of each main factor and interaction can be seen.
#### Question 2: Which factors are directly proportional to the response?
#### Question 3: Which factors are indirectly proportional to the response?
#### Question 4: Which factors highly affect the etch rate of the plasma?
```
doe.ff2n(4)
```
| github_jupyter |
---
# A simple regression example using parametric and non-parametric methods
---
This is a simple example where we use two regression methods to enhance the overall data distribution from a dataset.
1. A Linear fit (<i>parametric</i> method).
2. The LOWESS method (<i>non-parametric</i> method) that is often used in exploratory data analysis to reveal trends in the data.
LOWESS stands for Locally Weighted Scatterplot Smoothing.
We use a dataset that contains information about used-car resaling prices and their physical characteristics, such as engine power.
```
print(__doc__)
# Author: Pierre Gravel <pierre.gravel@iid.ulaval.ca>
# License: BSD
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(color_codes=True)
# Hides warning messages
import warnings
warnings.filterwarnings("ignore")
```
Extract and clean the data:
```
# Get the headers for this dataset
cols = ["symboling", "normalized_losses", "make", "fuel_type", "aspiration",
"num_doors", "body_style", "drive_wheels", "engine_location",
"wheel_base", "length", "width", "height", "curb_weight", "engine_type",
"num_cylinders", "engine_size", "fuel_system", "bore", "stroke",
"compression_ratio", "horsepower", "peak_rpm", "city_mpg", "highway_mpg",
"price"]
# Extract the data
cars = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/autos//imports-85.data", names=cols)
# Replace unknowns by NaNs
cars = cars.replace('?', np.nan)
# Now let's make things numeric
num_vars = ['normalized_losses', "bore", "stroke", "horsepower", "peak_rpm", "price"]
for i in num_vars:
cars[i] = cars[i].astype('float64')
# Delete car data for which price and MPG is unavailable
cars = cars.dropna(subset = ['price', 'city_mpg'])
# Display the factors and the response (car price)
cars.head()
```
## Parametric method
The following figure shows a <i>robust linear</i> fit between the selling price and the cars' mileage in cities.
The various body styles are indicated.
```
fig = plt.figure(figsize=(8, 8))
ax = sns.lmplot(x="city_mpg", y="price", data=cars, robust=True, height=5, aspect=1.8);
sns.scatterplot(x="city_mpg", y="price", hue="body_style", data=cars);
ax.set(xlabel='City Milage (miles/gallon)', ylabel='Selling Price ($US)')
ax.set(ylim=(0, 50000))
plt.savefig('Parametric fit (Linear fit) all cars.pdf')
plt.show()
```
The above linear fit does not look very good; it is usually better to do linear regressions with objects in the same
class, in this case with cars with the same body style. This is shown in the next figure for wagon vehicles.
The fit is better as the data points are uniformly spread along the line.
```
wagon = cars.loc[cars['body_style'] == 'wagon']
fig = plt.figure(figsize=(8, 8))
ax = sns.lmplot(x="city_mpg", y="price", data=wagon, robust=True, height=5, aspect=1.8);
sns.scatterplot(x="city_mpg", y="price", hue="body_style", data=wagon);
ax.set(xlabel='City Milage (miles/gallon)', ylabel='Selling Price ($US)')
ax.set(ylim=(0, 30000))
plt.savefig('Parametric fit (Linear fit) wagon cars.pdf')
plt.show()
```
## Non-Parametric method
The following figure shows a <i>robust non-linear</i> fit through the data. This is more a signal smoothing than an actual fit
since no fit parameter is extracted from the analysis. At each point, a linear fit is made using only the data in a
neighbourhood of that point.
This example shows a well-known fact for buyers on the used-car market; the cars with the largest mileage are less desirable,
and are sold at lower prices.
```
import statsmodels.api as sm
lowess = sm.nonparametric.lowess
x = cars["city_mpg"].array
y = cars["price"].array
# Non-parametric fit
xy = lowess(y, x, frac=0.33, is_sorted=False, return_sorted=True)
fig = plt.figure(figsize=(8, 8))
ax = sns.scatterplot(x="city_mpg", y="price", hue="body_style", data=cars);
ax.plot(xy[:,0],xy[:,1],color='blue')
ax.set_ylim([y.min(), y.max()])
ax.set(xlabel='City Milage (miles/gallon)', ylabel='Selling Price ($US)')
ax.set(ylim=(0, 50000))
plt.savefig('Non-parametric fit (LOWESS) all cars.pdf')
plt.show()
```
| github_jupyter |
```
from mxnet import nd
from mxnet.gluon import nn
def conv_block(channels):
    """Return a BN -> ReLU -> 3x3 same-padding Conv composite layer
    (the DenseNet pre-activation ordering)."""
    block = nn.Sequential()
    block.add(nn.BatchNorm())
    block.add(nn.Activation('relu'))
    block.add(nn.Conv2D(channels, kernel_size=3, padding=1))
    return block
class DenseBlock(nn.Block):
    """A DenseNet dense block: `layers` successive conv blocks whose outputs
    are concatenated channel-wise onto the running feature map, so the
    channel count grows by layers * growth_rate."""

    def __init__(self, layers, growth_rate, **kwargs):
        # layers: number of conv blocks; growth_rate: channels each block adds.
        super(DenseBlock, self).__init__(**kwargs)
        self.net = nn.Sequential()
        for i in range(layers):
            self.net.add(conv_block(growth_rate))

    def forward(self, x):
        # Dense connectivity: every block sees the concatenation of the input
        # and all previous block outputs (concat along the channel axis).
        for layer in self.net:
            out = layer(x)
            x = nd.concat(x, out, dim=1)
        return x
dblk = DenseBlock(2, 10)
dblk.initialize()
x = nd.random.uniform(shape=(4,3,8,8))
dblk(x).shape
def transition_block(channels):
    """Return a DenseNet transition layer: BN -> ReLU -> 1x1 Conv (channel
    reduction) -> 2x2 average pooling (spatial downsampling)."""
    block = nn.Sequential()
    block.add(nn.BatchNorm())
    block.add(nn.Activation('relu'))
    block.add(nn.Conv2D(channels, kernel_size=1))
    block.add(nn.AvgPool2D(pool_size=2, strides=2))
    return block
tblk = transition_block(10)
tblk.initialize()
tblk(x).shape
init_channels = 64
growth_rate = 32
block_layers = [6, 12, 24, 16]
num_classes = 10
def dense_net():
    """Build a DenseNet-style network using the module-level hyperparameters
    (init_channels, growth_rate, block_layers, num_classes).

    Layout: 7x7 stem conv -> 4 dense blocks separated by transition layers
    -> BN/ReLU/pool/Dense classifier head.
    """
    net = nn.Sequential()
    # add name_scope on the outermost Sequential
    with net.name_scope():
        # first block: 7x7 stride-2 conv stem followed by 3x3 stride-2 max pool
        net.add(
            nn.Conv2D(init_channels, kernel_size=7,
                      strides=2, padding=3),
            nn.BatchNorm(),
            nn.Activation('relu'),
            nn.MaxPool2D(pool_size=3, strides=2, padding=1)
        )
        # dense blocks; `channels` tracks the growing channel count
        channels = init_channels
        for i, layers in enumerate(block_layers):
            net.add(DenseBlock(layers, growth_rate))
            channels += layers * growth_rate
            # halve the channel count between dense blocks (not after the last)
            if i != len(block_layers)-1:
                net.add(transition_block(channels//2))
        # last block: BN/ReLU, pooling, then the linear classifier head
        net.add(
            nn.BatchNorm(),
            nn.Activation('relu'),
            nn.AvgPool2D(pool_size=1),
            nn.Flatten(),
            nn.Dense(num_classes)
        )
    return net
import sys
sys.path.append('..')
import gluonbook as gb
from mxnet import gluon
from mxnet import init
train_data, test_data = gb.load_data_fashion_mnist(
batch_size=64, resize=32)
ctx = gb.try_gpu()
net = dense_net()
net.initialize(ctx=ctx, init=init.Xavier())
loss = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(),
'sgd', {'learning_rate': 0.1})
gb.train(train_data, test_data, net, loss, trainer, ctx, num_epochs=1)
```
## 小结
* DenseNet通过将ResNet里的`+`替换成`concat`从而获得更稠密的连接。
## 练习
- DenseNet论文中提到的一个优点是其模型参数比ResNet更小,想想为什么?
- DenseNet被人诟病的一个问题是内存消耗过多。真的会这样吗?可以把输入换成$224\times 224$(需要改最后的`AvgPool2D`大小),来看看实际(GPU)内存消耗。
- 这里的FashionMNIST有必要用100+层的网络吗?尝试将其改简单看看效果。
| github_jupyter |
<a href="https://colab.research.google.com/github/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Segmentation/UNet_Industrial/notebooks/Colab_UNet_Industrial_TF_TFHub_inference_demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
# Copyright 2019 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
```
<img src="http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png" style="width: 90px; float: right;">
# UNet Industrial Inference Demo with TensorFlow Hub
## Overview
In this notebook, we will demo the process of inference with NVIDIA pre-trained UNet Industrial defects detection TensorFlow Hub modules.
NVIDIA pre-trained U-Net models for defect detection are adapted from the original version of the [U-Net model](https://arxiv.org/abs/1505.04597) which is
a convolutional auto-encoder for 2D image segmentation. U-Net was first introduced by
Olaf Ronneberger, Philip Fischer, and Thomas Brox in the paper:
[U-Net: Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/abs/1505.04597).
### Requirement
1. Before running this notebook, please set the Colab runtime environment to GPU via the menu *Runtime => Change runtime type => GPU*.
```
!nvidia-smi
```
The below code checks whether a Tensor-Core GPU is present. Tensor Cores can accelerate large matrix operations by performing mixed-precision matrix multiply and accumulate calculations in a single operation.
```
%tensorflow_version 1.x
import tensorflow as tf
print(tf.__version__) # This notebook runs on TensorFlow 1.x.
from tensorflow.python.client import device_lib
def check_tensor_core_gpu_present():
    """Return True if a local GPU with compute capability >= 7.0 is present.

    Tensor Cores are available on NVIDIA GPUs of compute capability 7.0
    (Volta) and newer. Fix: the original fell off the end and implicitly
    returned None when no such GPU was found; return False explicitly.
    """
    local_device_protos = device_lib.list_local_devices()
    for line in local_device_protos:
        if "compute capability" in str(line):
            # The capability is the trailing number in the device description.
            compute_capability = float(
                line.physical_device_desc.split("compute capability: ")[-1])
            if compute_capability >= 7.0:
                return True
    return False
print("Tensor Core GPU Present:", check_tensor_core_gpu_present())
tensor_core_gpu = check_tensor_core_gpu_present()
```
2. Next, we clone the NVIDIA Github UNet_Industrial repository and set up the workspace.
```
!git clone https://github.com/NVIDIA/DeepLearningExamples
%%bash
cd DeepLearningExamples
git checkout master
import os
WORKSPACE_DIR='/content/DeepLearningExamples/TensorFlow/Segmentation/UNet_Industrial/notebooks'
os.chdir(WORKSPACE_DIR)
print (os.getcwd())
!pip install tensorflow_hub==0.6.0
```
## Data download
We will first download some data for testing purposes, in particular, the [Weakly Supervised Learning for Industrial Optical Inspection (DAGM 2007)](https://resources.mpi-inf.mpg.de/conference/dagm/2007/prizes.html) dataset.
> The competition is inspired by problems from industrial image processing. In order to satisfy their customers' needs, companies have to guarantee the quality of their products, which can often be achieved only by inspection of the finished product. Automatic visual defect detection has the potential to reduce the cost of quality assurance significantly.
>
> The competitors have to design a stand-alone algorithm which is able to detect miscellaneous defects on various background textures.
>
> The particular challenge of this contest is that the algorithm must learn, without human intervention, to discern defects automatically from a weakly labeled (i.e., labels are not exact to the pixel level) training set, the exact characteristics of which are unknown at development time. During the competition, the programs have to be trained on new data without any human guidance.
**Source:** https://resources.mpi-inf.mpg.de/conference/dagm/2007/prizes.html
```
! ./download_and_preprocess_dagm2007_public.sh ./data
```
The final data directory should look like:
```
./data
raw_images
public
Class1
Class2
Class3
Class4
Class5
Class6
Class1_def
Class2_def
Class3_def
Class4_def
Class5_def
Class6_def
private
zip_files
```
Each data directory contains training images corresponding to one of the first 6 types of defects.
## Load UNet TF-Hub modules from Google Drive (Optional)
This step allows you to connect and load pretrained UNet TF-Hub modules from Google Drive (only if you have modules saved there - see this [notebook](https://colab.research.google.com/github/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Segmentation/UNet_Industrial/notebooks/Colab_UNet_Industrial_TF_TFHub_export.ipynb) on UNet TF-Hub module creation and export to Google Drive). Execute the below cell to authorize Colab to access your Google Drive content, then copy the saved TF-Hub modules to Colab.
```
from google.colab import drive
drive.mount('/content/gdrive')
!cp -r "/content/gdrive/My Drive/NVIDIA/Unet_modules" .
!ls Unet_modules
```
## Inference with UNet TF-Hub modules
Next, we will load one of the pretrained UNet TF-Hub modules (corresponding to one of the 10 classes of the DAGM 2007 dataset) and carry out inference.
In order to load TF-Hub modules, there are several options:
- Load from a local cache or directory
- Load from a remote repository
```
import tensorflow_hub as hub
# Loading from a local cache/directory
#module = hub.Module("Unet_modules/Class_1", trainable=False)
# Loading from a remote repository. The 10 NVIDIA UNet TF-Hub modules are available at
# https://tfhub.dev/nvidia/unet/industrial/class_1/1 (similarly for class 2, 3 ...) and
# https://developer.download.nvidia.com/compute/redist/Binary_Files/unet_tfhub_modules/class_{1..10}
module = hub.Module("https://tfhub.dev/nvidia/unet/industrial/class_1/1") # or class_2, class_3 etc...
#module = hub.Module("https://developer.download.nvidia.com/compute/redist/Binary_Files/unet_tfhub_modules/class_1/1.tar.gz") # or cls_as2, class_3 etc...
print(module.get_signature_names())
print(module.get_input_info_dict()) # When no signature is given, considers it as 'default'
print(module.get_output_info_dict())
```
As seen, this module expects inputs as grayscale images of size 512x512, and produces masks of the same size.
```
# Load a test image
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img = mpimg.imread('./data/raw_images/public/Class1_def/1.png')
plt.figure(figsize = (10,10));
plt.imshow(img, cmap='gray');
```
As we can see in this figure, there exists a defective area in the top left corner. We will now start a TF session and carry out inference on the normalized test image with the loaded TF-Hub module.
```
# Image preprocessing
img = np.expand_dims(img, axis=2)
img = np.expand_dims(img, axis=0)
img = (img-0.5)/0.5
output = module(img)
print(output.shape)
import tensorflow as tf
with tf.Session() as sess:
sess.run([tf.global_variables_initializer(), tf.tables_initializer()])
pred = sess.run(output)
# Print out model predicted mask
plt.figure(figsize = (10,10));
plt.imshow(np.squeeze(pred), cmap='gray');
```
As expected, the TF-Hub module points out the correct defective area in this image. Please feel free to try out other defective images for Class 1 within `./data/raw_images/public/Class1_def/`, or load the other UNet modules and test data for other classes from 1 to 10.
```
!ls ./data/raw_images/public/Class1_def/
```
# Conclusion
In this notebook, we have walked through the process of loading a pretrained UNet-Industrial TF-Hub module and carrying out inference on a test image.
## What's next
Now it's time to try the UNet-Industrial TF Hub modules on your own data.
```
```
| github_jupyter |
# The GRAD-GPAD framework 🗿
➡️ Visualization
---
The `gradgpad` framework provides *Python* tooling to create novel visualization graphs to ease the research pipeline and fair comparison on the face-PAD topic.
This tutorial is a detailed description of the main visualizations available in `gradgpad`. Additionally, thanks to the `ipywidgets` standard module, we can interact with the visualizations.
## Table of Contents 👩💻
1. Software Design ⚖️
2. Installation 💻
3. Import gradgpad
4. Histogram Plotter
5. DET Plotter
6. PAD Radar Plotter
7. Bias Percentiles Plotter
---
### <span style='color :blue' > 1. Software Design ⚖️ </span>
<img src="images/gradgpad_detailed_architecture_highlight_visualization.jpeg" align="left" width="600" height="800" />
### <span style='color :blue' > 2. Installation 💻 </span>
```
!pip install -U gradgpad
```
### <span style='color :blue' > 3. Import gradgpad </span>
```
import os
from ipywidgets import interact, Layout, FloatSlider, Dropdown
from gradgpad import (
PadRadarPaiPlotter,
PadRadarProtocolPlotter,
BiasPercentilePlotter,
HistogramPlotter,
DetPlotter,
WorkingPoint,
FineGrainedPaisProvider,
CombinedScenario,
ResultsProvider,
Approach,
Protocol,
GifCreator,
ScoresProvider,
Subset,
Metrics,
SplitByLabelMode,
Demographic,
GrainedPaiMode
)
# Load Results
results = {
"Quality": ResultsProvider.get(
Approach.QUALITY_RBF, protocol=Protocol.GRANDTEST
),
"Auxiliary": ResultsProvider.get(
Approach.AUXILIARY, protocol=Protocol.GRANDTEST
),
}
all_results = {
"Quality": ResultsProvider.all(
Approach.QUALITY_RBF
),
"Auxiliary": ResultsProvider.all(
Approach.AUXILIARY
),
}
# Load Scores
scores_auxiliary_devel = ScoresProvider.get(
Approach.AUXILIARY,
Protocol.GRANDTEST,
Subset.DEVEL
)
scores_auxiliary_test = ScoresProvider.get(
Approach.AUXILIARY,
Protocol.GRANDTEST,
Subset.TEST
)
output_path = "output"
os.makedirs(output_path, exist_ok=True)
```
---
### <span style='color :blue' > 4. Histogram Plotter </span>
Use `HistogramPlotter` to calculate a plot an histogram with several configurable parameters.
```
histogram_plotter = HistogramPlotter()
histogram_plotter.show(scores_auxiliary_test)
```
Save the histogram with `save` method
```
histogram_plotter.save(f"{output_path}/my_histogram.png", scores_auxiliary_test)
```
You can also configure:
* Normalize the histogram with the boolean parameter `normalize`.
* Change the title (using the `title` parameter)
* Add a limitation for y values with `y_max_value` [0-1]
```
histogram_plotter = HistogramPlotter(
normalize=True,
title="My Histogram",
y_max_value=1.4,
)
histogram_plotter.show(scores_auxiliary_test)
```
If you want to add a threshold, you can do it with `plot_vertical_line_on_value` parameter. Use `legend_vertical_line` to customize the legend.
In the following example, we calculate the EER threshold in `DEVEL` and show the Histogram in `TEST`
```
metrics = Metrics(scores_auxiliary_devel, scores_auxiliary_test)
eer_th = metrics.get_eer_th(Subset.DEVEL)
histogram_plotter = HistogramPlotter(
plot_vertical_line_on_value=eer_th,
legend_vertical_line="EER @ Devel"
)
histogram_plotter.show(scores_auxiliary_test)
```
###### <span style='color :green' > 4.1 Interactive Histogram Plotter </span>
You can also plot different subgroups to check how the scores are distributed.
Use the following interactive function to play with the `HistogramPlotter` class
```
@interact(layout=Layout(width='120%', height='80px'))
def calculate_histogram(
    split_mode=SplitByLabelMode.options(),
    show_attacks=[True, False],
    normalize=[True, False],
    approach=Approach.options(),
    protocol=Protocol.grandtest_options(),
    subset=Subset.options()
):
    """Interactively plot a score histogram for the chosen approach,
    protocol and subset, split by the selected label mode."""
    title = f"Histogram PAIs ({split_mode.value})"
    histogram_plotter = HistogramPlotter(
        title=title,
        split_by_label_mode=split_mode,
        normalize=normalize,
        # Hiding attacks excludes label -1 — presumably the attack label in
        # gradgpad's score labelling; confirm against the gradgpad docs.
        exclude_labels=[-1] if show_attacks is False else None,
    )
    scores = ScoresProvider.get(
        approach,
        protocol,
        subset
    )
    histogram_plotter.show(scores)
```
---
### <span style='color :blue' > 5. DET Plotter </span>
Use `DetPlotter` to plot DET curve (Detection Error Tradeoff) where error rates for binary classification system are represented plotting the false rejection rate vs. false acceptance rate.
```
det_plotter = DetPlotter()
det_plotter.show(scores_auxiliary_test)
```
Save calculated DET curve with `save` method.
```
det_plotter.save(f"{output_path}/my_det.png", scores_auxiliary_test)
```
You can also configure:
* Change the title (using the `title` parameter)
* You can also plot the performance of different subgroups with `split_by_label_mode`.
```
det_plotter = DetPlotter(
title="Demographic Sex",
split_by_label_mode=SplitByLabelMode.SEX
)
det_plotter.show(scores_auxiliary_test)
```
###### <span style='color :green' > 5.1 Interactive DET Plotter </span>
Use the following interactive function to play with the `Det` class
```
@interact(layout=Layout(width='120%', height='80px'))
def calculate_det_curve(
    split_mode=SplitByLabelMode.options_for_curves(),
):
    """Interactively draw DET curves for the Auxiliary grandtest test scores,
    split into per-group curves by the selected label mode."""
    title = f"DET PAIs ({split_mode.value})"
    det_plotter = DetPlotter(
        title=title,
        split_by_label_mode=split_mode,
    )
    det_plotter.show(scores_auxiliary_test)
```
---
### <span style='color :blue' > 6. PAD Radar Plotter </span>
The “PAD-radar” shows us information related to the model’s generalization and PAI behaviour.
* *PAD-Radar by PAI*: Use `PadRadarPaiPlotter` to calculate a radar graph where each vertex is the performance (APCER for a given BPCER working point) of an specific PAI.
* *PAD-Radar by protocols*: Use `PadRadarProtocolPlotter` to calculate a radar graph where each vertex is the performance (APCER for a given BPCER working point) of a specific subprotocol (e.g. cross-dataset -> Replay-Mobile).
##### 6.1 PAD Radar By PAI
```
pad_radar_pai_plotter = PadRadarPaiPlotter(
title="My First PAD-Radar",
working_point=WorkingPoint.BPCER_10,
combined_scenario=CombinedScenario.PAS_II_AND_III
)
pad_radar_pai_plotter.show(results)
```
###### <span style='color :green' > 6.1.1 Interactive PAD radar by PAI Plotter </span>
Let's do it this interactive. Select the working point and the PAS (Presentation Attack Scenario)
```
@interact
def calculate_pad_radar_by_pai(
    wp=WorkingPoint.options(),
    scenario=CombinedScenario.options()
):
    """Interactively draw a per-PAI PAD-Radar for the preloaded `results`
    at the chosen working point and combined attack scenario."""
    # Turn the enum repr into a readable title, e.g. "APCER @ BPCER 10%".
    title = f"PAD-Radar | APCER {wp}%".replace("WorkingPoint.", "@ ").replace("_", " ")
    pad_radar_pai_plotter = PadRadarPaiPlotter(
        title=title,
        working_point=wp,
        combined_scenario=scenario
    )
    pad_radar_pai_plotter.show(results)
```
##### 6.2. PAD Radar By Protocol
```
pad_radar_protocol_plotter = PadRadarProtocolPlotter(
title="PAD-Radar (Cross-Database)",
working_point=WorkingPoint.BPCER_10,
grained_pai_mode=GrainedPaiMode.FINE,
protocol=Protocol.CROSS_DATASET
)
pad_radar_protocol_plotter.show(all_results)
```
###### <span style='color :green' > 6.2.1 Interactive PAD radar by Protocol Plotter </span>
```
@interact
def calculate_pad_radar_by_prtocol(
    wp=WorkingPoint.options(),
    grained_pai_mode=GrainedPaiMode.options(),
    protocol=Protocol.generalization_options()
):
    """Interactively draw a per-subprotocol PAD-Radar over `all_results`.

    NOTE(review): the function name has a typo ("prtocol" -> "protocol");
    kept as-is to avoid changing the notebook's public name.
    """
    # Turn the enum repr into a readable title, e.g. "APCER @ BPCER 10%".
    title = f"PAD-Radar {protocol.value} | APCER {wp}%".replace("WorkingPoint.", "@ ").replace("_", " ")
    pad_radar_protocol_plotter = PadRadarProtocolPlotter(
        title=title,
        working_point=wp,
        grained_pai_mode=grained_pai_mode,
        protocol=protocol
    )
    pad_radar_protocol_plotter.show(all_results)
```
##### 6.3. Create a dynamic PAD radar image (GIF)
Save several PAD radar (e.g different working point).
```
saved_filenames = []
for working_point in WorkingPoint.options():
title = f"PAD-Radar | APCER {working_point}%".replace("WorkingPoint.", "@ ").replace("_", " ")
output_filename = f"{output_path}/pad_radar_apcer_{working_point}.png".lower()
pad_radar_pai_plotter = PadRadarPaiPlotter(
title=title,
working_point=working_point,
combined_scenario=CombinedScenario.PAS_I_AND_II
)
pad_radar_pai_plotter.save(output_filename, results)
saved_filenames.append(output_filename)
```
From saved files, we can create a GIF with the following code:
```
from IPython.display import Image
output_filename = f"{output_path}/pad_radar.gif"
GifCreator.execute(output_filename, saved_filenames)
display(Image(output_filename))
```
---
### <span style='color :blue' > 7. Bias Percentile Plotter</span>
The “Bias-Percentile” is a proposed visualization to observe the usability of a system considering different Demographic groups. To represent genuine and attacks scores in a percentile graph, use `BiasPercentilePlotter`.
```
bias_percentile_plotter = BiasPercentilePlotter(
title="Bias Percentile",
demographic=Demographic.SEX,
)
bias_percentile_plotter.show(scores_auxiliary_test)
```
Use `working_point` tuple to plot a working point region. Imagine you want to compare bias percentiles over a given acceptable working point for a specific use case.
```
bias_percentile_plotter = BiasPercentilePlotter(
title="Bias Percentile",
demographic=Demographic.SEX,
working_point=(0.4, 0.6)
)
bias_percentile_plotter.show(scores_auxiliary_test)
```
###### <span style='color :green' > 7.1 Interactive Bias Percentile Plotter </span>
Play with the following interactive function:
```
@interact(layout=Layout(width='150%', height='80px'))
def calculate_percentile(
    demographic=Demographic.options(),
    lower_wp=FloatSlider(min=0.0, max=1.0, step=0.05, value=0.4),
    higher_wp=FloatSlider(min=0.0, max=1.0, step=0.05, value=0.55),
    approach=Dropdown(
        options=Approach.options(),
        value=Approach.AUXILIARY,
    ),
    protocol=Dropdown(
        options=Protocol.options(),
        value=Protocol.GRANDTEST_SEX_50_50,
    ),
    subset=Dropdown(
        options=Subset.options(),
        value=Subset.TEST,
    )
):
    """Interactively draw a bias-percentile plot for the chosen demographic,
    shading the working-point region [lower_wp, higher_wp]."""
    title = f"Percentile ({demographic.value})"
    bias_percentile_plotter = BiasPercentilePlotter(
        title=title,
        demographic=demographic,
        working_point=(lower_wp, higher_wp)
    )
    scores = ScoresProvider.get(
        approach,
        protocol,
        subset
    )
    bias_percentile_plotter.show(scores)
```
| github_jupyter |
##### Copyright 2021 The TensorFlow Cloud Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# TensorFlow Cloud
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/cloud/tutorials/overview.ipynb"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/cloud/blob/master/g3doc/tutorials/overview.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/cloud/blob/master/g3doc/tutorials/overview.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/cloud/tutorials/overview.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
<td>
<a href="https://kaggle.com/kernels/welcome?src=https://github.com/tensorflow/cloud/blob/master/g3doc/tutorials/overview.ipynb" target="blank">
<img width="90" src="https://www.kaggle.com/static/images/site-logo.png" alt="Kaggle logo">Run in Kaggle
</a>
</td>
</table>
TensorFlow Cloud is a library that makes it easier to do training and hyperparameter tuning of Keras models on Google Cloud.
Using TensorFlow Cloud's `run` API, you can send your model code directly to your Google Cloud account, and use Google Cloud compute resources without needing to login and interact with the Cloud UI (once you have set up your project in the console).
This means that you can use your Google Cloud compute resources directly from inside a Python notebook: a notebook just like this one! You can also send models to Google Cloud from a plain `.py` Python script.
## Simple example
This is a simple introductory example to demonstrate how to train a model remotely using [TensorFlow Cloud](https://tensorflow.org/cloud) and Google Cloud.
You can just read through it to get an idea of how this works, or you can run the notebook in Google Colab. Running the notebook requires connecting to a Google Cloud account and entering your credentials and project ID. See [Setting Up and Connecting To Your Google Cloud Account](https://www.tensorflow.org/cloud/tutorials/google_cloud_project_setup_instructions) if you don't have an account yet or are not sure how to set up a project in the console.
## Import required modules
```
import tensorflow as tf
tf.version.VERSION
! pip install -q tensorflow-cloud
import tensorflow_cloud as tfc
print(tfc.__version__)
import sys
```
## Project Configurations
Set project parameters. If you don't know what your `GCP_PROJECT_ID` or `GCS_BUCKET` should be, see [Setting Up and Connecting To Your Google Cloud Account](google_cloud_project_setup_instructions.ipynb).
The `JOB_NAME` is optional, and you can set it to any string. If you are doing multiple training experiments (for example, as part of a larger project), you may want to give each of them a unique `JOB_NAME`.
```
# Set Google Cloud Specific parameters
# TODO: Please set GCP_PROJECT_ID to your own Google Cloud project ID.
GCP_PROJECT_ID = 'YOUR_PROJECT_ID' #@param {type:"string"}
# TODO: set GCS_BUCKET to your own Google Cloud Storage (GCS) bucket.
GCS_BUCKET = 'YOUR_GCS_BUCKET_NAME' #@param {type:"string"}
# DO NOT CHANGE: Currently only the 'us-central1' region is supported.
REGION = 'us-central1'
# OPTIONAL: You can change the job name to any string.
JOB_NAME = 'mnist' #@param {type:"string"}
# `os` was never imported in this notebook's import cell, so the path joins
# below raised NameError; import it here to make this cell self-contained.
import os
# Setting location where training logs and checkpoints will be stored
GCS_BASE_PATH = f'gs://{GCS_BUCKET}/{JOB_NAME}'
TENSORBOARD_LOGS_DIR = os.path.join(GCS_BASE_PATH, "logs")
MODEL_CHECKPOINT_DIR = os.path.join(GCS_BASE_PATH, "checkpoints")
SAVED_MODEL_DIR = os.path.join(GCS_BASE_PATH, "saved_model")
```
## Authenticating the notebook to use your Google Cloud Project
This code authenticates the notebook, checking your valid Google Cloud credentials and identity. It is inside the `if not tfc.remote()` block to ensure that it is only run in the notebook, and will not be run when the notebook code is sent to Google Cloud.
Note: For Kaggle Notebooks click on "Add-ons"->"Google Cloud SDK" before running the cell below.
```
# Using tfc.remote() to ensure this code only runs in notebook
if not tfc.remote():
# Authentication for Kaggle Notebooks
if "kaggle_secrets" in sys.modules:
from kaggle_secrets import UserSecretsClient
UserSecretsClient().set_gcloud_credentials(project=GCP_PROJECT_ID)
# Authentication for Colab Notebooks
if "google.colab" in sys.modules:
from google.colab import auth
auth.authenticate_user()
os.environ["GOOGLE_CLOUD_PROJECT"] = GCP_PROJECT_ID
```
## Model and data setup
From here we are following the basic procedure for setting up a simple Keras model to run classification on the MNIST dataset.
### Load and split data
Read raw data and split to train and test data sets.
```
(x_train, y_train), (_, _) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape((60000, 28 * 28))
x_train = x_train.astype('float32') / 255
```
### Create a model and prepare for training
Create a simple model and set up a few callbacks for it.
```
from tensorflow.keras import layers
model = tf.keras.Sequential([
tf.keras.layers.Dense(512, activation='relu', input_shape=(28 * 28,)),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy',
optimizer=tf.keras.optimizers.Adam(),
metrics=['accuracy'])
```
### Quick validation training
We'll train the model for one (1) epoch just to make sure everything is set up correctly, and we'll wrap that training command in `if not` `tfc.remote`, so that it only happens here in the runtime environment in which you are reading this, not when it is sent to Google Cloud.
```
if not tfc.remote():
# Run the training for 1 epoch and a small subset of the data to validate setup
model.fit(x=x_train[:100], y=y_train[:100], validation_split=0.2, epochs=1)
```
## Prepare for remote training
The code below will only run when the notebook code is sent to Google Cloud, not inside the runtime in which you are reading this.
First, we set up callbacks which will:
* Create logs for [TensorBoard](https://www.tensorflow.org/tensorboard).
* Create [checkpoints](/guide/checkpoint) and save them to the checkpoints directory specified above.
* Stop model training if loss is not improving sufficiently.
Then we call `model.fit` and `model.save`, which (when this code is running on Google Cloud) actually run the full training (100 epochs) and then save the trained model in the GCS Bucket and directory defined above.
```
if tfc.remote():
# Configure Tensorboard logs
callbacks=[
tf.keras.callbacks.TensorBoard(log_dir=TENSORBOARD_LOGS_DIR),
tf.keras.callbacks.ModelCheckpoint(
MODEL_CHECKPOINT_DIR,
save_best_only=True),
tf.keras.callbacks.EarlyStopping(
monitor='loss',
min_delta =0.001,
patience=3)]
model.fit(x=x_train, y=y_train, epochs=100,
validation_split=0.2, callbacks=callbacks)
model.save(SAVED_MODEL_DIR)
```
## Start the remote training
TensorFlow Cloud takes all the code from its local execution environment (this notebook), wraps it up, and sends it to Google Cloud for execution. (That's why the `if` and `if not` `tfc.remote` wrappers are important.)
This step will prepare your code from this notebook for remote execution and then start a remote training job on Google Cloud Platform to train the model.
First we add the `tensorflow-cloud` Python package to a `requirements.txt` file, which will be sent along with the code in this notebook. You can add other packages here as needed.
Then a GPU and a CPU image are specified. You only need to specify one or the other; the GPU is used in the code that follows.
Finally, the heart of TensorFlow cloud: the call to `tfc.run`. When this is executed inside this notebook, all the code from this notebook, and the rest of the files in this directory, will be packaged and sent to Google Cloud for execution. The parameters on the `run` method specify the details of the execution environment and the distribution strategy (if any) to be used.
Once the job is submitted you can go to the next step to monitor the jobs progress via Tensorboard.
```
# If you are using a custom image you can install modules via requirements
# txt file.
with open('requirements.txt','w') as f:
f.write('tensorflow-cloud\n')
# Optional: Some recommended base images. If you provide none the system
# will choose one for you.
# BUG FIX: the GPU/CPU image names were swapped in the original; each
# constant now points at the matching Deep Learning container image.
TF_GPU_IMAGE = "gcr.io/deeplearning-platform-release/tf2-gpu.2-5"
TF_CPU_IMAGE = "gcr.io/deeplearning-platform-release/tf2-cpu.2-5"
# Submit a single node training job using GPU.
tfc.run(
distribution_strategy='auto',
requirements_txt='requirements.txt',
docker_config=tfc.DockerConfig(
parent_image=TF_GPU_IMAGE,
image_build_bucket=GCS_BUCKET
),
chief_config=tfc.COMMON_MACHINE_CONFIGS['K80_1X'],
job_labels={'job': JOB_NAME}
)
```
## Training Results
### Reconnect your Colab instance
Most remote training jobs are long running. If you are using Colab, it may time out before the training results are available.
In that case, **rerun the following sections in order** to reconnect and configure your Colab instance to access the training results.
1. Import required modules
2. Project Configurations
3. Authenticating the notebook to use your Google Cloud Project
**DO NOT** rerun the rest of the code.
### Load Tensorboard
While the training is in progress you can use Tensorboard to view the results. Note the results will show only after your training has started. This may take a few minutes.
```
%load_ext tensorboard
%tensorboard --logdir $TENSORBOARD_LOGS_DIR
```
## Load your trained model
Once training is complete, you can retrieve your model from the GCS Bucket you specified above.
```
trained_model = tf.keras.models.load_model(SAVED_MODEL_DIR)
trained_model.summary()
```
| github_jupyter |
# Confidence Interval:
In this notebook you will find:
- Get confidence intervals for predicted survival curves using XGBSE estimators;
- How to use XGBSEBootstrapEstimator, a meta estimator for bagging;
- A nice function to help us plot survival curves.
```
import matplotlib.pyplot as plt
plt.style.use('bmh')
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('retina')
# to easily plot confidence intervals
def plot_ci(mean, upper_ci, lower_ci, i=42, title=r'Probability of survival $P(T \geq t)$'):
    """Plot one sample's mean survival curve with a shaded confidence band.

    Parameters
    ----------
    mean, upper_ci, lower_ci :
        Survival curves with time bins as columns and one row per sample
        (presumably pandas DataFrames — `.columns`/`.iloc` are used).
    i : int, optional
        Row (sample index) to plot.
    title : str, optional
        Figure title. Made a raw string: ``\g`` is not a valid escape
        sequence and triggers a SyntaxWarning on modern Python.
    """
    # plotting mean and confidence intervals
    plt.figure(figsize=(12, 4), dpi=120)
    plt.plot(mean.columns, mean.iloc[i])
    plt.fill_between(mean.columns, lower_ci.iloc[i], upper_ci.iloc[i], alpha=0.2)
    plt.title(title)
    plt.xlabel('Time [days]')
    plt.ylabel('Probability')
    plt.tight_layout()
```
## Metrabic
We will be using the Molecular Taxonomy of Breast Cancer International Consortium (METABRIC) dataset from [pycox](https://github.com/havakv/pycox#datasets) as base for this example.
```
from xgbse.converters import convert_to_structured
from pycox.datasets import metabric
import numpy as np
# getting data
df = metabric.read_df()
df.head()
```
## Split and Time Bins
Split the data in train and test, using sklearn API. We also setup the TIME_BINS array, which will be used to fit the survival curve.
```
from xgbse.converters import convert_to_structured
from sklearn.model_selection import train_test_split
# splitting to X, T, E format
X = df.drop(['duration', 'event'], axis=1)
T = df['duration']
E = df['event']
y = convert_to_structured(T, E)
# splitting between train, and validation
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1/3, random_state = 0)
TIME_BINS = np.arange(15, 315, 15)
TIME_BINS
```
## Calculating confidence intervals
We will be using the XGBSEKaplanTree estimator to fit the model and predict a survival curve for each point in our test data, and via <i>return_ci</i> parameter we will get upper and lower bounds for the confidence interval.
```
from xgbse import XGBSEKaplanTree, XGBSEBootstrapEstimator
from xgbse.metrics import concordance_index, approx_brier_score
# xgboost parameters to fit our model
PARAMS_TREE = {
'objective': 'survival:cox',
'eval_metric': 'cox-nloglik',
'tree_method': 'hist',
'max_depth': 10,
'booster':'dart',
'subsample': 1.0,
'min_child_weight': 50,
'colsample_bynode': 1.0
}
```
### Numerical Form
The KaplanTree and KaplanNeighbors models support estimation of confidence intervals via the Exponential Greenwood formula.
```
%%time
# fitting xgbse model
xgbse_model = XGBSEKaplanTree(PARAMS_TREE)
xgbse_model.fit(X_train, y_train, time_bins=TIME_BINS)
# predicting
mean, upper_ci, lower_ci = xgbse_model.predict(X_test, return_ci=True)
# print metrics
print(f"C-index: {concordance_index(y_test, mean)}")
print(f"Avg. Brier Score: {approx_brier_score(y_test, mean)}")
# plotting CIs
plot_ci(mean, upper_ci, lower_ci)
```
### Non-parametric Form
We can also use the XGBSEBootstrapEstimator to wrap any XGBSE model and get confidence intervals via bagging, which also slightly increases our performance at the cost of computation time.
```
%%time
# base model as XGBSEKaplanTree
base_model = XGBSEKaplanTree(PARAMS_TREE)
# bootstrap meta estimator
bootstrap_estimator = XGBSEBootstrapEstimator(base_model, n_estimators=100)
# fitting the meta estimator
bootstrap_estimator.fit(X_train, y_train, time_bins=TIME_BINS)
# predicting
mean, upper_ci, lower_ci = bootstrap_estimator.predict(X_test, return_ci=True)
# print metrics
print(f"C-index: {concordance_index(y_test, mean)}")
print(f"Avg. Brier Score: {approx_brier_score(y_test, mean)}")
# plotting CIs
plot_ci(mean, upper_ci, lower_ci)
```
| github_jupyter |
# Downloading deforestation map data from the Global Forest Change 2000-2018 dataset
This will test downloads of map data from the [Global Forest Change 2000-2018](https://earthenginepartners.appspot.com/science-2013-global-forest/download_v1.6.html) dataset. This dataset includes various layers of information derived from satellite images, mainly from Landsat, related to forests cover loss and gain over the past two decades. A complete visualization of this data can be accessed [here](https://earthenginepartners.appspot.com/science-2013-global-forest).
```
import numpy as np
import yaml
import requests
import urllib
import imageio
from pathlib import Path
from skimage.transform import resize
import matplotlib.pyplot as plt
import PIL
from PIL import ImageEnhance
%matplotlib inline
```
The data has been divided into 10x10 degree granules encompassing the entire globe. The resolution is of 1 arc-second per pixel (approximately 30 meters per pixel at the equator).
Each granule has been divided into 6 layers with the following information in each (information from [University of Maryland](https://earthenginepartners.appspot.com/science-2013-global-forest/download_v1.6.html)):
* `treecover2000`: Tree canopy cover for year 2000, defined as canopy closure for all vegetation taller than 5m in height. Encoded as a percentage per output grid cell, in the range 0–100.
* `gain`: Global forest cover gain 2000–2012. Forest gain during the period 2000–2012, defined as the inverse of loss, or a non-forest to forest change entirely within the study period. Encoded as either 1 (gain) or 0 (no gain).
* `lossyear`: Year of gross forest cover loss event. Forest loss during the period 2000–2018, defined as a stand-replacement disturbance, or a change from a forest to non-forest state. Encoded as either 0 (no loss) or else a value in the range 1–17, representing loss detected primarily in the year 2001–2018, respectively.
* `datamask`: Data mask Three values representing areas of no data (0), mapped land surface (1), and permanent water bodies (2).
* `first`: Circa year 2000 Landsat 7 cloud-free image composite. Reference multispectral imagery from the first available year, typically 2000. If no cloud-free observations were available for year 2000, imagery was taken from the closest year with cloud-free data, within the range 1999–2012.
* `last`: Circa year 2018 Landsat cloud-free image composite. Reference multispectral imagery from the last available year, typically 2018. If no cloud-free observations were available for year 2018, imagery was taken from the closest year with cloud-free data, within the range 2010–2015.
We have a YAML file with the download links for each layer and granule. We can load it as a dict of lists of strings.
```
data_folder = Path("../data/")
with open(data_folder/"hansen_urls.yaml") as yaml_file:
urls = yaml.load(yaml_file, Loader=yaml.FullLoader)
layer_names = list(urls.keys())
```
Before anything else, it would be useful to have a function that translates a pair of latitude/longitude coordinates into the string code for the corresponding granule that contains said point. The granules are identified by the value of their top-left (NW) corner, in degrees. They do not include the poles, and only go from 60 degrees South to 80 degrees North.
```
def coords_to_granule(coords):
    """Translate a (lat, lon) pair into its Hansen granule code string.

    Granules are 10x10 degree tiles identified by their top-left (NW)
    corner, e.g. ``(40.4, -3.7) -> '50N_010W'``.

    Parameters
    ----------
    coords : tuple of float
        (latitude, longitude) in degrees.

    Returns
    -------
    str
        Granule identifier such as ``'50N_010W'``.
    """
    lat, lon = coords
    # BUG FIX: the original assert messages were swapped (lat vs. lon).
    assert -90 <= lat <= 90, "Latitude out of valid range"
    assert -180 <= lon < 180, "Longitude out of valid range"
    # The granule latitude is the tile's northern edge: round up to 10 deg.
    granule_lat = int(np.ceil(lat / 10) * 10)
    str_lat = str(abs(granule_lat)).zfill(2) + ("N" if granule_lat >= 0 else "S")
    # The granule longitude is the tile's western edge: round down to 10 deg.
    granule_lon = int(np.floor(lon / 10) * 10)
    str_lon = str(abs(granule_lon)).zfill(3) + ("E" if granule_lon >= 0 else "W")
    return str_lat + "_" + str_lon
coords_to_granule((40.416775, -3.703790)) # Madrid!
```
We can also easily have a function that takes a pair of coordinates and a layer name and retrieves the appropriate url from the dict we created before.
```
def url_for_granule(coords, layer, url_dict=urls):
    """Return the download URL of the granule containing *coords* for *layer*.

    Parameters
    ----------
    coords : tuple of float
        (latitude, longitude) in degrees.
    layer : str
        Layer name; must be a key of *url_dict*.
    url_dict : dict of str -> list of str, optional
        Mapping from layer name to that layer's granule URLs.

    Raises
    ------
    Exception
        If no URL in the layer's list matches the granule code.
    """
    # NOTE: this bound checks the latitude (coords[0]); the original message
    # said "Longitude". Granules only cover 60S (exclusive) to 80N.
    assert -60 < coords[0] <= 80, "Latitude out of available granule range"
    assert layer in url_dict, "Requested layer is not in the provided URL dict"
    # The granule code is loop-invariant: compute it once. This also avoids a
    # NameError in the message below when the layer's URL list is empty.
    granule_str = coords_to_granule(coords)
    for url in url_dict[layer]:
        if granule_str in url:
            return url
    raise Exception(f"URL not found for granule {granule_str} corresponding to requested coordinates {coords}.")
url_for_granule((40.416775, -3.703790), "gain")
```
Next, we will create a function to download the TIFF file from one of these URLS.
```
def download_granule(coords, layer, destination, url_dict=urls, redownload=False):
    """Download the TIFF granule covering *coords* for *layer* into *destination*.

    The download is skipped when the target file already exists, unless
    *redownload* is True.
    """
    # BUG FIX: the original passed the module-level `urls` here, silently
    # ignoring the caller-supplied `url_dict`; forward the parameter instead.
    url = url_for_granule(coords, layer, url_dict=url_dict)
    filename = Path(urllib.parse.urlparse(url).path).name
    destination = Path(destination)
    # If file already exists, do not download again unless specified
    if not (destination / filename).is_file() or redownload:
        request = requests.get(url, allow_redirects=True)
        with open(destination / filename, 'wb') as file:
            file.write(request.content)
# If the file has not been downloaded already, this will take a while!
download_granule((40.416775, -3.703790), layer="treecover2000", destination=data_folder/"hansen/")
```
Great! Now we can easily retrieve data from whatever area of the globe we want. Let's take a look at the granule we just downloaded. The image is so big that we are going to have to increase the limit of what PIL is allowed to try to open.
```
PIL.Image.MAX_IMAGE_PIXELS = 2000000000
granule = PIL.Image.open(data_folder/"hansen/Hansen_GFC-2018-v1.6_treecover2000_50N_010W.tif")
granule.size
```
Wow, that is a 40k x 40k pixel image, so 1600 million pixels overall. We might or might not end up working with the full data, but let's reduce its size for visualization purposes now.
```
# Downsample with Pillow (the original comment mentioned skimage, but PIL's
# Image.resize is what is actually used). Image.ANTIALIAS was deprecated and
# removed in Pillow 10; LANCZOS is the same filter under its current name.
granule_resized = granule.resize((400, 400), PIL.Image.LANCZOS)
```
And finally we can take a look at the image!
```
granule_resized
```
It represents the land covered by trees in the year 2000 in the northern part of Spain. The image is grey and quite dim. This is because it has a single channel, in which values from 0 to 100 representing percentage of land covered by trees in that section are being interpreted as a brightness value from 0 to 255. We rescale the values by 255/100 in order to improve the visualization.
```
ImageEnhance.Brightness(granule_resized).enhance(255/100)
def download_list(coords_list, layer, destination, url_dict=urls):
    """Download the granules for every coordinate pair in *coords_list*.

    Failures for individual granules are reported and skipped so that one
    bad download does not abort the whole queue.
    """
    for coords in coords_list:
        print(f"Downloading data for layer {layer} at coordinates {coords}...")
        try:
            # BUG FIX: forward `url_dict` instead of the module-level `urls`.
            download_granule(coords, layer=layer, destination=destination,
                             url_dict=url_dict, redownload=False)
            print(f"Complete!")
        except Exception as exc:
            # A bare `except:` also swallows KeyboardInterrupt/SystemExit and
            # hides the cause; catch Exception and report what went wrong.
            print(f"Download failed! D: ({exc})")
    print(f"Finished download queue :D")
all_granules = [(lat, lon) for lon in range(-180, 180, 10) for lat in range(-50, 90, 10)]
print(len(all_granules))
%%time
destination = "/Users/miguel/Documents/hansen"
layer = "treecover2000"
download_list(all_granules[10:100], layer=layer, destination=destination)
```
| github_jupyter |
# Asymptotic solutions in long-times
Projectile motion in a non-homogenous potential field with drag is described by the equation
$$y_{\tau \tau} + \beta \epsilon y_{\tau} + \frac{1}{(1 + \epsilon y)^2} = 0,$$
with $y(0) = \epsilon$ and $y_{\tau}(0)=1$, and where $\epsilon \ll 1$ is expected.
```
import sympy as sym
from sympy import init_printing
init_printing(order='rev-lex')
y, eps, a, b, tau, t, beta, gamma = sym.symbols('y, epsilon, a, b, tau, t, beta, gamma')
y0 = sym.Function('y0')(t)
y1 = sym.Function('y1')(t)
y2 = sym.Function('y2')(t)
y3 = sym.Function('y3')(t)
y4 = sym.Function('y4')(t)
y = sym.Eq(y0 + eps*y1 + eps**2*y2 + eps**3*y3 + eps**4*y4) # naive expansion
class f(sym.Function):
    """Symbolic residual of the projectile ODE applied to the naive expansion.

    Given the sympy ``Eq`` built above (whose lhs is the series in ``eps``),
    ``eval`` returns ``y'' + beta*eps*(y')**2 + 1/(1 + eps*y)**2`` using the
    module-level symbols ``t``, ``beta`` and ``eps``.

    NOTE(review): the markdown above states a *linear* drag term
    ``beta*eps*y_tau``, while this code squares the derivative
    (``y.lhs.diff(t)**2``) — confirm which form is intended.
    """
    @classmethod
    def eval(cls, y):
        # Differentiate the expansion's lhs w.r.t. the module-level symbol t.
        return y.lhs.diff(t,t) + beta*eps*y.lhs.diff(t)**2 + 1/(1 + eps*y.lhs)**2
        #return y.lhs.diff(tau, tau) + eps/y.lhs**2
y
the_series = sym.series(f(y), eps, x0=0, n=5)
by_order = sym.collect(the_series, eps, evaluate=False)
the_series
```
### $\mathcal{O} \left( 1 \right) \mbox{Solution}$
```
sym.Eq(by_order[1].removeO())
eqn = sym.Eq(by_order[1].removeO()) #1 + y0(tau).diff(tau, tau))
soln0 = sym.dsolve(eqn, y0)
constants = sym.solve([soln0.rhs.subs(t,0) - 0, \
soln0.rhs.diff(t).subs(t,0) - 1])
C1, C2 = sym.symbols('C1 C2')
soln0 = soln0.subs(constants)
soln0
```
### $\mathcal{O} \left( \epsilon \right) \mbox{Solution}$
```
by_order[eps]
try:
eqn = sym.Eq(by_order[eps].replace(y0, soln0.rhs))
except NameError:
eqn = sym.Eq(by_order[eps])
soln1 = sym.dsolve(eqn, y1)
constants = sym.solve([soln1.rhs.subs(t,0) - 0, \
soln1.rhs.diff(t,1).subs(t,0) - 0])
C1, C2 = sym.symbols('C1 C2')
soln1 = soln1.subs(constants)
soln1
```
### $\mathcal{O} \left( \epsilon^2 \right) \mbox{Solution}$
```
by_order[eps**2]
try:
eqn = sym.Eq(by_order[eps**2].replace(y1, soln1.rhs).replace(y0, soln0.rhs))
except NameError:
eqn = sym.Eq(by_order[eps**2].replace(y1, soln1.rhs))
soln2 = sym.dsolve(eqn, y2)
constants = sym.solve([soln2.rhs.subs(t,0) - 0, \
soln2.rhs.diff(t,1).subs(t,0) - 0])
C1, C2 = sym.symbols('C1 C2')
soln2 = soln2.subs(constants)
sym.factor(soln2)
```
### $\mathcal{O} \left( \epsilon^3 \right) \mbox{Solution}$
```
by_order[eps**3]
try:
eqn = sym.Eq(by_order[eps**3].replace(y2, soln2.rhs).replace(y1, soln1.rhs).replace(y0, soln0.rhs))
except NameError:
eqn = sym.Eq(by_order[eps**3].replace(y2, soln2.rhs))
soln3 = sym.dsolve(eqn, y3)
constants = sym.solve([soln3.rhs.subs(t,0) - 0, \
soln3.rhs.diff(t,1).subs(t,0) - 0])
C1, C2 = sym.symbols('C1 C2')
soln3 = soln3.subs(constants)
sym.factor(soln3)
```
### $\mathcal{O} \left( \epsilon^4 \right) \mbox{Solution}$
```
by_order[eps**4]
try:
eqn = sym.Eq(by_order[eps**4].replace(y3, soln3.rhs).replace(
y2, soln2.rhs).replace(y1, soln1.rhs).replace(y0, soln0.rhs))
except NameError:
eqn = sym.Eq(by_order[eps**4].replace(y3, soln3.rhs))
soln4 = sym.dsolve(eqn, y4)
constants = sym.solve([soln4.rhs.subs(t,0) - 0, \
soln4.rhs.diff(t,1).subs(t,0) - 0])
C1, C2 = sym.symbols('C1 C2')
soln4 = soln4.subs(constants)
sym.factor(soln4)
```
### $\mbox{Composite Solution}$
```
y_comp = sym.symbols('y_{comp}', cls=sym.Function)
try:
y_comp = sym.Eq(y_comp, soln0.rhs + eps*soln1.rhs + eps**2*soln2.rhs + eps**3*soln3.rhs + eps**4*soln4.rhs) # + eps**2*soln2.rhs)
except NameError:
y_comp = sym.Eq(y_comp, eps*soln1.rhs + eps**2*soln2.rhs + eps**3*soln3.rhs + eps**4*soln4.rhs) # + eps**2*soln2.rhs)
#print(sym.latex(y_comp))
#print(str(y_comp.rhs.subs(beta,6.5E-4)))
print(sym.latex(y_comp.rhs.subs(beta,6.5E-4).subs(t, 1) * 1.37))
#sym.latex(y_comp.rhs.subs(beta, 6.5E-4))
y_comp.rhs
```
### $\mbox{The Trajectory}$
```
def savefig(filename, pics):
    """Save the current matplotlib figure as a .pgf when *pics* is truthy.

    Parameters
    ----------
    filename : str
        Base name (without extension) under ``../doc/figures/``.
    pics : bool
        Master switch; when falsy the call is a no-op.
    """
    # `if pics == True:` replaced with an idiomatic truthiness test; the
    # redundant `else: pass` branch is dropped.
    if pics:
        plt.savefig('../doc/figures/{}.pgf'.format(filename), bbox_inches='tight', dpi=400)
pics = True
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import scipy as sp
%config InlineBackend.figure_format = 'retina'
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rcParams['figure.dpi'] = 300
matplotlib.rcParams.update(
{ 'text.color': 'k',
'xtick.color': 'k',
'ytick.color': 'k',
'axes.labelcolor': 'k'
})
plt.rc('font', size=14)
eps_val = [.1, .5, 1.][::-1]
linestyle = ['rs--', 'bo-', 'cv-.', 'k+:', 'm']
# BUG FIX: `scipy.arange` was deprecated and removed from SciPy; it was only
# ever a re-export of numpy's arange, so call numpy directly.
tt = np.arange(0, 1, 0.001)
bet = 6.5E-4
plt.figure(figsize=(6, 4))#, dpi=100)
for keys, vals in enumerate(eps_val):
y_compP = sym.lambdify(t, y_comp.rhs.subs(eps, vals).subs(beta, bet), 'numpy')
plt.plot(tt, y_compP(tt), linestyle[keys],label=r'$\phi \mathbf{E}\mbox{u}$'+ ' = {}'.format(vals).rstrip('0').rstrip('.')
, markevery=100)
plt.ylim(ymin=0., ymax=0.7)
plt.xlim(xmax=1)
plt.ylabel(r'$y^*$')
plt.xlabel(r'$t^*$')
leg = plt.legend(title = r'$\mathbf{D}\mbox{g}$' +' = {:1.0E}'.format(bet)[:-4] + r'$\times 10^{-4}$')
leg.get_frame().set_linewidth(0.0)
savefig('long_times',pics)
plt.show();
```
## Time aloft
```
y2 = sym.symbols('y2', cls=sym.Function)
y2 = sym.Function('y2')(t)
try:
y2 = sym.Eq(y2, soln0.rhs + eps*soln1.rhs + eps**2*soln2.rhs
+ eps**3*soln3.rhs + eps**4*soln4.rhs) # + eps**2*soln2.rhs)
except NameError:
y2 = sym.Eq(y2, eps*soln1.rhs + eps**2*soln2.rhs + eps**3*soln3.rhs + eps**4*soln4.rhs)
y2.rhs
#y2.diff(t)
tau0, tau1, tau2, tau3 = sym.symbols('tau0 tau1 tau2 tau3')
tau = sym.Eq(tau0 + eps*tau1 + eps**2*tau2 + eps**3*tau3)
y3 = y2.rhs.subs(t, tau.lhs).series(eps)
col = sym.collect(y3, eps, evaluate=False)
```
### $\mathcal{O} \left( 1 \right) \mbox{Solution}$
```
#tau0 = 2
sym.Eq(col[1].removeO())
```
Two roots, lets look at $\tau_0 = 2$.
### $\mathcal{O} \left( \epsilon \right) \mbox{Solution}$
```
order_eps = col[eps].subs(tau0, 2)
order_eps
soln_eps = sym.solve(order_eps, tau1)
```
### $\mathcal{O} \left( \epsilon^2 \right) \mbox{Solution}$
```
order_eps2 = col[eps**2].subs(tau0, 2).subs(tau1, soln_eps[0])
order_eps2
soln_eps2 = sym.solve(order_eps2, tau2)
```
### $\mathcal{O} \left( \epsilon^3 \right) \mbox{Solution}$
```
order_eps3 = col[eps**3].subs(tau0, 2).subs(tau1, soln_eps[0]).subs(tau2, soln_eps2[0])
order_eps3
soln_eps3 = sym.solve(order_eps3, tau3)
```
### Composite solution
```
0.2*8*np.pi
2*1.45
tau0, tau1, tau2, tau3 = sym.symbols('tau0 tau1 tau2 tau3')
tau = sym.Eq(tau0 + eps*tau1 + eps**2*tau2 + eps**3*tau3)
tau = tau.subs(tau0, 2).subs(tau1, soln_eps[0]).subs(tau2, soln_eps2[0]).subs(tau3, soln_eps3[0])
print(tau.subs(beta, 6E-4).lhs*1.45)
print(str(tau.subs(beta, 6E-4).lhs))
tau_soln = sym.lambdify(eps, tau.subs(beta, 6E-4).lhs*1.45, 'numpy')
print(tau_soln(0.67))
tau
a=y_comp.rhs.subs(t, tau.lhs.subs(eps, gamma)).subs(beta,6.5E-4)
#(sym.collect(a, eps, evaluate=False)[1])
b = sym.collect(a, eps, evaluate=False)[1]
sym.simplify(b)
ttt = np.arange(0.01, 1,0.001)
betas = [bet]
linestyle = ['k','k--', 'bo-', 'cv-.', 'k+:', 'm']
plt.figure(figsize=(6, 4))#, dpi=100)
for keys, vals in enumerate(betas):
taun = tau.subs(beta, vals)
tau_soln = sym.lambdify(eps, taun.lhs, 'numpy')
label=['{:1.0E}'.format(vals)[:-4] + r'$\times 10^{-4}$']
plt.semilogx(ttt, tau_soln(ttt), linestyle[keys],
label=r'$\mathbf{D}\mbox{g}$ = '+ label[keys], markevery=100)
plt.xlabel(r'$\phi \mathbf{E}\mbox{u}$')
plt.ylabel(r'$t_f$')
plt.legend()
savefig('drag', pics)
plt.show();
```
## Drag vs. inertia
```
t, V = sym.symbols('t V', real=True, positive=True)
u = sym.symbols('u', cls=sym.Function)
eq = sym.diff(u(t), t) + u(t)**2
sol = sym.dsolve(eq, u(t))
print(sol.rhs)
u = sol.rhs
C1, C2 = sym.symbols('C1 C2')
eq = sym.Eq(u.subs(t, 0), 1)
sol = sym.solve(eq, C1)[0]
print(sol)
print(u.subs(C1, sol))
tt = np.arange(0.01,1.,0.001)
plt.figure(figsize=(6, 4), dpi=100)
u_soln = sym.lambdify(t, u.subs(C1, sol).subs(V,1.), 'numpy')
plt.semilogy(tt, u_soln(tt), 'k')
plt.show();
```
| github_jupyter |
## Copyright 2021 Antoine Simoulin.
<i>Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Icons made by <a href="https://www.flaticon.com/authors/freepik" title="Freepik">Freepik</a>, <a href="https://www.flaticon.com/authors/pixel-perfect" title="Pixel perfect">Pixel perfect</a>, <a href="https://www.flaticon.com/authors/becris" title="Becris">Becris</a>, <a href="https://www.flaticon.com/authors/smashicons" title="Smashicons">Smashicons</a>, <a href="https://www.flaticon.com/authors/srip" title="srip">srip</a>, <a href="https://www.flaticon.com/authors/adib-sulthon" title="Adib">Adib</a>, <a href="https://www.flaticon.com/authors/flat-icons" title="Flat Icons">Flat Icons</a> and <a href="https://www.flaticon.com/authors/dinosoftlabs" title="Pixel perfect">DinosoftLabs</a> from <a href="https://www.flaticon.com/" title="Flaticon"> www.flaticon.com</a></i>
# TP 3 : Words Embeddings
<img src="https://github.com/AntoineSimoulin/m2-data-sciences/blob/master/TP3%20-%20Word%20Embeddings/tp3-header.png?raw=True" width="1000">
On va s'appuyer sur le corpus collecté par <span class="badge badge-secondary">([Panckhurst et al., 2016](#panckhurst-2016))</span> qui rassemble 88,000 sms collectés dans la région de Montpellier. Le corpus a été dé-identifié (en particulier, les noms sont remplacés par [ _forename_ ]). Pour chaque sms, on a identifié les Emojis dans le texte.
Il y avait beaucoup de type d'Emojis. Dans le TP, ils ont été simplifiés selon le tableau suivant. Tous les Emojis de la colonne `Emoji list` ont été remplacé par l'emoji de la colonne `Generic`. Dans le TP les Emojis n'apparaissent pas dans le texte du sms car on cherche à les prédire.
| Generic Emoji | Emoji list |
|:--------------:|:------------------------------------------------------------------:|
| 😃 | '=P', ':)', ':P', '=)', ':p', ':d', ':-)', '=D', ':D', '^^' |
| 😲 | ':O', 'o_o', ':o', ':&' |
| 😔 | '"-.-'''", '<_>', '-_-', "--'", "-.-'", '-.-', "-.-''", "-\_-'" |
| 😠 | ':/', ':-/', ':-(', ':(', ':-<' |
| 😆 | '>.<', '¤.¤', '<>','><', '*.*', 'xd', 'XD', 'xD', 'x)',';)', ';-)' |
| 😍 | '</3', '<3' |
Finalement pour le TP, on a filtré le jeu de données pour ne conserver que les sms contenant qu'un seul Emoji. On a par ailleurs <i>down samplé</i> les classes majoritaires pour limiter le déséquilibre du jeu de données. En effet les sms avec un smiley 😃 était largement sur-représentés.
<b>L'objet du TP est de prédire l'émoji associé à chaque message. Pour cela on vectorisera le texte en utilisant les méthodes d'embeddings.</b>
```
%%capture
# Check environment
if 'google.colab' in str(get_ipython()):
IN_COLAB = True
else:
IN_COLAB = False
if IN_COLAB:
# ⚠️ Execute only if running in Colab
!pip install -q scikit-learn==0.23.2 matplotlib==3.1.3 pandas==1.1.3 gensim==3.8.1 torch==1.6.0 torchvision==0.7.0
!pip install skorch==0.10.0
# then restart runtime environment
from gensim.models import KeyedVectors
from collections import Counter
import numpy as np
import pandas as pd
import re
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, roc_auc_score
import os, sys
# IPython automatically reload all changed code
%load_ext autoreload
%autoreload 2
# Inline Figures with matplotlib
%matplotlib inline
%config InlineBackend.figure_format='retina'
# import extrenal modules
import urllib.request
class_names = ['happy', 'joke', 'astonished', 'angry', 'bored', 'heart']
repo_url = 'https://raw.githubusercontent.com/AntoineSimoulin/m2-data-sciences/master/'
_ = urllib.request.urlretrieve(repo_url + 'src/plots.py', 'plots.py')
if not os.path.exists('smileys'):
os.makedirs('smileys')
for c in class_names:
_ = urllib.request.urlretrieve(
repo_url + 'TP3%20-%20Word%20Embeddings/smileys/{}.png'.format(c),
'smileys/{}.png'.format(c))
```
On va utiliser les embeddings déjà entrainés que nous avons manipulés au cours précédent. Pour limiter la taille du fichier d'embeddings, on n'a sauvegardé que les `10,000` mots les plus fréquents. <b>Vous devez récupérer le fichier d'embeddings ainsi que le jeu de données directement sur le [Moodle](https://moodle.u-paris.fr/course/view.php?id=11048).</b>
```
# Load the pre-trained French word2vec embeddings (top-10k-words file).
w2v_model = KeyedVectors.load_word2vec_format("oscar.fr.300.10k.model")
# L2-normalize the vectors in place (replace=True discards the raw vectors).
w2v_model.init_sims(replace=True)
len(w2v_model.vocab)
# Build an array with the 10,000 most frequent words and the vocabulary dictionaries.
word_count = {k: w2v_model.vocab[k].count for k in w2v_model.vocab}
word_count = Counter(word_count)
word_count.most_common(10)
# Indices 0 and 1 are reserved for the special 'unk' and 'pad' tokens,
# hence enumerate(..., 2): real words get indices starting at 2.
idx2w = {i: w for (i, (w, f)) in enumerate(word_count.most_common(10000), 2)}
idx2w[0] = 'unk'
idx2w[1] = 'pad'
w2idx = {w: i for (i, (w, f)) in enumerate(word_count.most_common(10000), 2)}
w2idx['unk'] = 0
w2idx['pad'] = 1
# Stack the word vectors in the same frequency order as the indices above and
# prepend two zero rows for 'unk'/'pad', so row i of the matrix matches idx2w[i].
embeddings_vectors = [w2v_model[w] for (w, f) in word_count.most_common(10000)]
word2vec_embeddings = np.vstack(embeddings_vectors)
word2vec_embeddings = np.concatenate((np.zeros_like(word2vec_embeddings[0:2]), word2vec_embeddings), 0)
word2vec_embeddings.shape
# Sanity check: compare a word's matrix row with its word2vec vector
# (3664 is presumably the index printed by w2idx['Oh'] — verify when running).
w2idx['Oh']
word2vec_embeddings[3664][:10]
w2v_model['Oh'][:10]
# Load the sms dataset: raw 'sms' text plus one-hot emotion columns.
dataset = pd.read_csv('emojis.csv')
dataset.head()
dataset.loc[3, 'sms']
class_names = ['happy', 'joke', 'astonished', 'angry', 'bored', 'heart']
dataset.shape
```
On va utiliser la même fonction de tokenization qui a été utilisée pour entrainer les embeddings.
```
token_pattern = re.compile(r"(\->|(?::\)|:-\)|:\(|:-\(|;\);-\)|:-O|8-|:P|:D|:\||:S|:\$|:@|8o\||\+o\(|\(H\)|\(C\)|\(\?\))|(?:[\d.,]+)|([^\s\w0-9])\2*|(?:[\w0-9\.]+['’]?)(?<!\.))")

def tokenize(text):
    """Split *text* into tokens with the regex used to train the embeddings.

    The pattern captures smileys, numbers, repeated punctuation and plain
    words; group 0 of each match tuple holds the full token.
    """
    match_tuples = token_pattern.findall(str(text))
    return [groups[0].strip() for groups in match_tuples]
dataset['tokens'] = dataset['sms'].apply(tokenize)
dataset.head()
```
### Exploration de données
<hr>
<div class="alert alert-info" role="alert">
<p><b>📝 Exercice :</b> Observer la distribution des classes.</p>
</div>
<hr>
```
dataset[["happy", "joke", "astonished", "angry", "bored", "heart"]].sum().plot.bar(color='#970137',
title="dataset distribution");
```
<hr>
<div class="alert alert-info" role="alert">
<p><b>📝 Exercice :</b> Evaluer la proportion de tokens qui sont hors du vocabulaire des embeddings.</p>
</div>
<hr>
```
# %load solutions/unk.py
# Flag every token of every sms that is missing from the embedding vocabulary.
tokens_not_in_voc = [
    token not in w2idx
    for sms in dataset['tokens']
    for token in sms
]
print("On a {:.2f}% des tokens hors du vocabulaire".format(sum(tokens_not_in_voc) / len(tokens_not_in_voc) * 100))
```
### Vectorization
Les embeddings de mots permettent de représenter chaque <i>token</i> par un vecteur. Pour obtenir un vecteur qui représente le sms, on va agréger les différents mots du texte. On considérera plusieurs fonctions d'agrégation : la somme, la moyenne, le maximum ou le minimum.
En pratique nous verrons dans le dernier cours d'ouverture qu'il existe des méthodes plus évoluées pour composer les mots de la phrase. Néanmoins une simple fonction d'agrégation nous donnera déjà une bonne <i>baseline</i>.
<img src="https://github.com/AntoineSimoulin/m2-data-sciences/blob/master/TP3%20-%20Word%20Embeddings/model.png?raw=True" width="500">
<hr>
<div class="alert alert-info" role="alert">
<p><b>📝 Exercice :</b> Ecrire une fonction qui permet de vectoriser un sms.</p>
</div>
<hr>
```
# %load solutions/vectorize_1.py
def vectorize(tokens, agg_method='mean'):
    """Aggregate the word2vec embeddings of *tokens* into one sentence vector.

    Parameters
    ----------
    tokens : list of str
        Tokens of one sms; tokens absent from the embedding vocabulary
        are skipped.
    agg_method : {'mean', 'max', 'sum'}
        Aggregation applied over the token axis.

    Returns
    -------
    numpy.ndarray
        A (300,) vector; the zero vector when no token is known.

    Raises
    ------
    ValueError
        If *agg_method* is not one of the supported methods.
    """
    token_embeddings_arr = np.array([w2v_model[t] for t in tokens if t in w2v_model.vocab])
    if not len(token_embeddings_arr):
        # No known token: return a zero vector with the embedding shape.
        return np.zeros_like(w2v_model['roi'])
    # Aggregate the per-token representations into a single (300,) vector.
    if agg_method == 'mean':
        return np.mean(token_embeddings_arr, axis=0)
    if agg_method == 'max':
        return np.max(token_embeddings_arr, axis=0)
    if agg_method == 'sum':
        return np.sum(token_embeddings_arr, axis=0)
    # Previously an unrecognized method fell through to UnboundLocalError;
    # fail loudly with an explicit message instead.
    raise ValueError("unknown agg_method: {!r}".format(agg_method))
vectorize(dataset['tokens'][0], agg_method='max')
```
On voudrait attribuer un poids moins important aux embeddings des mots moins caractéristiques. Pour ça, on voudrait pondérer la contribution des vecteurs de chaque mot en fonction de leur score TF-IDF.
<img src="https://github.com/AntoineSimoulin/m2-data-sciences/blob/master/TP3%20-%20Word%20Embeddings/model-tfidf.png?raw=True" width="700">
<hr>
<div class="alert alert-info" role="alert">
<p><b>📝 Exercice :</b> Utiliser la pondération TF-IDF pour pondérer chacun des vecteurs.</p>
</div>
<hr>
```
tfidf_vectorizer = TfidfVectorizer(tokenizer=lambda x: x,
lowercase=False)
tfidf_vectorizer.fit(dataset['tokens'])
w2idx_tfidf = {w: idx for (idx, w) in enumerate(tfidf_vectorizer.get_feature_names())}
idx_tfidf2w = {idx: w for (idx, w) in enumerate(tfidf_vectorizer.get_feature_names())}
# %load solutions/vectorize_2.py
def vectorize(tokens, agg_method='mean', tfidf_vectorizer=None):
    """Aggregate the word2vec embeddings of *tokens* into one sentence vector.

    Parameters
    ----------
    tokens : list of str
        Tokens of one sms; tokens absent from the embedding vocabulary
        are skipped.
    agg_method : {'mean', 'max', 'sum', 'tfidf'}
        Aggregation over the token axis; 'tfidf' computes an average
        weighted by each token's TF-IDF score.
    tfidf_vectorizer : fitted TfidfVectorizer, optional
        Required when ``agg_method == 'tfidf'``.

    Returns
    -------
    numpy.ndarray
        A (300,) vector; the zero vector when no token is known.

    Raises
    ------
    ValueError
        If *agg_method* is unknown, or 'tfidf' is requested without a
        fitted vectorizer.
    """
    token_embeddings_arr = np.array([w2v_model[t] for t in tokens if t in w2v_model.vocab])
    if not len(token_embeddings_arr):
        # No known token: return a zero vector with the embedding shape.
        return np.zeros_like(w2v_model['roi'])
    # Aggregate the per-token representations into a single (300,) vector.
    if agg_method == 'mean':
        sentence_embedding = np.mean(token_embeddings_arr, axis=0)
    elif agg_method == 'max':
        sentence_embedding = np.max(token_embeddings_arr, axis=0)
    elif agg_method == 'sum':
        sentence_embedding = np.sum(token_embeddings_arr, axis=0)
    elif agg_method == 'tfidf':
        if tfidf_vectorizer is None:
            # Previously this path crashed with AttributeError on None.
            raise ValueError("agg_method='tfidf' requires a fitted tfidf_vectorizer")
        tf_idf_w = tfidf_vectorizer.transform([tokens]).todense().transpose()
        # NOTE(review): relies on the module-level w2idx_tfidf mapping; a token
        # known to w2v but absent from the TF-IDF vocabulary raises KeyError
        # here -- confirm both vocabularies cover the data.
        tf_idf_w = np.squeeze([tf_idf_w[w2idx_tfidf[t]] for t in tokens if t in w2v_model.vocab])
        sentence_embedding = np.average(token_embeddings_arr, weights=tf_idf_w, axis=0)
    else:
        # Previously an unrecognized method fell through to UnboundLocalError.
        raise ValueError("unknown agg_method: {!r}".format(agg_method))
    return sentence_embedding
X = [vectorize(sms) for sms in dataset['tokens']]
X = np.array(X)
print(X.shape)
```
On va intégrer la fonction `vectorize` dans un module compatible avec les fonctions de `sklearn`.
<hr>
<div class="alert alert-info" role="alert">
<p><b>📝 Exercice :</b> Intégrer votre fonction de vectorization dans la classe Vectorizer ci-dessous. Vous devez simplement la copier/coller en remplaçant tfidf_vectorizer par self.tfidf_vectorizer car c'est maintenant un attribut de la classe.</p>
</div>
<hr>
```
# 6 choses à faire pour l'excercice sur la class Vectorizer :
# copier votre fonction vectorize dans la class
# ajouter l'argument self dans la fonction vectorize
# supprimer l'argument tfidf_vectorizer de la fonction vectorize
# remplacer toutes les occurences de agg_method par self.agg_method dans la fonction vectorize
# supprimer l'argument agg_method de la fonction vectorize
# remplacer toutes les occurences de w2idx_tfidf par self.w2idx_tfidf dans la fonction vectorize
# %load solutions/vectorizer.py
class Vectorizer(BaseEstimator, TransformerMixin):
    """Sklearn-compatible transformer mapping token lists to sentence vectors.

    For each sms, the word2vec embeddings of its in-vocabulary tokens are
    aggregated with ``agg_method`` ('mean', 'max', 'sum' or 'tfidf'); when
    ``normalize`` is True the output rows are L2-normalized.
    """

    def __init__(self, agg_method='mean', normalize=False):
        self.agg_method = agg_method
        self.normalize = normalize
        # TF-IDF weights are only used when agg_method == 'tfidf'; the
        # tokenizer is the identity because inputs are already token lists.
        self.tfidf_vectorizer = TfidfVectorizer(tokenizer=lambda x: x,
                                                lowercase=False,
                                                token_pattern=None)

    def vectorize(self, tokens):
        """Return one (300,) vector aggregating the embeddings of *tokens*."""
        token_embeddings_arr = np.array([w2v_model[t] for t in tokens if t in w2v_model.vocab])
        if len(token_embeddings_arr) == 0:
            # No known token: zero vector with the embedding shape.
            return np.zeros_like(w2v_model['roi'])
        if len(token_embeddings_arr) == 1:
            # Single token: its embedding is the sentence vector.
            return np.squeeze(token_embeddings_arr)
        if self.agg_method == 'mean':
            return np.mean(token_embeddings_arr, axis=0)
        if self.agg_method == 'max':
            return np.max(token_embeddings_arr, axis=0)
        if self.agg_method == 'sum':
            return np.sum(token_embeddings_arr, axis=0)
        if self.agg_method == 'tfidf':
            tf_idf_w = self.tfidf_vectorizer.transform([tokens]).todense().transpose()
            # Tokens unseen by the TF-IDF vocabulary fall back to index 0.
            tf_idf_w = np.squeeze([tf_idf_w[self.w2idx_tfidf.get(t, 0)] for t in tokens if t in w2v_model.vocab])
            return np.average(token_embeddings_arr, weights=tf_idf_w, axis=0)
        # Previously an unrecognized method fell through to UnboundLocalError.
        raise ValueError("unknown agg_method: {!r}".format(self.agg_method))

    def _vectorize(self, tokens):
        # Legacy alias delegating to the module-level ``vectorize`` function.
        return vectorize(tokens)

    def fit(self, X, y=None):
        """Fit the internal TF-IDF vectorizer on the 'tokens' column of *X*."""
        self.tfidf_vectorizer.fit(X['tokens'])
        self.w2idx_tfidf = {w: idx for (idx, w) in enumerate(self.tfidf_vectorizer.get_feature_names())}
        self.idx_tfidf2w = {idx: w for (idx, w) in enumerate(self.tfidf_vectorizer.get_feature_names())}
        return self

    def transform(self, X, y=None, eps=1e-12):
        """Vectorize every sms of *X* and return an (n_samples, 300) array."""
        vectors = np.array([self.vectorize(t) for t in X['tokens']])
        if self.normalize:
            # Bug fix: eps belongs on the norm (to avoid division by zero for
            # all-zero rows), not added to every component before the norm.
            vectors = vectors / (np.linalg.norm(vectors, axis=1, keepdims=True) + eps)
        return vectors
vectorizer = Vectorizer(agg_method='tfidf')
X = vectorizer.fit_transform(dataset)
X.shape
```
### Classification
On compare deux algorithmes de classification : Une régression logistique et un SVM ou l'on pénalise les classes majoritaires.
```
X_train, X_test = train_test_split(
dataset, test_size=0.33, random_state=42)
y_train = X_train[['happy', 'joke', 'astonished', 'angry', 'bored', 'heart']].astype(int).values
y_train = [x.tolist().index(1) for x in y_train]
y_test = X_test[['happy', 'joke', 'astonished', 'angry', 'bored', 'heart']].astype(int).values
y_test = [x.tolist().index(1) for x in y_test]
len(y_train)
X_train.shape
LogReg_pipeline = Pipeline([
('vect', Vectorizer('tfidf')),
('clf', OneVsRestClassifier(LogisticRegression(solver='sag'))),
])
# Training logistic regression model on train data
LogReg_pipeline.fit(X_train, y_train)
# Infering data on test set
prediction_LogReg = LogReg_pipeline.predict(X_test)
SVC_pipeline = Pipeline([
('vect', Vectorizer('tfidf')),
('clf', OneVsRestClassifier(SVC(kernel='linear',
class_weight='balanced', # penalize
probability=True), n_jobs=-1))
])
SVC_pipeline.fit(X_train, y_train)
prediction_SVC = SVC_pipeline.predict(X_test)
```
### Evaluation
```
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from plots import plot_confusion_matrix
print('Test accuracy is {}'.format(accuracy_score(y_test, prediction_SVC)))
print('Test ROC socre is {}'.format(roc_auc_score(np.eye(np.max(y_test) + 1)[y_test],
SVC_pipeline.predict_proba(X_test),
multi_class='ovo')))
plot_confusion_matrix(confusion_matrix(y_test, prediction_SVC),
classes=class_names,
title='Confusion matrix, without normalization')
print('Test accuracy is {}'.format(accuracy_score(y_test, prediction_LogReg)))
print('Test ROC socre is {}'.format(roc_auc_score(np.eye(np.max(y_test) + 1)[y_test],
LogReg_pipeline.predict_proba(X_test),
multi_class='ovo')))
plot_confusion_matrix(confusion_matrix(y_test, prediction_LogReg),
classes=class_names,
title='Confusion matrix, without normalization')
```
<hr>
<div class="alert alert-info" role="alert">
<p><b>📝 Exercice :</b> Quelle mesure de performance vous semble le plus adaptée pour ce cas d'usage ?</p>
</div>
<hr>
La mesure de performance dépend évidemment du contexte d'évaluation du cas d'usage. Par exemple, si l'on est dans un cas de classification binaire où l'on cherche à distinguer des spams d'emails normaux, les spams représenteront peut-être 5% du jeu de données. Un algorithme qui prédirait toujours "not spam" aurait une précision de 95%, ce qui est évidemment inutile. Dans notre cas, le jeu de données est légèrement déséquilibré. Quand on observe les résultats précédents, la régression logistique obtient une meilleure [précision](https://scikit-learn.org/stable/modules/model_evaluation.html#accuracy-score) (37,3%) que le SVM (32,5%). Mais les matrices de confusion révèlent que pour la régression logistique, les classes minoritaires (astonished et bored) ne sont jamais prédites. La précision ne traduit donc pas ce phénomène.
A l'inverse, AUC (Area Under the Curve) correspond comme son nom l'indique à l'aire sous la courbe [ROC](https://scikit-learn.org/stable/modules/model_evaluation.html#roc-metrics). Cette métrique traduit mieux la capacité des méthodes à séparer les classes puisqu'elle est meilleure pour le SVM (0.684) que pour la régression logistique (0.677). L'utilisation de l'AUC écarte les modèles représentatifs, mais pas discriminants. Dans notre cas d'usage, la mesure ROC semble ainsi plus adaptée.
<hr>
<div class="alert alert-info" role="alert">
<p><b>📝 Exercice :</b> Comparer les résultats obtenus avec les deux algorithmes de classifications</p>
</div>
<hr>
Comme on l'a mentionné précédemment, la régression logistique semble prioriser les classes majoritaires pour obtenir une meilleure précision globale. Cette dernière est beaucoup plus performante que le SVM pour prédire les classes "happy" ou "joke". Dans le SVM, on a appliqué un pénalité selon la distribution du jeu de données (paramètre `class_weight='balanced'`) et ainsi on prédit les classes minoritaires. Chacune des classes indépendamment semble mieux modélisée et on voit ressortir la diagonale qui caractérise ce comportement.
Quand on observe le carré 2x2 dans le coin supérieur gauche de la matrice de confusion, on constate que les deux modèles semblent avoir du mal à distinguer les deux classes "happy" et "joke". Ces humeurs sont sans doutes trop proches et difficiles à distinguer.
<hr>
<div class="alert alert-info" role="alert">
<p><b>📝 Exercice :</b> Comparer les différentes méthodes d'agrégation proposées. (Mean, Max, Sum, Moyenne pondérée par le TF-IDF)</p>
</div>
<hr>
```
test_svc = {'acc': {}, 'roc': {}}
test_logreg = {'acc': {}, 'roc': {}}
for agg_method in ['mean', 'max', 'sum', 'tfidf']:
print('Computing agg method: {}'.format(agg_method))
LogReg_pipeline = Pipeline([
('vect', Vectorizer(agg_method)),
('clf', OneVsRestClassifier(LogisticRegression(solver='sag'))),])
SVC_pipeline = Pipeline([
('vect', Vectorizer(agg_method)),
('clf', OneVsRestClassifier(SVC(kernel='linear',
class_weight='balanced', # penalize
probability=True), n_jobs=-1))])
SVC_pipeline.fit(X_train, y_train)
prediction_SVC = SVC_pipeline.predict(X_test)
LogReg_pipeline.fit(X_train, y_train)
prediction_LogReg = LogReg_pipeline.predict(X_test)
test_svc['acc'][agg_method] = accuracy_score(y_test, prediction_SVC)
test_svc['roc'][agg_method] = roc_auc_score(np.eye(np.max(y_test) + 1)[y_test],
SVC_pipeline.predict_proba(X_test),
multi_class='ovo')
test_logreg['acc'][agg_method] = accuracy_score(y_test, prediction_LogReg)
test_logreg['roc'][agg_method] = roc_auc_score(np.eye(np.max(y_test) + 1)[y_test],
LogReg_pipeline.predict_proba(X_test),
multi_class='ovo')
print('Test accuracy for SVC with agg method {} is {}'.format(agg_method, test_svc['acc'][agg_method]))
print('Test ROC score for ROC with agg method {} is {}'.format(agg_method, test_svc['roc'][agg_method]))
x_svc = [v for (k, v)in test_svc['acc'].items()]
y_svc = [v for (k, v)in test_svc['roc'].items()]
x_logreg = [v for (k, v)in test_logreg['acc'].items()]
y_logreg = [v for (k, v)in test_logreg['roc'].items()]
labels = ['svc', 'svc', 'svc', 'svc', 'logreg', 'logreg', 'logreg', 'logreg']
colors = ['#970137', '#970137', '#970137', '#970137', 'black', 'black', 'black', 'black']
fig, ax = plt.subplots(figsize=(5, 5))
ax.scatter(x_svc, y_svc, color='#970137', label='balanced svc')
for i, agg in enumerate(['mean', 'max', 'sum', 'tfidf']):
label = "{}".format(agg)
plt.annotate(label, (x_svc[i], y_svc[i]),
textcoords="offset points", xytext=(10,10),
ha='center', rotation=0, fontsize=15, fontweight='black')
ax.scatter(x_logreg, y_logreg, color='black', label='logreg')
for i, agg in enumerate(['mean', 'max', 'sum', 'tfidf']):
label = "{}".format(agg)
plt.annotate(label, (x_logreg[i], y_logreg[i]),
textcoords="offset points", xytext=(10,10),
ha='center', rotation=0, fontsize=15, fontweight='black')
ax.set_xlabel('Accuracy')
ax.set_ylabel('ROC')
ax.set_title('Method comparison')
ax.legend(loc=4)
plt.show();
```
On peut comparer les méthodes d'agrégation et l'algorithme de classification en fonction de la mesure de précision ou ROC. On retrouve ce qu'on a observé précédemment : `LogReg` obtient de meilleures précisions, et `SVM` obtient de meilleurs ROC (sauf dans le cas de la méthode `max`). Quel que soit l'algorithme de classification ou la mesure de performance, la méthode d'agrégation `max` semble être la moins adaptée (la précision et les scores ROC sont moins bons). En considérant le score ROC comme la mesure de performance la plus adaptée, on peut dire que les différentes méthodes d'agrégation ont des scores proches.
<hr>
<div class="alert alert-info" role="alert">
<p><b>📝 Exercice (Bonus) :</b> Comparer les résultats obtenus avec un réseau de neurones récurent (RNN).</p>
</div>
<hr>
Nous ferons une ouverture sur les réseaux de neurones et leur utilisation pour le texte lors de la dernière séance. Néanmoins, cela peut être une bonne occasion de se familiariser avec leur utilisation. Nous allons utiliser la librairie [skorch](https://github.com/skorch-dev/skorch) qui est un wrapper de la librairie [PyTorch](https://pytorch.org/) compatible avec [scikit-learn](https://scikit-learn.org/). Cela permet en particulier de simplifier les aspects d'optimisation. Ici on utilise un réseau récurent de type LSTM <span class="badge badge-secondary">([Cho and al., 2014](#cho-2014)</span>, <span class="badge badge-secondary">[Hochreiter and Schmidhuber, 1997](#schmidhuber-1997))</span>. Les réseaux de neurones récurrents modélisent les phrases comme des séquences d’embeddings de mots. Ils traitent l’entrée séquentiellement. A chaque étape, le vecteur de sortie est calculé en fonction de l’embedding du mot courant et de l’état caché précédent.
<img src="https://github.com/AntoineSimoulin/m2-data-sciences/blob/master/TP3%20-%20Word%20Embeddings/lstm.png?raw=True" width="700">
```
import torch
from torch import nn
from skorch import NeuralNet, NeuralNetClassifier
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
from sklearn.utils.class_weight import compute_class_weight
class RNNClassifier(nn.Module):
    """LSTM sentence classifier over frozen pre-trained word embeddings.

    The forward pass expects ``X = (token_ids, lengths)`` where ``token_ids``
    is a (batch, max_len) LongTensor of vocabulary indices and ``lengths``
    holds each sequence's true length before padding.  The output is a
    (batch, n_classes) tensor of log-probabilities, as expected by the
    ``NLLLoss`` criterion used to train this module.
    """

    def __init__(self, n_classes, embeddings_weights,
                 hidden_dim=100, embedding_dim=300, dropout=0.5):
        super(RNNClassifier, self).__init__()
        # Frozen embedding table: the pre-trained vectors are not fine-tuned.
        self.embeddings = nn.Embedding.from_pretrained(embeddings_weights, sparse=True)
        self.embeddings.weight.requires_grad = False
        self.lstm = nn.LSTM(embedding_dim, hidden_dim)
        self.dense = nn.Linear(hidden_dim, n_classes)
        self.dropout = dropout
        self.drop = nn.Dropout(self.dropout)

    def forward(self, X, **kwargs):
        X, X_len = X
        X = self.embeddings(X)
        # Pack the batch so the LSTM skips the padded positions; the true
        # sequence lengths are restored by pad_packed_sequence below.
        X_packed = pack_padded_sequence(X, X_len, batch_first=True, enforce_sorted=False)
        X_packed, (h, c) = self.lstm(X_packed)
        # https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html#torch.nn.LSTM
        X, output_lengths = pad_packed_sequence(X_packed, batch_first=True)
        # Bug fixes relative to the original:
        # * squeeze(0) instead of squeeze() so a batch of size 1 keeps its
        #   batch dimension (h is (num_layers=1, batch, hidden));
        # * the Dropout layer was constructed but never applied;
        # * log_softmax instead of softmax because the training criterion is
        #   NLLLoss, which expects log-probabilities.
        h = self.drop(h.squeeze(0))
        return F.log_softmax(self.dense(h), dim=-1)
class_weights = compute_class_weight(
'balanced', classes=range(len(class_names)), y=y_train)
# On va donner un poids plus important aux classes minoritaires
# mais pas proportionnel à leur distribution pour ne pas trop les favoriser
# au détriment de la précision globale
class_weights = [1, 1, 1.3, 1, 1.3, 1]
class_weights = torch.tensor(class_weights, dtype=torch.float)
net = NeuralNetClassifier( # NeuralNet
RNNClassifier(len(class_names), torch.tensor(word2vec_embeddings)),
max_epochs=10,
lr=0.001,
optimizer=torch.optim.Adam,
criterion=torch.nn.NLLLoss,
criterion__weight=class_weights
)
sequences = [torch.tensor([w2idx.get(t, 0) for t in tokens]) for tokens in X_train['tokens']]
sequences_length = torch.tensor([len(s) for s in sequences])
# On "pad" les séquences pour qu'elles aient toutes la même longueur.
padded_sequences = pad_sequence(sequences, batch_first=True, padding_value=1)
net.fit((padded_sequences, sequences_length), torch.tensor(y_train))
sequences_test = [torch.tensor([w2idx.get(t, 0) for t in tokens]) for tokens in X_test['tokens']]
sequences_test_length = torch.tensor([len(s) for s in sequences_test])
# On "pad" les séquences pour qu'elles aient toutes la même longueur.
padded_sequences_test = pad_sequence(sequences_test, batch_first=True, padding_value=1)
prediction_LSTM = net.predict((padded_sequences_test, sequences_test_length))
print('Test accuracy is {}'.format(accuracy_score(y_test, prediction_LSTM)))
print('Test ROC socre is {}'.format(roc_auc_score(np.eye(np.max(y_test) + 1)[y_test],
net.predict_proba((padded_sequences_test, sequences_test_length)),
multi_class='ovo')))
plot_confusion_matrix(confusion_matrix(y_test, prediction_LSTM),
classes=class_names,
title='Confusion matrix, without normalization')
```
Dans ce cas, l'apport des réseaux de neurones n'est pas évident. Néanmoins, en règle générale, les méthodes de Deep Learning sont plus performantes, en particulier quand le nombre de données augmente.
L'utilisation des méthodes de down-sampling ou up-sampling peut s'avérer fastidieux (on va se priver de données ou en utiliser d'autres plusieurs fois. La sélection des données doit se faire précisémment pour ne pas impacter les capacités de généralisation de l'algorithme). Nous avons préféré ici utiliser un algorithme qui pénalise les classes majoritaires et une mesure d'erreur adaptée. Il existe un bon article de blog pour gérer les classes déséquilibrées : https://elitedatascience.com/imbalanced-classes.
On peut se faire une idée des limites et des points fort de l'algorithme en regardant des prédictions.
```
humors = ['happy', 'astonished', 'bored', 'angry', 'joke', 'heart']
meta_smiley = [b'\xF0\x9F\x98\x83'.decode("utf-8"),
b'\xF0\x9F\x98\xB2'.decode("utf-8"),
b'\xF0\x9F\x98\x94'.decode("utf-8"),
b'\xF0\x9F\x98\xA0'.decode("utf-8"),
b'\xF0\x9F\x98\x86'.decode("utf-8"),
b'\xF0\x9F\x98\x8D'.decode("utf-8")]
humor_2_emoji = {h: ms for (h, ms) in zip(humors, meta_smiley)}
X_test.shape
for _ in range(10):
idx = np.random.randint(0, len(X_test))
emojis = humor_2_emoji[class_names[prediction_SVC[idx]]]
true_emojis = humor_2_emoji[class_names[y_test[idx]]]
print(X_test['sms'].values[idx], '(Pred)', emojis, '(True)', true_emojis, '\n')
```
### Visualisation
On peut aussi essayer de visualiser plus globalement les représentations. Pour ça on peut utiliser des algorithmes de réduction de dimension pour visualiser nos données. On a déjà parlé de UMAP et t-SNE. De manière intuitive, l'algorithme projette les représentations dans un espace de plus faible dimension en s'efforçant de respecter les distances entre les points entre l'espace de départ et d'arrivée. Il permet de visualiser facilement les données. On va utiliser l'outil `Tensorboard` qui intègre les principales méthodes de réduction de dimensions.
```
from pathlib import Path
from PIL import Image
import os
from os import listdir
from os.path import isfile, join
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
import torch
import tensorflow as tf
import tensorboard as tb
tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
pil_img = Image.open('./smileys/happy.png').convert('RGB')
pil_img = pil_img.resize((100, 100))
smileys_images = [f for f in listdir('./smileys') if isfile(join('./smileys', f))]
imgs_tb = {}
for s in smileys_images:
pil_img = Image.open(os.path.join('smileys', s)).convert('RGB')
pil_img = pil_img.resize((25, 25))
pil_to_tensor = transforms.ToTensor()(pil_img).unsqueeze_(0)
imgs_tb[Path(os.path.join('smileys', s)).stem] = pil_to_tensor
writer_embeddings = SummaryWriter(log_dir=os.path.join("./tfb/"))
vectorizer = Vectorizer(agg_method='tfidf', normalize=True)
emb_test = vectorizer.fit_transform(X_test)
writer_embeddings.add_embedding(torch.tensor(emb_test),
metadata=[(r, s, l) for (r, s, l) in zip(
X_test['sms'].values,
[humor_2_emoji[class_names[y]] for y in y_test],
[humor_2_emoji[class_names[y]] for y in prediction_SVC])
],
label_img=torch.cat([imgs_tb[class_names[y]] for y in y_test]),
metadata_header=['sms','label', 'prediction'],
tag="SMS-EMB-CLS")
```
Pour visualiser les représentations, lancer un tensorboard. Dans un terminal, se placer dans le dossier ou est éxécuté le notebook et exécuter:
```
tensorboard --logdir ./tfb/
```
Dans **Colab** on va lancer le tensorboard directement dans le notebook en éxécutant les cellules suivante :
```
%load_ext tensorboard
```
```
%tensorboard --logdir ./tfb/
```
```
# Load the TensorBoard notebook extension
%load_ext tensorboard
from tensorboard import notebook
notebook.list() # View open TensorBoard instances
# Control TensorBoard display. If no port is provided,
# the most recently launched TensorBoard is used
notebook.display(port=6006, height=1000);
```
<hr>
<div class="alert alert-info" role="alert">
<p><b>📝 Exercice :</b> Utiliser les méthodes UMAP, PCA et t-SNE pour projeter les données. Comparez les différentes méthodes de projections et interprétez qualitativement les propriétés de vos représentations.</p>
</div>
<hr>
A première vue, il est plus difficile d'analyser les projections des sms que celle des embeddings de mots. En effet quelque soit la méthode de projection, les documents sont moins bien séparés et l'analyse semble moins directe. On observe cependant des différences entre les méthodes de projection.
De manière générale, les sms ne sont pas forcément bien séparés. Il semblerait que les clusters s'expliquent généralement pour des raisons qui sont indépendantes de la sémantique. Par exemple, les messages avec des noms déidentifiés (étiquette \[_forename_\]), les messages courts, avec un recoupement lexical important ("bonne chance" et "bon courage"), ou encore avec des horaires ou des durées.
Finalement, il est intéressant de voir que l'on peut jouer sur les hyper-paramètres. Pour le t-SNE, on peut ajouter un degré de supervision qui permet d'améliorer la définition des clusters en fonction des labels. Pour UMAP on peut faire évoluer le nombre de voisins et ainsi la forme du nuage de points. Par ailleurs, UMAP semble légèrement plus rapide en termes de temps de calcul et de convergence des projections.
La compatibilité entre Jupyter/Colab et Tensorboard est parfois instable (c.f. https://www.tensorflow.org/tensorboard/tensorboard_in_notebooks). Si vous êtes sur Colab, vous pouvez télécharger le dossier directement sur votre ordinateur. Téléchargez le .zip sur votre ordinateur, puis dézippez-le.
```
!zip -r tfb.zip ./tfb/
```
Sur votre ordinateur, dans un terminal, se placer dans le dossier ou est le notebook et exécuter:
```
tensorboard --logdir ./tfb/
```
Vous devriez avoir un visuel comme ci-dessous. Vous pouvez cliquer sur un sms et vous avez à droite les sms les plus proches en terme de distance cosine comme nous l'avons fait pour word2vec. Par ailleurs chaque sms est représenté par le smiley correspondant. Vous pouvez faire varier les méthodes de projection dans le panneau de gauche.
<img src="https://github.com/AntoineSimoulin/m2-data-sciences/blob/master/TP3%20-%20Word%20Embeddings/tfb-viz.png?raw=True" width="1000">
## 📚 References
> <div id="panckhurst-2016">Panckhurst, Rachel, et al. <a href=https://hal.archives-ouvertes.fr/hal-01485560> 88milSMS. A corpus of authentic text messages in French.</a> Banque de corpus CoMeRe. Chanier T.(éd)-Ortolang: Nancy (2016).</div>
> <div id="schmidhuber-1997">Sepp Hochreiter, Jürgen Schmidhuber. <a href=https://dl.acm.org/doi/10.1162/neco.1997.9.8.1735> Long Short-Term Memory.</a> Neural Comput. 9(8): 1735-1780 (1997).</div>
> <div id="cho-2014">Kyunghyun Cho, Bart van Merrienboer, Çaglar Gülçehre, Dzmitry Bahdanau, Fethi Bougares, Holger Schwenk, Yoshua Bengio: <a href=https://doi.org/10.3115/v1/d14-1179> Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation.</a> EMNLP 2014: 1724-1734.</div>
| github_jupyter |
Welcome to the colab notebook for [GPTNeo](https://github.com/EleutherAI/GPTNeo) - a fully open source implementation of GPT like models for mesh-tensorflow by [EleutherAI](eleuther.ai).
Our library provides training and inference for GPT models up to GPT3 sizes on both TPUs and GPUs.
In this notebook we walk you through TPU training (or finetuning!) and sampling using the freely available colab TPUs.
If you find our repo useful, come join [our discord](https://discord.gg/BK2v3EJ) and say hi! 😬
Before we get going - make sure you are running this notebook with a TPU available. Go to Runtime -> Change Runtime Type and select 'TPU' under hardware accelerator.
```
%%bash
cd /content/GPTNeo
rm -rf GPTNeo
ls -l
pretrained_model = None
dataset = None
#@title Setup
%tensorflow_version 2.x
!git clone https://github.com/EleutherAI/GPTNeo
%cd GPTNeo
!pip3 install -q -r requirements.txt
pretrained_model = None
dataset = None
```
## Set Up Google Cloud
To train on TPUs we need to store our data on a google cloud bucket - as TPUs can't read from local filesystems.
You can set up a bucket by signing up for a free trial here: https://console.cloud.google.com/
Make a bucket at https://console.cloud.google.com/storage and come back when that's done.
Make sure to select 'Uniform' access control when setting up the bucket, or the colab notebook won't have the required permissions to read from it.
The next cell sets up google authentication and gives the notebook read and write access to your bucket.
https://github.com/google-research/text-to-text-transfer-transformer/issues/318
```
!pip install -q t5 tensorflow-text==2.2
```
```
from google.colab import auth
auth.authenticate_user()
!gcloud init
path_to_cloud_bucket = 'gs://terraformgenerator/ml/GPTNeo' #@param {type:"string"}
```
## Set Up Dataset
We first need to download and tokenize a dataset. If you just want to sample from a pretrained model, you can skip this step and move on to the `Pretrained Model` section.
You can choose from:
* Sampling Only - choose this option if you only wish to sample from our trained models, then move on to the `Pretrained Model` section.
* OpenWebText - an opensource clone of OpenAI's WebText dataset, the original training data of GPT2.
* YoutubeSubtitles - a dataset of subtitles scraped from youtube videos.
* Hackernews - comments scraped from hackernews
* NIHExporter - Data relating to various projects from the national institute of health.
* Custom - if this option is chosen you will be prompted to enter the path to your own dataset. It should be a directory containing .txt or .jsonl files.
All these datasets are from EleutherAI's side project - [The Pile™](https://github.com/EleutherAI/The-Pile) - an effort to gather a general purpose, diverse and open source plain text dataset large enough to train 1T+ parameter language models.
Even the smallest datasets are fairly large files, so this step will likely take a while. Select a dataset in the next cell, then run the next two cells, and go grab a snack and a cup of tea 😊
Alternatively, you can provide your own dataset in the form of a folder or gzip archive of .txt files. Simply select 'Custom' below and follow input the path to your data and the name of your dataset when prompted.
```
# Select a Dataset:
import os

dataset = 'Sampling_Only' #@param ["Sampling_Only", "OpenWebText", "YoutubeSubtitles", "HackerNews", "NIHExporter", "Custom"]

if dataset == "Sampling_Only":
    # Sampling from a pretrained model needs no training data.
    pass
elif dataset == 'OpenWebText':
    !wget https://the-eye.eu/public/AI/pile_preliminary_components/openwebtext2.jsonl.zst.tar -O openwebtext.tar.xz
    !tar xf openwebtext.tar.xz
    dataset_path = "openwebtext"
    dataset_name = dataset_path
    out_name = dataset_name + "_tokenized"
elif dataset == 'YoutubeSubtitles':
    os.makedirs('data', exist_ok=True)
    !wget https://the-eye.eu/public/AI/pile_preliminary_components/yt_subs.jsonl.zst -O data/yt_subs.jsonl.zst
    dataset_path = 'data'
    dataset_name = 'ytsubs'
    out_name = dataset_name + "_tokenized"
elif dataset == 'HackerNews':
    os.makedirs('data', exist_ok=True)
    !wget https://the-eye.eu/public/AI/pile_preliminary_components/hn.tar.gz -O data/hn.tar.gz
    dataset_path = 'data'
    dataset_name = 'hackernews'
    out_name = dataset_name + "_tokenized"
elif dataset == "NIHExporter":
    os.makedirs('data', exist_ok=True)
    !wget https://the-eye.eu/public/AI/pile_preliminary_components/NIH_ExPORTER_awarded_grant_text.jsonl.zst -O data/NIH_ExPORTER_awarded_grant_text.jsonl.zst
    dataset_path = 'data'
    # NOTE(review): wget already downloaded straight into data/, so this mv
    # from the cwd looks like a leftover no-op — confirm before removing.
    os.system('mv NIH_ExPORTER_awarded_grant_text.jsonl.zst ./data')
    dataset_name = 'nihexporter'
    out_name = dataset_name + "_tokenized"
elif dataset == "Custom":
    dataset_path = input('Enter the path to the folder containing your data: ')
    dataset_name = input('Enter the name of your dataset: ')
    out_name = dataset_name + "_tokenized"
else:
    # NOTE(review): this message omits "Sampling_Only", which is a valid
    # choice handled above.
    raise NotImplementedError('please select from available options: ["OpenWebText", "YoutubeSubtitles", "HackerNews", "NIHExporter", "Custom"]')

# NOTE(review): %%bash must be the first line of its own cell; this looks like
# a separate diagnostics cell merged into this one by the notebook export.
%%bash
echo $dataset
cd GPTNeo
pwd
```
### Tokenize and Upload Data
Now tokenize the dataset and copy it over to your google cloud bucket. You may skip this step if you are sampling from a pre-trained model.
```
# NOTE(review): `!cd` runs in a throwaway subshell and does not change the
# notebook's working directory (use %cd for that); these two lines are diagnostics.
!cd GPTNeo
!pwd

# Tokenize Data
!python data/create_tfrecords.py --input_dir /content/GPTNeo/$dataset_path --name $dataset_name --files_per 1000 --output_dir $out_name --write_dataset_config --processes 1

# copy the data to your bucket
if not path_to_cloud_bucket.endswith('/'):
    path_to_cloud_bucket += '/'
copy_loc = path_to_cloud_bucket + "datasets/" + dataset
!gsutil -m cp -r /content/GPTNeo/$out_name $copy_loc
!gsutil ls $path_to_cloud_bucket
```
Before starting training - you'll need to edit your dataset & model configs to point to your buckets / data. You need to do this even if you are sampling from a pre-trained model.
* First change the writefile path to point to your chosen dataset - e.g `%%writefile configs/dataset_configs/ytsubs.json`
* Change the "path" field to point to your cloud bucket location - e.g `gs://neo_lmdatasets/datasets/ytsubs_*.tfrecords`
* Change `dataset_name` in `%%writefile configs/dataset_configs/dataset_name.json` to the name of your chosen dataset.
* Once you've made the edits, then run the cell below to overwrite the existing files.
```
%%writefile configs/dataset_configs/Sampling_Only.json
{
"path": "gs://eleutherai/datasets/Sampling_Only/Sampling_Only*.tfrecords",
"eval_path": "",
"n_vocab": 50256,
"tokenizer_is_pretrained": true,
"tokenizer_path": "gpt2",
"eos_id": 50256,
"padding_id": 50257
}
```
## Set Model Configs
The model below is identical to our pretrained GPT3XL model (1.3B Params).
If you want to use a smaller model, you can modify any of the config files in ../configs/ ending in _8.json, all of which are designed to train on tpu-v8s.
For a more detailed breakdown on what each item in the configuration file means - please read through our training and config guides in our [github README](https://github.com/EleutherAI/GPTNeo#training-guide).
You'll want to change the first item in the `datasets` list to the name of your chosen dataset. (the filename minus .json in ./configs/dataset_configs)
You'll also want to modify the `model_path` field to point to your google cloud bucket, so checkpoints get saved to there.
```
%%writefile configs/GPT3_XL.json
{
"n_head": 16,
"n_vocab": 50257,
"embed_dropout": 0,
"lr": 0.0002,
"lr_decay": "cosine",
"warmup_steps": 3000,
"beta1": 0.9,
"beta2": 0.95,
"epsilon": 1e-8,
"opt_name": "adam",
"weight_decay": 0,
"train_batch_size": 256,
"attn_dropout": 0,
"train_steps": 600000,
"eval_steps": 0,
"predict_steps": 1,
"res_dropout": 0,
"eval_batch_size": 4,
"predict_batch_size": 1,
"iterations": 100,
"n_embd": 2048,
"datasets": [["pile", null, null, null]],
"model": "GPT",
"model_path": "gs://eleutherai/GPT3_XL",
"n_ctx": 2048,
"n_layer": 24,
"scale_by_depth": true,
"scale_by_in": false,
"attention_types" : [[["global", "local"],12]],
"mesh_shape": "x:4,y:2",
"layout": "intermediate_expanded:x,heads:x,vocab:n_vocab,memory_length:y,embd:y",
"activation_function": "gelu",
"recompute_grad": true,
"gradient_clipping": 1.0,
"tokens_per_mb_per_replica": 2048,
"precision": "bfloat16"
}
```
## Training from Scratch
Now we will begin to train the model. If no previous model is found in "model_path", the model will start training from scratch. If you'd prefer to finetune from pretrained, skip to the `Finetune a Pretrained Model` section.
If everything's set up correctly, you can now run the main.py function to start training!
```
# NOTE(review): the config cell above writes configs/GPT3_XL.json but this
# trains --model colab_XL — confirm a colab_XL config exists in the repo,
# or point this flag at GPT3_XL.
!python3 main.py --model colab_XL --steps_per_checkpoint 500 --tpu colab
```
## Pretrained Model
If you want to sample from or finetune a pretrained model, EleutherAI has pretrained two models for release. One with [1.3B parameters](https://the-eye.eu/public/AI/gptneo-release/GPT3_XL/), and another with [2.7B](https://the-eye.eu/public/AI/gptneo-release/GPT3_2-7B/).
Select an option below to download the weights locally. You will then need to upload them to your cloud bucket in order to finetune from them. If the download command isn't working, try the commented out code to download from a different source.
The 2-7B model likely won't fit into the colab TPUs memory, and you may have to get some larger pods to finetune from it.
Sampling from it, however, works just fine.
```
# @title Download pretrained model weights:
pretrained_model = 'GPT3_2-7B' #@param ["GPT3_XL", "GPT3_2-7B"]
!wget -m -np -c -U "eye02" -w 2 -R "index.html*" "https://the-eye.eu/public/AI/gptneo-release/$pretrained_model/"
path_to_local_weights = f"/content/GPTNeo/the-eye.eu/public/AI/gptneo-release/{pretrained_model}"
# URL = f"http://eaidata.bmk.sh/data/gptneo-release/{pretrained_model}/"
# FOLDER_NAME = "GPT3_XL"
# !curl $URL | grep -i "</a>" | sed -n 's/.*href="\([^"]*\).*/\1/p' | sed "s|^|$URL|" | xargs -n 1 -P 4 wget -P $pretrained_model
# path_to_local_weights = pretrained_model
# upload to your bucket
bucket_base = "gs://" + path_to_cloud_bucket.replace('gs://', '').split('/')[0]
!gsutil -m cp -r $path_to_local_weights $bucket_base
```
If everything has worked successfully you should now see your model listed in your bucket below.
```
# Verify the weights were copied to the bucket.
!gsutil ls $bucket_base
```
Now we want to make a few modifications to the model config in order to get training / sampling working on colab.
If you are just sampling from our pretrained models, you can leave the settings as is, run the cell below, then move on to the `Sample from your model` section.
If finetuning, you can change parameters below.
* `path_to_model` should point to the model weights location in your cloud bucket, and will default to `$bucket_base/${pretrained_model}` if nothing is entered.
* `batch_size` is your train batch size - if you're encountering memory errors, try lowering this.
* `dataset_name` is the name of your dataset, if nothing is entered, this should default to the dataset you selected in the `Set Up Dataset` section.
* `mesh_shape` specifies the way the model will be divided up across the TPU cores. We suggest leaving this alone unless you know what you're doing.
* `train_steps` specifies how many steps you want the model to finetune for. We set this to 1000 for demonstrative purposes but you may need to increase this a little depending on your goals. If you are just sampling from the model, you can leave this as is.
* `steps_per_checkpoint` specifies how often you want to save model weights during training.
```
# @title Modify config for colab.
import json
from pprint import pprint
path_to_model = "" #@param {type:"string"}
batch_size = 8 #@param {type:"integer"}
dset = "" #@param {type:"string"}
mesh_shape = "x:4,y:2" #@param {type:"string"}
train_steps = 1000 #@param {type:"integer"}
steps_per_checkpoint = 500 #@param {type:"integer"}
start_step = 400000 if pretrained_model == "GPT3_2-7B" else 362000
if path_to_model == "":
path_to_model = f'{bucket_base.strip("/")}/{pretrained_model}'
print(f'MODEL PATH: {path_to_model}\n')
if dset == "" and dataset != "Sampling_Only":
dset = dataset
elif dataset is None and dset == "":
dset = "pile"
def pad_to_multiple_of(n, mult):
    """Round n up to the nearest multiple of mult (unchanged if already one)."""
    remainder = n % mult
    if remainder:
        return n + (mult - remainder)
    return n
# Load the config shipped with the downloaded weights as the starting point.
with open(f'{path_to_local_weights}/config.json', 'r') as f:
    data = json.load(f)
pprint(data)

# Keep the checkpoint's dataset list unless the user picked one above.
dset_val = [[dset, None, None, None]] if dset != "" else data["datasets"]
mods = {
    "mesh_shape": mesh_shape,
    "layout": "intermediate_expanded:x,heads:x,memory_length:y,embd:y",
    "model_path": path_to_model,
    "datasets": dset_val,
    # train_steps in the config is absolute, so extend past the checkpoint's step.
    "train_steps": start_step + train_steps,
    "eval_steps": 0,
    "train_batch_size": batch_size,
    "predict_batch_size": batch_size
}
data.update(mods)
print('\n--->\n')
pprint(data)
with open(f'configs/{pretrained_model}.json', 'w') as outfile:
    json.dump(data, outfile, indent=2)
```
### Begin Fine-Tuning
If you are fine-tuning the pretrained model, this line of code will begin the training.
```
# Finetune from the uploaded checkpoint using the modified config.
!python3 main.py --model $pretrained_model --steps_per_checkpoint $steps_per_checkpoint --tpu colab
```
### Sample from your model
Once training is finished, (or your pretrained model is on your bucket), you can run the same command with the --predict flag to sample from your model.
To pass in a prompt, save it to a .txt file, and pass in the name of the file with the --prompt flag.
use the cell below to enter your prompt, and run it to save it to example_prompt.txt.
You may need to decrease the predict batch size in your config if you're facing OOM errors.
Let's see if the GPTNeo model can finish coding itself, with a sample prompt consisting of the beginning of a `torch.nn.Module`:
```
%%writefile example_prompt.txt
class GPT(nn.Module):
""" the full GPT language model, with a context size of block_size """
def __init__(self, config):
super().__init__()
# input embedding stem
self.tok_emb = nn.Embedding(config.vocab_size, config.n_embd)
self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))
self.drop = nn.Dropout(config.embd_pdrop)
# transformer
self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)])
# decoder head
self.ln_f = nn.LayerNorm(config.n_embd)
self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.block_size = config.block_size
self.apply(self._init_weights)
logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters()))
!python3 main.py --model $pretrained_model --steps_per_checkpoint 500 --tpu colab --predict --prompt example_prompt.txt
```
# Evaluating the model
This section assumes you are using a pretrained model and relies on variables created in the `Pretrained model` section.
## Wikitext
Download the wikitext test set:
```
# Download and unpack the raw wikitext-103 archive.
wikitext103_src = "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip"
!wget $wikitext103_src
!unzip wikitext-103-raw-v1.zip
```
Tokenize and upload to bucket:
```
# Stage only the test split for tokenization.
!mkdir wikitext
!mv /content/GPTNeo/wikitext-103-raw/wiki.test.raw wikitext/wikitext_test.txt

# Tokenize Data
!python data/create_tfrecords.py --input_dir wikitext --name wikitext --files_per 1000 --output_dir wikitext_tokenized --write_dataset_config --processes 1 --wikitext-detokenize

# copy the data to your bucket
if not path_to_cloud_bucket.endswith('/'):
    path_to_cloud_bucket += '/'
copy_loc = path_to_cloud_bucket
!gsutil -m cp -r wikitext_tokenized $copy_loc
!gsutil ls $path_to_cloud_bucket
```
Now make a dataset config that points to the tokenized wikitext data:
```
%%writefile configs/dataset_configs/wikitext.json
{
"path": "",
"eval_path": "gs://terraformgenerator/ml/GPTNeo/wikitext_tokenized/*.tfrecords",
"n_vocab": 50256,
"tokenizer_is_pretrained": true,
"tokenizer_path": "gpt2",
"eos_id": 50256,
"padding_id": 50257
}
```
And update your model config to point to that dataset:
```
# @title Modify config for wikitext.
import json
from pprint import pprint
batch_size = 8 #@param {type:"integer"}
assert pretrained_model is not None
with open(f'configs/{pretrained_model}.json', 'r') as f:
data = json.load(f)
pprint(data)
dset_val = [["wikitext", None, None, None]]
mods = {
"datasets": dset_val,
"eval_steps": 139 // batch_size,
"train_batch_size": batch_size,
"eval_batch_size": batch_size,
}
data.update(mods)
print('\n--->\n')
pprint(data)
with open(f'configs/{pretrained_model}.json', 'w') as outfile:
json.dump(data, outfile, indent=2)
```
Now run model in eval mode over tokenized data:
```
# Evaluate the model over the tokenized wikitext data configured above.
!python3 main.py --eval --tpu colab --model $pretrained_model
```
## Lambada
Lambada eval is built into the codebase and can be run by adding a field to your model config
```
# @title Modify config for Lambada.
import json
from pprint import pprint
batch_size = 8 #@param {type:"integer"}
assert pretrained_model is not None
with open(f'configs/{pretrained_model}.json', 'r') as f:
data = json.load(f)
mods = {
"datasets": dset_val,
"eval_steps": 0,
"train_batch_size": batch_size,
"eval_batch_size": batch_size,
"eval_tasks": ["lambada"]
}
data.update(mods)
print('\n--->\n')
pprint(data)
with open(f'configs/{pretrained_model}.json', 'w') as outfile:
json.dump(data, outfile, indent=2)
```
Now run the eval:
```
# Run the built-in Lambada evaluation task.
!python3 main.py --eval --tpu colab --model $pretrained_model
```
| github_jupyter |
<a href="https://colab.research.google.com/github/gemenerik/RTSR4k/blob/master/RTSR4k.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
<p><img alt="Colaboratory logo" height="45px" src="/img/colab_favicon.ico" align="left" hspace="10px" vspace="0px"></p>
<h1>RTSR4k</h1>
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent tincidunt pretium enim. Nunc fermentum malesuada nibh non scelerisque. Praesent et erat et ligula iaculis elementum vitae ac odio. Fusce feugiat mollis aliquam. Curabitur tristique dolor ut orci volutpat gravida. Nunc malesuada tempor mauris, eget posuere ante consectetur ac. Ut maximus justo nisi, ut eleifend odio posuere sed. Ut dolor urna, ullamcorper ut turpis sit amet, finibus ultricies massa. Sed sollicitudin nisl id libero ullamcorper suscipit.
Nullam auctor leo tellus, nec viverra massa malesuada quis. Nunc euismod nisl euismod, dignissim quam eu, lobortis mauris. Nunc efficitur facilisis viverra. Fusce at arcu non risus lacinia vestibulum. Nunc nec condimentum lorem. Pellentesque laoreet neque et metus luctus, ac finibus massa iaculis. Ut eu pulvinar ex, in dignissim ipsum. Etiam vitae lacinia felis. Praesent ornare dolor eget euismod sollicitudin.
### Pull from GitHub
```
# Start from a clean checkout of the project.
!sudo rm -rf RTSR4k
!git clone https://github.com/gemenerik/RTSR4k
import os

# Create the (empty-in-git) validation directory, then cd to the repo root.
os.chdir('RTSR4k/data')
os.mkdir('val')
os.chdir('..')
```
### Tweakable settings
```
# Super-resolution scale factor and the Gaussian blur radius applied when
# generating the low-resolution inputs.
UPSCALE_FACTOR = 3 #@param {type:"slider", min:1, max:5, step:1}
GAUSSIAN_BLUR_RADIUS = 0.2 #@param {type:"slider", min:0, max:1, step:0.1}
```
### Data processing
```
import argparse
import os
from os import listdir
from os.path import join
from PIL import Image, ImageFilter
from torch.utils.data.dataset import Dataset
from torchvision.transforms import Compose, CenterCrop, Scale
from tqdm import tqdm
def is_image_file(filename):
    """True if filename has a recognized image extension (case-insensitive).

    Case-insensitivity generalizes the original explicit list, which covered
    upper-case .JPG/.JPEG/.PNG but inconsistently omitted upper-case .BMP.
    """
    return filename.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp'))
def is_video_file(filename):
    """True if filename has a recognized video extension (case-insensitive).

    Case-insensitivity generalizes the original lowercase-only list.
    """
    return filename.lower().endswith(('.mp4', '.avi', '.mpg', '.mkv', '.wmv', '.flv'))
def calculate_valid_crop_size(crop_size, upscale_factor):
    """Largest size <= crop_size that divides evenly by upscale_factor."""
    return (crop_size // upscale_factor) * upscale_factor
def blur(img):
    """Return a copy of the PIL image blurred with the configured Gaussian radius."""
    return img.filter(ImageFilter.GaussianBlur(GAUSSIAN_BLUR_RADIUS))
def input_transform(crop_size, upscale_factor):
    """Build the LR input pipeline: center-crop, bicubic downscale, then blur.

    NOTE(review): torchvision's `Scale` is the deprecated alias of `Resize`;
    this file imports the old name, so it is kept here for compatibility.
    """
    return Compose([
        CenterCrop(crop_size),
        Scale(crop_size // upscale_factor, interpolation=Image.BICUBIC), blur
    ])
def target_transform(crop_size):
    """Build the HR target pipeline: a center-crop only (no downscaling)."""
    steps = [CenterCrop(crop_size)]
    return Compose(steps)
class DatasetFromFolder(Dataset):
    """Paired LR/HR image dataset read from data/ and target/ subfolders.

    Expects the layout produced by generate_dataset():
    <dataset_dir>/scaling_factor_<k>/{data,target}/<image files>.
    Only the Y (luminance) channel of each image is returned.
    """

    def __init__(self, dataset_dir, upscale_factor, input_transform=None, target_transform=None):
        super(DatasetFromFolder, self).__init__()
        self.image_dir = dataset_dir + '/scaling_factor_' + str(upscale_factor) + '/data'
        self.target_dir = dataset_dir + '/scaling_factor_' + str(upscale_factor) + '/target'
        # NOTE(review): listdir order is OS-dependent; LR/HR pairing relies on
        # both folders listing identically-named files in the same order —
        # confirm filenames match 1:1.
        self.image_filenames = [join(self.image_dir, x) for x in listdir(self.image_dir) if is_image_file(x)]
        self.target_filenames = [join(self.target_dir, x) for x in listdir(self.target_dir) if is_image_file(x)]
        self.input_transform = input_transform
        self.target_transform = target_transform

    def __getitem__(self, index):
        # Keep only the Y channel; Cb/Cr are discarded for training.
        image, _, _ = Image.open(self.image_filenames[index]).convert('YCbCr').split()
        target, _, _ = Image.open(self.target_filenames[index]).convert('YCbCr').split()
        if self.input_transform:
            image = self.input_transform(image)
        if self.target_transform:
            target = self.target_transform(target)
        return image, target

    def __len__(self):
        return len(self.image_filenames)
def generate_dataset(data_type, upscale_factor, data_target):
    """Create an LR/HR image-pair dataset under data/<data_target>.

    Reads every image in data/<data_type>, center-crops it to the largest
    size divisible by upscale_factor, and writes the blurred + downscaled
    copy to .../data and the crop-only copy to .../target.

    :param data_type: subfolder of data/ holding the source images
    :param upscale_factor: super-resolution scale factor
    :param data_target: output subfolder name under data/
    """
    images_name = [x for x in listdir('data/' + data_type) if is_image_file(x)]
    crop_size = calculate_valid_crop_size(256, upscale_factor)
    lr_transform = input_transform(crop_size, upscale_factor)
    hr_transform = target_transform(crop_size)

    # makedirs(..., exist_ok=True) replaces the original four exists()/makedirs()
    # pairs: it creates all intermediate directories in one call and avoids the
    # check-then-create race.
    path = 'data/' + data_target + '/scaling_factor_' + str(upscale_factor)
    image_path = path + '/data'
    target_path = path + '/target'
    os.makedirs(image_path, exist_ok=True)
    os.makedirs(target_path, exist_ok=True)

    for image_name in tqdm(images_name, desc='generate ' + data_type + ' dataset with upscale factor = '
                           + str(upscale_factor) + ' from dataset'):
        image = Image.open('data/' + data_type + '/' + image_name)
        target = image.copy()
        image = lr_transform(image)
        target = hr_transform(target)
        image.save(image_path + '/' + image_name)
        target.save(target_path + '/' + image_name)
if __name__ == "__main__":
    # Build train/test/val splits at the configured scale factor.
    generate_dataset(data_type='original_data/train', upscale_factor=UPSCALE_FACTOR, data_target='train')
    generate_dataset(data_type='original_data/test/Set14', upscale_factor=UPSCALE_FACTOR, data_target='test')
    generate_dataset(data_type='val', upscale_factor=UPSCALE_FACTOR, data_target='val')
```
### Train
```
from model import Net
from torchvision import transforms
from data_utils import DatasetFromFolder
import torch
import torch.nn as nn
import torch.optim
import pylab
import matplotlib.pyplot as plt

if __name__ == "__main__":
    # Build the sub-pixel CNN for the configured scale factor.
    net = Net(upscale_factor=UPSCALE_FACTOR)
    print(net)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('Running on', device)
    # NOTE(review): comparing a torch.device object to the string 'cuda' —
    # confirm this evaluates True on the installed torch version; also note
    # that the input batches below are never moved to the device.
    if device == 'cuda':
        net.cuda()
    transform = transforms.Compose([
        # you can add other transformations in this list
        transforms.ToTensor()
    ])
    # trainset = torchvision.datasets.ImageFolder(root = './data/train/SRF_3', transform=transforms.ToTensor(),
    # target_transform=None)
    trainset = DatasetFromFolder('data/train', upscale_factor=UPSCALE_FACTOR, input_transform=transforms.ToTensor(),
                                 target_transform=transforms.ToTensor())
    testset = DatasetFromFolder('data/val', upscale_factor=UPSCALE_FACTOR, input_transform=transforms.ToTensor(),
                                target_transform=transforms.ToTensor())
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                              shuffle=True, num_workers=2)
    # testset = torchvision.datasets.ImageFolder(root = './data/val/SRF_3', transform=transform,
    # target_transform=None)
    # NOTE(review): testloader is built but never used in this cell.
    testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                             shuffle=False, num_workers=2)
    criterion = nn.MSELoss()
    # NOTE(review): 10e-3 equals 1e-2, an order of magnitude above Adam's
    # usual 1e-3 default — confirm this was intentional.
    optimizer = torch.optim.Adam(net.parameters(), lr=10e-3)

    " train net "
    # Live loss plot: one point per epoch.
    epochs = []
    losses = []
    plt.ion()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_ylabel('total loss')
    ax.set_xlabel('epoch')
    Ln, = ax.plot([0],[1])
    pylab.show()
    # Halve the learning rate every 10 epochs.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)
    for epoch in range(30):  # loop over the dataset multiple times
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        print('[%d, %5d] total loss: %.3f' %
              (epoch + 1, i + 1, running_loss,))
        epochs.append(epoch+1)
        losses.append(running_loss)
        # Redraw the loss curve in place.
        Ln.set_ydata(losses)
        Ln.set_xdata(epochs)
        ax.set_xlim(1,epoch+1)
        ax.set_ylim(0,max(losses))
        fig.canvas.draw()
        plt.show()
        plt.pause(0.1)
        running_loss = 0.0
        scheduler.step()
        # NOTE(review): scheduler.get_lr() is deprecated in newer torch in
        # favour of get_last_lr() — confirm against the pinned version.
        print('lr: ' + str(scheduler.get_lr()))
    print('Finished Training')

    " save "
    PATH = './Trained.pth'
    torch.save(net.state_dict(), PATH)
```
### Test image
```
import numpy as np
import torch
from PIL import Image
from torch.autograd import Variable
from torchvision.transforms import ToTensor
from model import Net
from torchvision import transforms
from data_utils import DatasetFromFolder

trainset = DatasetFromFolder('data/train', upscale_factor=UPSCALE_FACTOR, input_transform=transforms.ToTensor(),
                             target_transform=transforms.ToTensor())
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                          shuffle=True, num_workers=2)

" load net "
# Restore the weights saved by the training cell.
PATH = 'Trained.pth'
net = Net(UPSCALE_FACTOR)
net.load_state_dict(torch.load(PATH))

i = UPSCALE_FACTOR
path = 'data/test/scaling_factor_%s/' %i
print(path)
image_name = 'baboon.png'

# Super-resolve only the Y channel; Cb/Cr are bicubic-upsampled below.
img = Image.open(path + '/data/' + image_name).convert('YCbCr')
y, cb, cr = img.split()
image = Variable(ToTensor()(y)).view(1, -1, y.size[1], y.size[0])

# NOTE(review): this training-batch forward pass (`pic`, `output`) is unused
# below — looks like leftover debugging.
inputs, target = next(iter(trainloader))
pic = inputs.numpy()
output = net(inputs)

out = net(image)
out_img_y = out.data[0].numpy()
out_img_y *= 255.0
out_img_y = out_img_y.clip(0, 255)
out_img_y = Image.fromarray(np.uint8(out_img_y[0]), mode='L')
# Upsample chroma to the super-resolved luma size, then recombine to RGB.
out_img_cb = cb.resize(out_img_y.size, Image.BICUBIC)
out_img_cr = cr.resize(out_img_y.size, Image.BICUBIC)
super_res_image = Image.merge('YCbCr', [out_img_y, out_img_cb, out_img_cr]).convert('RGB')
super_res_image.save('test.jpg')

# `display` is provided by IPython in the notebook environment.
low_res_image = Image.open(path + 'data/' + image_name)
display(low_res_image)
display(super_res_image)
high_res_image = Image.open(path + 'target/' + image_name)
display(high_res_image)
```
##### PSNR
```
from numpy import mean
from math import log10, sqrt
from cv2 import imread

# Compare the ground-truth HR image against the saved super-resolved output.
original = imread(path + 'target/' + image_name)
contrast = imread('test.jpg',1)
def psnr(img1, img2):
    """Peak signal-to-noise ratio (dB) between two images.

    Casts both images to float64 before subtracting: the inputs are uint8
    arrays from cv2.imread, and uint8 subtraction wraps around instead of
    producing negative differences, which corrupted the MSE in the original
    (and made the result asymmetric in its arguments).

    :param img1: image array (any numeric dtype), same shape as img2
    :param img2: image array to compare against
    :returns: PSNR in dB; 100 for identical images (infinite PSNR capped)
    """
    diff = img1.astype('float64') - img2.astype('float64')
    mse = mean(diff ** 2)
    if mse == 0:
        return 100
    PIXEL_MAX = 255.0
    return 20 * log10(PIXEL_MAX / sqrt(mse))
# NOTE(review): 'test.jpg' is JPEG-compressed, which itself lowers measured
# PSNR — consider saving the output as PNG for a fair comparison.
d=psnr(original,contrast)
print(d)
```
| github_jupyter |
### Imports
```
import os
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import cdist
from sklearn.metrics import precision_recall_curve, auc
from sklearn.metrics import roc_auc_score, roc_curve, mean_squared_error, average_precision_score
from tqdm import tqdm
%matplotlib inline
```
### Get test set pdbs
```
# Resolve paths relative to this notebook's directory.
root_dir = os.path.abspath('')
parapred_dir = os.path.join(root_dir, "parapred")
data_dir = os.path.join(parapred_dir, "data")

# test_csv = os.path.join(data_dir, "dataset_test.csv")
test_csv = os.path.join(data_dir, "dataset_test_BIG.csv")
test_df = pd.read_csv(test_csv)
test_df.head()

uniq_pdb_list = test_df["pdb"].unique().tolist()
print("Number of Abs in our test set: \t{}".format(len(uniq_pdb_list)))
```
### Get Paragraph's detailed predictions for test set which include labels
```
# PECAN_dir = os.path.join(data_dir, "PECAN")
# PG_preds = os.path.join(PECAN_dir, "testset_detailed_record.csv")
# PG_df = pd.read_csv(PG_preds)
# PG_df["pdb"] = PG_df.apply(lambda row: row["pdb"].lower(), axis=1)
# print(PG_df.shape)
# PG_df.head()
```
---
# Run parapred on Chothia numbered files if this hasn't been done already
Parapred takes around 1 minute to process every 10 pdb files if they have not yet been processed. If all files have been processed then the below takes just a few seconds.
```
# Track structures we cannot process so they can be excluded downstream.
pdb_codes_not_found = []
pdb_codes_parapred_failed_to_run = []

for index, row in test_df.iterrows():
    pdb = row["pdb"]
    Hchain = row["Hchain"]
    Lchain = row["Lchain"]
    pdb_file = "../ABDB/entries/{}/structure/chothia/{}.pdb".format(pdb, pdb)
    # pdb_file_parapred = "../ABDB/entries/{}/structure/chothia/{}_parapred_custom_PECANweights.pdb".format(pdb, pdb)
    pdb_file_parapred = "../ABDB/entries/{}/structure/chothia/{}_parapred_custom_EXPANDEDweights.pdb".format(pdb, pdb)
    try:
        # create a copy of the main chothia numbered file if it does not exist already
        # if I mess up parapred then I can remove the if and copy fresh from the original
        if os.path.isfile(pdb_file_parapred):
            pass # File already exists
        else:
            !cp $pdb_file $pdb_file_parapred

        # if parapred has been ran on the file already then it will lose its header and start with 'ATOM'
        with open(pdb_file_parapred, 'r') as f:
            if f.readline()[:4] == "ATOM":
                pass
            else:
                !parapred pdb $pdb_file_parapred --abh $Hchain --abl $Lchain

        # check parapred has run on file and modified it
        with open(pdb_file_parapred, 'r') as f:
            if f.readline()[:4] != "ATOM":
                pdb_codes_parapred_failed_to_run.append(pdb)
    except FileNotFoundError:
        pdb_codes_not_found.append(pdb)
    # if index == 0: break

print(f"pdb codes not found: {pdb_codes_not_found}")
print(f"pdb codes Parapred failed to run on: {pdb_codes_parapred_failed_to_run}")
```
### Remove pdb codes that Parapred failed on
```
# Drop entries whose files were missing or that Parapred could not process.
print(f"No. of pdb codes before: \t{test_df.shape[0]}")
test_df = test_df[~test_df["pdb"].isin(pdb_codes_not_found)].reset_index(drop=True)
test_df = test_df[~test_df["pdb"].isin(pdb_codes_parapred_failed_to_run)].reset_index(drop=True)
print(f"No. of pdb codes after: \t{test_df.shape[0]}")
test_df.head()
```
---
# Functions needed to create dataframe of Parapred predictions from pdb files
##### Chothia defined CDRs +2 extras residues at both ends (as defined in Parapred supplementary material)
```
def get_chothia_numbered_CDRs():
    """Return Chothia-numbered residue IDs for each CDR loop, extended by
    2 residues at both ends (as defined in the Parapred supplementary material).

    :returns: six lists of Chothia position strings: L1, L2, L3, H1, H2, H3

    Fixes two insertion-code typos in the original tables: L3 contained
    '94D' where the ascending 95A..95E run requires '95D', and H1 contained
    '32F' where the ascending 31A..31G run requires '31F'.
    """
    L1 = ['22', '23', '24', '25', '26',
          '27', '28', '29', '30', '30A',
          '30B', '30C', '30D', '30E', '30F',
          '31', '32', '33', '34', '35', '36']
    L2 = ['48', '49', '50', '51', '52',
          '53', '54', '54A', '54B', '54C',
          '54D', '55', '56', '57', '58']
    L3 = ['87', '88', '89', '90', '91',
          '92', '93', '94', '95', '95A',
          '95B', '95C', '95D', '95E', '96',
          '97', '98', '99']
    H1 = ['24', '24A', '24B', '24C', '24D',
          '24E', '24F', '25', '26', '27',
          '28', '29', '30', '31', '31A',
          '31B', '31C', '31D', '31E', '31F',
          '31G', '32', '33', '34']
    H2 = ['50', '51', '52', '52A', '52B',
          '52C', '53', '54', '55', '56', '57', '58']
    H3 = ['93', '94', '95', '96', '97', '98',
          '99', '100', '100A', '100B', '100C',
          '100D', '100E', '100F', '100G', '100H', '100I',
          '100J', '100K', '100L', '100M', '100N', '100O',
          '100P', '100Q', '100R', '100S', '100T', '101',
          '102', '103', '104']
    return L1, L2, L3, H1, H2, H3
def format_chothia_pdb(pdb_file):
    '''
    Process pdb file into pandas df
    Original author: Alissa Hummer

    Parses fixed-width PDB ATOM records by column slicing. The Parapred score
    is read from columns 60-66 (the B-factor field), where `parapred pdb`
    writes its per-residue prediction.

    :param pdb_file: file path of .pdb file to convert
    :returns: df with atomic level info (hydrogen atoms removed)
    '''
    pd.options.mode.chained_assignment = None
    # Tab never appears in PDB lines, so each whole line lands in one cell.
    pdb_whole = pd.read_csv(pdb_file,header=None,delimiter='\t')
    pdb_whole.columns = ['pdb']
    pdb = pdb_whole[pdb_whole['pdb'].str.startswith('ATOM')]
    # Fixed-column slices of the ATOM record (0-indexed).
    pdb['Atom_Name'] = pdb['pdb'].str[11:16].copy()
    pdb['x'] = pdb['pdb'].str[30:38].copy()
    pdb['y'] = pdb['pdb'].str[38:46].copy()
    pdb['z'] = pdb['pdb'].str[46:54].copy()
    pdb['AA'] = pdb['pdb'].str[17:20].copy()
    pdb['Chain'] = pdb['pdb'].str[20:22].copy()
    # Residue number plus insertion code (e.g. '100A'), whitespace-stripped.
    pdb['Chothia'] = pdb['pdb'].str[22:27].copy().str.strip()
    # B-factor field repurposed by parapred to hold the prediction (percentage).
    pdb['Parapred'] = pdb['pdb'].str[60:66].copy()
    pdb['Atom_type'] = pdb['pdb'].str[77].copy()
    pdb.drop('pdb',axis=1,inplace=True)
    pdb.replace({' ':''}, regex=True, inplace=True)
    pdb.reset_index(inplace=True)
    pdb.drop('index',axis=1,inplace=True)
    # remove H atoms from our data (interested in heavy atoms only)
    pdb = pdb[pdb['Atom_type']!='H']
    pdb['x'] = pdb['x'].str.strip().astype(float)
    pdb['y'] = pdb['y'].str.strip().astype(float)
    pdb['z'] = pdb['z'].str.strip().astype(float)
    return pdb
def parapred_CDRplus2_only_df(df, pdb_code, H_id, L_id, Calpha_only=True):
    '''
    Create smaller df containing only data for CDR C-alpha atoms

    :param df: Chothia numbered dataframe for specific pdb entry
    :param pdb_code: 4-letter pdb code (unused in the body; kept for interface parity)
    :param H_id: heavy-chain identifier in the structure
    :param L_id: light-chain identifier in the structure
    :param Calpha_only: keep only CA atoms when True, all heavy atoms when False
    :returns: df with same cols as input but only rows for CDR+2 C-alphas if Calpha_only=True
              or all heavy atoms if Calpha_only=False
    '''
    # get Chothia numbering
    L1, L2, L3, H1, H2, H3 = get_chothia_numbered_CDRs()
    CDRL = L1 + L2 + L3
    CDRH = H1 + H2 + H3

    # trim df so it contains only CDR residues that exist
    if Calpha_only:
        df_CDRplus2 = df[(((df["Chothia"].isin(CDRL)) & (df["Chain"]==L_id)) |
                          ((df["Chothia"].isin(CDRH)) & (df["Chain"]==H_id))) &
                         (df["Atom_Name"].str.strip() == "CA")]
    else:
        df_CDRplus2 = df[(((df["Chothia"].isin(CDRL)) & (df["Chain"]==L_id)) |
                          ((df["Chothia"].isin(CDRH)) & (df["Chain"]==H_id)))]

    # get parapred prediction as decimal (0-1) rather than % (0-100)
    df_CDRplus2["Parapred"] = df_CDRplus2["Parapred"].astype(float)/100

    # drop duplicates where there is bad data in SAbDab
    df_CDRplus2 = df_CDRplus2.drop_duplicates(subset=["Chain", "Chothia", "Atom_Name"], keep="first").reset_index(drop=True)

    return df_CDRplus2
```
### I label according to 4.5A distance cutoff
```
def get_labels(df, pdb_code, H_id, L_id, antigen_chain_ids, radius=4.5):
    '''
    Label CDR residues according to if they bind any epitope AA

    :param df: chothia numbered dataframe for specific pdb entry
    :param pdb_code: 4-letter pdb code (unused in the body; kept for interface parity)
    :param H_id: heavy-chain identifier
    :param L_id: light-chain identifier
    :param antigen_chain_ids: chain identifiers belonging to the antigen
    :param radius: the max distance the closest atom in the epitope residue can be
                   from a CDR heavy atom for it to be considering 'binding'
    :returns: array (num_CDR_residues,) 1 = binds, 0 = does not bind, ordered
              to match the C-alpha rows of parapred_CDRplus2_only_df
    '''
    # get df with all atoms belonging to CDR of abs
    df_CDRs = parapred_CDRplus2_only_df(df, pdb_code, H_id, L_id, Calpha_only=False)

    # get df with all atoms belonging within radius of any antigen AA heavy atom
    df_antigen = df[df["Chain"].isin(antigen_chain_ids)]
    # drop duplicates where there is bad data in SAbDab
    df_antigen = df_antigen.drop_duplicates(subset=["Chain", "Chothia", "Atom_Name"], keep="first").reset_index(drop=True)

    # get ndarray of coors for atoms in CDRs and antigen
    # NOTE(review): DataFrame.get_values() was removed in pandas 1.0 — this
    # cell requires an older pandas, or a switch to .to_numpy().
    xyz_arr_CDRs = df_CDRs[["x", "y", "z"]].get_values()
    xyz_arr_anti = df_antigen[["x", "y", "z"]].get_values()

    # get distances between all CDR atoms and antigen atoms
    dist_matrix = cdist(xyz_arr_CDRs, xyz_arr_anti, 'euclidean')
    adj_matrix = np.where(dist_matrix <= radius, 1, 0)

    # sum over each CDR atom - we only care that there is a hit, not how many
    num_hits_per_CDR_atom = np.sum(adj_matrix, axis=1)

    # add this new data back to df so we can group by each CDR residue
    df_CDRs["num_hits_per_CDR_atom"] = num_hits_per_CDR_atom
    labels = df_CDRs.groupby(["Chain", "Chothia"])["num_hits_per_CDR_atom"].sum()
    labels = labels.reset_index()

    # the order is lost in the groupby and sum and so we left join on ordered df to regain order
    df_CDRs_Calpha = parapred_CDRplus2_only_df(df, pdb_code, H_id, L_id, Calpha_only=True)
    labels_ordered = pd.merge(df_CDRs_Calpha[["Chain", "Chothia"]], labels, how='left',
                              left_on=["Chain", "Chothia"], right_on = ["Chain", "Chothia"])["num_hits_per_CDR_atom"]

    # our data is currently how many residue atoms got a 'hit' but again we only care that one exists
    labels_ordered = np.where(labels_ordered > 0, 1, 0)

    return labels_ordered
```
---
# Create master dataframe with all of Parapred's predictions and labels
```
# Build one master dataframe of Parapred predictions + binding labels,
# one pdb entry at a time.
per_pdb_dfs = []
for index, row in tqdm(test_df.iterrows()):
    pdb = row["pdb"]
    Hchain = row["Hchain"]
    Lchain = row["Lchain"]
    # antigen chain ids come as a '|'-separated string, e.g. "A | B"
    Ag_ids = row["antigen_chain"].split("|")
    Ag_ids = [Ag_id.strip() for Ag_id in Ag_ids]
    pdb_file = "../ABDB/entries/{}/structure/chothia/{}.pdb".format(pdb, pdb)
    # predictions using my new weights
    # pdb_file_parapred = "../ABDB/entries/{}/structure/chothia/{}_parapred_custom_PECANweights.pdb".format(pdb, pdb)
    pdb_file_parapred = "../ABDB/entries/{}/structure/chothia/{}_parapred_custom_EXPANDEDweights.pdb".format(pdb, pdb)
    # predictions using Parapred's OOTB weights
    # pdb_file_parapred = "../ABDB/entries/{}/structure/chothia/{}_parapred_PECANdata.pdb".format(pdb, pdb)
    df_pp = format_chothia_pdb(pdb_file_parapred)
    df_CDRs = parapred_CDRplus2_only_df(df_pp, pdb, Hchain, Lchain, Calpha_only=True)
    df_CDRs["pdb"] = pdb
    df_original = format_chothia_pdb(pdb_file)
    labels = get_labels(df_original, pdb, Hchain, Lchain, Ag_ids)
    df_CDRs["chothia_label"] = labels
    per_pdb_dfs.append(df_CDRs)
    # if index == 3: break
# Collecting frames and concatenating once fixes the fragile `if index == 0`
# check (iterrows() need not start at index 0, e.g. after filtering) and avoids
# quadratic repeated concatenation.  The original prepended each new frame, so
# concatenate in reverse to keep the same row order.
df_chothia_master = pd.concat(per_pdb_dfs[::-1]).reset_index(drop=True)
print(df_chothia_master.shape)
df_chothia_master.head(10)
```
---
# Visualise Parapred's performance
In their paper, the authors report Parapred's performance as:
* ROC AUC: 0.878
* F-score: 0.690
* MCC: 0.554
```
# Evaluate Parapred scores against the 4.5 A binding labels.
y_true = df_chothia_master["chothia_label"]
y_score = df_chothia_master["Parapred"]
precision, recall, _ = precision_recall_curve(y_true, y_score)
print("Parapred PR AUC: \t{:.3f}".format(auc(recall, precision)))
print("Parapred ROC AUC: \t{:.3f}".format(roc_auc_score(y_true, y_score)))

# Split the scores by ground-truth class and overlay their histograms.
non_binders = df_chothia_master[df_chothia_master["chothia_label"]==0]["Parapred"].tolist()
binders = df_chothia_master[df_chothia_master["chothia_label"]==1]["Parapred"].tolist()
bins = np.linspace(0, 1, 50)
plt.hist(non_binders, bins, alpha=0.5, label='non-binders')
plt.hist(binders, bins, alpha=0.5, label='binders')
plt.legend(loc='upper right')
plt.show()
```
| github_jupyter |
<img src="Logo.png" width="100" align="left"/>
# <center> Unit 3 Project </center>
# <center> Third section : supervised task </center>
In this notebook you will be building and training a supervised learning model to classify your data.
For this task we will be using another classification model "The random forests" model.
Steps for this task:
1. Load the already clustered dataset
2. Take into consideration that in this task we will not be using the already added column "Cluster"
3. Split your data.
4. Build your model using the SKlearn RandomForestClassifier class
5. Classify your data and test the performance of your model
6. Evaluate the model (accepted models should have at least an accuracy of 86%). Play with hyper parameters and provide a report about that.
7. Provide evidence of the quality of your model (good metrics, no overfitting)
8. Create a new test dataset that contains the test set + an additional column called "predicted_class" stating the class predicted by your random forest classifier for each data point of the test set.
## 1. Load the data and split the data:
```
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
from sklearn.model_selection import train_test_split

# To-Do: load the data
df = pd.read_csv("clustered_HepatitisC.csv")
df.head()

# Keep every feature except the ID and the cluster assignment;
# the Category column is the classification target.
# Hold out 20% of the data points as the test set.
features = df.drop(["ID", "cluster"], axis=1)
y = features["Category"]
X = features.drop(["Category"], axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
```
## 2. Building the model and training and evaluate the performance:
```
# To-do build the model and train it
# note that you will be providing explanation about the hyper parameter tuning
# So you will be iterating a number of times before getting the desired performance
import numpy as np
from sklearn.model_selection import GridSearchCV

# Number of features to consider at every split.
# 'sqrt' replaces 'auto': for RandomForestClassifier 'auto' meant sqrt(n_features),
# was deprecated in scikit-learn 1.1 and removed in 1.3.
max_features = ['sqrt', 'log2', None]
# Maximum number of levels in tree: 10, 20, 30, 40, plus unlimited depth
max_depth = [int(x) for x in np.linspace(10, 40, num=4)]
max_depth.append(None)
# Method of selecting samples for training each tree (with/without bootstrap)
bootstrap = [True, False]

# 3 x 5 x 2 = 30 parameter combinations for the grid search
tuned_params = {'max_features': max_features,
                'max_depth': max_depth,
                'bootstrap': bootstrap}
print(tuned_params)
```
In order to find the best hyper parameters for the model, I will use a grid search to find the best combination.
```
# Grid search over tuned_params (30 combinations) around a 400-tree forest;
# GridSearchCV refits the best combination on the full training set.
rf = RandomForestClassifier(n_estimators=400)
model = GridSearchCV(rf,tuned_params)
model.fit(X_train, y_train)
# best hyper-parameter combination found by the search
model.best_params_
# predictions on the held-out test set
y_hat = model.predict(X_test)

# To-do : evaluate the model in terms of accuracy and precision
# Provide evidence that your model is not overfitting
from sklearn.metrics import precision_score, accuracy_score
# train-set metrics: a perfect train score alone would hint at overfitting
print("Train score : \n")
y_train_pred = model.predict(X_train)
print("Accuracy :",accuracy_score(y_train,y_train_pred),"\n")
print("Precision :",precision_score(y_train,y_train_pred,average="macro"),"\n")
# test-set metrics: the generalization check
print("Test score :\n")
print("Accuracy score :",accuracy_score(y_test,y_hat),"\n")
print("Precision :",precision_score(y_test,y_hat,average="macro"),"\n")
```
A perfect (100%) accuracy on the training set can be a warning sign of overfitting on its own; however, since the model also achieves a high accuracy (around 95%) on the unseen test set, it generalizes well and is not meaningfully overfitted.
> Hint : A Perfect accuracy on the train set suggest that we have an overfitted model So the student should be able to provide a detailed table about the hyper parameters / parameters tuning with a good conclusion stating that the model has at least an accuracy of 86% on the test set without signs of overfitting
## 3. Create the summary test set with the additional predicted class column:
In this part you need to add the predicted class as a column to your test dataframe and save this one
```
# To-Do : create the complete test dataframe : it should contain all the feature column + the actual target and the ID as well
# .copy() so that adding columns below does not mutate X_test itself —
# plain assignment (`test_df = X_test`) would alias the same dataframe.
test_df = X_test.copy()
test_df["Category"] = y_test
test_df.head()

# To-Do : Add the predicted_class column
test_df["Predicted_class"] = y_hat
test_df.head()
```
> Make sure you have 16 column in this test set
```
# Pull the ID and cluster columns back in from the full dataframe;
# row alignment happens on the shared index values.
test_df["ID"] = df["ID"]
test_df["cluster"] = df["cluster"]
test_df.head()

# Move the ID column to the front, keeping every other column in place.
reordered = ["ID"] + [col for col in test_df.columns.tolist() if col != "ID"]
test_df = test_df[reordered]
test_df.head()
test_df.shape

# Save the test set
test_df.to_csv("test_summary.csv",index=False)
```
| github_jupyter |
# Saving and Loading Models
In this notebook, I'll show you how to save and load models with PyTorch. This is important because you'll often want to load previously trained models to use in making predictions or to continue training on new data.
```
%matplotlib inline
%config InlineBackend.figure_format = 'retina'

import matplotlib.pyplot as plt

import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms

import helper
import fc_model

# Define a transform to normalize the data: to tensor, then shift/scale
# pixel values with mean 0.5 and std 0.5 (single channel)
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,))])
# Download and load the training data (batches of 64, shuffled each epoch)
trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)

# Download and load the test data
testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
```
Here we can see one of the images.
```
# Pull a single batch from the training loader and display its first image.
image, label = next(iter(trainloader))
helper.imshow(image[0, :])
```
# Train a network
To make things more concise here, I moved the model architecture and training code from the last part to a file called `fc_model`. Importing this, we can easily create a fully-connected network with `fc_model.Network`, and train the network using `fc_model.train`. I'll use this model (once it's trained) to demonstrate how we can save and load models.
```
# Create the network, define the criterion and optimizer
# (784 inputs, 10 output classes, hidden layers of 512/256/128 units)
model = fc_model.Network(784, 10, [512, 256, 128])
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# train for 2 epochs with the helper training loop from fc_model
fc_model.train(model, trainloader, testloader, criterion, optimizer, epochs=2)
```
## Saving and loading networks
As you can imagine, it's impractical to train a network every time you need to use it. Instead, we can save trained networks then load them later to train more or use them for predictions.
The parameters for PyTorch networks are stored in a model's `state_dict`. We can see the state dict contains the weight and bias matrices for each of our layers.
```
# The state_dict maps each layer's name to its weight/bias tensors
print("Our model: \n\n", model, '\n')
print("The state dict keys: \n\n", model.state_dict().keys())
```
The simplest thing to do is simply save the state dict with `torch.save`. For example, we can save it to a file `'checkpoint.pth'`.
```
# Persist only the learned parameters (not the architecture) to disk
torch.save(model.state_dict(), 'checkpoint.pth')
```
Then we can load the state dict with `torch.load`.
```
# Read the saved parameter dict back from disk
state_dict = torch.load('checkpoint.pth')
print(state_dict.keys())
```
And to load the state dict in to the network, you do `model.load_state_dict(state_dict)`.
```
# Copy the saved parameters into the network (architectures must match)
model.load_state_dict(state_dict)
```
Seems pretty straightforward, but as usual it's a bit more complicated. Loading the state dict works only if the model architecture is exactly the same as the checkpoint architecture. If I create a model with a different architecture, this fails.
```
# Try this: a network whose hidden-layer sizes differ from the checkpoint's
model = fc_model.Network(784, 10, [400, 200, 100])
# This will throw an error because the tensor sizes are wrong!
model.load_state_dict(state_dict)
```
This means we need to rebuild the model exactly as it was when trained. Information about the model architecture needs to be saved in the checkpoint, along with the state dict. To do this, you build a dictionary with all the information you need to completely rebuild the model.
```
# Save the architecture hyper-parameters alongside the weights, so the
# exact same network can be rebuilt before loading the state dict
checkpoint = {'input_size': 784,
              'output_size': 10,
              'hidden_layers': [each.out_features for each in model.hidden_layers],
              'state_dict': model.state_dict()}

torch.save(checkpoint, 'checkpoint.pth')
```
Now the checkpoint has all the necessary information to rebuild the trained model. You can easily make that a function if you want. Similarly, we can write a function to load checkpoints.
```
def load_checkpoint(filepath):
    """Rebuild a fc_model.Network from a saved checkpoint.

    Reads the checkpoint dict, reconstructs the architecture it describes,
    then restores the trained weights into it.
    """
    ckpt = torch.load(filepath)
    net = fc_model.Network(ckpt['input_size'],
                           ckpt['output_size'],
                           ckpt['hidden_layers'])
    net.load_state_dict(ckpt['state_dict'])
    return net

model = load_checkpoint('checkpoint.pth')
print(model)
```
| github_jupyter |
## Our Mission ##
Spam detection is one of the major applications of Machine Learning in the interwebs today. Pretty much all of the major email service providers have spam detection systems built in and automatically classify such mail as 'Junk Mail'.
In this mission we will be using the Naive Bayes algorithm to create a model that can classify SMS messages as spam or not spam, based on the training we give to the model. It is important to have some level of intuition as to what a spammy text message might look like. Usually they have words like 'free', 'win', 'winner', 'cash', 'prize' and the like in them as these texts are designed to catch your eye and in some sense tempt you to open them. Also, spam messages tend to have words written in all capitals and also tend to use a lot of exclamation marks. To the recipient, it is usually pretty straightforward to identify a spam text and our objective here is to train a model to do that for us!
Being able to identify spam messages is a binary classification problem as messages are classified as either 'Spam' or 'Not Spam' and nothing else. Also, this is a supervised learning problem, as we will be feeding a labelled dataset into the model, that it can learn from, to make future predictions.
### Step 0: Introduction to the Naive Bayes Theorem ###
Bayes Theorem is one of the earliest probabilistic inference algorithms. It was developed by Reverend Bayes (which he used to try and infer the existence of God no less), and still performs extremely well for certain use cases.
It's best to understand this theorem using an example. Let's say you are a member of the Secret Service and you have been deployed to protect the Democratic presidential nominee during one of his/her campaign speeches. Being a public event that is open to all, your job is not easy and you have to be on the constant lookout for threats. So one place to start is to put a certain threat-factor for each person. So based on the features of an individual, like the age, sex, and other smaller factors like whether the person is carrying a bag, looks nervous, etc., you can make a judgment call as to whether that person is a viable threat.
If an individual ticks all the boxes up to a level where it crosses a threshold of doubt in your mind, you can take action and remove that person from the vicinity. Bayes Theorem works in the same way, as we are computing the probability of an event (a person being a threat) based on the probabilities of certain related events (age, sex, presence of bag or not, nervousness of the person, etc.).
One thing to consider is the independence of these features amongst each other. For example if a child looks nervous at the event then the likelihood of that person being a threat is not as much as say if it was a grown man who was nervous. To break this down a bit further, here there are two features we are considering, age AND nervousness. Say we look at these features individually, we could design a model that flags ALL persons that are nervous as potential threats. However, it is likely that we will have a lot of false positives as there is a strong chance that minors present at the event will be nervous. Hence by considering the age of a person along with the 'nervousness' feature we would definitely get a more accurate result as to who are potential threats and who aren't.
This is the 'Naive' bit of the theorem where it considers each feature to be independent of each other which may not always be the case and hence that can affect the final judgement.
In short, Bayes Theorem calculates the probability of a certain event happening (in our case, a message being spam) based on the joint probabilistic distributions of certain other events (in our case, the appearance of certain words in a message). We will dive into the workings of Bayes Theorem later in the mission, but first, let us understand the data we are going to work with.
### Step 1.1: Understanding our dataset ###
We will be using a dataset originally compiled and posted on the UCI Machine Learning repository which has a very good collection of datasets for experimental research purposes. If you're interested, you can review the [abstract](https://archive.ics.uci.edu/ml/datasets/SMS+Spam+Collection) and the original [compressed data file](https://archive.ics.uci.edu/ml/machine-learning-databases/00228/) on the UCI site. For this exercise, however, we've gone ahead and downloaded the data for you.
** Here's a preview of the data: **
<img src="images/dqnb.png" height="1242" width="1242">
The columns in the data set are currently not named and as you can see, there are 2 columns.
The first column takes two values, 'ham' which signifies that the message is not spam, and 'spam' which signifies that the message is spam.
The second column is the text content of the SMS message that is being classified.
>**Instructions:**
* Import the dataset into a pandas dataframe using the **read_table** method. The file has already been downloaded, and you can access it using the filepath 'smsspamcollection/SMSSpamCollection'. Because this is a tab separated dataset we will be using '\\t' as the value for the 'sep' argument which specifies this format.
* Also, rename the column names by specifying a list ['label', 'sms_message'] to the 'names' argument of read_table().
* Print the first five values of the dataframe with the new column names.
```
'''
Solution
'''
import pandas as pd

# Dataset available using filepath 'smsspamcollection/SMSSpamCollection'
# tab-separated, no header row; name the two columns explicitly
df = pd.read_table('smsspamcollection/SMSSpamCollection',
                   sep='\t',
                   header=None,
                   names=['label', 'sms_message'])

# Output printing out first 5 rows
df.head()
```
### Step 1.2: Data Preprocessing ###
Now that we have a basic understanding of what our dataset looks like, lets convert our labels to binary variables, 0 to represent 'ham'(i.e. not spam) and 1 to represent 'spam' for ease of computation.
You might be wondering why do we need to do this step? The answer to this lies in how scikit-learn handles inputs. Scikit-learn only deals with numerical values and hence if we were to leave our label values as strings, scikit-learn would do the conversion internally(more specifically, the string labels will be cast to unknown float values).
Our model would still be able to make predictions if we left our labels as strings but we could have issues later when calculating performance metrics, for example when calculating our precision and recall scores. Hence, to avoid unexpected 'gotchas' later, it is good practice to have our categorical values be fed into our model as integers.
>**Instructions:**
* Convert the values in the 'label' colum to numerical values using map method as follows:
{'ham':0, 'spam':1} This maps the 'ham' value to 0 and the 'spam' value to 1.
* Also, to get an idea of the size of the dataset we are dealing with, print out number of rows and columns using
'shape'.
```
'''
Solution
'''
# map the string labels to integers: 'ham' -> 0, 'spam' -> 1
df['label'] = df.label.map({'ham':0, 'spam':1})
print(df.shape)  # returns (rows, columns)
df.head()
```
### Step 2.1: Bag of words ###
What we have here in our data set is a large collection of text data (5,572 rows of data). Most ML algorithms rely on numerical data to be fed into them as input, and email/sms messages are usually text heavy.
Here we'd like to introduce the Bag of Words(BoW) concept which is a term used to specify the problems that have a 'bag of words' or a collection of text data that needs to be worked with. The basic idea of BoW is to take a piece of text and count the frequency of the words in that text. It is important to note that the BoW concept treats each word individually and the order in which the words occur does not matter.
Using a process which we will go through now, we can convert a collection of documents to a matrix, with each document being a row and each word (token) being the column, and the corresponding (row, column) values being the frequency of occurrence of each word or token in that document.
For example:
Lets say we have 4 documents as follows:
`['Hello, how are you!',
'Win money, win from home.',
'Call me now',
'Hello, Call you tomorrow?']`
Our objective here is to convert this set of text to a frequency distribution matrix, as follows:
<img src="images/countvectorizer.png" height="542" width="542">
Here as we can see, the documents are numbered in the rows, and each word is a column name, with the corresponding value being the frequency of that word in the document.
Lets break this down and see how we can do this conversion using a small set of documents.
To handle this, we will be using sklearns
[count vectorizer](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html#sklearn.feature_extraction.text.CountVectorizer) method which does the following:
* It tokenizes the string(separates the string into individual words) and gives an integer ID to each token.
* It counts the occurrance of each of those tokens.
**Please Note:**
* The CountVectorizer method automatically converts all tokenized words to their lower case form so that it does not treat words like 'He' and 'he' differently. It does this using the `lowercase` parameter which is by default set to `True`.
* It also ignores all punctuation so that words followed by a punctuation mark (for example: 'hello!') are not treated differently than the same words not prefixed or suffixed by a punctuation mark (for example: 'hello'). It does this using the `token_pattern` parameter which has a default regular expression which selects tokens of 2 or more alphanumeric characters.
* The third parameter to take note of is the `stop_words` parameter. Stop words refer to the most commonly used words in a language. They include words like 'am', 'an', 'and', 'the' etc. By setting this parameter value to `english`, CountVectorizer will automatically ignore all words(from our input text) that are found in the built in list of english stop words in scikit-learn. This is extremely helpful as stop words can skew our calculations when we are trying to find certain key words that are indicative of spam.
We will dive into the application of each of these into our model in a later step, but for now it is important to be aware of such preprocessing techniques available to us when dealing with textual data.
### Step 2.2: Implementing Bag of Words from scratch ###
Before we dive into scikit-learn's Bag of Words(BoW) library to do the dirty work for us, let's implement it ourselves first so that we can understand what's happening behind the scenes.
**Step 1: Convert all strings to their lower case form.**
Let's say we have a document set:
```
# four toy documents used to demonstrate the Bag of Words preprocessing steps
documents = ['Hello, how are you!',
'Win money, win from home.',
'Call me now.',
'Hello, Call hello you tomorrow?']
```
>>**Instructions:**
* Convert all the strings in the documents set to their lower case. Save them into a list called 'lower_case_documents'. You can convert strings to their lower case in python by using the lower() method.
```
'''
Solution:
'''
documents = ['Hello, how are you!',
             'Win money, win from home.',
             'Call me now.',
             'Hello, Call hello you tomorrow?']

# Step 1: lower-case every document. A list comprehension replaces the
# manual append loop — same result, more idiomatic.
lower_case_documents = [doc.lower() for doc in documents]
print(lower_case_documents)
```
**Step 2: Removing all punctuation**
>>**Instructions:**
Remove all punctuation from the strings in the document set. Save them into a list called
'sans_punctuation_documents'.
```
'''
Solution:
'''
import string

# Step 2: strip punctuation with a single translation table built once,
# instead of rebuilding it with str.maketrans on every loop iteration.
punct_table = str.maketrans('', '', string.punctuation)
sans_punctuation_documents = [doc.translate(punct_table) for doc in lower_case_documents]
print(sans_punctuation_documents)
```
**Step 3: Tokenization**
Tokenizing a sentence in a document set means splitting up a sentence into individual words using a delimiter. The delimiter specifies what character we will use to identify the beginning and the end of a word(for example we could use a single space as the delimiter for identifying words in our document set.)
>>**Instructions:**
Tokenize the strings stored in 'sans_punctuation_documents' using the split() method. Store the final document set
in a list called 'preprocessed_documents'.
```
'''
Solution:
'''
# Step 3: tokenize on single spaces (same delimiter as the original loop),
# expressed as a list comprehension.
preprocessed_documents = [doc.split(' ') for doc in sans_punctuation_documents]
print(preprocessed_documents)
```
**Step 4: Count frequencies**
Now that we have our document set in the required format, we can proceed to counting the occurrence of each word in each document of the document set. We will use the `Counter` method from the Python `collections` library for this purpose.
`Counter` counts the occurrence of each item in the list and returns a dictionary with the key as the item being counted and the corresponding value being the count of that item in the list.
>>**Instructions:**
Using the Counter() method and preprocessed_documents as the input, create a dictionary with the keys being each word in each document and the corresponding values being the frequncy of occurrence of that word. Save each Counter dictionary as an item in a list called 'frequency_list'.
```
'''
Solution
'''
import pprint
from collections import Counter

# Step 4: one Counter per document gives the word -> frequency mapping;
# a comprehension replaces the manual append loop.
frequency_list = [Counter(doc) for doc in preprocessed_documents]
pprint.pprint(frequency_list)
```
Congratulations! You have implemented the Bag of Words process from scratch! As we can see in our previous output, we have a frequency distribution dictionary which gives a clear view of the text that we are dealing with.
We should now have a solid understanding of what is happening behind the scenes in the `sklearn.feature_extraction.text.CountVectorizer` method of scikit-learn.
We will now implement `sklearn.feature_extraction.text.CountVectorizer` method in the next step.
### Step 2.3: Implementing Bag of Words in scikit-learn ###
Now that we have implemented the BoW concept from scratch, let's go ahead and use scikit-learn to do this process in a clean and succinct way. We will use the same document set as we used in the previous step.
```
'''
Here we will look to create a frequency matrix on a smaller document set to make sure we understand how the
document-term matrix generation happens. We have created a sample document set 'documents'.
'''
# same four toy documents as in the from-scratch implementation
documents = ['Hello, how are you!',
'Win money, win from home.',
'Call me now.',
'Hello, Call hello you tomorrow?']
```
>>**Instructions:**
Import the sklearn.feature_extraction.text.CountVectorizer method and create an instance of it called 'count_vector'.
```
'''
Solution
'''
from sklearn.feature_extraction.text import CountVectorizer

# default parameters: lowercase=True, default token_pattern, stop_words=None
count_vector = CountVectorizer()
```
**Data preprocessing with CountVectorizer()**
In Step 2.2, we implemented a version of the CountVectorizer() method from scratch that entailed cleaning our data first. This cleaning involved converting all of our data to lower case and removing all punctuation marks. CountVectorizer() has certain parameters which take care of these steps for us. They are:
* `lowercase = True`
The `lowercase` parameter has a default value of `True` which converts all of our text to its lower case form.
* `token_pattern = (?u)\\b\\w\\w+\\b`
The `token_pattern` parameter has a default regular expression value of `(?u)\\b\\w\\w+\\b` which ignores all punctuation marks and treats them as delimiters, while accepting alphanumeric strings of length greater than or equal to 2, as individual tokens or words.
* `stop_words`
The `stop_words` parameter, if set to `english` will remove all words from our document set that match a list of English stop words which is defined in scikit-learn. Considering the size of our dataset and the fact that we are dealing with SMS messages and not larger text sources like e-mail, we will not be setting this parameter value.
You can take a look at all the parameter values of your `count_vector` object by simply printing out the object as follows:
```
'''
Practice note:
Print the 'count_vector' object which is an instance of 'CountVectorizer()'
'''
# printing the estimator shows its constructor parameters
print(count_vector)
```
>>**Instructions:**
Fit your document dataset to the CountVectorizer object you have created using fit(), and get the list of words
which have been categorized as features using the get_feature_names() method.
```
'''
Solution:
'''
# learn the vocabulary from the documents
count_vector.fit(documents)
# get_feature_names() was removed in scikit-learn 1.2;
# get_feature_names_out() is the current equivalent (wrapped in list()
# so the cell still displays a plain list of vocabulary words).
list(count_vector.get_feature_names_out())
```
The `get_feature_names()` method returns our feature names for this dataset, which is the set of words that make up our vocabulary for 'documents'.
>>**Instructions:**
Create a matrix with the rows being each of the 4 documents, and the columns being each word.
The corresponding (row, column) value is the frequency of occurrance of that word(in the column) in a particular
document(in the row). You can do this using the transform() method and passing in the document data set as the
argument. The transform() method returns a matrix of numpy integers, you can convert this to an array using
toarray(). Call the array 'doc_array'
```
'''
Solution
'''
# transform() returns a sparse document-term matrix; toarray() densifies it
doc_array = count_vector.transform(documents).toarray()

doc_array
```
Now we have a clean representation of the documents in terms of the frequency distribution of the words in them. To make it easier to understand our next step is to convert this array into a dataframe and name the columns appropriately.
>>**Instructions:**
Convert the array we obtained, loaded into 'doc_array', into a dataframe and set the column names to
the word names(which you computed earlier using get_feature_names(). Call the dataframe 'frequency_matrix'.
```
'''
Solution
'''
# Rows = documents, columns = vocabulary words, values = word counts.
# get_feature_names_out() replaces get_feature_names(), which was removed
# in scikit-learn 1.2.
frequency_matrix = pd.DataFrame(doc_array,
                                columns = count_vector.get_feature_names_out())
frequency_matrix
```
Congratulations! You have successfully implemented a Bag of Words problem for a document dataset that we created.
One potential issue that can arise from using this method out of the box is the fact that if our dataset of text is extremely large (say if we have a large collection of news articles or email data), there will be certain values that are more common than others simply due to the structure of the language itself. So for example words like 'is', 'the', 'an', pronouns, grammatical constructs etc. could skew our matrix and affect our analysis.
There are a couple of ways to mitigate this. One way is to use the `stop_words` parameter and set its value to `english`. This will automatically ignore all words(from our input text) that are found in a built in list of English stop words in scikit-learn.
Another way of mitigating this is by using the [tfidf](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html#sklearn.feature_extraction.text.TfidfVectorizer) method. This method is out of scope for the context of this lesson.
### Step 3.1: Training and testing sets ###
Now that we have understood how to deal with the Bag of Words problem we can get back to our dataset and proceed with our analysis. Our first step in this regard would be to split our dataset into a training and testing set so we can test our model later.
>>**Instructions:**
Split the dataset into a training and testing set by using the train_test_split method in sklearn. Split the data
using the following variables:
* `X_train` is our training data for the 'sms_message' column.
* `y_train` is our training data for the 'label' column
* `X_test` is our testing data for the 'sms_message' column.
* `y_test` is our testing data for the 'label' column
Print out the number of rows we have in each our training and testing data.
```
'''
Solution
'''
# split into training and testing sets
# train_test_split defaults to a 75/25 train/test split;
# random_state=1 makes the split reproducible across runs.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(df['sms_message'],
df['label'],
random_state=1)
print('Number of rows in the total set: {}'.format(df.shape[0]))
print('Number of rows in the training set: {}'.format(X_train.shape[0]))
print('Number of rows in the test set: {}'.format(X_test.shape[0]))
```
### Step 3.2: Applying Bag of Words processing to our dataset. ###
Now that we have split the data, our next objective is to follow the steps from Step 2: Bag of words and convert our data into the desired matrix format. To do this we will be using CountVectorizer() as we did before. There are two steps to consider here:
* Firstly, we have to fit our training data (`X_train`) into `CountVectorizer()` and return the matrix.
* Secondly, we have to transform our testing data (`X_test`) to return the matrix.
Note that `X_train` is our training data for the 'sms_message' column in our dataset and we will be using this to train our model.
`X_test` is our testing data for the 'sms_message' column and this is the data we will be using(after transformation to a matrix) to make predictions on. We will then compare those predictions with `y_test` in a later step.
For now, we have provided the code that does the matrix transformations for you!
```
'''
[Practice Node]
The code for this segment is in 2 parts. Firstly, we are learning a vocabulary dictionary for the training data
and then transforming the data into a document-term matrix; secondly, for the testing data we are only
transforming the data into a document-term matrix.
This is similar to the process we followed in Step 2.3
We will provide the transformed data to students in the variables 'training_data' and 'testing_data'.
'''
'''
Solution
'''
# Instantiate the CountVectorizer method
count_vector = CountVectorizer()
# Fit the training data and then return the matrix
# (the vocabulary is learned from the training set only, avoiding leakage)
training_data = count_vector.fit_transform(X_train)
# Transform testing data and return the matrix. Note we are not fitting the testing data into the CountVectorizer()
testing_data = count_vector.transform(X_test)
```
### Step 4.1: Bayes Theorem implementation from scratch ###
Now that we have our dataset in the format that we need, we can move onto the next portion of our mission which is the algorithm we will use to make our predictions to classify a message as spam or not spam. Remember that at the start of the mission we briefly discussed the Bayes theorem but now we shall go into a little more detail. In layman's terms, the Bayes theorem calculates the probability of an event occurring, based on certain other probabilities that are related to the event in question. It is composed of a prior(the probabilities that we are aware of or that is given to us) and the posterior(the probabilities we are looking to compute using the priors).
Let us implement the Bayes Theorem from scratch using a simple example. Let's say we are trying to find the odds of an individual having diabetes, given that he or she was tested for it and got a positive result.
In the medical field, such probabilities play a very important role as they often deal with life-and-death situations.
We assume the following:
`P(D)` is the probability of a person having Diabetes. It's value is `0.01` or in other words, 1% of the general population has diabetes(Disclaimer: these values are assumptions and are not reflective of any medical study).
`P(Pos)` is the probability of getting a positive test result.
`P(Neg)` is the probability of getting a negative test result.
`P(Pos|D)` is the probability of getting a positive result on a test done for detecting diabetes, given that you have diabetes. This has a value `0.9`. In other words the test is correct 90% of the time. This is also called the Sensitivity or True Positive Rate.
`P(Neg|~D)` is the probability of getting a negative result on a test done for detecting diabetes, given that you do not have diabetes. This also has a value of `0.9` and is therefore correct, 90% of the time. This is also called the Specificity or True Negative Rate.
The Bayes formula is as follows:
<img src="images/bayes_formula.png" height="242" width="242">
* `P(A)` is the prior probability of A occurring independently. In our example this is `P(D)`. This value is given to us.
* `P(B)` is the prior probability of B occurring independently. In our example this is `P(Pos)`.
* `P(A|B)` is the posterior probability that A occurs given B. In our example this is `P(D|Pos)`. That is, **the probability of an individual having diabetes, given that, that individual got a positive test result. This is the value that we are looking to calculate.**
* `P(B|A)` is the likelihood probability of B occuring, given A. In our example this is `P(Pos|D)`. This value is given to us.
Putting our values into the formula for Bayes theorem we get:
`P(D|Pos) = (P(D) * P(Pos|D)) / P(Pos)`
The probability of getting a positive test result `P(Pos)` can be calculated using the Sensitivity and Specificity as follows:
`P(Pos) = [P(D) * Sensitivity] + [P(~D) * (1-Specificity))]`
```
'''
Instructions:
Calculate probability of getting a positive test result, P(Pos)
'''
'''
Solution (skeleton code will be provided)
'''
# P(D): prior probability that a person has diabetes (1% prevalence).
p_diabetes = 0.01
# P(~D): prior probability that a person does not have diabetes.
p_no_diabetes = 0.99
# Sensitivity, P(Pos|D): probability of a positive test given diabetes.
p_pos_diabetes = 0.9
# Specificity, P(Neg|~D): probability of a negative test given no diabetes.
p_neg_no_diabetes = 0.9
# Law of total probability:
# P(Pos) = P(D) * Sensitivity + P(~D) * (1 - Specificity)
p_pos = (p_diabetes * p_pos_diabetes) + (p_no_diabetes * (1 - p_neg_no_diabetes))
# Bug fix: the original used a comma instead of a dot before format(),
# so it printed the literal '{}' and the value as two separate arguments.
print('The probability of getting a positive test result P(Pos) is: {}'.format(p_pos))
```
** Using all of this information we can calculate our posteriors as follows: **
The probability of an individual having diabetes, given that, that individual got a positive test result:
`P(D/Pos) = (P(D) * Sensitivity)) / P(Pos)`
The probability of an individual not having diabetes, given that, that individual got a positive test result:
`P(~D/Pos) = (P(~D) * (1-Specificity)) / P(Pos)`
The sum of our posteriors will always equal `1`.
```
'''
Instructions:
Compute the probability of an individual having diabetes, given that, that individual got a positive test result.
In other words, compute P(D|Pos).
The formula is: P(D|Pos) = (P(D) * P(Pos|D) / P(Pos)
'''
'''
Solution
'''
# Bayes' rule posterior: prior * likelihood / evidence.
# P(D|Pos)
p_diabetes_pos = (p_diabetes * p_pos_diabetes) / p_pos
# NOTE: the trailing backslash continues the string literal onto the next
# line, and format(x) with no template simply returns str(x).
print('Probability of an individual having diabetes, given that that individual got a positive test result is:\
',format(p_diabetes_pos))
'''
Instructions:
Compute the probability of an individual not having diabetes, given that, that individual got a positive test result.
In other words, compute P(~D|Pos).
The formula is: P(~D|Pos) = (P(~D) * P(Pos|~D) / P(Pos)
Note that P(Pos/~D) can be computed as 1 - P(Neg/~D).
Therefore:
P(Pos/~D) = p_pos_no_diabetes = 1 - 0.9 = 0.1
'''
'''
Solution
'''
# P(Pos/~D): the false positive rate, i.e. 1 - specificity.
# P(Pos/~D)
p_pos_no_diabetes = 0.1
# P(~D|Pos)
p_no_diabetes_pos = (p_no_diabetes * p_pos_no_diabetes) / p_pos
# The two posteriors P(D|Pos) and P(~D|Pos) sum to 1.
print ('Probability of an individual not having diabetes, given that individual got a positive test result is:'\
,p_no_diabetes_pos)
```
Congratulations! You have implemented Bayes theorem from scratch. Your analysis shows that even if you get a positive test result, there is only an 8.33% chance that you actually have diabetes and a 91.67% chance that you do not have diabetes. This is of course assuming that only 1% of the entire population has diabetes which of course is only an assumption.
** What does the term 'Naive' in 'Naive Bayes' mean ? **
The term 'Naive' in Naive Bayes comes from the fact that the algorithm considers the features that it is using to make the predictions to be independent of each other, which may not always be the case. So in our Diabetes example, we are considering only one feature, that is the test result. Say we added another feature, 'exercise'. Let's say this feature has a binary value of `0` and `1`, where the former signifies that the individual exercises less than or equal to 2 days a week and the latter signifies that the individual exercises greater than or equal to 3 days a week. If we had to use both of these features, namely the test result and the value of the 'exercise' feature, to compute our final probabilities, Bayes' theorem would fail. Naive Bayes' is an extension of Bayes' theorem that assumes that all the features are independent of each other.
### Step 4.2: Naive Bayes implementation from scratch ###
Now that you have understood the ins and outs of Bayes Theorem, we will extend it to consider cases where we have more than one feature.
Let's say that we have two political parties' candidates, 'Jill Stein' of the Green Party and 'Gary Johnson' of the Libertarian Party and we have the probabilities of each of these candidates saying the words 'freedom', 'immigration' and 'environment' when they give a speech:
* Probability that Jill Stein says 'freedom': 0.1 ---------> `P(F|J)`
* Probability that Jill Stein says 'immigration': 0.1 -----> `P(I|J)`
* Probability that Jill Stein says 'environment': 0.8 -----> `P(E|J)`
* Probability that Gary Johnson says 'freedom': 0.7 -------> `P(F|G)`
* Probability that Gary Johnson says 'immigration': 0.2 ---> `P(I|G)`
* Probability that Gary Johnson says 'environment': 0.1 ---> `P(E|G)`
And let us also assume that the probability of Jill Stein giving a speech, `P(J)` is `0.5` and the same for Gary Johnson, `P(G) = 0.5`.
Given this, what if we had to find the probabilities of Jill Stein saying the words 'freedom' and 'immigration'? This is where the Naive Bayes' theorem comes into play as we are considering two features, 'freedom' and 'immigration'.
Now we are at a place where we can define the formula for the Naive Bayes' theorem:
<img src="images/naivebayes.png" height="342" width="342">
Here, `y` is the class variable or in our case the name of the candidate and `x1` through `xn` are the feature vectors or in our case the individual words. The theorem makes the assumption that each of the feature vectors or words (`xi`) are independent of each other.
To break this down, we have to compute the following posterior probabilities:
* `P(J|F,I)`: Probability of Jill Stein saying the words Freedom and Immigration.
Using the formula and our knowledge of Bayes' theorem, we can compute this as follows: `P(J|F,I)` = `(P(J) * P(F|J) * P(I|J)) / P(F,I)`. Here `P(F,I)` is the probability of the words 'freedom' and 'immigration' being said in a speech.
* `P(G|F,I)`: Probability of Gary Johnson saying the words Freedom and Immigration.
Using the formula, we can compute this as follows: `P(G|F,I)` = `(P(G) * P(F|G) * P(I|G)) / P(F,I)`
```
'''
Instructions: Compute the probability of the words 'freedom' and 'immigration' being said in a speech, or
P(F,I).
The first step is multiplying the probabilities of Jill Stein giving a speech with her individual
probabilities of saying the words 'freedom' and 'immigration'. Store this in a variable called p_j_text
The second step is multiplying the probabilities of Gary Johnson giving a speech with his individual
probabilities of saying the words 'freedom' and 'immigration'. Store this in a variable called p_g_text
The third step is to add both of these probabilities and you will get P(F,I).
'''
'''
Solution: Step 1
'''
# P(J)
p_j = 0.5
# P(F/J)
p_j_f = 0.1
# P(I/J)
p_j_i = 0.1
# Naive independence assumption: P(F,I|J) = P(F|J) * P(I|J).
p_j_text = p_j * p_j_f * p_j_i
print(p_j_text)
'''
Solution: Step 2
'''
# P(G)
p_g = 0.5
# P(F/G)
p_g_f = 0.7
# P(I/G)
p_g_i = 0.2
# Same independence assumption for Gary Johnson.
p_g_text = p_g * p_g_f * p_g_i
print(p_g_text)
'''
Solution: Step 3: Compute P(F,I) and store in p_f_i
'''
# Evidence term: total probability summed over both candidates.
p_f_i = p_j_text + p_g_text
print('Probability of words freedom and immigration being said are: ', format(p_f_i))
```
Now we can compute the probability of `P(J|F,I)`, that is the probability of Jill Stein saying the words Freedom and Immigration and `P(G|F,I)`, that is the probability of Gary Johnson saying the words Freedom and Immigration.
```
'''
Instructions:
Compute P(J|F,I) using the formula P(J|F,I) = (P(J) * P(F|J) * P(I|J)) / P(F,I) and store it in a variable p_j_fi
'''
'''
Solution
'''
# Posterior for Jill Stein: her joint probability over the evidence P(F,I).
p_j_fi = p_j_text / p_f_i
print('The probability of Jill Stein saying the words Freedom and Immigration: ', format(p_j_fi))
'''
Instructions:
Compute P(G|F,I) using the formula P(G|F,I) = (P(G) * P(F|G) * P(I|G)) / P(F,I) and store it in a variable p_g_fi
'''
'''
Solution
'''
# Posterior for Gary Johnson; by construction p_j_fi + p_g_fi == 1.
p_g_fi = p_g_text / p_f_i
print('The probability of Gary Johnson saying the words Freedom and Immigration: ', format(p_g_fi))
```
And as we can see, just like in the Bayes' theorem case, the sum of our posteriors is equal to 1. Congratulations! You have implemented the Naive Bayes' theorem from scratch. Our analysis shows that there is only a 6.6% chance that Jill Stein of the Green Party uses the words 'freedom' and 'immigration' in her speech as compared to the 93.3% chance for Gary Johnson of the Libertarian party.
Another more generic example of Naive Bayes' in action is when we search for the term 'Sacramento Kings' in a search engine. In order for us to get the results pertaining to the Sacramento Kings NBA basketball team, the search engine needs to be able to associate the two words together and not treat them individually, in which case we would get results of images tagged with 'Sacramento' like pictures of city landscapes and images of 'Kings' which could be pictures of crowns or kings from history when what we are looking to get are images of the basketball team. This is a classic case of the search engine treating the words as independent entities and hence being 'naive' in its approach.
Applying this to our problem of classifying messages as spam, the Naive Bayes algorithm *looks at each word individually and not as associated entities* with any kind of link between them. In the case of spam detectors, this usually works as there are certain red flag words which can almost guarantee its classification as spam, for example emails with words like 'viagra' are usually classified as spam.
### Step 5: Naive Bayes implementation using scikit-learn ###
Thankfully, sklearn has several Naive Bayes implementations that we can use and so we do not have to do the math from scratch. We will be using sklearns `sklearn.naive_bayes` method to make predictions on our dataset.
Specifically, we will be using the multinomial Naive Bayes implementation. This particular classifier is suitable for classification with discrete features (such as in our case, word counts for text classification). It takes in integer word counts as its input. On the other hand Gaussian Naive Bayes is better suited for continuous data as it assumes that the input data has a Gaussian(normal) distribution.
```
'''
Instructions:
We have loaded the training data into the variable 'training_data' and the testing data into the
variable 'testing_data'.
Import the MultinomialNB classifier and fit the training data into the classifier using fit(). Name your classifier
'naive_bayes'. You will be training the classifier using 'training_data' and y_train' from our split earlier.
'''
'''
Solution
'''
# Multinomial Naive Bayes operates on discrete counts — exactly what the
# bag-of-words document-term matrix provides.
from sklearn.naive_bayes import MultinomialNB
naive_bayes = MultinomialNB()
naive_bayes.fit(training_data, y_train)
'''
Instructions:
Now that our algorithm has been trained using the training data set we can now make some predictions on the test data
stored in 'testing_data' using predict(). Save your predictions into the 'predictions' variable.
'''
'''
Solution
'''
# Predict labels for the already-vectorised held-out test set.
predictions = naive_bayes.predict(testing_data)
```
Now that predictions have been made on our test set, we need to check the accuracy of our predictions.
### Step 6: Evaluating our model ###
Now that we have made predictions on our test set, our next goal is to evaluate how well our model is doing. There are various mechanisms for doing so, but first let's do quick recap of them.
** Accuracy ** measures how often the classifier makes the correct prediction. It’s the ratio of the number of correct predictions to the total number of predictions (the number of test data points).
** Precision ** tells us what proportion of messages we classified as spam, actually were spam.
It is a ratio of true positives(words classified as spam, and which are actually spam) to all positives(all words classified as spam, irrespective of whether that was the correct classification), in other words it is the ratio of
`[True Positives/(True Positives + False Positives)]`
** Recall(sensitivity)** tells us what proportion of messages that actually were spam were classified by us as spam.
It is a ratio of true positives(words classified as spam, and which are actually spam) to all the words that were actually spam, in other words it is the ratio of
`[True Positives/(True Positives + False Negatives)]`
For classification problems that are skewed in their classification distributions like in our case, for example if we had a 100 text messages and only 2 were spam and the rest 98 weren't, accuracy by itself is not a very good metric. We could classify 90 messages as not spam(including the 2 that were spam but we classify them as not spam, hence they would be false negatives) and 10 as spam(all 10 false positives) and still get a reasonably good accuracy score. For such cases, precision and recall come in very handy. These two metrics can be combined to get the F1 score, which is weighted average of the precision and recall scores. This score can range from 0 to 1, with 1 being the best possible F1 score.
We will be using all 4 metrics to make sure our model does well. For all 4 metrics whose values can range from 0 to 1, having a score as close to 1 as possible is a good indicator of how well our model is doing.
```
'''
Instructions:
Compute the accuracy, precision, recall and F1 scores of your model using your test data 'y_test' and the predictions
you made earlier stored in the 'predictions' variable.
'''
'''
Solution
'''
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
# NOTE(review): precision/recall/F1 default to pos_label=1, which assumes the
# 'label' column was mapped to 0/1 (spam = 1) earlier in the notebook — confirm.
print('Accuracy score: ', format(accuracy_score(y_test, predictions)))
print('Precision score: ', format(precision_score(y_test, predictions)))
print('Recall score: ', format(recall_score(y_test, predictions)))
print('F1 score: ', format(f1_score(y_test, predictions)))
```
### Step 7: Conclusion ###
One of the major advantages that Naive Bayes has over other classification algorithms is its ability to handle an extremely large number of features. In our case, each word is treated as a feature and there are thousands of different words. Also, it performs well even with the presence of irrelevant features and is relatively unaffected by them. The other major advantage it has is its relative simplicity. Naive Bayes' works well right out of the box and tuning it's parameters is rarely ever necessary, except usually in cases where the distribution of the data is known.
It rarely ever overfits the data. Another important advantage is that its model training and prediction times are very fast for the amount of data it can handle. All in all, Naive Bayes' really is a gem of an algorithm!
Congratulations! You have successfully designed a model that can efficiently predict if an SMS message is spam or not!
Thank you for learning with us!
| github_jupyter |
## Precision-Recall Curves in Multiclass
For multiclass classification, we have 2 options:
- determine a PR curve for each class.
- determine the overall PR curve as the micro-average of all classes
Let's see how to do both.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_wine
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
# to convert the 1-D target vector in to a matrix
from sklearn.preprocessing import label_binarize
from sklearn.metrics import precision_recall_curve
from yellowbrick.classifier import PrecisionRecallCurve
```
## Load data (multiclass)
```
# load data
data = load_wine()
# Combine features and target into a single DataFrame for convenience.
data = pd.concat([
pd.DataFrame(data.data, columns=data.feature_names),
pd.DataFrame(data.target, columns=['target']),
], axis=1)
data.head()
# target distribution:
# multiclass and (fairly) balanced
data.target.value_counts(normalize=True)
# separate dataset into train and test
# (70/30 split, fixed seed for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(
data.drop(labels=['target'], axis=1), # drop the target
data['target'], # just the target
test_size=0.3,
random_state=0)
X_train.shape, X_test.shape
# the target is a vector with the 3 classes
y_test[0:10]
```
## Train ML models
The dataset we are using is very, extremely simple, so I am creating dumb models intentionally, that is few trees and very shallow for the random forests and few iterations for the logit. This is, so that we can get the most out of the PR curves by inspecting them visually.
### Random Forests
The Random Forests in sklearn are not trained as a 1 vs Rest. So in order to produce a 1 vs rest probability vector for each class, we need to wrap this estimator with another one from sklearn:
- [OneVsRestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsRestClassifier.html)
```
# set up the model, wrapped by the OneVsRestClassifier
# Deliberately weak model (10 stumps) so the PR curves are visually interesting.
rf = OneVsRestClassifier(
RandomForestClassifier(
n_estimators=10, random_state=39, max_depth=1, n_jobs=4,
)
)
# train the model
rf.fit(X_train, y_train)
# produce the predictions (as probabilities)
y_train_rf = rf.predict_proba(X_train)
y_test_rf = rf.predict_proba(X_test)
# note that the predictions are an array of 3 columns
# first column: the probability of an observation of being of class 0
# second column: the probability of an observation of being of class 1
# third column: the probability of an observation of being of class 2
y_test_rf[0:10, :]
# NOTE: with one-vs-rest the per-class probabilities need not sum to 1 per row.
pd.DataFrame(y_test_rf).sum(axis=1)[0:10]
# The final prediction is that of the biggest probability
rf.predict(X_test)[0:10]
```
### Logistic Regression
The Logistic regression supports 1 vs rest automatically through its multi_class parameter:
```
# set up the model
# multi_class='ovr' trains one binary logit per class (1 vs rest);
# max_iter=10 is intentionally tiny, so expect a ConvergenceWarning.
logit = LogisticRegression(
random_state=0, multi_class='ovr', max_iter=10,
)
# train
logit.fit(X_train, y_train)
# obtain the probabilities
y_train_logit = logit.predict_proba(X_train)
y_test_logit = logit.predict_proba(X_test)
# note that the predictions are an array of 3 columns
# first column: the probability of an observation of being of class 0
# second column: the probability of an observation of being of class 1
# third column: the probability of an observation of being of class 2
y_test_logit[0:10, :]
# The final prediction is that of the biggest probability
logit.predict(X_test)[0:10]
```
## Precision-Recall Curve
### Per class with Sklearn
```
# with label_binarize we transform the target vector
# into a multi-label matrix, so that it matches the
# outputs of the models
# then we have 1 class per column
# NOTE(review): this rebinds y_test in place — re-running the cell on the
# already-binarized matrix would fail; keep the original vector if needed.
y_test = label_binarize(y_test, classes=[0, 1, 2])
y_test[0:10, :]
# now we determine the precision and recall at different thresholds
# considering only the probability vector for class 2 and the true
# target for class 2
# so we treat the problem as class 2 vs rest
p, r, thresholds = precision_recall_curve(y_test[:, 2], y_test_rf[:, 2])
# precision values
p
# recall values
r
# thresholds examined
thresholds
```
Go ahead and examine the precision and recall for the other classes see how these values change.
```
# now let's do these for all classes and capture the results in
# dictionaries, so we can plot the values afterwards
# determine the Precision and recall
# at various thresholds of probability
# in a 1 vs all fashion, for each class
# (keys 0..2 are per-class curves; a "micro" key is added in a later cell)
precision_rf = dict()
recall_rf = dict()
# for each class
for i in range(3):
# determine precision and recall at various thresholds
# in a 1 vs all fashion
precision_rf[i], recall_rf[i], _ = precision_recall_curve(
y_test[:, i], y_test_rf[:, i])
precision_rf
# plot the curves for each class
for i in range(3):
plt.plot(recall_rf[i], precision_rf[i], label='class {}'.format(i))
plt.xlabel("recall")
plt.ylabel("precision")
plt.legend(loc="best")
plt.title("precision vs. recall curve")
plt.show()
# and now for the logistic regression
precision_lg = dict()
recall_lg = dict()
# for each class
for i in range(3):
# determine precision and recall at various thresholds
# in a 1 vs all fashion
precision_lg[i], recall_lg[i], _ = precision_recall_curve(
y_test[:, i], y_test_logit[:, i])
plt.plot(recall_lg[i], precision_lg[i], label='class {}'.format(i))
plt.xlabel("recall")
plt.ylabel("precision")
plt.legend(loc="best")
plt.title("precision vs. recall curve")
plt.show()
# and now, just because it is a bit difficult to compare
# between models, we plot the PR curves class by class,
# but the 2 models in the same plot
# for each class
for i in range(3):
plt.plot(recall_lg[i], precision_lg[i], label='logit class {}'.format(i))
plt.plot(recall_rf[i], precision_rf[i], label='rf class {}'.format(i))
plt.xlabel("recall")
plt.ylabel("precision")
plt.legend(loc="best")
plt.title("precision vs. recall curve for class{}".format(i))
plt.show()
```
We see that the Random Forest does a better job for all classes.
### Micro-average with sklearn
In order to do this, we concatenate all the probability vectors 1 after the other, and so we do with the real values.
```
# probability vectors for all classes, concatenated into one 1-d vector
y_test_rf.ravel()
# see that the unravelled prediction vector has 3 times the size
# of the original target (one entry per class per sample)
len(y_test), len(y_test_rf.ravel())
# A "micro-average": quantifying score on all classes jointly
# for random forests
precision_rf["micro"], recall_rf["micro"], _ = precision_recall_curve(
y_test.ravel(), y_test_rf.ravel(),
)
# for logistic regression
precision_lg["micro"], recall_lg["micro"], _ = precision_recall_curve(
y_test.ravel(), y_test_logit.ravel(),
)
# now we plot them next to each other
i = "micro"
# Bug fix: the original labels contained a literal '{}' that was never
# substituted; use plain descriptive labels instead.
plt.plot(recall_lg[i], precision_lg[i], label='logit micro-average')
plt.plot(recall_rf[i], precision_rf[i], label='rf micro-average')
plt.xlabel("recall")
plt.ylabel("precision")
plt.legend(loc="best")
# Bug fix: "class{}".format("micro") rendered as "classmicro" in the title.
plt.title("micro-averaged precision vs. recall curve")
plt.show()
```
## Yellowbrick
### Per class with Yellobrick
https://www.scikit-yb.org/en/latest/api/classifier/prcurve.html
**Note:**
In the cells below, we are passing to Yellobrick classes a model that is already fit. When we fit() the Yellobrick class, it will check if the model is fit, in which case it will do nothing.
If we pass a model that is not fit, and a multiclass target, Yellowbrick will wrap the model automatically with a 1 vs Rest classifier.
Check Yellobrick's documentation for more details.
```
# Per-class PR curves with Yellowbrick. The estimators were fit above, so
# visualizer.fit() detects this and does not retrain them.
visualizer = PrecisionRecallCurve(
rf, per_class=True, cmap="cool", micro=False,
)
visualizer.fit(X_train, y_train) # Fit the training data to the visualizer
visualizer.score(X_test, y_test) # Evaluate the model on the test data
visualizer.show() # Finalize and show the figure
# NOTE(review): cv=0.05 is unusual for PrecisionRecallCurve — confirm against
# the Yellowbrick docs that this argument is intended here.
visualizer = PrecisionRecallCurve(
logit, per_class=True, cmap="cool", micro=False, cv=0.05,
)
visualizer.fit(X_train, y_train) # Fit the training data to the visualizer
visualizer.score(X_test, y_test) # Evaluate the model on the test data
visualizer.show() # Finalize and show the figure
```
### Micro yellowbrick
```
# Micro-averaged PR curve: micro=True pools all classes into a single curve.
visualizer = PrecisionRecallCurve(
rf, cmap="cool", micro=True,
)
visualizer.fit(X_train, y_train) # Fit the training data to the visualizer
visualizer.score(X_test, y_test) # Evaluate the model on the test data
visualizer.show() # Finalize and show the figure
visualizer = PrecisionRecallCurve(
logit, cmap="cool", micro=True, cv=0.05,
)
visualizer.fit(X_train, y_train) # Fit the training data to the visualizer
visualizer.score(X_test, y_test) # Evaluate the model on the test data
visualizer.show() # Finalize and show the figure
```
That's all for PR curves
| github_jupyter |
# **Basic Drawing**
In this section, we’ll cover basic image transformations. These
are common techniques that you’ll likely apply to images,
including translation, rotation, resizing, flipping, and cropping.
```
#
# Based environment:
#
from __future__ import print_function
from matplotlib import pyplot as plt
%matplotlib inline
import cv2
import numpy as np
import pandas as pd
image = cv2.imread("../img_source/must_logo.png")
plt.imshow(image)
plt.title('Original')
plt.show()
```
## **Translation**
The first method we are going to explore is translation.
Translation is the shifting of an image along the $x$ and $y$
axis.
```
# Store height and width of the image
height, width = image.shape[:2]
quarter_height, quarter_width = height / 4, width / 4
# Translation matrix [[1,0,tx],[0,1,ty]]: shifts right by tx and down by ty.
T = np.float32([[1, 0, quarter_width], [0, 1, quarter_height]])
# warpAffine to transform
# the image using the matrix, T
img_translation = cv2.warpAffine(image, T, (width, height))
plt.imshow(img_translation)
plt.title('Translation')
plt.show()
# Positive offsets move the image down and to the right...
M = np.float32([[1, 0, 25], [0, 1, 50]])
shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
plt.subplot(1, 2, 1)
plt.title('Shifted Down and Right')
plt.imshow(shifted)
# ...negative offsets move it up and to the left.
M = np.float32([[1, 0, -50], [0, 1, -90]])
shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
plt.subplot(1, 2, 2)
plt.title('Shifted Up and Left')
plt.imshow(shifted)
plt.show()
# shifted = imutils.translate(image, 0, 100)
```
### Affine Transformation
```
rows,cols,ch=image.shape
# Three point correspondences uniquely determine a 2-D affine transform.
pts1=np.float32([[50,50],[200,50],[50,200]])
pts2=np.float32([[10,100],[200,50],[100,250]])
M=cv2.getAffineTransform(pts1,pts2)
# Warp into a 300x300 output canvas.
dst=cv2.warpAffine(image,M,(300,300))
plt.subplot(121),plt.imshow(image),plt.title('Input')
plt.subplot(122),plt.imshow(dst),plt.title('Output')
plt.show()
```
### Prespect Transformation
```
rows,cols,ch=image.shape
# Four point correspondences determine a perspective (homography) transform.
pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])
pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])
M=cv2.getPerspectiveTransform(pts1,pts2)
# Warp into a 300x300 output canvas.
dst=cv2.warpPerspective(image,M,(300,300))
plt.subplot(121),plt.imshow(image),plt.title('Input')
plt.subplot(122),plt.imshow(dst),plt.title('Output')
plt.show()
```
## **Rotation**
Rotation is exactly what it sounds like: rotating an image
by some angle $\theta$. In this section, we’ll explore how to rotate
an image.
```
(h, w) = image.shape[:2]
# Rotate about the image centre; the scale factor 1.0 keeps the original size.
center = (w // 2, h // 2)
M = cv2.getRotationMatrix2D(center, 45, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
plt.subplot(1,2,1)
plt.imshow(rotated)
plt.title("Rotated by 45 Degrees")
# Negative angles rotate clockwise.
M = cv2.getRotationMatrix2D(center, -90, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
plt.subplot(1,2,2)
plt.imshow(rotated)
plt.title('Rotated by -90 Degrees')
plt.show()
```
## **Resize**
Perhaps, not surprisingly, we will be using the `cv2.resize`
function to resize our images. But we need to keep in mind
the aspect ratio of the image when we are using this function.
```
# Fix the width at 150px and scale the height by the same ratio.
r = 150.0 / image.shape[1]
dim = (150, int(image.shape[0] * r))
resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
plt.subplot(1,2,1)
plt.imshow(resized)
plt.title("Resized (Width)")
# Fix the height at 50px and scale the width by the same ratio.
r = 50.0 / image.shape[0]
dim = (int(image.shape[1] * r), 50)
resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
plt.subplot(1,2,2)
plt.imshow(resized)
plt.title("Resized (Height)")
plt.show()
```
## Cropping, Flipping and Masking
```
# Cropping is plain NumPy slicing: image[y1:y2, x1:x2].
cropped = image[30:120 , 240:335]
plt.imshow(cropped)
plt.title("T_Rex Face")
# cv2.flip codes: 1 = horizontal, 0 = vertical, -1 = both axes.
flipped=cv2.flip(image,1)
plt.subplot(1,3,1)
plt.imshow(flipped)
plt.title("Flipped Horizontally")
flipped=cv2.flip(image,0)
plt.subplot(1,3,2)
plt.imshow(flipped)
plt.title("Flipped Vertically")
flipped=cv2.flip(image,-1)
plt.subplot(1,3,3)
plt.imshow(flipped)
plt.title("Flipped Horizontally & Vertically")
# Build a binary mask: a filled white 150x150 square centred on the image.
mask = np.zeros(image.shape[:2], dtype = "uint8")
(x, y) = (image.shape[1] // 2, image.shape[0] // 2)
cv2.rectangle(mask, (x - 75, y - 75), (x + 75 , y + 75), 255,-1)
# cv2.imshow("Mask", mask)
plt.subplot(1,2,1)
plt.imshow(mask)
plt.title("Mask")
# bitwise_and keeps only the pixels where the mask is non-zero.
masked = cv2.bitwise_and(image, image, mask = mask)
# cv2.imshow("Mask Applied to Image", masked)
plt.subplot(1,2,2)
plt.imshow(masked)
plt.title("Mask Applied to Imag")
```
### Sub-brief Note
```
def translate(image, x, y):
    """Shift the image x pixels horizontally and y pixels vertically."""
    shift_matrix = np.float32([[1, 0, x], [0, 1, y]])
    return cv2.warpAffine(image, shift_matrix, (image.shape[1], image.shape[0]))
def rotate(image, angle, center=None, scale=1.0):
    """Rotate the image by *angle* degrees about *center* (default: image centre)."""
    height, width = image.shape[:2]
    if center is None:
        center = (width // 2, height // 2)
    rotation_matrix = cv2.getRotationMatrix2D(center, angle, scale)
    return cv2.warpAffine(image, rotation_matrix, (width, height))
def resize(image, width = None, height = None, inter = cv2.INTER_AREA):
    """Resize an image to the given width OR height, preserving aspect ratio.

    Fixed: the def line was broken across two lines in the original
    ("inter = cv2." / "INTER_AREA):"), which is a syntax error.

    :param image: source image (ndarray)
    :param width: target width in px; height is derived if given
    :param height: target height in px; width is derived if given
    :param inter: OpenCV interpolation flag (INTER_AREA suits shrinking)
    :return: the resized image, or the original if neither dim is given
    """
    (h, w) = image.shape[:2]
    if width is None and height is None:
        return image
    if width is None:
        r = height / float(h)
        dim = (int(w * r), height)
    else:
        r = width / float(w)
        dim = (width, int(h * r))
    return cv2.resize(image, dim, interpolation = inter)
```
## **Arithmetic**
```
# cv2 arithmetic saturates: results are clipped to the [0, 255] uint8 range...
print("max of 255: {}".format(cv2.add(np.uint8([200]), np.uint8([100]))))
print("min of 0: {}".format(cv2.subtract(np.uint8([50]), np.uint8([100]))))
# ...while plain NumPy uint8 arithmetic wraps around (modulo 256).
print("wrap around: {}".format(np.uint8([200]) + np.uint8([100])))
print("wrap around: {}".format(np.uint8([50]) - np.uint8([100])))
# Brighten every pixel by 100 (saturating at 255).
M = np.ones(image.shape, dtype = "uint8") * 100
added = cv2.add(image, M)
cv2.imshow("Added", added)  # NOTE(review): other cells comment out cv2.imshow in favour of plt -- confirm this call is intended
plt.subplot(1,2,1)
plt.imshow(added)
plt.title("Added")
# Darken every pixel by 50 (saturating at 0).
M = np.ones(image.shape, dtype = "uint8") * 50
subtracted = cv2.subtract(image, M)
plt.subplot(1,2,2)
plt.imshow(subtracted)
plt.title("Subtracted")
plt.show()
```
| github_jupyter |
# Spark Session
Spark can work with data located on HDFS or a non-distributed filesystem. It can also use YARN from Hadoop, or [Mesos](https://mesos.apache.org/), or a resource manager of its own.
All distributed operations with Spark are done using so-called Spark Session. Usually one is already created by your cluster's administrator:
```
from pyspark.sql import SparkSession
# getOrCreate returns the already-running session if the cluster admin created one.
spark1 = SparkSession.builder.getOrCreate()
spark1
# this kills the current session too;
# after running this command, you will need to restart the notebook's kernel
spark1.stop()
!hdfs dfs -ls /
# the URL here won't work because of the network access configuration
spark
```
# Reading Data
Spark can consume data in a variety of formats, e.g. in JSON. We use the [YELP Dataset](https://www.yelp.com/dataset) for this example. It's easily obtainable and free to use in education and research.
```
reviews_on_hdfs = "/user/borisshminke/data/yelp_academic_dataset_review.json"
%%time
spark.read.text(reviews_on_hdfs).count()
some = spark.read.text(reviews_on_hdfs)
some.show(n=2)
```
This code simply reads a JSON file as a text, line by line, and counts the number of lines. Let's compare the speed with `wc` tool:
```
%%time
!wc -l /home/borisshminke/Downloads/yelp_academic_dataset_review.json
```
Although `wc` is implemented in C and is generally more efficient than the JVM code behind Spark, it uses only one CPU, and so may sometimes be slower than its distributed counterpart in Spark.
Parsing JSON in Spark is really simple:
```
reviews = spark.read.json(reviews_on_hdfs)
reviews.show(n=5)
from pyspark.sql import functions as sf
x = reviews \
.groupby(sf.date_trunc("day", "date").alias("day")) \
.agg(sf.sum("cool").alias("total_cool")) \
.sort(sf.desc("total_cool"))
# Spark can be used similarly to Pandas
from pyspark.sql import functions as sf
(
reviews
.select(sf.col("cool").alias("groovy"), "date")
.groupby(sf.date_trunc("day", "date").alias("day"))
.agg(
sf.count(sf.col("groovy")).alias("total_cool")
)
.sort(sf.desc("total_cool"))
.show(n=5)
)
# or you can use Spark as an SQL engine
reviews.createOrReplaceTempView("reviews")
spark.sql("""
SELECT
date_trunc('day', date) AS day,
SUM(cool) AS total_cool
FROM reviews
GROUP BY
day
ORDER BY
total_cool DESC
LIMIT 5
""").show()
```
# Do It Yourself
[Spark Manual](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql.html#functions) is your best friend!
* count the number of users and businesses
* count average number of reviews and stars per business and per user
* find histograms for distributions of cool, funny, and useful columns
* find ten most frequent words from the reviews
* save results to disk
```
reviews.columns
new_data = reviews.select("user_id")
```
# Action
count, show, top, max, min - computation happens
# Transformations
whatever - computation doesn't happen
```
reviews.select(sf.col("user_id"))
reviews.count()
reviews = reviews.cache()
%%time
reviews.count()
%%time
reviews.count()
users = reviews.select("user_id").distinct().cache()
%%time
users.count()
%%time
users.count()
users.explain()
(
reviews
.agg(sf.approx_count_distinct(reviews.user_id).alias('distinct_users'))
.collect()
)
reviews.select(
sf.split(sf.col("text"), " ").alias("words")
).show(n=2)
(
reviews.select(
sf.split(sf.col("text"), " ").alias("words")
)
.select(
sf.explode("words").alias("word")
)
).show(n=5)
(
reviews.select(
sf.split(sf.col("text"), " ").alias("words")
)
.filter("stars > 4")
.select(
sf.explode("words").alias("word")
)
.cache()
.filter("word NOT IN ('the', 'and', 'to', 'I', 'a', 'was', 'of', 'is', 'for')")
.groupby(
"word"
)
.agg(sf.count("word").alias("cnt"))
.sort(sf.desc("cnt"))
.show(n=10)
)
# Fixed: a stray acute accent (U+00B4) after the ' ' delimiter in split()
# made the SQL invalid and broke the whole query.
spark.sql("""
SELECT
explode(split(text, ' ')) AS word,
COUNT(*) cnt
FROM reviews
GROUP BY
word
ORDER BY
cnt DESC
LIMIT 5
""").show()
users.write.csv("/user/borisshminke/data/users")
!hdfs dfs -ls /user/borisshminke/data/users
new = spark.read.csv("/user/borisshminke/data/users")
new.show(n=2)
!hdfs dfs -getmerge /user/borisshminke/data/users /home/borisshminke/Downloads/users
!head /home/borisshminke/Downloads/users
```
| github_jupyter |
```
from __future__ import division, absolute_import
import sys
import os
import numpy as np
import random
import pickle
import time
import h5py
import pandas as pd
from plotnine import *
from imblearn import over_sampling
from collections import Counter
from tables import *
import matplotlib.pyplot as plt
from matplotlib_venn import venn3
import sklearn
from sklearn import preprocessing
from sklearn.cluster import MiniBatchKMeans
#root
absPath = '/home/angela3/imbalance_pcm_benchmark/'
sys.path.insert(0, absPath)
from src.imbalance_functions import *
np.random.seed(8)
random.seed(8)
protein_type = "GPCRs" #"kinases"
activity_file = "".join((absPath, "data/", protein_type, "_activity.csv"))
activity_df = pd.read_csv(activity_file, sep="\t")
print(activity_df.info())
print(activity_df.head())
Counter(activity_df["label"])
unique_prots = activity_df["DeepAffinity Protein ID"].drop_duplicates().tolist()
print("There are",len(unique_prots),"different proteins")
if not os.path.exists("".join((absPath, "data/", protein_type, "/resampling_before_clustering/"))):
os.makedirs("".join((absPath, "data/", protein_type, "/resampling_before_clustering/")))
with open("".join((absPath, "data/", protein_type, "/", protein_type, "_prots.pickle")), 'wb') as handle:
pickle.dump(unique_prots, handle, protocol=pickle.HIGHEST_PROTOCOL)
unique_comps = activity_df["DeepAffinity Compound ID"].drop_duplicates().tolist()
print("There are",len(unique_comps),"different compounds")
activity_df.groupby(["DeepAffinity Protein ID", "label"])["label"].count()
list_ratios = []
for prot in unique_prots:
ratio_actives_inactives = computing_active_inactive_ratio(activity_df, prot)
dicti = {"DeepAffinity Protein ID" : prot, "ratio_actives_inactives": ratio_actives_inactives}
list_ratios.append(dicti)
df_ratios = pd.DataFrame(list_ratios)
df_ratios.head()
(ggplot(df_ratios, aes("ratio_actives_inactives")) + geom_histogram())
# for each protein, save a sub-dataframe with the data
if not os.path.exists("".join((absPath, "data/", protein_type, "/resampling_before_clustering/subdataframes/"))):
os.makedirs("".join((absPath, "data/", protein_type, "/resampling_before_clustering/subdataframes/")))
#loading fingeprints
file_fps = "".join((absPath,"raw_data/dcid_fingerprint.tsv" ))
fps_df = pd.read_csv(file_fps, sep="\t")
fps_df.info()
for prot in unique_prots:
sub_prot = activity_df[activity_df["DeepAffinity Protein ID"] == prot]
prot_fps = pd.merge(sub_prot[["DeepAffinity Protein ID", "DeepAffinity Compound ID", "label",
"Canonical SMILE", "Sequence", "family"]],
fps_df, "left", on=["DeepAffinity Compound ID"])
file_subdf = "".join((absPath, "data/", protein_type, "/resampling_before_clustering/subdataframes/", prot, ".csv"))
prot_fps.to_csv(file_subdf)
# how many unique compounds are according to fingerprints?
activity_with_fps = pd.merge(activity_df, fps_df, "left", on=["DeepAffinity Compound ID"])
activity_with_fps.info()
activity_with_fps.drop_duplicates("Fingerprint Feature")
activity_with_fps.drop_duplicates("DeepAffinity Compound ID")
```
### Balancing data
```
for prot in unique_prots:
print(prot)
file_subdf = "".join((absPath, "data/", protein_type, "/resampling_before_clustering/subdataframes/", prot, ".csv"))
sub_prot = pd.read_csv(file_subdf)
print(sub_prot.shape)
prots_passing_smote = []
# SMOTE holds only configuration, so a single instance is reused across proteins
# (hoisted out of the loop; the original rebuilt it on every iteration).
sm = over_sampling.SMOTE(sampling_strategy=1.0, random_state=42)
for prot in unique_prots:
    print(prot)
    file_subdf = "".join((absPath, "data/", protein_type,
                          "/resampling_before_clustering/subdataframes/", prot, ".csv"))
    sub_prot = pd.read_csv(file_subdf)
    # Expand the fingerprint string into one feature column per bit.
    sub_prot["separated_fps"] = sub_prot["Fingerprint Feature"].apply(separating_fps)
    X = pd.DataFrame.from_dict(dict(zip(sub_prot["separated_fps"].index,
                                        sub_prot["separated_fps"].values))).transpose()
    Y = sub_prot["label"].values
    print(X.shape)
    try:
        Xmot, Ymot = sm.fit_resample(X, Y)
        print(Xmot.shape)
        prots_passing_smote.append(prot)
    except ValueError:
        # Narrowed from a bare `except:`: SMOTE raises ValueError when the
        # requested 1.0 ratio cannot be met for this protein's class balance.
        print(" The specified ratio required to remove samples from the minority class while trying to generate new samples.")
        continue
    #print(Counter(Ymot))
    # for each protein, save the resampled (X, Y) pair
    os.makedirs("".join((absPath, "data/", protein_type,
                         "/resampling_before_clustering/resampled_data/")), exist_ok=True)
    pickle_path = "".join((absPath, "data/", protein_type,
                           "/resampling_before_clustering/resampled_data/", prot, ".pickle"))
    with open(pickle_path, 'wb') as handle:
        pickle.dump((Xmot, Ymot), handle, protocol=pickle.HIGHEST_PROTOCOL)
print("There are", len(prots_passing_smote), "proteins to which SMOTE can be applied")
#with open("".join((absPath, "data/", protein_type, "/smote_prots.pickle")), 'wb') as handle:
# pickle.dump(prots_passing_smote, handle, protocol=pickle.HIGHEST_PROTOCOL)
df_lists = []
for prot in prots_passing_smote:
print(prot)
#Load data pickle
pickle_path = "".join((absPath, "data/", protein_type, "/resampling_before_clustering/resampled_data/", prot, ".pickle"))
with open(pickle_path, 'rb') as handle:
Xmot, Ymot = pickle.load(handle)
df = Xmot.copy()
df["Y"] = Ymot
print(len(Ymot))
df["prot"] = prot
df_lists.append(df)
df_complete = pd.concat(df_lists)
df_complete["fingerprint"] = df_complete[[i for i in range(881)]].apply(lambda row: "".join(row.values.astype(str)), axis=1)
df_complete.info()
df_complete.head()
unique_compounds_df = df_complete.drop_duplicates(subset="fingerprint", keep="first", ignore_index=True)
unique_compounds_df.info()
names_comps = ["c"+str(i) for i in range(unique_compounds_df.shape[0])]
unique_compounds_df["comp_ID"] = names_comps
unique_compounds_df.head()
unique_compounds_df[["fingerprint", "comp_ID"]].to_csv("".join((absPath, "data/", protein_type,
"/resampling_before_clustering/unique_compounds.csv")))
```
### Clustering data
```
nclusters = 100
batch_size = 1000
sample_indices = np.arange(0, unique_compounds_df.shape[0])
sample_indices = np.random.permutation(sample_indices)
#double checking
compounds_df_filtered = unique_compounds_df.drop(["prot", "Y"], axis=1)#[["fingerprint", "comp_ID"]]
print(compounds_df_filtered.head())
print(compounds_df_filtered.info())
generate_batches = batch_generator(batch_size, compounds_df_filtered, sample_indices)
# K-Means, fitted incrementally one batch at a time, so the full fingerprint
# matrix never has to sit in memory at once.
model = MiniBatchKMeans(n_clusters=nclusters, init='k-means++', compute_labels=True)
sse = {}               # inertia (sum of squared errors) after each batch
labels = []            # per-batch cluster label arrays, aligned with comp_ids
comp_ids = []          # per-batch compound-ID series
clusters_centers = {}  # cluster centres snapshot after each partial fit
for i, batch in enumerate(generate_batches):
    print("Iteration ", i)
    t0 = time.time()
    df = pd.DataFrame(batch)
    print(df["comp_ID"])
    comp_ids.append(df["comp_ID"])
    # Drop the ID column, then standardise features before the partial fit.
    to_array = df.drop(['comp_ID'], axis=1).values
    to_array = preprocessing.scale(to_array)
    model.partial_fit(to_array)
    print("The inertia for the batch %s is %s" % (i, model.inertia_))
    t_mini_batch = time.time() - t0
    print(t_mini_batch)
    sse[i] = model.inertia_
    labels.append(model.labels_)
    clusters_centers[i] = model.cluster_centers_
plt.figure()
plt.plot(list(sse.keys()), list(sse.values()))
plt.show()
labels_array = np.hstack(labels)
comp_ids_list = np.hstack(comp_ids)
#Joining compound IDs and cluster labels
compound_clusters = pd.DataFrame({'comp_ID':comp_ids_list,
'cluster_label':labels_array})
compound_clusters.info()
compound_clusters.to_csv("".join((absPath, "data/", protein_type,
"/resampling_before_clustering/compound_clusters.csv")), header=True)
compounds_df_filtered["fingerprint"] = compounds_df_filtered[[i for i in range(881)]].apply(lambda row: "".join(row.values.astype(str)), axis=1)
compounds_df_filtered.head()
#merging compound names, compound clusters and fingerprints
activity_with_IDs = pd.merge(df_complete[["prot", "Y", "fingerprint"]],
compounds_df_filtered,
on=["fingerprint"], how='left')
activity_with_IDs.info()
activity_with_IDs.head()
activity_df_clusters = pd.merge(activity_with_IDs, compound_clusters, on=["comp_ID"], how="left")
activity_df_clusters.info()
activity_df_clusters.head()
activity_df_clusters.to_csv("".join((absPath, "data/", protein_type,
"/resampling_before_clustering/activity_clusters.csv")), sep="\t", header=True)
```
### Training test split
```
nfolds = 10
print(activity_df_clusters.info())
print(activity_df_clusters.head())
#How many pairs are there for each cluster?
label_count = activity_df_clusters["cluster_label"].value_counts()
print(label_count)
if not os.path.exists("".join((absPath, "data/", protein_type, "/resampling_before_clustering/preprocessing_figures/"))):
os.makedirs("".join((absPath, "data/", protein_type, "/resampling_before_clustering/preprocessing_figures/")))
# Histogram
bins = np.arange(0, 100, 1) # fixed bin size
plt.xlim([min(activity_df_clusters.loc[:,'cluster_label'])-5, max(activity_df_clusters.loc[:,'cluster_label'])+5])
plt.hist(activity_df_clusters['cluster_label'], bins=bins, alpha=0.5, edgecolor='black', linewidth=1.2)
plt.title('Distribution of compounds clusters')
plt.xlabel('clusters')
plt.ylabel('samples per cluster')
plt.show()
plt.savefig("".join((absPath, "data/", protein_type, "/resampling_before_clustering/preprocessing_figures/histogram_clusters.png")))
#we create nfolds differents splitting partitions
for i in range(nfolds):
compounds_classif = accumulated_size_clusters(activity_df_clusters)
compounds_classif = training_test_split(compounds_classif, 80, 10, 10, i)
#Joining smiles-label dataframe with label information dataframe
name_column = "splitting_" + str(i)
activity_df_clusters = pd.merge(compounds_classif.loc[:, ["cluster_label", name_column]],
activity_df_clusters, on="cluster_label")
#Checking that all the partitions are more or less the same size
for i in range(nfolds):
name_column = "splitting_" + str(i)
print(activity_df_clusters[name_column].value_counts())
activity_df_clusters.info()
#Now we should check number of actives/inactives per splitting fold
count_list = []
for i in range(nfolds):
name_column = "splitting_" + str(i)
count_split = activity_df_clusters.loc[:,[name_column, "Y"]].groupby([name_column,
"Y"]).size().unstack(fill_value=0)
count_split_df = pd.DataFrame(count_split)
count_split_df_melt = pd.melt(count_split_df.reset_index(), id_vars = name_column, value_vars=[0.0,1.0])
count_split_df_melt["splitting_fold"] = name_column
count_split_df_melt = count_split_df_melt.rename(columns = {name_column:"split_set"})
print(count_split)
count_list.append(count_split_df_melt)
#preparing dataframe to check labels distribution across splitting sets
count_list_df = pd.concat(count_list, axis=0)
vals_to_replace = {0:'training_set', 1:'validation_set', 2:'test_set'}
vals_to_replace2 = dict(zip(["splitting_"+str(i) for i in range(nfolds)], range(nfolds)))
count_list_df['split_set'] = count_list_df['split_set'].map(vals_to_replace)
count_list_df['splitting_fold'] = count_list_df['splitting_fold'].map(vals_to_replace2)
p = (ggplot(count_list_df, aes(y="value")) +
geom_bar(aes(x="factor(splitting_fold)", fill="Y"), stat="identity") +
facet_grid(".~split_set") + xlab("splitting fold") + ylab("number of compounds")+
theme(legend_title=element_blank()))
p
ggsave(filename="".join((absPath, "data/", protein_type, "/resampling_before_clustering/preprocessing_figures/activity_distribution_clusters.pdf")), plot=p, dpi=300)
p
#Now we should check coincidence between clusters in splitting folds
cluster_labels_list = []
for i in range(nfolds):
cluster_labels = {}
name_column = "splitting_" + str(i)
count_split_cluster = activity_df_clusters.loc[:,[name_column, "cluster_label"]].groupby([name_column,
"cluster_label"]).size().unstack(fill_value=0)
#binarizing
binary_df = count_split_cluster >0
cluster_lists = binary_df.apply(lambda x: binary_df.columns[x == True], axis=1)
for idx in range(len(cluster_lists)):
cluster_labels[idx] = list(cluster_lists[idx])
cluster_labels_list.append(cluster_labels)
#print(count_split_cluster)
# Now we compute sets for drawing venn diagrams
for i in range(nfolds):
len_train = len(cluster_labels_list[i][0])
len_val = len(cluster_labels_list[i][1])
len_test = len(cluster_labels_list[i][2])
len_train_val = len(set(cluster_labels_list[i][0]). intersection(set(cluster_labels_list[i][1])))
len_train_test = len(set(cluster_labels_list[i][0]). intersection(set(cluster_labels_list[i][2])))
len_val_test = len(set(cluster_labels_list[i][1]). intersection(set(cluster_labels_list[i][2])))
len_train_val_test = len(set(cluster_labels_list[i][0]). intersection(set(cluster_labels_list[i][1])).intersection(set(cluster_labels_list[i][2])))
plt.figure(figsize=(4,4))
venn3(subsets = (len_train, len_val, len_train_val, len_test, len_train_test, len_val_test, len_train_val_test),
set_labels = ("training", "validation", "test"), alpha = 0.5)
plt.title("splitting fold " + str(i))
plt.savefig("".join((absPath, "data/", protein_type, "/resampling_before_clustering/preprocessing_figures/venn_clusters_", str(i), ".png")))
#falta la info de las secuencias, uniprot ID, prot_family
activity_file = "".join((absPath, "data/", protein_type, "_activity.csv"))
activity_df = pd.read_csv(activity_file, sep="\t")
print(activity_df.info())
print(activity_df.head())
unique_prots_df = activity_df[activity_df["DeepAffinity Protein ID"].isin(unique_prots)].drop_duplicates(["DeepAffinity Protein ID"])
unique_prots_df = unique_prots_df[["DeepAffinity Protein ID", "Uniprot ID", "Sequence", "family"]]
unique_prots_df.info()
activity_df_clusters.rename(columns={"prot": "DeepAffinity Protein ID"}, inplace=True)
activity_df_clusters.info()
activity_clusters_prot_info = pd.merge(activity_df_clusters,
unique_prots_df,
"left", on=["DeepAffinity Protein ID"])
print(activity_clusters_prot_info.info())
print(activity_clusters_prot_info.head())
#despues de dividir en training, test y validation
#saving data into a HDF5
#Defining HDF5 table-type for storing data
class Protein_Compound_Complex(IsDescription):
    """PyTables row schema for one protein-compound activity pair."""
    #CID = UInt16Col()
    da_comp_id = StringCol(4)    # DeepAffinity compound ID (e.g. "c123")
    da_prot_id = StringCol(4)    # DeepAffinity protein ID
    uniprot_id = StringCol(6)    # UniProt accession
    #activity = Float16Col()
    label = UInt16Col()          # binary activity label (0/1)
    #canonical_smiles = StringCol(100)
    sequence = StringCol(2000)   # protein amino-acid sequence
    prot_family = StringCol(5)   # protein family code
    comp_cluster = UInt16Col()   # MiniBatchKMeans cluster label
    # Split assignment per fold: 0 = training, 1 = validation, 2 = test
    # (matches the vals_to_replace mapping used when plotting the splits).
    splitting_0 = UInt8Col()
    splitting_1 = UInt8Col()
    splitting_2 = UInt8Col()
    splitting_3 = UInt8Col()
    splitting_4 = UInt8Col()
    splitting_5 = UInt8Col()
    splitting_6 = UInt8Col()
    splitting_7 = UInt8Col()
    splitting_8 = UInt8Col()
    splitting_9 = UInt8Col()
    fingerprint = StringCol(900) # 881-bit fingerprint stored as a 0/1 string
#open a HDF5 file with write options
file_h5 = open_file("".join((absPath, "data/", protein_type,"/resampling_before_clustering/compounds_activity.h5")), "w")
root = file_h5.root
group = file_h5.create_group(root, "activity")
table = file_h5.create_table('/activity', "prot_comp", Protein_Compound_Complex)
pair = table.row
for index,row in activity_clusters_prot_info.iterrows():
#pair["CID"] = row["CID"]
pair["da_comp_id"] = row["comp_ID"]
pair["da_prot_id"] = row["DeepAffinity Protein ID"]
pair["uniprot_id"] = row["Uniprot ID"]
#pair["activity"] = row["activity"]
pair["label"] = row["Y"]
#pair["canonical_smiles"] = row["Canonical SMILE"]
pair["sequence"] = row["Sequence"]
pair["prot_family"] = row["family"]
pair["comp_cluster"] = row["cluster_label"]
pair["fingerprint"] = row["fingerprint"]
for i in range(nfolds):
name_col = "splitting_" + str(i)
pair[name_col] = row[name_col]
pair.append()
table.flush()
file_h5.close()
#Opening HDF5 with data
filee = "".join((absPath, "data/", protein_type,"/resampling_before_clustering/compounds_activity.h5"))
f = h5py.File(filee, 'r')
group = '/activity'
table = "prot_comp"
#shuffling data indices
n_samples = len(f[group][table])
sample_indices = np.arange(0, n_samples)
sample_indices = np.random.permutation(sample_indices)
#creating folder to storage splitting lists if it does not exist
if not os.path.exists("".join((absPath, "data/", protein_type, "/resampling_before_clustering/splitting_lists/"))):
os.makedirs("".join((absPath, "data/", protein_type, "/resampling_before_clustering/splitting_lists/")))
for i in range(nfolds):
column_name = "splitting_" + str(i)
training_list, validation_list, test_list = splitting_division(f, group,
table,
sample_indices,
column_name)
pickle_filename = "".join((absPath, "data/", protein_type, "/resampling_before_clustering/splitting_lists/",
column_name, "_list.pickle"))
with open(pickle_filename, "wb") as handle:
pickle.dump((training_list, validation_list, test_list), handle)
```
| github_jupyter |
```
import graphlab as gl
import pickle
import pandas as pd
import numpy as np
from collections import Counter
data_items = pickle.load(open("/Users/marvinbertin/Github/family_style_chat_bot/data/user_by_cuisine_by_dish_ratings.pkl", 'rb'))
data_cuisine = pickle.load(open("/Users/marvinbertin/Github/family_style_chat_bot/data/user_by_cuisine_ratings.pkl", 'rb'))
df_cuisine = pd.DataFrame(data_cuisine)
df_cuisine.head()
df_cuisine.pivot(index='user_id', columns="item_id")
data_items.keys()
class group_recommender(object):
    """Recommend cuisines, and dishes within them, for a group of users.

    The group's per-cuisine ratings are averaged into a synthetic "group
    user"; the top three cuisines for that user are then expanded into
    dish recommendations for the individual members.
    """

    def __init__(self, cuisine_sf, dict_cuisine_items):
        self.cuisine = gl.SFrame(cuisine_sf)        # user x cuisine ratings
        self.cuisine_items = dict_cuisine_items     # cuisine -> dish-rating data

    def recommend(self, group_list):
        """Return a list of (cuisine, [dish, ...]) options for the group."""
        group_name = "_".join(group_list)
        # Synthetic user whose rating for each cuisine is the group mean.
        sf_avg_user = self.cuisine.filter_by(group_list, "user_id") \
            .groupby(key_columns='item_id',
                     operations={'rating': gl.aggregate.MEAN('rating')})
        sf_avg_user.add_column(gl.SArray([group_name] * len(sf_avg_user)), "user_id")
        # print sf_avg_user
        sf_new = self.cuisine.append(sf_avg_user)
        model = gl.recommender.create(sf_new, target='rating')
        results = model.recommend([group_name], exclude_known=False)
        # print results
        result_cuisine = results["item_id"][:3]
        option_list = []
        for cuisine in result_cuisine:
            # Per-cuisine model over dish ratings; ask for 2 dishes per member.
            sf_items = gl.SFrame(self.cuisine_items[cuisine])
            model_items = gl.recommender.create(sf_items, target='rating')
            results_items = model_items.recommend(group_list, exclude_known=False, k = 2)
            # print results_items
            if cuisine == "Pizza":
                # Pizzas are shared, so order roughly 2 pizzas per 3 people.
                group_size = len(group_list)
                num_pizza = int(group_size / 1.5)
                item_results = [item for item, count in Counter(results_items["item_id"]).most_common()][:num_pizza]
                option_list.append(("Pizza Party!", item_results))
            else:
                group_size = len(group_list)
                item_results = [item for item, count in Counter(results_items["item_id"]).most_common()][:group_size]
                option_list.append((cuisine, item_results))
        return option_list
group_list = np.random.choice(df_cuisine["user_id"].unique(), size = 2, replace=False)
print group_list
model = group_recommender(df_cuisine, data_items)
result = model.recommend(group_list)
result
```
| github_jupyter |
# 1.1 - Introducing the challenge
# 1.2 - Exploring the data
#### > Load and preview the data
```
import pandas as pd
sample_df = pd.read_csv('sample_data.csv')
sample_df.head()
```
#### > Summarize the data
```
sample_df.info()
sample_df.describe()
```
# 1.3 - Looking at the datatypes
#### > Objects instead of categories
```
sample_df['label'].head()
```
#### > Encode labels as categories (sample data)
```
sample_df.label.head(2)
sample_df.label = sample_df.label.astype('category')
sample_df.label.head(2)
```
#### > Dummy variable encoding
```
dummies = pd.get_dummies(sample_df[['label']], prefix_sep='_')
dummies.head(2)
```
#### > Lambda functions
```
square = lambda x: x*x
square(2)
```
#### > Encode labels as categories
```
categorize_label = lambda x: x.astype('category')
sample_df.label = sample_df[['label']].apply(categorize_label, axis= 1)
sample_df.info()
```
# 1.4 - How do we measure success?
#### > Computing log loss with NumPy
```
import numpy as np
def compute_log_loss(predicted, actual, eps=1e-14):
    """Logarithmic loss between predicted probabilities and actual labels.

    :param predicted: predicted probabilities as floats between 0 and 1
    :param actual: actual binary labels, either 0 or 1
    :param eps: log(0) is -inf, so predictions are clipped to stay
        eps away from exact 0 and 1
    """
    clipped = np.clip(predicted, eps, 1 - eps)
    pointwise = actual * np.log(clipped) + (1 - actual) * np.log(1 - clipped)
    return -np.mean(pointwise)
compute_log_loss(predicted=0.9, actual=0)
compute_log_loss(predicted=0.5, actual=1)
```
# 2.1 - It's time to build a model
#### > Splitting the data
```
data_to_train = df[NUMERIC_COLUMNS].fillna(-1000)
labels_to_use = pd.get_dummies(df[LABELS])
X_train, X_test, y_train, y_test = multilabel_train_test_split(data_to_train,labels_to_use,size=0.2, seed=123)
```
#### > Training the model
```
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
clf = OneVsRestClassifier(LogisticRegression())
clf.fit(X_train, y_train)
```
# 2.2 - Making predictions
#### > Predicting on holdout data
```
holdout = pd.read_csv('HoldoutData.csv', index_col=0)
holdout = holdout[NUMERIC_COLUMNS].fillna(-1000)
predictions = clf.predict_proba(holdout)
```
#### > Format and submit predictions
```
prediction_df = pd.DataFrame(columns=pd.get_dummies(df[LABELS],prefix_sep='__').columns,index=holdout.index,data=predictions)
prediction_df.to_csv('predictions.csv')
score = score_submission(pred_path='predictions.csv')
```
# 2.3 - A very brief introduction to NLP
# 2.4 - Representing text numerically
#### > Using CountVectorizer() on column of main dataset
```
from sklearn.feature_extraction.text import CountVectorizer
TOKENS_BASIC = '\\\\S+(?=\\\\s+)'
df.Program_Description.fillna('' , inplace=True)
vec_basic = CountVectorizer(token_pattern=TOKENS_BASIC)
vec_basic.fit(df.Program_Description)
msg = 'There are {} tokens in Program_Description if tokens are any non-whitespace'
print(msg.format(len(vec_basic.get_feature_names())))
```
# 3.1 - Pipelines, feature & text preprocessing
#### > Instantiate simple pipeline with one step
```
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
pl = Pipeline([
('clf', OneVsRestClassifier(LogisticRegression()))])
```
#### > Train and test with sample numeric data
```
sample_df.head()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(sample_df[['numeric']],pd.get_dummies(sample_df['label']),random_state=2)
pl.fit(X_train, y_train)
accuracy = pl.score(X_test, y_test)
print('accuracy on numeric data, no nans: ', accuracy)
```
#### > Adding more steps to the pipeline
```
# Fixed: the original call was truncated after `random_state` (no value, no
# closing parenthesis), which is a syntax error.
X_train, X_test, y_train, y_test = train_test_split(
    sample_df[['numeric', 'with_missing']],
    pd.get_dummies(sample_df['label']),
    random_state=2)  # same seed as the other splits in this chapter
pl.fit(X_train, y_train)
```
#### > Preprocessing numeric features with missing data
```
from sklearn.preprocessing import Imputer
X_train, X_test, y_train, y_test = train_test_split(
    sample_df[['numeric', 'with_missing']],
    pd.get_dummies(sample_df['label']), random_state=2)
# Impute missing values (mean by default) before the classifier sees them.
pl = Pipeline([('imp', Imputer()),
               ('clf', OneVsRestClassifier(LogisticRegression()))
])
pl.fit(X_train, y_train)  # fixed: was `pipeline.fit`, but the object is named `pl`
accuracy = pl.score(X_test, y_test)
print('accuracy on all numeric, incl nans: ', accuracy)
```
# 3.2 - Text features and feature unions
#### > Preprocessing text features
```
from sklearn.feature_extraction.text import CountVectorizer
# Fixed: the train_test_split arguments were missing their separating commas.
X_train, X_test, y_train, y_test = train_test_split(sample_df['text'],
                                                    pd.get_dummies(sample_df['label']),
                                                    random_state=2)
# Vectorize the raw text, then fit one binary classifier per label column.
pl = Pipeline([('vec', CountVectorizer()),
               ('clf', OneVsRestClassifier(LogisticRegression()))
])
pl.fit(X_train, y_train)
accuracy = pl.score(X_test, y_test)
print('accuracy on sample data: ', accuracy)
```
#### > Putting it all together
```
X_train, X_test, y_train, y_test = train_test_split(sample_df[['numeric','with_missing', 'text']],
pd.get_dummies(sample_df['label']), random_state=2)
from sklearn.preprocessing import FunctionTransformer
from sklearn.pipeline import FeatureUnion
get_text_data = FunctionTransformer(lambda x: x['text'],validate=False)
get_numeric_data = FunctionTransformer(lambda x: x[['numeric','with_missing']], validate=False)
```
#### > FeatureUnion Text and Numeric Features
```
from sklearn.pipeline import FeatureUnion
union = FeatureUnion([
('numeric', numeric_pipeline),
('text', text_pipeline)
])
```
#### > Putting it all together
```
numeric_pipeline = Pipeline([
('selector', get_numeric_data),
('imputer', Imputer())
])
text_pipeline = Pipeline([
('selector', get_text_data),
('vectorizer', CountVectorizer())
])
pl = Pipeline([
('union', FeatureUnion([
('numeric', numeric_pipeline),
('text', text_pipeline)
])),
('clf', OneVsRestClassifier(LogisticRegression()))
])
```
# 3.3 - Choosing a classification model
#### > Main dataset: lots of text
```
LABELS = ['Function', 'Use', 'Sharing', 'Reporting', 'Student_Type',
'Position_Type', 'Object_Type', 'Pre_K', 'Operating_Status']
NON_LABELS = [c for c in df.columns if c not in LABELS]
len(NON_LABELS) - len(NUMERIC_COLUMNS)
```
#### > Using pipeline with the main dataset
```
import numpy as np
import pandas as pd
df = pd.read_csv('TrainingSetSample.csv', index_col=0)
dummy_labels = pd.get_dummies(df[LABELS])
X_train, X_test, y_train, y_test = multilabel_train_test_split(
df[NON_LABELS], dummy_labels,0.2)
get_text_data = FunctionTransformer(combine_text_columns,validate=False)
get_numeric_data = FunctionTransformer(lambda x:x[NUMERIC_COLUMNS], validate=False)
pl = Pipeline([
('union', FeatureUnion([
('numeric_features', Pipeline([
('selector', get_numeric_data),
('imputer', Imputer())
])),
('text_features', Pipeline([
('selector', get_text_data),
('vectorizer', CountVectorizer())
]))
])
),
('clf', OneVsRestClassifier(LogisticRegression()))
])
pl.fit(X_train, y_train)
```
#### > Easily try new models using pipeline
```
from sklearn.ensemble import RandomForestClassifier
# Same pipeline as before; only the final estimator is swapped out.
pl = Pipeline([
    ('union', FeatureUnion([
        ('numeric_features', Pipeline([
            ('selector', get_numeric_data),
            ('imputer', Imputer())
        ])),
        ('text_features', Pipeline([
            ('selector', get_text_data),
            ('vectorizer', CountVectorizer())
        ]))
    ])
    ),
    # fixed: `OneVsRest` is not a sklearn name; the wrapper is OneVsRestClassifier
    ('clf', OneVsRestClassifier(RandomForestClassifier()))
])
```
# 4.1 - Learning from the expert: processing
#### > N-grams and tokenization
```
vec = CountVectorizer(token_pattern=TOKENS_ALPHANUMERIC,
ngram_range=(1, 2))
```
#### > Range of n-grams in scikit-learn
```
pl.fit(X_train, y_train)
holdout = pd.read_csv('HoldoutData.csv', index_col=0)
predictions = pl.predict_proba(holdout)
prediction_df = pd.DataFrame(columns=pd.get_dummies(df[LABELS]).columns, index=holdout.index,data=predictions)
prediction_df.to_csv('predictions.csv')
score = score_submission(pred_path='predictions.csv')
```
# 4.2 - Learning from the expert: a stats trick
#### > Adding interaction features with scikit-learn
```
from sklearn.preprocessing import PolynomialFeatures
x
interaction = PolynomialFeatures(degree=2,interaction_only=True,include_bias=False)
interaction.fit_transform(x)
```
#### > Sparse interaction features
```
SparseInteractions(degree=2).fit_transform(x).toarray()
```
# 4.3 - Learning from the expert: the winning model
#### > Implementing the hashing trick in scikit-learn
```
from sklearn.feature_extraction.text import HashingVectorizer
# Stateless hashing of alphanumeric uni- and bi-grams; no vocabulary stored.
# NOTE(review): the non_negative parameter was deprecated in scikit-learn 0.19
# and removed in 0.21 -- on modern versions use alternate_sign=False instead.
vec = HashingVectorizer(norm=None,
                        non_negative=True,
                        token_pattern=TOKENS_ALPHANUMERIC,
                        ngram_range=(1, 2))
```
| github_jupyter |
# Questionnaire: Section 1 of 2
Before starting to use the iRONs Notebooks could you please answer the questions of Section 1 of 2 of this questionnaire?
Please do not answer the questions of Section 2 of 2 but keep the questionnaire open until you are done with all the Notebooks, then you will be asked to answer Section 2 of 2.
https://forms.gle/wPVHMhdH5qmTnWf36
The questionnaire aims to evaluate the efficacy of iRONs in communicating some technical concepts of hydrological modelling and water reservoir operation.
# Sound wave example
# 1. Import libraries
To run some of the tools offered by Python we need to import libraries (or toolboxes). Most of them come installed with the Anaconda package, so we only need to import them by running the code below (🚨 in order to run the code, like in the box below, place the mouse pointer in the cell, then click on “run cell” button above or press shift + enter)
```
# Library for scientific computing
import numpy as np
# Libraries for visualization
import ipywidgets as widgets
from ipywidgets import FloatSlider, VBox
from IPython.display import display, Audio, clear_output
from bqplot import pyplot as plt
from bqplot import Axis
from bqplot import LinearScale
from bqplot.traits import *
# FIX: 'warnings' was used below without being imported, which raises
# NameError; the duplicate 'from bqplot import pyplot as plt' line was
# also removed (it appeared twice in the original cell).
import warnings
warnings.filterwarnings('ignore') # to ignore warning messages
```
# 2. Data
<left><img src="../../util/images/Standing_wave.gif" width="400px">
In this example we will define and then plot a sound wave which can be represented as a sine curve with a certain amplitude, phase and frequency using the following equation:
$y(x) = A \sin(2 \pi(\nu x + \phi ))$
where
$A = amplitude$
$\phi = phase$
$\nu = frequency$
### Definition of the sine curve
In the cell below we use three variables to define the initial values of the parameters amplitude, phase and frequency, a vector *x* to define the points at which the curve will be evaluated, and a vector *y(x)* to define the curve. For this purpose, we use some of the functions of the imported Numpy library, e.g. *np.linspace* to generate evenly spaced numbers over a specified interval or *np.pi* to get the value of π.
```
# Sine-curve parameters for y(x) = amp * sin(2*pi*(freq*x + phase))
amp = 10    # amplitude A
phase = 1   # phase shift (in cycles)
freq = 10   # frequency (cycles over the unit interval)
# 500 evenly spaced sample points covering [0, 1], both endpoints included
x = np.linspace(0, 1, 500)
# Evaluate the curve at every sample point in one vectorized expression
angle = 2 * np.pi * (freq * x + phase)
y = amp * np.sin(angle)
```
# 3. Visualization of the data
### Plot of the curve
Using the imported visualization libraries we can now plot the sine curve using the parameter values (amplitude, phase and frequency) defined in the cell above.
```
# First, let's create a scale for the x attribute, and a scale for the y attribute
x_sc_1 = LinearScale(min=0,max=1)
y_sc_1 = LinearScale(min=-20,max=20)
# Then we can define some of the features of the plot axes such as their labels.
x_ax_1 = Axis(label='x', scale=x_sc_1)
y_ax_1 = Axis(label='y', scale=y_sc_1, orientation='vertical')
# Finally we can define other additional features of the figure and plot the sine curve
fig_1 = plt.figure(scales={'x': x_sc_1, 'y': y_sc_1}, axes=[x_ax_1, y_ax_1], title = 'Sine curve',
layout={'min_width': '1000px', 'max_height': '300px'})
sin_wave_1 = plt.plot(x,y) # Plot the sine curve
fig_1
```
### Exercise 1
We will now import a sound wave (displayed as a black curve) and look at how the sine curve (blue) compares to it. The goodness-of-fit between the two curves will be quantified by the root mean square error (RMSE).
```
# Black curve: the target sound wave the user will try to match.
from Modules.sound_wave import sound_wave
xk,yk = sound_wave()
# RMSE between the current sine curve and the target wave.
# FIX: the root must be taken of the *mean* squared error; the original
# np.sqrt((y - yk) ** 2).mean() averaged the element-wise roots instead
# (i.e. it computed the mean absolute error, not the RMSE). The corrected
# form matches the formula used in the update cells below.
RMSE = np.sqrt(((y - yk) ** 2).mean())
# Plot both curves with the RMSE shown in the figure title.
fig_2 = plt.figure(scales={'x': x_sc_1, 'y': y_sc_1}, axes=[x_ax_1, y_ax_1], title = 'RMSE = '+str("%.2f" % RMSE),
                  layout={'min_width': '1000px', 'max_height': '300px'}, animation_duration = 1000)
sine_curve_2 = plt.plot(x,y)
plt.plot(xk,yk,'k')
fig_2
```
Now you can change the values of amplitude, phase and frequency in the code cell below, trying to better fit the black curve (that is, to achieve RMSE = 0). Remember to run the cell after changing the values, so that you can see the changes in the figure above.
```
amp = 5
phase = 1
freq = 10
###############################################
sine_curve_2.y = amp * np.sin(2 * np.pi * (freq * x + phase))
RMSE = np.sqrt(((amp * np.sin(2 * np.pi * (freq * x + phase)) - yk) ** 2).mean())
fig_2.title = 'RMSE = '+str("%.2f" % RMSE)
```
### Exercise 2:
Try to do the same but now using the sliders (execute the code cell below to make the sliders appear). Much easier, isn't it?
```
def update_sine_curve(x,amp,freq,phase):
    """Evaluate the sine curve and its RMSE against the target sound wave.

    Returns a ``(curve, rmse)`` tuple; ``yk`` (the imported sound wave,
    a notebook global) is the reference the RMSE is computed against.
    """
    curve = amp * np.sin(2 * np.pi * (freq * x + phase))
    residuals = curve - yk
    rmse = np.sqrt((residuals ** 2).mean())
    return curve, rmse
def update_figure(change):
    """ipywidgets observer callback: refresh the plotted curve and RMSE title.

    Reads the current slider values (notebook globals ``amp``/``freq``/``phase``)
    and updates the bqplot line ``sine_curve`` and figure ``fig_3`` in place.
    """
    # Compute the curve and RMSE once instead of calling the helper twice
    # (the original evaluated update_sine_curve twice per slider event).
    curve, RMSE = update_sine_curve(x, amp.value, freq.value, phase.value)
    sine_curve.y = curve
    if RMSE == 0:
        # Exact fit reached -- congratulate the user in the figure title.
        fig_3.title = 'RMSE = ' + str("%.2f" % RMSE) + ' Well done!!!'
    else:
        fig_3.title = 'RMSE = ' + str("%.2f" % RMSE)
amp = widgets.FloatSlider(min=1,max=15,value=8, description = 'Amplitude: ')
amp.observe(update_figure,'value')
phase = widgets.FloatSlider(min=0,max=1,value=0.5, description = 'Phase: ')
phase.observe(update_figure,'value')
freq = widgets.FloatSlider(min=1,max=10,value=5.5, description = 'Frequency: ')
freq.observe(update_figure,'value')
# First, let's create a scale for the x attribute, and a scale for the y attribute
x_sc = plt.LinearScale(min=0,max=1)
y_sc = plt.LinearScale(min=-20,max=20)
# Then we can define some of the features of the plot axes such as their labels.
x_ax = plt.Axis(label='x', scale=x_sc)
y_ax = plt.Axis(label='y', scale=y_sc, orientation='vertical')
fig_3 = plt.figure(scales={'x': x_sc, 'y': y_sc}, axes=[x_ax, y_ax],
title = 'RMSE = '+str("%.2f" % update_sine_curve(x,amp.value,freq.value,phase.value)[1]),
layout={'min_width': '900px', 'max_height': '300px'}, animation_duration = 1000)
sine_curve = plt.plot(x,update_sine_curve(x,amp.value,freq.value,phase.value)[0])
plt.plot(xk,yk,'k', label = 'sound wave')
plt.xlim(0,1)
plt.ylim(-20,20)
sine_curve.observe(update_figure, ['x', 'y'])
widgets.VBox([amp, freq, phase,fig_3])
```
### Exercise 3
How about not only plotting but also listening to your curve? Check whether you have **"super hearing"** and can hear sounds with frequencies lower than 20Hz (human audible spectrum = 20-20000 Hz). After changing the value of wave frequency remember to click on the Play button below to hear the sound.
```
%matplotlib nbagg
import matplotlib.pyplot as plt
fig_4, ax = plt.subplots(1, figsize= (10,4))
plt.suptitle('Interactive audio sine wave')
def interactive_audio_wave(freq):
    """Generate, play and plot one second of a sine tone at ``freq`` Hz.

    Renders an inline audio player and redraws the matplotlib axes ``ax``
    (a notebook global) with the new waveform on every slider change.
    """
    x = np.linspace(0,1,44100) # one second sampled at the 44.1 kHz audio rate
    y = 10 * np.sin(2 * np.pi * (freq * x + 1))
    # Inline audio widget; the sample rate must match the x grid above.
    display(Audio(y, rate=44100))
    ax.clear()  # reuse the same axes on every call instead of stacking lines
    units = 'freq = {} $(Hz)$'
    ax.plot(x,y, label='your curve: '+units.format(freq))
    ax.set_xlabel('s')
    ax.set_ylabel('$\psi$')
    ax.set_ylim(-20,20)
    ax.legend(loc=1)
fig_4
freq_audio = widgets.FloatSlider(min=1,max=200,value=50, description = 'Frequency (Hz): ',style = {'description_width': '300px'} ,
layout={'width': '1000px'})
widgets.interactive(interactive_audio_wave, freq=freq_audio)
```
#### Let's go to the next section!: [2.a. Calibration and evaluation of a rainfall-runoff model](2.a.%20Calibration%20and%20evaluation%20of%20a%20rainfall-runoff%20model.ipynb)
| github_jupyter |
```
%matplotlib inline
%reload_ext autoreload
%autoreload 2
# 多行输出
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
from fastai import *
from fastai.text import *
doc(Config)
```
- IMDB 精简数据
```
path = untar_data(URLs.IMDB)
path
path.ls()
BATCH = 32
# data_lm = (TextList.from_folder(path)
# #Inputs: all the text files in path
# .filter_by_folder(include=['train', 'test'])
# #We may have other temp folders that contain text files so we only keep what's in train and test
# .split_by_rand_pct(0.1)
# #We randomly split and keep 10% (10,000 reviews) for validation
# .label_for_lm()
# #We want to do a language model so we label accordingly, 自己就是自己的标签
# .databunch(bs=BATCH))
# data_lm.save('data_lm')
data_lm = load_data(path, 'data_lm', bs=BATCH)
```
## 创建模型
```
learn_lm = language_model_learner(data_lm, Transformer, drop_mult=0.3)
learn_lm.loss_func
learn_lm.opt_func
learn_lm.model
```
### 编码器
```
encoder = get_model(learn_lm.model)[0]
encoder
```
- 生成假的评论,使用语言模型预测
```
TEXT = "The color of the sky is"
N_WORDS = 40
N_SENTENCES = 2
print("\n\n".join(learn_lm.predict(TEXT, N_WORDS, temperature=0.75) for _ in range(N_SENTENCES)))
```
- temperature 控制生成文本的随机性
```
print("\n\n".join(learn_lm.predict(TEXT, N_WORDS, temperature=0.1) for _ in range(N_SENTENCES)))
```
- 预训练数据集
```
tr_itos = pickle.load(open(Config().model_path()/'transformer/itos_tfmer.pkl', 'rb'))
tr_itos[:10]
len(tr_itos), len(data_lm.vocab.itos) # wiki 词汇, IMDB 词汇
unks[:16]
```
## 精调模型
```
learn_lm.lr_find()
learn_lm.recorder.plot(skip_end=10)
learn_lm.fit_one_cycle(1, 1e-2, moms=(0.8, 0.7))
learn_lm.save('transformer_fit_1')
learn_lm=None
gc.collect()
```
- 保存
```
data_lm = load_data(path, 'data_lm', bs=BATCH//2)
learn_lm = language_model_learner(data_lm, Transformer, drop_mult=0.3)
learn_lm.unfreeze()
learn_lm.load('transformer_fit_1');
learn_lm.fit_one_cycle(1, 1e-3, moms=(0.8,0.7))
learn_lm.save('transformer_fit_2')
learn_lm=None
gc.collect()
learn_lm = language_model_learner(data_lm, Transformer, drop_mult=0.3)
learn_lm.load('transformer_fit_2');
TEXT = "i liked this movie because"
N_WORDS = 40
N_SENTENCES = 2
print("\n\n".join(learn_lm.predict(TEXT, N_WORDS, temperature=0.75) for _ in range(N_SENTENCES)))
print("\n\n".join(learn_lm.predict(TEXT, N_WORDS, temperature=0.75) for _ in range(N_SENTENCES)))
learn_lm.save_encoder('fine_tuned_enc')
learn_lm = None
gc.collect()
```
## 语义分类任务
```
learn_c = None
gc.collect()
path = untar_data(URLs.IMDB)
BATCH = 16
# data_clas = (TextList.from_folder(path, vocab=data_lm.vocab)
# #grab all the text files in path
# .split_by_folder(valid='test')
# #split by train and valid folder (that only keeps 'train' and 'test' so no need to filter)
# .label_from_folder(classes=['neg', 'pos'])
# #label them all with their folders
# .databunch(bs=BATCH))
# data_clas.save('imdb_textlist_class')
data_clas = load_data(path, 'imdb_textlist_class', bs=BATCH//4)
data_clas.show_batch()
learn_c = text_classifier_learner(data_clas, Transformer, drop_mult=0.5)
learn_c.model
learn_c.loss_func
learn_c.load_encoder('fine_tuned_enc')
learn_c.lr_find()
learn_c.recorder.plot()
learn_c.fit_one_cycle(3, 5e-3, moms=(0.8, 0.7))
learn_c.save('fine1')
learn_c = None
gc.collect()
```
| github_jupyter |
```
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import asyncio
import aiohttp
import json
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import requests
import seaborn as sns
from ast import literal_eval
from collections import defaultdict
pd.options.display.max_rows = 200
pd.options.display.max_columns = 50
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:80% !important; }</style>"))
# Copied from pyencoded-tools/encodedcc.py to avoid dependency.
class ENC_Key:
    """Credentials for one ENCODE server entry in a keypairs file.

    ``keyfile`` may be a path to a JSON keypairs file or an already-parsed
    dict of the same shape; ``keyname`` selects one entry (e.g. 'prod').
    Exposes ``authid``, ``authpw`` and a trailing-slash-normalized ``server``.
    """
    def __init__(self, keyfile, keyname):
        # Accept either a path on disk or an in-memory dict of keypairs.
        if os.path.isfile(str(keyfile)):
            # Context manager + json.load replaces the original manual
            # open/read/close sequence and closes the file even on error.
            with open(keyfile, 'r') as keys_f:
                keys = json.load(keys_f)
        else:
            keys = keyfile
        key_dict = keys[keyname]
        self.authid = key_dict['key']
        self.authpw = key_dict['secret']
        self.server = key_dict['server']
        # Normalize so URLs can be built by simple concatenation.
        if not self.server.endswith("/"):
            self.server += "/"
class ENC_Connection(object):
    """Lightweight holder of request settings derived from an ENC_Key."""

    def __init__(self, key):
        # JSON in, JSON out for every request made with these settings.
        self.headers = {
            'content-type': 'application/json',
            'accept': 'application/json',
        }
        self.auth = (key.authid, key.authpw)
        self.server = key.server
# Define key if private data desired.
key = ENC_Key(os.path.expanduser("~/keypairs.json"), 'prod')
```
## Get accessions for all replaced items
```
# Pull accession of all Items with replaced status.
url = 'https://www.encodeproject.org/search/'\
'?type=File&type=Dataset&type=Donor&type=Library'\
'&type=Pipeline&type=Biosample&type=AntibodyLot&status=replaced'\
'&limit=all&format=json'
r = requests.get(url, auth=(key.authid, key.authpw))
search_results = r.json()['@graph']
len(search_results)
accessions = set()
for result in search_results:
accessions.add(result['accession'])
len(accessions)
```
## Search for each accession and check length of results
```
# loop.close()
# loop = asyncio.new_event_loop()
# asyncio.set_event_loop(loop)
# Asyncio request.
result_length = []
bad_accessions = []
request_auth = aiohttp.BasicAuth(key.authid, key.authpw)
async def get_json(url, sem):
    """Fetch *url* and return the decoded JSON body, honouring the semaphore.

    ``request_auth`` is a notebook global (aiohttp.BasicAuth).
    """
    # The semaphore bounds how many requests are in flight at once.
    async with sem:
        # NOTE(review): opening a new ClientSession per request works but is
        # wasteful; aiohttp recommends sharing one session across requests.
        async with aiohttp.ClientSession() as session:
            async with session.get(url, auth=request_auth) as resp:
                return await resp.json()
async def get_request(accession, sem):
    """Search the portal for one accession and record how many items match.

    Appends a count record to the notebook-global list ``result_length``
    (always) and the full hits to ``bad_accessions`` when more than one
    live item shares the accession.
    """
    url = 'https://www.encodeproject.org/'\
          'search/?type=Item&accession={}'\
          '&limit=all&format=json'.format(accession)
    result = await get_json(url, sem)
    search_results = result['@graph']
    num_results = len(search_results)
    result_length.append({'accession': accession,
                          'result_length': num_results})
    if num_results > 1:
        # More than one item claims this accession -- flag it for review.
        bad_accessions.append({'accession': accession,
                               'results': search_results})
sem = asyncio.Semaphore(20)
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(*[get_request(accession, sem) for accession in accessions]));
# # Search for each accession, count number of results.
# counter = 0
# result_length = []
# bad_accessions = []
# for accession in accessions:
# url = 'https://www.encodeproject.org/search/'\
# '?type=Item&accession={}'\
# '&limit=all&format=json'.format(accession)
# r = requests.get(url, auth=(key.authid, key.authpw))
# search_results = r.json()['@graph']
# result_length.append({'accession': accession,
# 'result_length': len(search_results)})
# if len(search_results) > 1:
# bad_accessions.append({'accession': accession,
# 'results': search_results})
# counter += 1
# if counter % 100 == 0:
# print(".", end="")
# if counter % 1000 == 0:
# print("\n")
# Make sure search results returned for each accession.
#assert len(accessions) == counter
pd.DataFrame(result_length).result_length.value_counts()
len(bad_accessions)
bad_accessions[0]
duplicate_accession_data = []
for bad in bad_accessions:
for item in bad['results']:
duplicate_accession_data.append({'accession': item['accession'],
'file_format': item['file_format'],
'status': item['status'],
'dataset': item['dataset']})
duplicate_accessions = pd.DataFrame(duplicate_accession_data)
duplicate_accessions.dataset.value_counts()
```
## Associate duplicate accessions to Experiment lab.
```
experiment_list = duplicate_accessions.dataset.unique()
search_ids = "&@id=".join(experiment_list)
url = 'https://www.encodeproject.org/search/'\
'?type=Item&limit=all&frame=embedded&@id={}'.format(search_ids)
r = requests.get(url, auth=(key.authid, key.authpw))
search_results = r.json()['@graph']
search_id_map = {}
for experiment in search_results:
search_id_map[experiment['@id']] = experiment['lab']['name']
duplicate_accessions['lab'] = duplicate_accessions.dataset.apply(lambda x: search_id_map[x])
print(*sorted(duplicate_accessions.lab.unique()), sep='\n')
list(duplicate_accessions.accession.unique())
duplicate_accessions[duplicate_accessions.status == "replaced"].groupby(['lab',
'accession',
'status',
'file_format']).count().sort_index(0)[[]]
duplicate_accessions.groupby(['lab',
'status',
'dataset',
'accession',
'file_format']).count().sort_index(1, 0)
duplicate_accessions.groupby(['accession',
'status', 'file_format',
'lab',
'dataset',
'file_format']).count().sort_index(1, 0).unstack()
duplicate_accessions
```
## Data for all replaced Items
```
# Grab data of all replaced Items.
replaced_data = []
url = 'https://www.encodeproject.org/search/'\
'?type=File&type=Dataset&type=Donor&type=Library'\
'&type=Pipeline&type=Biosample&type=AntibodyLot&status=replaced'\
'&frame=embedded&limit=all&format=json'
r = requests.get(url, auth=(key.authid, key.authpw))
search_results = r.json()['@graph']
na = 'not_available'
for result in search_results:
sub_by = result.get('submitted_by', {})
if isinstance(sub_by, str):
submitted_by = sub_by
else:
submitted_by = sub_by.get('title', na)
lab = result.get('lab', {})
if isinstance(lab, str):
lab_name = lab
else:
lab_name = lab.get('name', na)
item_data = {'accession': result['accession'],
'submitted_by': submitted_by,
'derived_from': result.get('derived_from', na),
'superseded_by': result.get('superseded_by', na),
'supersedes': result.get('supersedes', na),
'@id': result['@id'],
'alternate_accessions': result.get('alternate_accessions', na),
'dataset': result.get('dataset', na),
'lab_name': lab_name,
'date_created': result.get('date_created', na),
'@type': result['@type'][0],
'output_type': result.get('output_type', na),
'file_format': result.get('file_format', na),
'assembly': result.get('assembly', na),
'paired_with': result.get('paired_with', na),
'paired_end': result.get('paired_end', na),
'file_format_type': result.get('file_format_type', na),
'technical_replicates': result.get('technical_replicates', na),
'replicate_uuid': result.get('replicate', {}).get('uuid', na),
'md5sum': result.get('md5sum', na),
'content_md5sum': result.get('content_md5sum', na),
'status': result['status'],
'product_id': result.get('product_id', na),
'culture_start_date': result.get('culture_start_date', na),
'biosample_type': result.get('biosample_type', na),
'description': result.get('description', na),
'treatments': result.get('treatments', na)
}
replaced_data.append(item_data)
replaced_data[900]
len(replaced_data)
def parse_lab_name(lab):
    """Strip the '/labs/<name>/' decoration from a lab value.

    String inputs have every '/' and the substring 'labs' removed;
    any other (list-like) value yields its first element.
    """
    if not isinstance(lab, str):
        # Non-string inputs are list-like; the lab name is the first entry.
        return lab[0]
    # NOTE: removes *every* occurrence of 'labs', not just the path prefix.
    cleaned = lab.replace("/", "")
    return cleaned.replace("labs", "")
rd = pd.DataFrame(replaced_data)
rd.lab_name = rd.lab_name.apply(lambda x: parse_lab_name(x))
rd.loc[rd.assembly.apply(lambda x: len(x) == 0), 'assembly'] = 'empty_list'
rd.loc[rd.superseded_by.apply(lambda x: len(x) == 0), 'superseded_by'] = 'empty_list'
rd.loc[rd.supersedes.apply(lambda x: len(x) == 0), 'supersedes'] = 'empty_list'
rd.loc[rd.derived_from.apply(lambda x: len(x) == 0), 'derived_from'] = 'empty_list'
rd.loc[rd.technical_replicates.apply(lambda x: len(x) == 0), 'technical_replicates'] = 'empty_list'
rd.loc[rd.alternate_accessions.apply(lambda x: len(x) == 0), 'alternate_accessions'] = 'empty_list'
rd.loc[rd.treatments.apply(lambda x: len(x) == 0), 'treatments'] = 'empty_list'
```
## Check to see if replacement is similar to replaced (optional)
```
def drop_unique_fields(data):
    """Return a copy of *data* without the fields that are unique per item.

    Identifier, checksum and timestamp fields always differ between a
    replaced item and its replacement, so they are excluded before the
    dict comparison performed by the caller.
    """
    # FIX: the original listed '@accession', a key that never occurs in
    # these records (they use 'accession'), so the accession field was
    # silently kept in the comparison.
    drop_fields = ['@id',
                   'accession',
                   'md5sum',
                   'content_md5sum',
                   'date_created']
    return {k: v for k, v in data.items() if k not in drop_fields}
# Re-fetch each replaced accession from the portal and compare the live
# record against the one captured in `rd`, so divergent replaced/replacement
# pairs can be flagged for review.
replacement_data = []
broken_pair = defaultdict(list)
for accession in rd.accession.unique():
    replaced_values = rd[rd.accession == accession].to_dict(orient='records')[0]
    url = 'https://www.encodeproject.org/{}/?format=json'.format(accession)
    r = requests.get(url, auth=(key.authid, key.authpw))
    if (r.status_code == 200):
        result = r.json()
        # submitted_by / lab may come back embedded (dict) or as a bare @id string.
        sub_by = result.get('submitted_by', {})
        if isinstance(sub_by, str):
            submitted_by = sub_by
        else:
            submitted_by = sub_by.get('title', na)
        lab = result.get('lab', {})
        if isinstance(lab, str):
            lab_name = lab
        else:
            lab_name = lab.get('name', na)
        item_data = {'accession': result['accession'],
                     'submitted_by': submitted_by,
                     '@id': result['@id'],
                     'alternate_accessions': result.get('alternate_accessions', na),
                     'dataset': result.get('dataset', na),
                     'lab_name': lab_name,
                     'date_created': result.get('date_created', na),
                     '@type': result['@type'][0],
                     'output_type': result.get('output_type', na),
                     'file_format': result.get('file_format', na),
                     'assembly': result.get('assembly', na),
                     'paired_with': result.get('paired_with', na),
                     'paired_end': result.get('paired_end', na),
                     'file_format_type': result.get('file_format_type', na),
                     'technical_replicates': result.get('technical_replicates', na),
                     'replicate_uuid': result.get('replicate', {}).get('uuid', na),
                     'md5sum': result.get('md5sum', na),
                     'content_md5sum': result.get('content_md5sum', na),
                     'status': result['status'],
                     'product_id': result.get('product_id', na),
                     'culture_start_date': result.get('culture_start_date', na),
                     'biosample_type': result.get('biosample_type', na),
                     'description': result.get('description', na),
                     'treatments': result.get('treatments', na)
                     }
        # Normalize the freshly fetched record the same way `rd` was built,
        # so the dict comparison below is apples-to-apples.
        item_temp = pd.DataFrame([item_data])
        item_temp.lab_name = item_temp.lab_name.apply(lambda x: parse_lab_name(x))
        item_temp.loc[item_temp.assembly.apply(lambda x: len(x) == 0), 'assembly'] = 'empty_list'
        item_temp.loc[item_temp.technical_replicates.apply(lambda x: len(x) == 0), 'technical_replicates'] = 'empty_list'
        item_temp.loc[item_temp.alternate_accessions.apply(lambda x: len(x) == 0), 'alternate_accessions'] = 'empty_list'
        item_temp.loc[item_temp.treatments.apply(lambda x: len(x) == 0), 'treatments'] = 'empty_list'
        item_temp = item_temp.to_dict(orient='records')[0]
        replaced_dict = drop_unique_fields(replaced_values)
        # FIX: the replacement record is the normalized live item (item_temp);
        # the original compared replaced_dict against a re-dropped copy of
        # itself, so no pair could ever be flagged as broken.
        # NOTE(review): `rd` rows carry extra keys (derived_from,
        # superseded_by, supersedes) that item_data does not collect, so
        # these comparisons will flag on key-set differences too -- confirm
        # whether those fields should be added above or dropped here.
        replacement_dict = drop_unique_fields(item_temp)
        if replaced_dict != replacement_dict:
            # FIX: group flagged items under their accession rather than the
            # literal string 'accession' (the defaultdict(list) strongly
            # suggests per-accession grouping was intended).
            broken_pair[accession].append(item_data)
        replacement_data.append(item_data)
len(replacement_data)
```
## Data for portal redirect of replaced accessions
```
# loop.close()
# loop = asyncio.new_event_loop()
# asyncio.set_event_loop(loop)
# Asyncio request.
replaced_by_file = []
na = 'not_available'
async def get_request(session, accession):
url = 'https://www.encodeproject.org/{}'.format(accession)
async with session.get(url, auth=request_auth, timeout=None) as response:
if response.status == 404:
item_data = {'searched_accession': accession,
'redirected_to_accession': 'no_result'}
replaced_by_file.append(item_data)
else:
result = await response.json()
sub_by = result.get('submitted_by', {})
if isinstance(sub_by, str):
submitted_by = sub_by
else:
submitted_by = sub_by.get('title', na)
lab = result.get('lab', {})
if isinstance(lab, str):
lab_name = lab
else:
lab_name = lab.get('name', na)
item_data = {'accession': result['accession'],
'submitted_by': submitted_by,
'derived_from': result.get('derived_from', na),
'superseded_by': result.get('superseded_by', na),
'supersedes': result.get('supersedes', na),
'@id': result['@id'],
'alternate_accessions': result.get('alternate_accessions', na),
'dataset': result.get('dataset', na),
'lab_name': lab_name,
'date_created': result.get('date_created', na),
'@type': result['@type'][0],
'output_type': result.get('output_type', na),
'file_format': result.get('file_format', na),
'assembly': result.get('assembly', na),
'paired_with': result.get('paired_with', na),
'paired_end': result.get('paired_end', na),
'file_format_type': result.get('file_format_type', na),
'technical_replicates': result.get('technical_replicates', na),
'replicate_uuid': result.get('replicate', {}).get('uuid', na),
'md5sum': result.get('md5sum', na),
'content_md5sum': result.get('content_md5sum', na),
'status': result['status'],
'product_id': result.get('product_id', na),
'culture_start_date': result.get('culture_start_date', na),
'biosample_type': result.get('biosample_type', na),
'description': result.get('description', na),
'treatments': result.get('treatments', na)}
replaced_by_file.append(item_data)
if len(replaced_by_file) % 100 == 0:
print(len(replaced_by_file))
async def create_session(accessions, loop):
connector = aiohttp.TCPConnector(keepalive_timeout=10, limit=100)
async with aiohttp.ClientSession(connector=connector, loop=loop) as session:
results = await asyncio.gather(*[get_request(session, accession) for accession in accessions])
loop = asyncio.get_event_loop()
loop.run_until_complete(create_session(accessions, loop))
len(replaced_by_file)
len(accessions)
# # Asyncio request.
# request_auth = aiohttp.BasicAuth(key.authid, key.authpw)
# replaced_by_file = []
# na = 'not_available'
# async def get_request(url, sem):
# async with sem:
# async with aiohttp.ClientSession() as session:
# async with session.get(url, auth=request_auth) as resp:
# return await resp.json()
# async def get_data(accession, sem):
# url = 'https://www.encodeproject.org/{}'.format(accession)
# result = await get_request(url, sem)
# if result.get('code', False) == 404:
# item_data = {'searched_accession': accession,
# 'redirected_to_accession': 'no_result'}
# replaced_by_file.append(item_data)
# else:
# sub_by = result.get('submitted_by', {})
# if isinstance(sub_by, str):
# submitted_by = sub_by
# else:
# submitted_by = sub_by.get('title', na)
# lab = result.get('lab', {})
# if isinstance(lab, str):
# lab_name = lab
# else:
# lab_name = lab.get('name', na)
# item_data = {'accession': result['accession'],
# 'submitted_by': submitted_by,
# 'derived_from': result.get('derived_from', na),
# 'superseded_by': result.get('superseded_by', na),
# 'supersedes': result.get('supersedes', na),
# '@id': result['@id'],
# 'alternate_accessions': result.get('alternate_accessions', na),
# 'dataset': result.get('dataset', na),
# 'lab_name': lab_name,
# 'date_created': result.get('date_created', na),
# '@type': result['@type'][0],
# 'output_type': result.get('output_type', na),
# 'file_format': result.get('file_format', na),
# 'assembly': result.get('assembly', na),
# 'paired_with': result.get('paired_with', na),
# 'paired_end': result.get('paired_end', na),
# 'file_format_type': result.get('file_format_type', na),
# 'technical_replicates': result.get('technical_replicates', na),
# 'replicate_uuid': result.get('replicate', {}).get('uuid', na),
# 'md5sum': result.get('md5sum', na),
# 'content_md5sum': result.get('content_md5sum', na),
# 'status': result['status'],
# 'product_id': result.get('product_id', na),
# 'culture_start_date': result.get('culture_start_date', na),
# 'biosample_type': result.get('biosample_type', na),
# 'description': result.get('description', na),
# 'treatments': result.get('treatments', na)
# }
# replaced_by_file.append(item_data)
# sem = asyncio.Semaphore(100)
# loop = asyncio.get_event_loop()
# loop.run_until_complete(asyncio.gather(*[get_data(accession, sem) for accession in accessions]));
# loop = asyncio.get_event_loop()
# loop.run_until_complete(create_session(accessions, loop))
# # For every replaced accession:
# # Check if https://www.encodeproject.org/{accession} returns anything.
# # If so, does it match replaced file type?
# replaced_by_file = []
# na = 'not_available'
# for accession in accessions:
# url = 'https://www.encodeproject.org/{}'.format(accession)
# r = requests.get(url, auth=(key.authid, key.authpw))
# if r.status_code == 404:
# item_data = {'searched_accession': accession,
# 'redirected_to_accession': 'no_result'}
# replaced_by_file.append(item_data)
# else:
# result = r.json()
# sub_by = result.get('submitted_by', {})
# if isinstance(sub_by, str):
# submitted_by = sub_by
# else:
# submitted_by = sub_by.get('title', na)
# lab = result.get('lab', {})
# if isinstance(lab, str):
# lab_name = lab
# else:
# lab_name = lab.get('name', na)
# item_data = {'accession': result['accession'],
# 'submitted_by': submitted_by,
# 'derived_from': result.get('derived_from', na),
# 'superseded_by': result.get('superseded_by', na),
# 'supersedes': result.get('supersedes', na),
# '@id': result['@id'],
# 'alternate_accessions': result.get('alternate_accessions', na),
# 'dataset': result.get('dataset', na),
# 'lab_name': lab_name,
# 'date_created': result.get('date_created', na),
# '@type': result['@type'][0],
# 'output_type': result.get('output_type', na),
# 'file_format': result.get('file_format', na),
# 'assembly': result.get('assembly', na),
# 'paired_with': result.get('paired_with', na),
# 'paired_end': result.get('paired_end', na),
# 'file_format_type': result.get('file_format_type', na),
# 'technical_replicates': result.get('technical_replicates', na),
# 'replicate_uuid': result.get('replicate', {}).get('uuid', na),
# 'md5sum': result.get('md5sum', na),
# 'content_md5sum': result.get('content_md5sum', na),
# 'status': result['status'],
# 'product_id': result.get('product_id', na),
# 'culture_start_date': result.get('culture_start_date', na),
# 'biosample_type': result.get('biosample_type', na),
# 'description': result.get('description', na),
# 'treatments': result.get('treatments', na)
# }
# replaced_by_file.append(item_data)
len(accessions)
len(replaced_by_file)
rbf = pd.DataFrame(replaced_by_file)
rbf = rbf.fillna('is_null')
rbf.lab_name = rbf.lab_name.apply(lambda x: parse_lab_name(x))
rbf.loc[rbf.assembly.apply(lambda x: len(x) == 0), 'assembly'] = 'empty_list'
rbf.loc[rbf.technical_replicates.apply(lambda x: len(x) == 0), 'technical_replicates'] = 'empty_list'
rbf.loc[rbf.superseded_by.apply(lambda x: len(x) == 0), 'superseded_by'] = 'empty_list'
rbf.loc[rbf.supersedes.apply(lambda x: len(x) == 0), 'supersedes'] = 'empty_list'
rbf.loc[rbf.derived_from.apply(lambda x: len(x) == 0), 'derived_from'] = 'empty_list'
df = pd.read_excel('replaced_items_no_redirect_06_12_2017.xlsx')
df.shape
df['@type'].value_counts()
dff = df[df['@type'] == 'File']
dff.dataset
def get_assay_type(experiment):
    """Fetch an experiment's assay_term_name from the ENCODE portal.

    ``experiment`` is an @id path (starts with '/'); ``key`` is the
    notebook-global credentials object. Returns 'na' when the field is absent.
    """
    url = 'https://www.encodeproject.org{}?format=json'.format(experiment)
    r = requests.get(url, auth=(key.authid, key.authpw))
    result = r.json()
    return result.get('assay_term_name', 'na')
def get_lab_name(experiment):
    """Fetch an experiment's lab name from the ENCODE portal.

    NOTE(review): unlike get_assay_type, this wraps the @id in extra
    slashes ('/{}/'); since @id paths already start and end with '/',
    the URL contains '//' segments -- verify the portal accepts this, and
    consider sharing one URL format between the two helpers.
    """
    url = 'https://www.encodeproject.org/{}/?format=json'.format(experiment)
    r = requests.get(url, auth=(key.authid, key.authpw))
    result = r.json()
    return result.get('lab', {}).get('name', 'na')
dff.dataset
dff['assay_type'] = dff.dataset.apply(lambda x: get_assay_type(x))
dff.assay_type.value_counts()
dff['experiment_lab'] = dff.dataset.apply(lambda x: get_lab_name(x))
#rbf.to_csv("replaced_by_search.tsv", sep="\t")
```
## Merge redirect data with replaced Item data
```
no_redirect_accessions = rd[rd.accession.isin(rbf[rbf.redirected_to_accession == "no_result"].searched_accession.values)]
no_redirect_accessions = no_redirect_accessions.sort_values('@type').reset_index(drop=True)
no_redirect_accessions.loc[no_redirect_accessions.description.apply(lambda x: len(x) == 0), 'description'] = 'empty_string'
no_redirect_accessions['status'].value_counts()
no_redirect_accessions.content_md5sum.value_counts()
no_redirect_accessions.description.value_counts()
no_redirect_accessions.lab_name.value_counts()
no_redirect_accessions['@type'].value_counts()
no_redirect_accessions[no_redirect_accessions.md5sum != "not_available"].accession.unique()
len(no_redirect_accessions[no_redirect_accessions.md5sum != "not_available"].accession.unique())
len(no_redirect_accessions[no_redirect_accessions.md5sum == 'not_available'].accession.unique())
#.to_excel('replaced_items_no_redirect_06_12_2017.xlsx')
```
## Search for possible replacement files with same MD5sum
```
# possible_replacements = defaultdict(list)
# for md5 in no_redirect_accessions.md5sum.unique()[1:]:
# url = 'https://www.encodeproject.org/search/'\
# '?type=Item&md5sum={}&status%21=replaced'\
# '&frame=embedded&limit=all&format=json'.format(md5)
# r = requests.get(url, auth=(key.authid, key.authpw))
# if (r.status_code == 404) or (len(r.json()['@graph']) == 0):
# item_data = {'md5sum': md5,
# 'accession': 'no_result'}
# possible_replacements[md5].append(item_data)
# else:
# results = r.json()['@graph']
# for result in results:
# lab = result.get('lab', {})
# if isinstance(lab, str):
# lab_name = lab
# else:
# lab_name = lab.get('name', na)
# possible_replacements[md5].append({'accession': result['accession'],
# '@id': result['@id'],
# 'alternate_accessions': result.get('alternate_accessions', na),
# 'dataset': result.get('dataset', na),
# 'lab_name': lab_name,
# 'date_created': result.get('date_created', na),
# '@type': result['@type'][0],
# 'output_type': result.get('output_type', na),
# 'file_format': result.get('file_format', na),
# 'assembly': result.get('assembly', na),
# 'paired_with': result.get('paired_with', na),
# 'paired_end': result.get('paired_end', na),
# 'file_format_type': result.get('file_format_type', na),
# 'technical_replicates': result.get('technical_replicates', na),
# 'replicate_uuid': result.get('replicate', {}).get('uuid', na),
# 'md5sum': result.get('md5sum', na),
# 'content_md5sum': result.get('content_md5sum', na),
# 'status': result['status']
# })
loop.close()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
possible_replacements = defaultdict(list)
async def get_request(session, md5):
    # Search for non-replaced items carrying this md5sum and record every hit
    # (or a single 'no_result' placeholder) in the module-level
    # possible_replacements map. Relies on globals: possible_replacements,
    # request_auth, na.
    search_url = ('https://www.encodeproject.org/search/'
                  '?type=Item&md5sum={}&status%21=replaced'
                  '&frame=embedded&limit=all&format=json'.format(md5))
    async with session.get(search_url, auth=request_auth) as response:
        payload = await response.json()
        hits = payload['@graph']
        if not hits:
            possible_replacements[md5].append({'md5sum': md5,
                                               'accession': 'no_result'})
            return
        # Fields that may be absent from a hit; fall back to the `na` sentinel.
        optional_fields = ('alternate_accessions', 'dataset', 'date_created',
                           'output_type', 'file_format', 'assembly',
                           'paired_with', 'paired_end', 'file_format_type',
                           'technical_replicates', 'md5sum', 'content_md5sum')
        for hit in hits:
            lab = hit.get('lab', {})
            record = {field: hit.get(field, na) for field in optional_fields}
            # Required fields: let a KeyError surface if the schema changes.
            record['accession'] = hit['accession']
            record['@id'] = hit['@id']
            record['@type'] = hit['@type'][0]
            record['status'] = hit['status']
            # `lab` may be embedded (dict) or just an @id string.
            record['lab_name'] = lab if isinstance(lab, str) else lab.get('name', na)
            record['replicate_uuid'] = hit.get('replicate', {}).get('uuid', na)
            possible_replacements[md5].append(record)
async def create_session(md5s, loop):
    # One pooled HTTP session shared by all md5 searches, fanned out
    # concurrently via asyncio.gather.
    connector = aiohttp.TCPConnector(keepalive_timeout=10, limit=100)
    async with aiohttp.ClientSession(connector=connector, loop=loop) as session:
        tasks = [get_request(session, md5) for md5 in md5s]
        await asyncio.gather(*tasks)
loop = asyncio.get_event_loop()
# Kick off the md5 searches; [1:] skips the first unique md5 value
# (presumably the 'not_available' sentinel — TODO confirm ordering).
loop.run_until_complete(create_session(no_redirect_accessions.md5sum.unique()[1:], loop))
len(possible_replacements)
possible_replacements
# Flatten the hits (dropping 'no_result' placeholders) into a DataFrame.
possible_merge = [item for key, value in possible_replacements.items()
                  for item in value if item['accession'] != 'no_result']
possible_merge = pd.DataFrame(possible_merge)
possible_merge = possible_merge.rename(columns={'accession': 'possible_redirect_accession',
                                                'status': 'possible_redirect_status'})
# Normalise empty lists to a string sentinel so the column compares cleanly.
possible_merge.loc[possible_merge.technical_replicates.apply(lambda x: len(x) == 0), 'technical_replicates'] = 'empty_list'
possible_merge.shape
# Replaced items whose md5 produced no live candidate at all.
no_matches = no_redirect_accessions[~(no_redirect_accessions.md5sum.isin(possible_merge.md5sum.values))].reset_index(drop=True)
# Join candidates to the replaced originals on md5sum + type/format so the
# _old (replaced) / _new (candidate) column pairs line up for comparison.
pm = possible_merge.merge(no_redirect_accessions,
                          how='left',
                          suffixes=('_new', '_old'),
                          on=['md5sum',
                              '@type',
                              'file_format',
                              'file_format_type'])[['md5sum',
                                                    'accession',
                                                    'status',
                                                    'possible_redirect_accession',
                                                    'possible_redirect_status',
                                                    '@type',
                                                    'file_format',
                                                    'file_format_type',
                                                    'assembly_old',
                                                    'assembly_new',
                                                    'dataset_old',
                                                    'dataset_new',
                                                    'date_created_old',
                                                    'date_created_new',
                                                    'lab_name_old',
                                                    'lab_name_new',
                                                    'technical_replicates_old',
                                                    'technical_replicates_new',
                                                    '@id_old',
                                                    '@id_new',
                                                    'output_type_old',
                                                    'output_type_new',
                                                    'paired_end_old',
                                                    'paired_end_new',
                                                    'paired_with_old',
                                                    'paired_with_new',
                                                    'replicate_uuid_old',
                                                    'replicate_uuid_new',
                                                    'alternate_accessions_old',
                                                    'alternate_accessions_new',
                                                    'content_md5sum_old',
                                                    'content_md5sum_new']]
pm#.to_excel('possible_redirect_accessions_for_replaced_files_06_12_2017.xlsx')
# Spot-check a single accession.
no_redirect_accessions[no_redirect_accessions.accession == 'ENCFF133IYK']
pm.shape
len(pm.accession.unique())
# Candidates in the *same* dataset as the replaced file are exact matches.
replacements_exact_match = pm[(pm.dataset_old == pm.dataset_new)].reset_index(drop=True)
replacements_exact_match.shape
replacements_exact_match[[col for col in replacements_exact_match]]
# Same md5 but a different dataset needs the dataset updated first.
replacements_different = pm[~(pm.dataset_old == pm.dataset_new)].reset_index(drop=True)
replacements_different.shape
replacements_different
# Different datasets but same MD5. Have to update replaced file to have replacement dataset.
replacements_update_dataset = replacements_different[['@id_old', 'dataset_new']].rename(columns={'@id_old': '@id', 'dataset_new': 'dataset'})
#replacements_update_dataset.to_csv('../../update_dataset_of_replaced_filed_matching_md5_06_27_2017.tsv', index=False, sep='\t')
# Now set exact match
# Build the patch: each surviving accession gets the replaced accessions
# listed under alternate_accessions.
replacements_patch = replacements_exact_match[['possible_redirect_accession',
                                               'accession']].rename(columns={'accession': 'alternate_accessions:array',
                                                                             'possible_redirect_accession': 'accession'})
replacements_patch = replacements_patch.sort_values("alternate_accessions:array")
replacements_patch.shape
# Collapse multiple replaced accessions per survivor into one comma-joined row.
flat_list_patch = []
for accession in replacements_patch.accession.unique():
    data = {'accession': accession,
            'alternate_accessions:array': ", ".join(replacements_patch[replacements_patch.accession == accession]\
                                                    ['alternate_accessions:array'].values)}
    flat_list_patch.append(data)
replacements_patch_flat_list = pd.DataFrame(flat_list_patch)
#replacements_patch_flat_list.to_csv('../../replaced_with_matching_replacements_patch_06_27_2017.tsv', sep="\t", index=False)
#replacements_different.sort_values('possible_redirect_accession').to_excel('replaced_same_md5_mismatched_dataset_06_14_2017.xlsx', index=False)
```
## Extract the MD5sums with no matching replacements
```
# md5sums whose search came back empty; count the affected accessions.
no_matching_md5_replacements = [item['md5sum'] for key, value in possible_replacements.items()
                                for item in value if item['accession'] == 'no_result']
len(pd.DataFrame(list(set(no_matching_md5_replacements))).rename(columns={0: 'md5sum'}).merge(no_redirect_accessions,
                                                                                              how='left',
                                                                                              on='md5sum')['accession'].unique())
```
## Search for similar types of Files for possible replacement
```
# Restrict to File objects and reset the accumulator for the file search.
no_redirect_file = no_redirect_accessions[no_redirect_accessions['@type'] == 'File'].reset_index(drop=True)
no_redirect_file
na = 'not_available'
possible_replacements = defaultdict(list)
async def get_request_two(session, url, r):
    # Run the replacement search for replaced file `r` and record every
    # candidate — or a single 'no_result' placeholder — under r's accession
    # in the module-level possible_replacements map.
    async with session.get(url, auth=request_auth) as response_two:
        payload = await response_two.json()
        hits = payload['@graph']
        if not hits:
            possible_replacements[r['accession']].append(
                {'accession': r['accession'],
                 'possible_replacement_accession': 'no_result'})
        # Fields that may be absent from a hit; fall back to the `na` sentinel.
        optional_fields = ('alternate_accessions', 'dataset', 'date_created',
                           'output_type', 'file_format', 'assembly',
                           'paired_with', 'paired_end', 'file_format_type',
                           'technical_replicates', 'md5sum', 'content_md5sum',
                           'derived_from', 'superseded_by', 'supersedes')
        for hit in hits:
            lab = hit.get('lab', {})
            submitter = hit.get('submitted_by', {})
            record = {field: hit.get(field, na) for field in optional_fields}
            record.update({
                # Keyed to the *replaced* file's accession.
                'accession': r['accession'],
                'possible_replacement_accession': hit['accession'],
                '@id': hit['@id'],
                '@type': hit['@type'][0],
                'status': hit['status'],
                # lab / submitted_by may be embedded dicts or @id strings.
                'lab_name': lab if isinstance(lab, str) else lab.get('name', na),
                'submitted_by': submitter if isinstance(submitter, str) else submitter.get('title', na),
                'replicate_uuid': hit.get('replicate', {}).get('uuid', na),
            })
            possible_replacements[r['accession']].append(record)
async def get_request_one(session, file_id):
    # Fetch the replaced file's own record, then search for live Files in the
    # same dataset with matching file_format/output_type (plus the QC assay
    # name when the file carries quality metrics) and hand the result set to
    # get_request_two for collection.
    detail_url = 'https://www.encodeproject.org/{}/?format=json'.format(file_id)
    async with session.get(detail_url, auth=request_auth) as response_one:
        record = await response_one.json()
        file_format = record['file_format']
        output_type = record['output_type']
        dataset = record['dataset']
        # '*' stands in for "no assembly on this file"; flipped below into an
        # existence filter so candidates must carry some assembly.
        assembly = record.get('assembly', '*')
        try:
            assay_term_name = record['quality_metrics'][0]['assay_term_name']
            filters = ('&file_format={}&output_type={}'
                       '&quality_metrics.assay_term_name={}'
                       '&dataset={}&assembly={}').format(file_format,
                                                         output_type,
                                                         assay_term_name,
                                                         dataset,
                                                         assembly)
        except IndexError:
            # File has an empty quality_metrics list; search without the assay.
            filters = ('&file_format={}&output_type={}'
                       '&dataset={}&assembly={}').format(file_format,
                                                         output_type,
                                                         dataset,
                                                         assembly)
        search_url = ('https://www.encodeproject.org/search/?type=File'
                      + filters
                      + '&format=json&frame=embedded&status!=replaced')
        if assembly == '*':
            search_url = search_url.replace('&assembly=*', '&assembly!=*')
        await get_request_two(session, search_url, record)
async def create_session(file_ids, loop):
    # Shared pooled session; fan out one get_request_one per replaced file id.
    conn = aiohttp.TCPConnector(keepalive_timeout=10, limit=100)
    async with aiohttp.ClientSession(connector=conn, loop=loop) as session:
        results = await asyncio.gather(*[get_request_one(session, file_id) for file_id in file_ids])
# Run the file-similarity search for every replaced File id.
loop = asyncio.get_event_loop()
loop.run_until_complete(create_session(no_redirect_file['@id'].unique(), loop))
len(possible_replacements)
possible_replacements
```
## Fill in empty_lists for list fields
```
# Flatten the search hits into a DataFrame and normalise missing / empty
# values to string sentinels so the list-valued columns can be compared as
# plain strings later on.
replacement_search = pd.DataFrame([item for key, value in possible_replacements.items() for item in value])
replacement_search = replacement_search.fillna('isnull')

def _mark_empty_lists(frame, columns):
    # Replace zero-length (list-valued) cells with the 'empty_list' sentinel,
    # in place, using masks computed on the same frame being assigned.
    for column in columns:
        frame.loc[frame[column].apply(lambda x: len(x) == 0), column] = 'empty_list'

_mark_empty_lists(replacement_search, ['alternate_accessions',
                                       'technical_replicates',
                                       'superseded_by',
                                       'supersedes',
                                       'derived_from'])
# BUG FIX: alternate_accessions/technical_replicates masks were previously
# computed on no_redirect_accessions, whose index does not align with the
# filtered, reset-index no_redirect_file — boolean .loc indexing would
# misalign (or raise). All five masks now come from no_redirect_file itself.
_mark_empty_lists(no_redirect_file, ['alternate_accessions',
                                     'technical_replicates',
                                     'superseded_by',
                                     'supersedes',
                                     'derived_from'])
# Join candidates (_new) to the replaced originals (_old) by accession.
rsm = replacement_search.merge(no_redirect_file,
                               how='left',
                               suffixes=('_new', '_old'),
                               on=['accession'])
rsm.shape
```
## Substitute replaced file_ids with replacement file_ids in derived_from fields
```
# Drop candidate replacements that are themselves revoked/deleted.
rsm = rsm[~(rsm.status_new.isin(['revoked', 'deleted']))]
# Extract lookup table from data with just one result.
# If derived_from File doesn't redirect then look up and see possible replacement.
# Use that as fill in value of comparison.
dfl = rsm[(rsm.possible_replacement_accession != 'no_result')
          & (rsm.technical_replicates_old == rsm.technical_replicates_new)].drop_duplicates('accession',
                                                                                            keep=False).reset_index(drop=True)
dfl.shape
rsm[(rsm.possible_replacement_accession != 'no_result')
    & (rsm.technical_replicates_old == rsm.technical_replicates_new)].drop_duplicates('accession',
                                                                                      keep=False).reset_index(drop=True).shape
# Create from previous iterations below.
# NOTE(review): matching_rep is defined in a *later* cell — this line relies
# on out-of-order notebook execution; confirm before running top to bottom.
derived_from_lookup = pd.concat([dfl, matching_rep.drop_duplicates('accession', keep=False)], axis=0).drop_duplicates('accession').reset_index(drop=True)
len(derived_from_lookup.accession.unique())
def get_json(identifier):
    """GET an ENCODE object by @id or accession and return the raw Response.

    The parameter was renamed from ``id`` to avoid shadowing the builtin;
    all call sites in this notebook pass it positionally. Uses the global
    ``key`` credentials.
    """
    url = 'https://www.encodeproject.org/{}/?format=json'.format(identifier)
    return requests.get(url, auth=(key.authid, key.authpw))
def parse_derived_from(x):
    """Rewrite a derived_from list so UUID-style file ids point at a current
    accession.

    Empty lists and the 'not_available' sentinel pass through unchanged, as
    do entries already of the form '/files/ENC.../'. Other entries are
    resolved over the network via get_json; files that 404 fall back to the
    global derived_from_lookup table. Items that cannot be resolved are kept
    as-is (after printing them for inspection).
    """
    if len(x) == 0 or x == 'not_available':
        return x
    new_list = []
    for y in x:
        # '/files/<id>/'.split('/') -> ['', 'files', '<id>', '']
        y_id = y.split('/')[2]
        if y_id.startswith('ENC'):
            # Already an accession-style id; keep untouched.
            new_list.append(y)
            continue
        else:
            # UUID-style id: resolve it to its accession.
            r = get_json(y)
            try:
                accession = r.json()['accession']
                # Re-fetch by accession to see whether it still resolves.
                r = get_json(accession)
                if r.status_code == 404:
                    # Pull from local lookup table.
                    try:
                        accession_replacement = derived_from_lookup[derived_from_lookup.accession == accession]\
                                                .possible_replacement_accession.values[0]
                        new_list.append('/files/{}/'.format(accession_replacement))
                    # If no results returned from one-result table.
                    except IndexError:
                        new_list.append(y)
                else:
                    accession_replacement = r.json()['accession']
                    new_list.append('/files/{}/'.format(accession_replacement))
            except KeyError:
                # No 'accession' in the response; log and keep the original id.
                print(y)
                print(x)
                new_list.append(y)
    return new_list
# Normalise the old derived_from ids so they compare against the new ones.
rsm_derived_from_old = rsm.derived_from_old.apply(lambda x: parse_derived_from(x))
rsm.derived_from_old = rsm_derived_from_old
rsm
# NOTE(review): `cols` is defined in a later cell — out-of-order execution.
rsm[~(rsm['@id_old'].isin(['/files/d9e23f37-9b33-41b9-b9df-0700ca87bc75/',
                           '/files/3efeced1-a3c5-4131-a721-7c5f743350a9/',
                           '/files/9fe192e9-af81-46f5-a16f-4d6b5cda577c/'])) & (rsm.supersedes_new != 'not_available')][cols]
```
## Parse lists for comparison
```
# parse_list (below) char-explodes sentinel *strings* (e.g. sorted('isnull')
# yields its characters); this maps those scrambled forms back to the
# original sentinels after the fact.
lazy_dict = {'_,e,i,l,m,p,s,t,t,y': 'empty_list',
             'i,l,l,n,s,u': 'isnull',
             '_,a,a,a,b,e,i,l,l,n,o,t,v': 'not_available'}
def parse_list(x):
    """Sort *x*, strip each element, and comma-join the result.

    A plain string input is iterated character by character (that is what
    produces the scrambled sentinel keys handled by ``lazy_dict``).
    """
    pieces = (element.strip() for element in sorted(x))
    return ','.join(pieces)
rsm.date_created_old = rsm.date_created_old.apply(lambda x: pd.to_datetime(x))
# Flatten each list-valued column to a canonical sorted string, then map any
# char-exploded sentinel strings back via lazy_dict.
for field in ['technical_replicates_old',
              'technical_replicates_new',
              'superseded_by_old',
              'superseded_by_new',
              'supersedes_old',
              'supersedes_new',
              'derived_from_old',
              'derived_from_new']:
    rsm[field] = rsm[field].apply(lambda x: parse_list(x)).apply(lambda x: lazy_dict[x] if x in lazy_dict.keys() else x)
rsm[rsm.technical_replicates_old != rsm.technical_replicates_new][['technical_replicates_old',
                                                                   'technical_replicates_new']]
# NOTE(review): `cols` is defined in a later cell — out-of-order execution.
rsm[rsm.accession == 'ENCFF721IVN'][cols]
rsm[rsm.derived_from_old != rsm.derived_from_new][['derived_from_old', 'derived_from_new']]
```
## Matching content_md5sum, ready to patch
```
# Candidates whose content_md5sum matches the replaced file are safe to patch.
rsm_patch = rsm[(rsm.content_md5sum_old == rsm.content_md5sum_new)
                & (rsm.content_md5sum_old != 'not_available')].reset_index(drop=True)
# Stable display order: keyed columns first, rest reverse-alphabetical.
first_cols = ['accession', 'possible_replacement_accession']
cols = first_cols + [col for col in sorted(rsm_patch.columns, reverse=True) if col not in first_cols]
rsm_patch[cols]
#rsm_patch[['possible_replacement_accession', 'accession']].rename(columns={'possible_replacement_accession': 'accession', 'accession': 'alternate_accessions:list'}).to_csv('../../matching_content_md5sum_patch_06_29_2017.tsv', sep='\t', index=False)
```
## Remove files to be patched
```
# Remove the already-patched files from further consideration.
rsm = rsm[~(rsm.accession.isin(rsm_patch.accession.values))].reset_index(drop=True)
```
## Total Files that need replacement
```
len(rsm.accession.unique())
```
## Possible replacement with zero results
```
# Files whose similarity search returned nothing.
rsm_zero_result = rsm[rsm.possible_replacement_accession == 'no_result'].reset_index(drop=True)
len(rsm_zero_result.accession.unique())
rsm_zero_result.submitted_by_old.value_counts()
rsm_zero_result[cols]
# To set to deleted because no conservative IDR anymore.
#rsm_zero_result.loc[rsm_zero_result.submitted_by_old == 'J. Seth Strattan', 'status_old'] = 'deleted'
#rsm_zero_result[rsm_zero_result.submitted_by_old == 'J. Seth Strattan'][['@id_old', 'status_old']].rename(columns={'status_old': 'status', '@id_old': '@id'}).to_csv('../../zero_match_replaced_to_deleted_patch_06_28_2017.tsv', sep='\t', index=False)
```
### Check for superseded_by/supersedes field
```
rsm_zero_result.superseded_by_old.value_counts()
```
## Possible replacement with one result
```
# Accessions with exactly one candidate (drop_duplicates keep=False removes
# any accession that appears more than once).
rsm_one_result = rsm[rsm.possible_replacement_accession != 'no_result'].drop_duplicates('accession',
                                                                                        keep=False).reset_index(drop=True)
len(rsm_one_result)
rsm_one_result.submitted_by_old.value_counts()
rsm_one_result = rsm_one_result[cols]
rsm_one_result[rsm_one_result.submitted_by_old == "Diane Trout"]
```
### Check for superseded_by/supersedes field
```
rsm_one_result.superseded_by_old.value_counts()
rsm_one_result.supersedes_old.value_counts()
rsm_one_result.superseded_by_new.value_counts()
#rsm_one_result.supersedes_new.value_counts()
```
### Files that should be revoked instead of replaced?
```
# Files that already have a superseded_by pointer.
rsm_one_result[(rsm_one_result.superseded_by_old != 'empty_list')][cols]
rsm_one_result_patch = rsm_one_result[(rsm_one_result.superseded_by_old != 'empty_list')].reset_index(drop=True)
rsm_one_result_patch[['accession', 'superseded_by_old']]
```
### Remove files with superseded_by values
```
rsm_one_result = rsm_one_result[~(rsm_one_result.accession.isin(rsm_one_result_patch.accession.values))].reset_index(drop=True)
rsm_one_result.shape
rsm_one_result[rsm_one_result.derived_from_old != rsm_one_result.derived_from_new][cols].submitted_by_old.value_counts() #[['derived_from_old', 'derived_from_new']].values
rsm_one_result[(rsm_one_result.derived_from_old != rsm_one_result.derived_from_new)
               & (rsm_one_result.submitted_by_old == 'Anna Vlasova')][cols][['accession', 'possible_replacement_accession', 'derived_from_old', 'derived_from_new']]
rsm[(rsm['@type_old'] == 'File')]['@id_old'].unique()
```
### Replacements with one result and matching derived_from files != not_available
```
# Single-candidate files whose derived_from agrees and is populated.
rsm_one_result[(rsm_one_result.derived_from_old == rsm_one_result.derived_from_new)
               & (rsm_one_result.derived_from_old != 'not_available')].shape
```
### Replacements with one result and derived_from both equal to not_available
```
rsm_one_result[(rsm_one_result.derived_from_old == rsm_one_result.derived_from_new)
               & (rsm_one_result.derived_from_old == 'not_available')]
# Patch one of Diane's that has missing derived_from but otherwise equal
# dp = rsm_one_result[(rsm_one_result.derived_from_old == rsm_one_result.derived_from_new)
#                     & (rsm_one_result.derived_from_old == 'not_available')]
# dp[['possible_replacement_accession', 'accession']].rename(columns={'possible_replacement_accession': 'accession',
#                                                                     'accession': 'alternate_accessions:list'}).to_csv('../../one_match_missing_derived_from_patch_06_28_2017.tsv', sep='\t', index=False)
# Patch 58 narrowPeaks with one match after dropping revoked/deleted from possible replacements
# rsm_one_result[['possible_replacement_accession',
#                 'accession']].rename(columns={'possible_replacement_accession': 'accession',
#                                               'accession': 'alternate_accessions:list'}).to_csv('../../one_match_after_dropping_deleted_revoked_patch_06_30_2017.tsv', sep='\t', index=False)
```
### Replacements with one result where derived_from_old but not derived_from_new equal to not_available
```
rsm_one_result[(rsm_one_result.derived_from_old != rsm_one_result.derived_from_new)
               & (rsm_one_result.derived_from_old == 'not_available')].shape
```
### Replacements with one result where derived_from_new but not derived_from_old equal to not_available
```
rsm_one_result[(rsm_one_result.derived_from_old != rsm_one_result.derived_from_new)
               & (rsm_one_result.derived_from_new == 'not_available')]
```
### Replacements with one result where either are not_available
```
rsm_one_result[(rsm_one_result.derived_from_old == 'not_available')
               | (rsm_one_result.derived_from_new == 'not_available')]
```
### Replacements with one result where derived_from not matching
```
rsm_one_result[rsm_one_result.derived_from_old != rsm_one_result.derived_from_new].shape
```
### Replacements with one result where derived_from is matching
```
rsm_one_result[rsm_one_result.derived_from_old == rsm_one_result.derived_from_new].shape
# Full matches: one candidate + identical, populated derived_from.
rsm_one_result_full_match = rsm_one_result[(rsm_one_result.derived_from_old == rsm_one_result.derived_from_new)
                                           & (rsm_one_result.derived_from_old != 'not_available')][cols].reset_index(drop=True)
rsm_one_result_full_match
len(rsm_one_result_full_match.possible_replacement_accession.unique())
rsm_one_result_full_match[['possible_replacement_accession', 'accession']].rename(columns={'possible_replacement_accession': 'accession',
                                                                                           'accession': 'alternate_accessions:list'})
```
### Replacements with one result with no matching derived_from
```
rsm_one_result_no_match = rsm_one_result[~(rsm_one_result.accession.isin(rsm_one_result_full_match.accession.values))][cols].reset_index(drop=True)
rsm_one_result_no_match.shape
rsm_one_result_no_match
rsm_one_result_no_match.file_format_type_new.value_counts()
rsm_one_result_no_match[rsm_one_result_no_match.file_format_type_new == "not_available"]
rsm_one_result_no_match[['derived_from_new', 'derived_from_old']].values
rsm_one_result_no_match[rsm_one_result_no_match.submitted_by_old == 'J. Seth Strattan']
# Patch these narrowPeaks that match except for derived_from because upstream Files changed.
sp = rsm_one_result_no_match[rsm_one_result_no_match.submitted_by_old == 'J. Seth Strattan'][['possible_replacement_accession', 'accession']]
sp.rename(columns={'possible_replacement_accession': 'accession',
                   'accession': 'alternate_accessions:list'})#.to_csv('../../one_match_derived_from_mismatch_patch_06_28_2017.tsv', index=False, sep='\t')
```
## Possible replacement with many results
```
# Accessions with more than one candidate replacement.
rsm_multi_result = rsm[rsm.duplicated('accession', keep=False)].reset_index(drop=True)
len(rsm_multi_result.accession.unique())
rsm_multi_result.drop_duplicates('accession', keep='first').reset_index().submitted_by_old.value_counts()
rsm_multi_result[rsm_multi_result.accession == 'ENCFF719FSK']
```
### Groups add back up to total number of accessions?
```
# Sanity check: zero + one + superseded + multi partitions cover everything.
assert len(rsm_zero_result) + len(rsm_one_result) + len(rsm_one_result_patch) + len(rsm_multi_result.accession.unique()) == len(rsm.accession.unique())
```
### Does matching on technical replicates and derived_from reduce number of possible replacements with many results?
```
# Narrow multi-candidate files by requiring matching technical_replicates
# AND matching derived_from.
matching_rep = rsm_multi_result[(rsm_multi_result.technical_replicates_old == rsm_multi_result.technical_replicates_new)
                                & (rsm_multi_result.derived_from_old == rsm_multi_result.derived_from_new)].reset_index(drop=True)
len(matching_rep.accession.unique())
```
### Multiresults that now only have one result after matching on technical_replicate and derived_from
```
len(matching_rep.drop_duplicates('accession', keep=False).accession.unique())
rsm_multi_one_result = matching_rep.drop_duplicates('accession', keep=False)[cols].reset_index(drop=True)
rsm_multi_one_result[cols]
# rsm_multi_one_result[['possible_replacement_accession', 'accession']].rename(columns={'possible_replacement_accession': 'accession',
#                                                                                       'accession': 'alternate_accessions:list'}).to_csv('../../multi_one_match_patch_06_27_2017.tsv',
#                                                                                                                                         index=False, sep='\t')
# Patch multiresults that have one match when matched on tech_rep (only narrowPeaks)
# multi_one_narrow_peaks = rsm_multi_result[(rsm_multi_result.technical_replicates_old == rsm_multi_result.technical_replicates_new)
#                                           & (rsm_multi_result.file_format_type_old == 'narrowPeak')].drop_duplicates('accession', keep=False).reset_index(drop=True)
# multi_one_narrow_peaks[['possible_replacement_accession', 'accession']].rename(columns={'possible_replacement_accession': 'accession',
#                                                                                         'accession': 'alternate_accessions:list'}).to_csv('../../multi_narrow_peaks_tech_rep_match_patch_06_30_2017.tsv', sep='\t', index=False)
```
### Multiresults that still have more than one result after matching on technical_replicate and derived_from
```
len(matching_rep[matching_rep.duplicated('accession', keep=False)].accession.unique())
```
### Group by accession and possible_replacement
```
# Rebuild display column order against the full rsm column set.
cols = ['accession','possible_replacement_accession']
cols = cols + [x for x in sorted(rsm.columns, reverse=True) if (x not in cols) and (x not in ['alternate_accessions_new',
                                                                                             'alternate_accessions_old'])]
mr = matching_rep[matching_rep.duplicated('accession', keep=False)].groupby(cols).count().reset_index()
matching_rep[matching_rep.duplicated('accession', keep=False)].groupby(cols).count()
# # Patch pointing to in progress replacement instead of deleted replacement.
# in_prog_multi_patch = mr[(mr.status_new == 'in progress')
# & (mr.accession.isin(['ENCFF219IZI',
# 'ENCFF362CIL',
# 'ENCFF522EVZ',
# 'ENCFF526SQT',
# 'ENCFF554QRY',
# 'ENCFF799OIZ',
# 'ENCFF826MUG',
# 'ENCFF832XOD',
# 'ENCFF833LEK']))]
# # in_prog_multi_patch[['possible_replacement_accession', 'accession']].rename(columns={'possible_replacement_accession': 'accession',
# # 'accession': 'alternate_accessions:list'})\
# # .to_csv('../../multi_result_point_to_in_progress_patch_06_28_2017.tsv', index=False, sep='\t')
# in_prog_multi_patch
# # Patch pointing to released replacement instead of revoked replacement.
# released_multi_patch = mr[(mr.status_new == 'released')
# & (mr.accession.isin(['ENCFF311CTD',
# 'ENCFF442FSP',
# 'ENCFF521DYG',
# 'ENCFF660PBO',
# 'ENCFF723DLE',
# 'ENCFF758WLI',
# 'ENCFF803YCX',
# 'ENCFF809POG']))]
# # released_multi_patch[['possible_replacement_accession', 'accession']].rename(columns={'possible_replacement_accession': 'accession',
# # 'accession': 'alternate_accessions:list'})\
# # .to_csv('../../multi_result_point_to_released_patch_06_28_2017.tsv', index=False, sep='\t')
# released_multi_patch
# # Patch these as deleted because merged fasta that was never released
# mr.loc[mr.submitted_by_old == 'Xintao Wei', 'status_old'] = 'deleted'
# mr[mr.submitted_by_old == 'Xintao Wei'].drop_duplicates('accession')[['@id_old', 'status_old']].rename(columns={'status_old': 'status', '@id_old': '@id'}).to_csv('../../two_match_to_deleted_patch_06_29_2017.tsv', sep='\t', index=False)
```
## Multiresults that don't match on technical_replicates or derived_from
```
# Multi-candidate files that never matched on both tech_rep and derived_from.
no_matching_rep = rsm_multi_result[~(rsm_multi_result.accession.isin(matching_rep.accession.unique()))].reset_index(drop=True)
len(no_matching_rep.accession.unique())
# NOTE(review): multi_tech_match is defined further below — this cell relies
# on out-of-order notebook execution.
no_matching_rep[~(no_matching_rep.accession.isin(multi_tech_match.accession)) & (no_matching_rep.submitted_by_old == "J. Seth Strattan")]['@id_old'].unique()
```
### Multiresults that have matching technical_replicates but not derived_from
```
len(no_matching_rep[(no_matching_rep.technical_replicates_old == no_matching_rep.technical_replicates_new)
                    & (no_matching_rep.derived_from_old != no_matching_rep.derived_from_new)].accession.unique())
no_matching_rep[(no_matching_rep.technical_replicates_old == no_matching_rep.technical_replicates_new)
                & (no_matching_rep.derived_from_old != no_matching_rep.derived_from_new)].drop_duplicates('accession', keep=False)
multi_tech_match = no_matching_rep[(no_matching_rep.technical_replicates_old == no_matching_rep.technical_replicates_new)
                                   & (no_matching_rep.derived_from_old != no_matching_rep.derived_from_new)]
no_matching_rep[(no_matching_rep.technical_replicates_old == no_matching_rep.technical_replicates_new)
                & (no_matching_rep.derived_from_old != no_matching_rep.derived_from_new)].groupby(cols).count()
multi_tech_match.superseded_by_old.value_counts()
multi_tech_match[multi_tech_match.superseded_by_old == 'empty_list'][cols]
multi_tech_match[multi_tech_match.supersedes_new != 'not_available'][cols]
multi_tech_match.supersedes_old.value_counts()
```
### One result after matching on technical_replicate
```
multi_tech_one_match = multi_tech_match.drop_duplicates('accession', keep=False)
len(multi_tech_match.drop_duplicates('accession', keep=False).accession.unique())
multi_tech_one_match.submitted_by_old.value_counts()
pd.crosstab(multi_tech_one_match.output_type_old, multi_tech_one_match.submitted_by_old, margins=False)
multi_tech_one_match
# Delete because no matching derived_from
#multi_tech_one_match[['@id_old', 'status_old']].rename(columns={'@id_old': '@id', 'status_old': 'status'}).to_csv('../../no_matching_derived_from_delete_patch_07_03_2017.tsv', index=False, sep='\t')
multi_tech_one_match.file_format_old.value_counts()
multi_tech_one_match[(multi_tech_one_match.output_type_old != 'alignments')][cols]
multi_tech_one_match[(multi_tech_one_match.submitted_by_old == 'Xintao Wei')
                     & (multi_tech_one_match.output_type_old != 'alignments')][cols]#[['@id_old', 'possible_replacement_accession']].values
multi_tech_one_match.groupby(cols).count()
multi_tech_one_match.file_format_type_old.value_counts()
multi_tech_one_match[multi_tech_one_match.submitted_by_old == "Jean Davidson"][cols]
```
### Multiresult after matching on technical_replicate
```
# Still-ambiguous files after the technical_replicates match.
len(multi_tech_match[multi_tech_match.duplicated('accession', keep=False)].accession.unique())
mtm = multi_tech_match[multi_tech_match.duplicated('accession', keep=False)]
mtm.groupby(cols).count()
mtm[mtm.submitted_by_old == 'Jean Davidson'].groupby(cols).count()
mtm[mtm.submitted_by_old == 'J. Seth Strattan'].groupby(cols).count()
```
### Multiresults that have matching derived_from but not technical_replicates
```
no_matching_rep[(no_matching_rep.technical_replicates_old != no_matching_rep.technical_replicates_new)
                & (no_matching_rep.derived_from_old == no_matching_rep.derived_from_new)].shape
no_matching_rep[(no_matching_rep.technical_replicates_old != no_matching_rep.technical_replicates_new)
                & (no_matching_rep.derived_from_old == no_matching_rep.derived_from_new)].groupby(cols).count()
```
### Multiresults that have mismatching derived_from and technical_replicates
```
len(no_matching_rep[(no_matching_rep.technical_replicates_old != no_matching_rep.technical_replicates_new)
                    & (no_matching_rep.derived_from_old != no_matching_rep.derived_from_new)].accession.unique())
no_matching_rep[(no_matching_rep.technical_replicates_old != no_matching_rep.technical_replicates_new)
                & (no_matching_rep.derived_from_old != no_matching_rep.derived_from_new)].groupby(cols).count()
# Rebuild display columns from matching_rep's column set.
cols = ['accession','possible_replacement_accession']
cols = cols + [x for x in sorted(matching_rep.columns, reverse=True) if (x not in cols) and (x not in ['alternate_accessions_new',
                                                                                                      'alternate_accessions_old'])]
no_matching_rep.groupby(cols).count()
```
## Accessions of multiple results that don't have matching technical_replicates or derived_from
```
mis_matching_rep = rsm_multi_result[~(rsm_multi_result.accession.isin(matching_rep.accession))].reset_index(drop=True)
len(mis_matching_rep.accession.unique())
mis_matching_rep[['technical_replicates_old','technical_replicates_new', 'derived_from_old', 'derived_from_new']]
```
## Pull all accessions ready for patching
```
# Union of all confidently matched groups, then squash replaced accessions
# into one comma-joined alternate_accessions value per replacement.
replacement_patch = pd.concat([rsm_patch,
                               rsm_one_result_full_match,
                               rsm_multi_one_result])
# Squash list for patching.
patch_list = []
for replacement_accession in replacement_patch.possible_replacement_accession.unique():
    values = replacement_patch[replacement_patch.possible_replacement_accession == replacement_accession]['accession']
    accession_list = []
    for val in values:
        accession_list.append(val)
    patch_list.append({'accession': replacement_accession,
                       'alternate_accessions:array': ', '.join(accession_list)})
patch_data = pd.DataFrame(patch_list)
#patch_data.to_csv("replaced_with_matching_replacements_patch_06_21_2017.tsv", sep="\t", index=False)
# Visualise creation dates of files that still have no replacement
# (sns/plt are assumed imported earlier in the notebook).
with sns.plotting_context("notebook", font_scale=1.5):
    fig = plt.figure(figsize=[14, 8])
    sns.set_style('whitegrid')
    sns.stripplot(x='date_created_old',
                  data=rsm[rsm.possible_replacement_accession == 'no_result'],
                  size=10,
                  color='black',
                  alpha=0.8)
```
## Biosamples
```
# Switch focus to replaced Biosample objects; reset the accumulator.
biosamples = no_redirect_accessions[no_redirect_accessions['@type'] == 'Biosample']
biosamples.submitted_by.value_counts()
na = 'not_available'
possible_replacements = defaultdict(list)
async def get_request_two(session, url, r):
    # Re-executed cell: identical to the get_request_two defined earlier.
    # Records every search hit (or a 'no_result' placeholder) under the
    # replaced item's accession in possible_replacements.
    async with session.get(url, auth=request_auth) as response_two:
        result_one = await response_two.json()
        search_results = result_one['@graph']
        if len(search_results) == 0:
            possible_replacements[r['accession']].append({'accession': r['accession'],
                                                          'possible_replacement_accession': 'no_result'})
        for result in search_results:
            # lab / submitted_by may be embedded dicts or plain @id strings.
            lab = result.get('lab', {})
            sub_by = result.get('submitted_by', {})
            if isinstance(sub_by, str):
                submitted_by = sub_by
            else:
                submitted_by = sub_by.get('title', na)
            if isinstance(lab, str):
                lab_name = lab
            else:
                lab_name = lab.get('name', na)
            possible_replacements[r['accession']].append({'accession': r['accession'],
                                                          'possible_replacement_accession': result['accession'],
                                                          '@id': result['@id'],
                                                          'alternate_accessions': result.get('alternate_accessions', na),
                                                          'dataset': result.get('dataset', na),
                                                          'lab_name': lab_name,
                                                          'date_created': result.get('date_created', na),
                                                          '@type': result['@type'][0],
                                                          'output_type': result.get('output_type', na),
                                                          'file_format': result.get('file_format', na),
                                                          'assembly': result.get('assembly', na),
                                                          'paired_with': result.get('paired_with', na),
                                                          'paired_end': result.get('paired_end', na),
                                                          'file_format_type': result.get('file_format_type', na),
                                                          'technical_replicates': result.get('technical_replicates', na),
                                                          'replicate_uuid': result.get('replicate', {}).get('uuid', na),
                                                          'md5sum': result.get('md5sum', na),
                                                          'content_md5sum': result.get('content_md5sum', na),
                                                          'status': result['status'],
                                                          'submitted_by': submitted_by,
                                                          'derived_from': result.get('derived_from', na),
                                                          'superseded_by': result.get('superseded_by', na),
                                                          'supersedes': result.get('supersedes', na)
                                                          })
async def get_request_one(session, file_id):
    """Fetch one replaced file record and search for candidate replacements.

    Builds an ENCODE search URL matching the file's format / output type /
    dataset / assembly (plus assay_term_name when quality metrics exist),
    then delegates to get_request_two(), which records the candidates into
    possible_replacements as a side effect.
    """
    url = 'https://www.encodeproject.org/{}/?format=json'.format(file_id)
    async with session.get(url, auth=request_auth) as response_one:
        r = await response_one.json()
        file_format = r['file_format']
        output_type = r['output_type']
        dataset = r['dataset']
        # '*' is a local sentinel meaning "no assembly"; translated below
        # into an `assembly!=*` search filter.
        assembly = r.get('assembly', '*')
        try:
            assay_term_name = r['quality_metrics'][0]['assay_term_name']
            url = 'https://www.encodeproject.org/search/?type=File&file_format={}'\
                  '&output_type={}&quality_metrics.assay_term_name={}'\
                  '&dataset={}&assembly={}&format=json&frame=embedded'\
                  '&status!=replaced'.format(file_format,
                                             output_type,
                                             assay_term_name,
                                             dataset,
                                             assembly)
        # BUG FIX: records with no 'quality_metrics' key raised an uncaught
        # KeyError (the original only caught IndexError for an empty list);
        # both cases now fall back to the simpler search URL.
        except (KeyError, IndexError):
            url = 'https://www.encodeproject.org/search/?type=File&file_format={}'\
                  '&output_type={}&dataset={}&assembly={}&format=json&frame=embedded'\
                  '&status!=replaced'.format(file_format,
                                             output_type,
                                             dataset,
                                             assembly)
        if assembly == '*':
            url = url.replace('&assembly=*', '&assembly!=*')
        # get_request_two() returns None and works by side effect, so the
        # previously captured `result_two` variable was dropped.
        await get_request_two(session, url, r)
async def create_session(file_ids, loop):
    """Open one pooled HTTP session and fan out a lookup per file id.

    Results are discarded: get_request_one() writes its findings into
    possible_replacements as a side effect.
    """
    connector = aiohttp.TCPConnector(keepalive_timeout=10, limit=100)
    async with aiohttp.ClientSession(connector=connector, loop=loop) as session:
        await asyncio.gather(*(get_request_one(session, fid) for fid in file_ids))
# Synchronous replacement search for Biosample objects, mirroring the async
# file search above.  One summary row is collected per (old, candidate) pair.
na = 'not_available'
possible_biosample_replacements = defaultdict(list)
for biosample_id in biosamples['@id'].unique():
    # NOTE(review): `key` (authid/authpw credentials) is defined in an
    # earlier cell — confirm.
    r = requests.get('https://www.encodeproject.org/{}/?format=json'.format(biosample_id),
                     auth=(key.authid, key.authpw))
    r = r.json()
    # Embedded objects may be collapsed to plain strings by the API.
    lab_old = r.get('lab', {})
    if isinstance(lab_old, str):
        lab_name_old = lab_old
    else:
        lab_name_old = lab_old.get('name', na)
    donor_old = r.get('donor', {})
    if isinstance(donor_old, str):
        donor_name_old = donor_old
    else:
        donor_name_old = donor_old.get('@id', na)
    sub_by_old = r.get('submitted_by', {})
    if isinstance(sub_by_old, str):
        submitted_by_old = sub_by_old
    else:
        submitted_by_old = sub_by_old.get('title', na)
    # Prefer a precise search on product id / health status / culture start
    # date; fall back to a description search when any of those is missing.
    try:
        product_id = r['product_id']
        health_status = r['health_status']
        culture_start_date = r['culture_start_date']
        url = 'https://www.encodeproject.org/search/'\
              '?type=Biosample&product_id={}'\
              '&health_status={}&culture_start_date={}'\
              '&status%21=replaced&format=json&frame=embedded'.format(product_id,
                                                                     health_status,
                                                                     culture_start_date)
    except KeyError:
        description = r['description']
        url = 'https://www.encodeproject.org/search/'\
              '?type=Biosample&description={}'\
              '&status%21=replaced&format=json&frame=embedded'.format(description)
    search_results = requests.get(url, auth=(key.authid, key.authpw))
    search_results = search_results.json()['@graph']
    if len(search_results) == 0:
        # Keep a marker row so no-result biosamples still show up downstream.
        possible_biosample_replacements[r['accession']].append({'accession': r['accession'],
                                                                'possible_replacement_accession': 'no_result'})
    for result in search_results:
        lab_new = result.get('lab', {})
        if isinstance(lab_new, str):
            lab_name_new = lab_new
        else:
            lab_name_new = lab_new.get('name', na)
        donor_new = result.get('donor', {})
        if isinstance(donor_new, str):
            donor_name_new = donor_new
        else:
            donor_name_new = donor_new.get('@id', na)
        sub_by_new = result.get('submitted_by', {})
        if isinstance(sub_by_new, str):
            submitted_by_new = sub_by_new
        else:
            submitted_by_new = sub_by_new.get('title', na)
        # One row per candidate, pairing *_old (original record r) with
        # *_new (search hit) for side-by-side review.
        possible_biosample_replacements[r['accession']].append({'accession': r['accession'],
            'possible_replacement_accession': result['accession'],
            '@id_old': r['@id'],
            '@id_new': result['@id'],
            # NOTE(review): the _new/_old suffixes below look swapped — the
            # '_new' value reads from the old record `r` and vice versa.
            # Confirm intent before relying on these two columns.
            'alternate_accessions_new': r.get('alternate_accessions', na),
            'alternate_accessions_old': result.get('alternate_accessions', na),
            'donor_old': donor_name_old,
            'donor_new': donor_name_new,
            'lab_name_old': lab_name_old,
            'lab_name_new': lab_name_new,
            'date_created_old': r.get('date_created', na),
            'date_created_new': result.get('date_created', na),
            '@type_old': r['@type'][0],
            '@type_new': result['@type'][0],
            'status_old': r['status'],
            'status_new': result['status'],
            'product_id_old': r.get('product_id', na),
            'product_id_new': result.get('product_id', na),
            'health_status_old': r.get('health_status', na),
            'health_status_new': result.get('health_status', na),
            'culture_start_date_old': r.get('culture_start_date', na),
            'culture_start_date_new': result.get('culture_start_date', na),
            'biosample_type_old': r['biosample_type'],
            'biosample_type_new': result['biosample_type'],
            'treatment_old': r['treatments'],
            'treatment_new': result['treatments'],
            'biosample_term_name_old': r['biosample_term_name'],
            'biosample_term_name_new': result['biosample_term_name'],
            'summary_old': r['summary'],
            'summary_new': result['summary'],
            'description_old': r['description'],
            'description_new': result['description'],
            'pooled_from_old': r.get('pooled_from', na),
            'pooled_from_new': result.get('pooled_from', na),
            'part_of_old': r.get('part_of', na),
            'part_of_new': result.get('part_of', na),
            'culture_harvest_date_old': r.get('culture_harvest_date', na),
            'culture_harvest_date_new': result.get('culture_harvest_date', na),
            'passage_number_old': r.get('passage_number', na),
            'passage_number_new': result.get('passage_number', na),
            'lot_id_old': r.get('lot_id', na),
            'lot_id_new': result.get('lot_id', na),
            'submitted_by_old': submitted_by_old,
            'submitted_by_new': submitted_by_new
            })
# Notebook displays: how many accessions got candidates, and the raw dict.
len(possible_biosample_replacements)
possible_biosample_replacements
# Flatten accession -> [rows] into one long DataFrame.
replacement_search = pd.DataFrame([item for key, value in possible_biosample_replacements.items() for item in value])
# Replace NaN with an explicit sentinel so string comparisons work later.
replacement_search = replacement_search.fillna('isnull')
# Empty alternate-accession lists get their own sentinel.
replacement_search.loc[replacement_search.alternate_accessions_old.apply(lambda x: len(x) == 0), 'alternate_accessions_old'] = 'empty_list'
replacement_search.loc[replacement_search.alternate_accessions_new.apply(lambda x: len(x) == 0), 'alternate_accessions_new'] = 'empty_list'
#replacement_search.loc[replacement_search.pooled_from_old.apply(lambda x: len(x) == 0), 'pooled_from_old'] = 'empty_list'
#replacement_search.loc[replacement_search.pooled_from_new.apply(lambda x: len(x) == 0), 'pooled_from_new'] = 'empty_list'
replacement_search.shape
# parse_list() below sorts a string's characters and joins them with commas;
# applying it to the sentinel strings themselves yields these keys, so
# lazy_dict maps the mangled sentinels back to their originals.
lazy_dict = {'_,e,i,l,m,p,s,t,t,y': 'empty_list',
             'i,l,l,n,s,u': 'isnull',
             '_,a,a,a,b,e,i,l,l,n,o,t,v': 'not_available'}
def parse_list(x):
    """Flatten an iterable into a canonical, comma-joined, sorted string.

    Elements are sorted first, then stripped of surrounding whitespace, so
    equal collections always produce the same string regardless of order.
    """
    return ','.join(item.strip() for item in sorted(x))
# Normalise dates and list-valued fields, then build the column ordering
# used for the review tables.
replacement_search.date_created_old = replacement_search.date_created_old.apply(lambda x: pd.to_datetime(x))
replacement_search.date_created_new = replacement_search.date_created_new.apply(lambda x: pd.to_datetime(x))
for field in ['treatment_new',
              'treatment_old',
              'alternate_accessions_old',
              'alternate_accessions_new',
              'pooled_from_new',
              'pooled_from_old',
              'part_of_new',
              'part_of_old']:
    # parse_list flattens list values to a sorted comma string; lazy_dict
    # maps the flattened sentinel strings back to their originals.
    replacement_search[field] = replacement_search[field].apply(lambda x: parse_list(x)).apply(lambda x: lazy_dict[x] if x in lazy_dict.keys() else x)
# Biosamples with exactly one candidate replacement.
biosamples_one_match = replacement_search.drop_duplicates('accession', keep=False)
first_cols = ['accession', 'possible_replacement_accession']
bcols = first_cols + [col for col in sorted(biosamples_one_match.columns, reverse=True) if col not in first_cols]
# BUG FIX: `bcols` was displayed *before* its definition above, which raises
# NameError when the cells run top to bottom; display it afterwards instead.
bcols
biosamples[biosamples['@id'].isin(replacement_search['@id_old'])].shape
biosamples_one_match[bcols].lab_name_old.value_counts()
biosamples_one_match[bcols]
# Build a patch table: one row per replacement accession, with the replaced
# accessions collected into its alternate_accessions array.
# NOTE(review): `bs_patch` is not defined anywhere in this chunk — presumably
# derived from biosamples_one_match in a missing cell; confirm.
flat_patch = []
for replacement in bs_patch.possible_replacement_accession.unique():
    data = {'accession': replacement,
            'alternate_accessions:array': ", ".join(bs_patch[bs_patch.possible_replacement_accession == replacement].accession.values)}
    flat_patch.append(data)
fp = pd.DataFrame(flat_patch)
# fp.to_csv('../../biosample_one_match_patch_07_03_2017.tsv', sep='\t', index=False)
# Among multi-candidate biosamples, keep candidates whose key provenance
# fields agree between the old record and the candidate.
# Reordered so every name is defined before use (`bs` and `bs_multi_match`
# were previously referenced before assignment when run top to bottom).
# NOTE(review): `biosamples_multi_match` itself is not defined in this chunk
# — presumably created in an earlier cell; confirm.
bs = biosamples_multi_match#.drop_duplicates('accession').shape
# BUG FIX: the first clause compared donor_old with itself (always True for
# non-null values); compare old vs new like every other clause.
bs_multi_match = bs[(bs.donor_old == bs.donor_new)
                    & (bs.passage_number_old == bs.passage_number_new)
                    & (bs.lot_id_old == bs.lot_id_new)
                    & (bs.product_id_old == bs.product_id_new)
                    & (bs.culture_harvest_date_old == bs.culture_harvest_date_new)
                    & (bs.culture_start_date_old == bs.culture_start_date_new)]
biosamples_multi_match[biosamples_multi_match.accession.isin(bs_multi_match.accession)]
bs_multi_match.submitted_by_old.value_counts()
bs_multi_match.groupby(bcols).count()
ANTIBODIES:
product_id=A301-145A
@type=AntibodyLot
targets.gene_name: "NCOR1",
antigen_description: "Nuclear Receptor corepressor 1; N-CoR, TRAC1, KIAA1047, hN-CoR",
source.title: "Bethyl Labs",
https://www.encodeproject.org/search/?type=AntibodyLot&targets.gene_name=NCOR1&source.title=Bethyl+Labs&product_id=A301-145A&status%21=replaced
BIOSAMPLE
biosample_type: "immortalized cell line",
treatment: [ ]
lab.name: "gene-yeo"
culture_start_date: "2015-06-12",
health_status: "hepatocellular carcinoma",
product_id: "HB-8065",
biosample_term_name: "HepG2",
@type: "Biosample"
donor.@id: "/human-donors/ENCDO000AAC/",
summary: "Homo sapiens HepG2 immortalized cell line",
life_stage: "child",
source.title: "ATCC",
biosample_term_name: "HepG2",
https://www.encodeproject.org/search/
?type=Biosample&product_id=HB-8065
&health_status=hepatocellular+carcinoma
&culture_start_date=2015-06-12&status%21=replaced
FILE
quality_metrics.assay_term_name: "ChIP-seq",
file_type: "bam",
assembly: "hg19",
lab.name: "encode-processing-pipeline",
output_category: "alignment",
analysis_step_version.analysis_step.name: "bwa-raw-alignment-step-v-1",
biological_replicates: 1
technical_replicates: [
"1_1"
https://www.encodeproject.org/search/?type=File&file_format=bam
&output_type=alignments&quality_metrics.assay_term_name=ChIP-seq
&dataset=%2Fexperiments%2FENCSR021JFW%2F&assembly=hg19
LIBRARY
nucleic_acid_term_name: "DNA",
library_size_selection_method: "SPRI beads",
strand_specificity: false,
fragmentation_method: "shearing (Covaris S2)",
aliases: "tim-reddy:hic_dex.t0_brep1_lib"
lab: "/labs/tim-reddy/",
crosslinking_method: "formaldehyde",
biosample.summary: "Homo sapiens A549 immortalized cell line"
biosample.biosample_term_name: "A549"
https://www.encodeproject.org/search/?type=Library
&lab=%2Flabs%2Fthomas-gingeras%2F
&nucleic_acid_term_name=polyadenylated+mRNA
&strand_specificity=true&depleted_in_term_name=rRNA
&biosample.biosample_term_name=NCI-H460
&biosample.%40id=%2Fbiosamples%2FENCBS814QPR%2F&status%21=replaced
```
| github_jupyter |
```
import math
import os
import re
import urllib
# `import urllib` alone does not load the `request` submodule used below;
# import it explicitly so urllib.request.urlopen is guaranteed to exist.
import urllib.request

import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import numpy as np
import pandas as pd
import seaborn as sns
import tqdm
from lxml import html
from matplotlib.colors import LogNorm
from rdkit import Chem
# Use the notebook-friendly progress bar for every tqdm call below.
tqdm.tqdm = tqdm.tqdm_notebook
# Transparent axes + serif font; 'paper' context scaled up for print figures.
sns.set(style='white', rc={'axes.facecolor': (0, 0, 0, 0),
                           'font.family': 'serif'})
sns.set_context('paper', font_scale=2.5)
def get_num_spectrum_files(task_id, extension=None):
    """Count the spectrum files listed on a ProteoSAFe task status page.

    Parameters
    ----------
    task_id : str
        GNPS/ProteoSAFe task identifier.
    extension : str, optional
        When given, only entries whose (stripped, lower-cased) text ends in
        this extension are counted; otherwise every entry is counted.

    Returns
    -------
    int
        Number of matching "Spectrum Files" entries on the status page.
    """
    with urllib.request.urlopen(f'https://proteomics2.ucsd.edu/ProteoSAFe/'
                                f'status.jsp?task={task_id}') as f_url:
        num_files_str = html.fromstring(f_url.read()).xpath(
            '//th[text()="Spectrum Files"]/following-sibling::td/'
            'descendant::*/text()')
    # Generator instead of a throwaway list inside sum().
    return sum(1 for line in num_files_str
               if extension is None
               or line.strip().lower().endswith(extension.lower()))
def inchi_to_smiles(inchi):
    """Convert an InChI string to canonical (isomeric) SMILES.

    Returns None when the InChI cannot be parsed.
    """
    try:
        mol = Chem.rdinchi.InchiToMol(inchi)[0]
    except ValueError:
        # Unparseable InChI.
        return None
    if mol is None:
        return None
    return Chem.MolToSmiles(mol, True)
def ridge_plot_scores(scores_df, num_files):
    """Ridge (overlapping KDE) plot of cosine scores, one row per file.

    Parameters:
        scores_df: long-format DataFrame with 'filename' and 'score' columns.
        num_files: per-row labels drawn inside each ridge (the number of
            spectrum files for that row).

    Returns:
        The seaborn FacetGrid holding the figure.
    """
    # One blue shade per file, darkest first after the reverse.
    palette = sns.color_palette('Blues_d', n_colors=len(
        scores_df['filename'].unique()))
    palette.reverse()
    fg = sns.FacetGrid(scores_df, row='filename', hue='filename',
                       height=0.5, aspect=15, palette=palette)
    # Filled KDE, then a white outline on top, then a baseline per row.
    fg.map(sns.kdeplot, 'score', shade=True, alpha=1., lw=1.5)
    fg.map(sns.kdeplot, 'score', color='white', lw=2.)
    fg.map(plt.axhline, y=0., lw=2.)
    for ax in fg.axes.ravel():
        ax.set_xlim(0.4, 1.)
    fg.set(xticks=np.arange(0.5, 1.05, 0.1))
    fg.axes[-1, 0].set_xlabel('Cosine score')
    fg.axes[len(fg.axes) // 2, 0].set_ylabel('Number of files')
    # Annotate each ridge with its file count in the matching colour.
    for ax, label, color in zip(fg.axes.ravel(), num_files, palette):
        ax.text(0, .2, label, color=color, ha='left', va='center',
                transform=ax.transAxes)
    fg.set_titles('')
    fg.set(yticks=[])
    fg.despine(bottom=True, left=True)
    # Negative spacing overlaps the rows to give the ridge-plot look.
    fg.fig.subplots_adjust(hspace=-0.35)
    return fg
# For each dataset: collect library-search identifications, count the
# deconvoluted spectrum files, and plot the cosine-score distribution.
for data_dir in ['../data/ICL_breath_cancer_Study_1',
                 '../data/UCDavis_combined']:
    dataset = data_dir[data_dir.rfind('/') + 1:]
    # Keep only metadata rows that carry both GNPS task links.
    metadata = (pd.read_csv(os.path.join(data_dir, 'metadata.csv'))
                .dropna(subset=['GNPS deconvolution link',
                                'GNPS library search link']))
    print(f'Process dataset {dataset}')
    # Get all identifications for this dataset.
    filenames_search, compounds = [], []
    for library_link in tqdm.tqdm(metadata['GNPS library search link'],
                                  'Files loaded', unit='files'):
        # The 8-character task id is embedded in the link's query string.
        task_id = library_link[library_link.rfind('=') + 1:][:8]
        filename = (f'MOLECULAR-LIBRARYSEARCH-GC-{task_id}'
                    f'-view_all_annotations_DB-main.tsv')
        filenames_search.append(filename)
        compounds_file = (pd.read_csv(os.path.join(data_dir, filename),
                                      sep='\t',
                                      usecols=['#Scan#', 'INCHI', 'MQScore'],
                                      skipinitialspace=True)
                          .dropna())
        # Convert InChI to canonical SMILES; rows that fail to convert are
        # dropped by the second dropna().
        compounds_file['SMILES'] =\
            compounds_file['INCHI'].apply(inchi_to_smiles)
        compounds_file = compounds_file.drop('INCHI', 'columns').dropna()
        compounds.append(compounds_file)
    # Get the number of files for this dataset.
    num_files = [
        get_num_spectrum_files(
            deconvolution_link[deconvolution_link.rfind('=') + 1:], '.cdf')
        for deconvolution_link in metadata['GNPS deconvolution link']
    ]
    # # Plot the score distribution for this dataset.
    # # (Exploratory top-1/top-10 variant, deliberately kept for reference.)
    # for top in (1, 10):
    #     filenames_top, scores_top = [], []
    #     for filename, compounds_file in zip(filenames_search, compounds):
    #         compounds_top = (
    #             compounds_file.sort_values(['#Scan#', 'MQScore'],
    #                                        ascending=[True, False])
    #             .groupby('#Scan#').head(top))
    #         filenames_top.extend([filename] * len(compounds_top))
    #         scores_top.extend(compounds_top['MQScore'])
    #     ridge_plot_scores(pd.DataFrame(data={'filename': filenames_top,
    #                                          'score': scores_top}),
    #                       num_files)
    #     plt.savefig(f'cosine_distribution_{dataset}_top{top}.png', dpi=300)
    #     plt.show()
    #     plt.close()
    # Final figure for the manuscript: best hit per scan only.
    top = 1
    filenames_top, scores_top = [], []
    for filename, compounds_file in zip(filenames_search, compounds):
        compounds_top = (
            compounds_file.sort_values(['#Scan#', 'MQScore'],
                                       ascending=[True, False])
            .groupby('#Scan#').head(top))
        filenames_top.extend([filename] * len(compounds_top))
        scores_top.extend(compounds_top['MQScore'])
    ridge_plot_scores(pd.DataFrame(data={'filename': filenames_top,
                                         'score': scores_top}),
                      num_files)
    plt.savefig(f'cosine_distribution_{dataset}_top{top}.svg', dpi=300)
    plt.show()
    plt.close()
```
| github_jupyter |
```
######################################################################
# Example: Timing
#
# Timing in Python is easily accomplished using the "time"
# function provided by the "time" module. A simple
# example is provided below.
#
# NOTE: the timed statements are the point of this example, so the code is
# left exactly as written; only comments have been added.
import time

nsizes = 8
for i in range(nsizes):
    list1 = []
    list2 = []
    # List length grows by a factor of 10 each pass: 1, 10, ..., 10**7.
    nelem = 10**i
    # Time building the two lists element by element.
    t0 = time.time()
    for j in range(nelem):
        list1.append(j)
        list2.append(i+j)
    t1 = time.time()
    dt_create = t1-t0
    # Time an element-wise product over the same lists (written in place).
    t0 = time.time()
    for j in range(nelem):
        list2[j] = list2[j]*list1[j]
    t1 = time.time()
    dt_prod = t1-t0
    nestr = '{:8}'.format(nelem)
    dtcstr = '{:.4e}'.format(dt_create)
    dtpstr = '{:.4e}'.format(dt_prod)
    print('')
    print('/////////////////////////////////////////////////////////////////')
    print(' Time to create two lists of length '+nestr+' : '+dtcstr+' seconds')
    print(' Time to perform element-wise product on two lists of length '+nestr+' : '+dtpstr+' seconds')
```
<h2>Next: the Session 1 NumPy example on array initialization (initialization.py)</h2>
```
#################################################################
# Example: Numpy array initialization
#
# There are several common ways to create a Numpy array; each is
# demonstrated below, followed by a summary of every array's metadata.
import numpy as np

# Dimensions shared by the examples below.
nx = 10
ny = 20

# np.empty allocates storage without initialising it, so the printed
# contents are whatever happened to be in memory.
earr = np.empty(nx, dtype=np.float64)
print('earr: ', earr)
print(' ')

# np.zeros guarantees zero-filled storage (4-byte floats here).
zarr = np.zeros(nx, dtype=np.float32)
print('zarr: ', zarr)
print(' ')

# 2-D zeroed integer array with row-major (C-like) ordering — the default,
# where the second index varies fastest.
iarr2da = np.zeros((nx, ny), dtype=np.int32)
print('iarr2da: ')
print(iarr2da)
print(' ')

# Same array but with column-major (Fortran-like) ordering, where the
# first index varies fastest.
iarr2db = np.zeros((nx, ny), dtype=np.int32, order='F')
print('iarr2db: ')
print(iarr2db)
print(' ')

# Evenly spaced values with an integer step, via np.arange.
istart = 10
iend = 20
istep = 2
arrsp = np.arange(istart, iend, istep, dtype=np.float32)
print('arrsp: ', arrsp)
print(' ')

# Evenly spaced values with a fractional step, via np.linspace:
# nstep points spanning [istart, iend].
istart = 100
iend = 200
nstep = 200
arrsp2 = np.linspace(istart, iend, nstep, dtype=np.float64)
print('arrsp2: ')
print(arrsp2)
print(' ')

# Initialisation from an existing 1-D Python list.
oned = [0, 1.2, 5.6, 8.9]
arrinit = np.array(oned, dtype=np.float64)
print('arrinit: ', arrinit)
print(' ')

# Initialisation from an existing 2-D (nested) Python list.
twod = [[1, 2], [3, 4]]
arrinit2d = np.array(twod, dtype=np.float64)
print('arrinit2d: ')
print(arrinit2d)
print(' ')

# Summarise every array created above: dtype, element count and size,
# dimensionality, per-axis lengths, storage footprint and (for 2-D
# arrays) the byte strides that reveal the memory ordering.
array_names = ['zarr', 'earr', 'iarr2da', 'iarr2db', 'arrsp', 'arrsp2', 'arrinit', 'arrinit2d']
arrays = [zarr, earr, iarr2da, iarr2db, arrsp, arrsp2, arrinit, arrinit2d]
for name, arr in zip(array_names, arrays):
    ndim = arr.ndim
    dtype = arr.dtype
    isize = arr.itemsize
    ssp = arr.nbytes
    ne = arr.size
    print('////////////////////////////////////////')
    print(' Information for ndarray '+name)
    print(' data type : ', dtype)
    print(' number of elements : ', ne)
    print(' element size (bytes) : ', isize)
    print(' dimensions : ', ndim)
    for axis in range(ndim):
        print(' dimension '+str(axis)+' size :', arr.shape[axis])
    print(' storage space (bytes): ', ssp)
    if (ndim > 1):
        print(' Element spacing along dimension 1 (bytes): ', arr.strides[0])
        print(' Element spacing along dimension 2 (bytes): ', arr.strides[1])
    print('')
```
<h2>Exercise1: for rewriting for better efficiency</h2>
```
#//////////////////////////////////////////////////////////////
# Exercise 1:
# Rewrite the following program using numpy arrays and
# array operations where possible (instead of explicit loops).

# Pure-Python reference version: a[i] = 4*(i+1), b[i] = (i+1)**2,
# dsum = sum(a[i]*b[i]).
n = 1000000
a = [4 * i for i in range(1, n + 1)]
b = [i ** 2 for i in range(1, n + 1)]
dsum = 0.0
for av, bv in zip(a, b):
    dsum += av * bv
print('dsum is: ', dsum)

# Vectorised NumPy version of the same computation.
import numpy as np
n = 1000000
ivals = np.arange(n) + 1
a = 4 * ivals
b = np.square(ivals)
#dsum = np.sum(a * b, dtype='float64')
# Cast to float64 before multiplying so the accumulation happens in floats.
dsum = np.sum(a.astype('float64') * b)
print("Our sum is : ", dsum)
```
<h2>In-Place Operations</h2>
```
###########################################################################
# Example: In-place vs. out-of-place operations
#
# Unnecessary array copies can be avoided (and time saved) by using the
# in-place operators: prefer a += 2 over a = a + 2, a *= 2 over a = a * 2,
# and so on.  The timed statements below are kept exactly in their original
# form, since they are what is being measured.
import numpy as np
import time

npts = 10000000
ntrials = 4
a = np.zeros(npts)
b = np.zeros(npts)

print(' ')
print(' Timing in-place vs. out-of-place array operations')
print(' Number of elements: ', npts)
print(' Number of trials : ', ntrials)
print('')

# Looks in-place, but a*2 allocates a brand-new array that is then re-bound
# to the name `a`.
start = time.time()
for _ in range(ntrials):
    a = a*2
dt1 = time.time() - start

# Genuinely in-place: the multiplication writes into a's existing buffer.
start = time.time()
for _ in range(ntrials):
    a *= 2
dt2 = time.time() - start

# Clearly out-of-place: a*2 is computed and assigned to a second array.
start = time.time()
for _ in range(ntrials):
    b = a*2
dt3 = time.time() - start

tstr1 = '{:.4e}'.format(dt1)
tstr2 = '{:.4e}'.format(dt2)
tstr3 = '{:.4e}'.format(dt3)
print(' "In-place" ( a = a*2 ) : '+tstr1+' seconds')
print(' In-place ( a *= 2 ) : '+tstr2+' seconds')
print(' Out-of-place ( b = a*2 ) : '+tstr3+' seconds')
```
<h2>Array Ordering example</h2>
```
##################################################################
# Example: Numpy array ordering
# Demonstrates how row-major and column-major ordering place the same
# 1-D data differently when reshaped to 2-D.
import numpy as np

dt = 'int32'  # 4-byte integers

# 1-D source data: the integers 1 through 8.
values = np.arange(1, 9, 1, dtype=dt)

# Row-major (C-like, the default): values fill row 0, then row 1, etc.
array2d_row_major = values.reshape((4, 2), order='C')
# Column-major (Fortran-like): values fill column 0, then column 1.
array2d_col_major = values.reshape((4, 2), order='F')

print('')
print('values: ')
print(values)
print('')
print('2-D reshape; row-major):')
print(array2d_row_major)
print('')
print('')
print('2-D reshape; column-major):')
print(array2d_col_major)
print('')
```
<h2>Access Patterns</h2>
```
####################################################################
# Example: Numpy array access patterns
#
# In general, try not to loop over array elements; use
# vectorized operations whenever possible. That said,
# sometimes loops cannot be avoided. When you HAVE to write a
# loop, the order in which you access the array can affect
# computation speed. If the array is row-major (default),
# it is most efficient to work row-by-row. If the array is
# column major, work column-by-column instead.
#
# In this example, we perform vector dot products with
# rows and columns of an arrays that are both row-major
# and column-major
#
# NOTE: this is benchmark code — the loop bodies are what is being timed,
# so the statements are left exactly as written.
from time import time
import numpy as np

nx = 64
nsizes = 6
ntests = 50
orders = ['C', 'F']
otitles = [' Row Major (C-style)', 'Column Major (Fortran-style)']
print('Vector-Matrix Multiplication Timings')
for i,o in enumerate(orders):
    print(' ')
    print('Array ordering: ', otitles[i])
    for k in range(nsizes):
        # Matrix side doubles each step: 64, 128, ..., 2048.
        nxy = nx*(2**k)
        nxys = 'nx: '+'{:4}'.format(nxy)
        vec = np.zeros(nxy,dtype='float64', order=o)
        mat = np.zeros((nxy,nxy),dtype='float64', order=o)
        # first pass: vec = mat[:,j]  (column access)
        t0 = time()
        for n in range(ntests):
            for j in range(nxy):
                vec2 = mat[:,j]
                dsum = np.sum(vec2*vec)
        t1 = time()
        dt1 = (t1-t0)/ntests
        # second pass: vec = mat[j,:]  (row access)
        t0 = time()
        for n in range(ntests):
            for j in range(nxy):
                vec2 = mat[j,:]
                dsum = np.sum(vec2*vec)
        t1 = time()
        dt2 = (t1-t0)/ntests
        # Report the column-wise / row-wise time ratio for this size.
        s2 = 1.0/dt2
        s1 = 1.0/dt1
        ds = (s2-s1)/s1
        ds = ds*100
        dratio = dt1/dt2
        dss = '{:.4f}'.format(dratio)
        dss = '(Column-wise Time)/(Row-wise time) = '+dss
        print(nxys, ';', dss)
```
<h2>Numpy i/o</h2>
```
##################################################################################
# Example: Writing & reading numpy arrays
#
# tofile writes an array's contents as unformatted binary, and fromfile
# reads it back.
#
# NOTE: Regardless of an array's in-memory ordering, tofile ALWAYS writes
# the data in row-major order — so reading it back and reshaping gives the
# same results either way.
import numpy as np

ofile = 'numpy_output.dat'
dt = 'int32'  # 4-byte integers

# 4x2 array holding 1..8 in row-major order (equivalent to assigning each
# element individually).
simple_array = np.arange(1, 9, dtype=dt).reshape(4, 2)

# Writing an array is easy - just specify a filename.
simple_array.tofile(ofile)

# When reading unformatted binary, we specify
#  (1) the filename
#  (2) the datatype
#  (3) the number of values to read
values = np.fromfile(ofile, dtype=dt, count=8)

# The flat data can then be reshaped however we like.
array2da = values.reshape(4, 2)
array2db = values.reshape(2, 4)
array3d = values.reshape(2, 2, 2)

print('')
print('values: ')
print(values)
print('')
print('2-D array; 4 rows, 2 columns):')
print(array2da)
print('')
print('')
print('2-D array; 2 rows, 4 columns):')
print(array2db)
print('')
print('')
print('3-D array:')
print(array3d)
print('')
```
<h2> Reduction (how to use map_reduce)</h2>
```
##########################################################################
# Example: Map & Reduction in Serial Python
#
# If we have a function designed to accept one argument,
# but want to evaluate it for multiple data values, we
# can use Python's "map" function as shorthand for a loop.
#
# If we have a function that we would like to call again
# and again, using results from one call in tandem with
# the subsequent call, we can use the "reduce" function.
from functools import reduce

def squarex(x):
    """Return x squared."""
    return (x*x)

def add(x):
    """Return x doubled (not used below; kept for experimentation)."""
    return (x+x)

def addtwo(x,y):
    """Binary addition — a suitable reduce() operator."""
    return x+y

def multtwo(x,y):
    """Binary multiplication — a suitable reduce() operator."""
    return x*y

arg_list = [1,3,9,27]
# Compute the square of each element of arg_list.
# FIX: reuse arg_list instead of repeating the literal [1,3,9,27], so the
# printed arguments always match the mapped inputs (DRY).
vals = map(squarex, arg_list)
for i,v in enumerate(vals):
    str1 = '{:2d}'.format(arg_list[i])
    str2 = '{:3d}'.format(v)
    print(str1+' squared is '+str2+'.')

#Compute ((1+2)+3)
# -- first 1 and 2 are passed to addtwo
# -- next, (1+2 = 3) and 3 are passed to addtwo
# -- the final result is assigned to res1
res1 = reduce(addtwo,[1,2,3])

#Compute (((1*2) * 3) * 4)
# -- first 1 and 2 are passed to multtwo
# -- next, (1*2=2) and 3 are passed to multtwo   (comment fixed: not addtwo)
# -- next, (2*3=6) and 4 are passed to multtwo   (comment fixed: not addtwo)
# -- the final result is assigned to res2
res2 = reduce(multtwo,[1,2,3,4])
print('')
print(' 1+2+3 = ', res1)
print('1*2*3*4 = ', res2)
```
<h2>Exercise2</h2>
```
#///////////////////////////////////////////////////////////////
# Exercise 2:
# Rewrite the following code using the map and
# reduce functions.
from functools import reduce

# Loop-based original: build b = [2, 4, ..., 2n] and multiply it out.
n = 4
b = []
for i in range(1,n+1):
    b.append(2*i)
print(b)
prod = 1
for i in range(n):
    prod = prod*b[i]
print(prod)

def times2(x):
    """Double x."""
    return(x*2)

n = 4
idx = np.arange(1,n+1,1,dtype='int32')
b = map(times2, idx)
# map() returns a one-shot iterator: it can be consumed exactly once, so
# materialise it into a list before using it more than once.
vals = list(b)
print(vals)

def product(x, y):
    """Binary product for reduce(); equivalent to lambda x, y: x * y."""
    return(x * y)

prod = reduce(product, vals)
print("prod = ", prod)

# Same mapping, materialised directly via list().
new = list(map(times2, idx))
print(new)
```
| github_jupyter |
##### Copyright 2020 The TensorFlow Authors.
Licensed under the Apache License, Version 2.0 (the "License");
```
#@title ##### Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Bayesian Neural Network
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/probability/blob/main/tensorflow_probability/python/experimental/nn/examples/bnn_mnist_advi.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/probability/blob/main/tensorflow_probability/python/experimental/nn/examples/bnn_mnist_advi.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
### 1 Imports
```
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics as sklearn_metrics
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import prefer_static
# Globally Enable XLA.
# tf.config.optimizer.set_jit(True)

# Enable on-demand GPU memory growth when a GPU is present.
try:
  physical_devices = tf.config.list_physical_devices('GPU')
  tf.config.experimental.set_memory_growth(physical_devices[0], True)
except Exception:
  # No GPU present (IndexError) or virtual devices already initialized
  # (RuntimeError/ValueError).  FIX: the original bare `except:` also
  # swallowed KeyboardInterrupt/SystemExit; narrowed to Exception.
  pass

# Short aliases used throughout the notebook.
tfb = tfp.bijectors
tfd = tfp.distributions
tfn = tfp.experimental.nn
```
### 2 Load Dataset
```
dataset_name = 'emnist'
batch_size = 32

# Load train/test splits plus dataset metadata from TFDS.
[train_dataset, eval_dataset], datasets_info = tfds.load(
    name=dataset_name,
    split=['train', 'test'],
    with_info=True,
    as_supervised=True,
    shuffle_files=True)

def _preprocess(image, label):
  """Scale pixel values to [0, 1] and cast labels to int32."""
  image = tf.cast(image, dtype=tf.float32) / 255.
  if dataset_name == 'emnist':
    # EMNIST images are stored transposed relative to MNIST orientation.
    image = tf.transpose(image, perm=[1, 0, 2])
  label = tf.cast(label, dtype=tf.int32)
  return image, label

# Dataset statistics used for sizing the model and the KL penalty.
train_size = datasets_info.splits['train'].num_examples
eval_size = datasets_info.splits['test'].num_examples
num_classes = datasets_info.features['label'].num_classes
image_shape = datasets_info.features['image'].shape
# Human-readable label names for display.
if dataset_name == 'emnist':
  import string
  # EMNIST 'byclass' label order: digits, then upper-case, then lower-case.
  yhuman = np.array(list(string.digits +
                         string.ascii_uppercase +
                         string.ascii_lowercase))
else:
  # BUG FIX: np.range does not exist (AttributeError at runtime);
  # np.arange is the intended call.
  yhuman = np.arange(num_classes).astype(np.int32)
# Subsample to 10k training examples for faster experimentation.
if True:
  orig_train_size = train_size
  train_size = int(10e3)
  train_dataset = train_dataset.shuffle(orig_train_size // 7).repeat(1).take(train_size)
train_dataset = tfn.util.tune_dataset(
    train_dataset,
    batch_size=batch_size,
    shuffle_size=int(train_size / 7),
    preprocess_fn=_preprocess)
# Likewise subsample evaluation to 10k examples.
if True:
  orig_eval_size = eval_size
  eval_size = int(10e3)
  eval_dataset = eval_dataset.shuffle(orig_eval_size // 7).repeat(1).take(eval_size)
eval_dataset = tfn.util.tune_dataset(
    eval_dataset,
    repeat_count=None,
    preprocess_fn=_preprocess)
# Sanity check: display ten eval images with their human-readable labels.
x, y = next(iter(eval_dataset.batch(10)))
tfn.util.display_imgs(x, yhuman[y.numpy()]);
```
### 3 Define Model
```
#@title Optional Custom Posterior
def make_posterior(
kernel_shape,
bias_shape,
dtype=tf.float32,
kernel_initializer=None,
bias_initializer=None,
kernel_name='posterior_kernel',
bias_name='posterior_bias'):
if kernel_initializer is None:
kernel_initializer = tf.initializers.glorot_uniform()
if bias_initializer is None:
bias_initializer = tf.zeros
make_loc = lambda shape, init, name: tf.Variable( # pylint: disable=g-long-lambda
init(shape, dtype=dtype),
name=name + '_loc')
make_scale = lambda shape, name: tfp.util.TransformedVariable( # pylint: disable=g-long-lambda
tf.fill(shape, tf.constant(0.01, dtype)),
tfb.Chain([tfb.Shift(1e-5), tfb.Softplus()]),
name=name + '_scale')
return tfd.JointDistributionSequential([
tfd.Independent(
tfd.Normal(loc=make_loc(kernel_shape, kernel_initializer, kernel_name),
scale=make_scale(kernel_shape, kernel_name)),
reinterpreted_batch_ndims=prefer_static.size(kernel_shape),
name=kernel_name),
tfd.Independent(
tfd.Normal(loc=make_loc(bias_shape, bias_initializer, bias_name),
scale=make_scale(bias_shape, bias_name)),
reinterpreted_batch_ndims=prefer_static.size(bias_shape),
name=bias_name),
])
#@title Optional Custom Prior
def make_prior(
    kernel_shape,
    bias_shape,
    dtype=tf.float32,
    kernel_initializer=None,  # pylint: disable=unused-argument
    bias_initializer=None,  # pylint: disable=unused-argument
    kernel_name='prior_kernel',
    bias_name='prior_bias'):
  """Build a fixed (non-trainable) prior over kernel and bias.

  Kernel prior: an equal-weight mixture of three heavy-tailed StudentT
  components (one centred at 0, two wide ones at +/-3).  Bias prior: a very
  wide Normal, effectively uninformative.
  """
  k = tfd.MixtureSameFamily(
      tfd.Categorical(tf.zeros(3, dtype)),
      tfd.StudentT(
          df=[1,1.,1.], loc=[0,3,-3], scale=tf.constant([1, 10, 10], dtype)))
      #df=[0.5, 1., 1.], loc=[0, 2, -2], scale=tf.constant([0.25, 5, 5], dtype)))
  b = tfd.Normal(0, tf.constant(1000, dtype))
  return tfd.JointDistributionSequential([
      tfd.Sample(k, kernel_shape, name=kernel_name),
      tfd.Sample(b, bias_shape, name=bias_name),
  ])
# Plain Keras max-pooling is safe inside the tfn.Sequential below because it
# has no tf.Variables of its own.
max_pool = tf.keras.layers.MaxPooling2D(  # Has no tf.Variables.
    pool_size=(2, 2),
    strides=(2, 2),
    padding='SAME',
    data_format='channels_last')
def batchnorm(axis):
  """Return a stateless function standardizing x to zero mean / unit
  variance over the given axes (no learned scale or offset)."""
  def normalize(x):
    mean = tf.math.reduce_mean(x, axis=axis, keepdims=True)
    variance = tf.math.reduce_variance(x, axis=axis, keepdims=True)
    return (x - mean) / tf.math.sqrt(variance)
  return normalize
maybe_batchnorm = batchnorm(axis=[-4, -3, -2])
# maybe_batchnorm = lambda x: x
# Bayesian CNN: three variational conv blocks then a variational affine head.
bnn = tfn.Sequential([
    lambda x: 2. * tf.cast(x, tf.float32) - 1.,  # Center.
    tfn.ConvolutionVariationalReparameterization(
        input_size=1,
        output_size=8,
        filter_shape=5,
        padding='SAME',
        init_kernel_fn=tf.initializers.he_uniform(),
        penalty_weight=1 / train_size,  # Global KL weight ("beta") shared by all layers.
        # penalty_weight=1e2 / train_size, # Layer specific "beta".
        # make_posterior_fn=make_posterior,
        # make_prior_fn=make_prior,
        name='conv1'),
    maybe_batchnorm,
    tf.nn.leaky_relu,
    tfn.ConvolutionVariationalReparameterization(
        input_size=8,
        output_size=16,
        filter_shape=5,
        padding='SAME',
        init_kernel_fn=tf.initializers.he_uniform(),
        penalty_weight=1 / train_size,
        # penalty_weight=1e2 / train_size, # Layer specific "beta".
        # make_posterior_fn=make_posterior,
        # make_prior_fn=make_prior,
        name='conv2'),
    maybe_batchnorm,
    tf.nn.leaky_relu,
    max_pool,  # [28, 28, 16] -> [14, 14, 16]
    tfn.ConvolutionVariationalReparameterization(
        input_size=16,
        output_size=32,
        filter_shape=5,
        padding='SAME',
        init_kernel_fn=tf.initializers.he_uniform(),
        penalty_weight=1 / train_size,
        # penalty_weight=1e2 / train_size, # Layer specific "beta".
        # make_posterior_fn=make_posterior,
        # make_prior_fn=make_prior,
        name='conv3'),
    maybe_batchnorm,
    tf.nn.leaky_relu,
    max_pool,  # [14, 14, 32] -> [7, 7, 32]
    tfn.util.flatten_rightmost(ndims=3),
    tfn.AffineVariationalReparameterizationLocal(
        input_size=7 * 7 * 32,
        output_size=num_classes - 1,  # Last logit is pinned to 0 by tfb.Pad below.
        penalty_weight=1. / train_size,
        # make_posterior_fn=make_posterior,
        # make_prior_fn=make_prior,
        name='affine1'),
    tfb.Pad(),  # Appends the fixed 0 logit for the reference class.
    lambda x: tfd.Categorical(logits=x, dtype=tf.int32),
], name='BNN')
# bnn_eval = tfn.Sequential([l for l in bnn.layers if l is not maybe_batchnorm],
# name='bnn_eval')
bnn_eval = bnn
print(bnn.summary())
```
### 4 Loss / Eval
```
def compute_loss_bnn(x, y, beta=1., is_eval=False):
    """Negative-ELBO loss for the BNN: mean NLL plus `beta`-weighted KL penalty.

    Returns `(loss, (nll, kl), d)` where `d` is the predictive distribution.
    """
    if is_eval:
        d = bnn_eval(x)
    else:
        d = bnn(x)
    nll = -tf.reduce_mean(d.log_prob(y), axis=-1)
    kl = bnn.extra_loss  # Accumulated KL penalty from the variational layers.
    return nll + beta * kl, (nll, kl), d
train_iter_bnn = iter(train_dataset)
def train_loss_bnn():
    """Loss closure for make_fit_op: negative ELBO on the next training batch."""
    x, y = next(train_iter_bnn)
    loss, (nll, kl), _ = compute_loss_bnn(x, y)
    return loss, (nll, kl)
opt_bnn = tf.optimizers.Adam(learning_rate=0.003)
fit_bnn = tfn.util.make_fit_op(
train_loss_bnn,
opt_bnn,
bnn.trainable_variables,
grad_summary_fn=lambda gs: tf.nest.map_structure(tf.norm, gs))
#@title Eval Helpers
def all_categories(d):
    """Return every class label of Categorical `d`, shaped to broadcast against its batch.

    The result has shape [num_classes, 1, ..., 1] with one trailing 1 per batch
    dimension of `d`, so `d.log_prob(all_categories(d))` scores all classes at once.
    """
    num_classes = tf.shape(d.logits_parameter())[-1]
    batch_ndims = tf.size(d.batch_shape_tensor())
    expand_shape = tf.pad(
        [num_classes], paddings=[[0, batch_ndims]], constant_values=1)
    return tf.reshape(tf.range(num_classes, dtype=d.dtype), expand_shape)
def rollaxis(x, shift):
    """Cyclically rotate the axes of `x` by `shift` positions (negative = leftward)."""
    ndims = tf.rank(x)
    perm = tf.roll(tf.range(ndims), shift=shift, axis=0)
    return tf.transpose(x, perm)
def compute_eval_stats(y, d, threshold=None):
    """Accuracy and expected-calibration-error statistics for predictions vs labels.

    Args:
        y: integer class labels.
        d: a Categorical-like distribution exposing `.logits`.
        threshold: optional probability cutoff; predictions less confident than
            this are dropped from all statistics ("undecided" examples).

    Returns:
        `(avg_acc, avg_calibration_error, (acc, conf, cnt, edges, bucket))`,
        the last tuple being per-bucket detail from
        `tfp.stats.expected_calibration_error_quantiles`.
    """
    # Assume we have evidence `x`, targets `y`, and model function `dnn`.
    all_pred_log_prob = tf.math.log_softmax(d.logits, axis=-1)
    yhat = tf.argmax(all_pred_log_prob, axis=-1)
    pred_log_prob = tf.reduce_max(all_pred_log_prob, axis=-1)
    # all_pred_log_prob = d.log_prob(all_categories(d))
    # yhat = tf.argmax(all_pred_log_prob, axis=0)
    # pred_log_prob = tf.reduce_max(all_pred_log_prob, axis=0)
    # Alternative #1:
    # all_pred_log_prob = rollaxis(all_pred_log_prob, shift=-1)
    # pred_log_prob, yhat = tf.math.top_k(all_pred_log_prob, k=1, sorted=False)
    # Alternative #2:
    # yhat = tf.argmax(all_pred_log_prob, axis=0)
    # pred_log_prob = tf.gather(rollaxis(all_pred_log_prob, shift=-1),
    #                           yhat,
    #                           batch_dims=len(d.batch_shape))
    if threshold is not None:
        # Keep only predictions whose top-class probability exceeds `threshold`.
        keep = pred_log_prob > tf.math.log(threshold)
        pred_log_prob = tf.boolean_mask(pred_log_prob, keep)
        yhat = tf.boolean_mask(yhat, keep)
        y = tf.boolean_mask(y, keep)
    hit = tf.equal(y, tf.cast(yhat, y.dtype))
    avg_acc = tf.reduce_mean(tf.cast(hit, tf.float32), axis=-1)
    num_buckets = 10
    (
        avg_calibration_error,
        acc,
        conf,
        cnt,
        edges,
        bucket,
    ) = tf.cond(tf.size(y) > 0,
                lambda: tfp.stats.expected_calibration_error_quantiles(
                    hit,
                    pred_log_prob,
                    num_buckets=num_buckets,
                    log_space_buckets=True),
                # If thresholding removed every example, emit NaN placeholders of
                # matching shapes instead of calling the ECE op on empty input.
                lambda: (tf.constant(np.nan),
                         tf.fill([num_buckets], np.nan),
                         tf.fill([num_buckets], np.nan),
                         tf.fill([num_buckets], np.nan),
                         tf.fill([num_buckets + 1], np.nan),
                         tf.constant([], tf.int64)))
    return avg_acc, avg_calibration_error, (acc, conf, cnt, edges, bucket)
eval_iter_bnn = iter(eval_dataset.batch(2000).repeat())
@tfn.util.tfcompile
def eval_bnn(threshold=None, num_inferences=5):
    """One evaluation step: loss on a test batch plus accuracy/ECE statistics.

    When `num_inferences > 1`, the predictive distribution is replaced by the
    probability-space average over that many stochastic forward passes.
    """
    x, y = next(eval_iter_bnn)
    loss, (nll, kl), d = compute_loss_bnn(x, y, is_eval=True)
    if num_inferences > 1:
        # Monte Carlo-average class probabilities across posterior weight samples.
        before_avg_predicted_log_probs = tf.map_fn(
            lambda _: tf.math.log_softmax(bnn(x).logits, axis=-1),
            elems=tf.range(num_inferences),
            dtype=loss.dtype)
        d = tfd.Categorical(logits=tfp.math.reduce_logmeanexp(
            before_avg_predicted_log_probs, axis=0))
    avg_acc, avg_calibration_error, (acc, conf, cnt, edges, bucket) = \
        compute_eval_stats(y, d, threshold=threshold)
    n = tf.reduce_sum(cnt, axis=0)  # Total examples counted across all ECE buckets.
    return loss, (nll, kl, avg_acc, avg_calibration_error, n)
```
### 5 Train
```
DEBUG_MODE = False
# Eager execution makes the tf.function-compiled steps debuggable (slower).
tf.config.experimental_run_functions_eagerly(DEBUG_MODE)
num_train_epochs = 2.  # @param { isTemplate: true}
num_evals = 50  # @param { isTemplate: true}
dur_sec = dur_num = 0  # Wall-clock accumulators for the ms/it readout.
num_train_steps = int(num_train_epochs * train_size)
for i in range(num_train_steps):
    start = time.time()
    trn_loss, (trn_nll, trn_kl), g = fit_bnn()
    stop = time.time()
    dur_sec += stop - start
    dur_num += 1
    # Print eval metrics ~num_evals times over the run, plus on the final step.
    if i % int(num_train_steps / num_evals) == 0 or i == num_train_steps - 1:
        tst_loss, (tst_nll, tst_kl, tst_acc, tst_ece, tst_tot) = eval_bnn()
        f, x = zip(*[
            ('it:{:5}', opt_bnn.iterations),
            ('ms/it:{:6.4f}', dur_sec / max(1., dur_num) * 1000.),
            ('tst_acc:{:6.4f}', tst_acc),
            ('tst_ece:{:6.4f}', tst_ece),
            ('tst_tot:{:5}', tst_tot),
            ('trn_loss:{:6.4f}', trn_loss),
            ('tst_loss:{:6.4f}', tst_loss),
            ('tst_nll:{:6.4f}', tst_nll),
            ('tst_kl:{:6.4f}', tst_kl),
            ('sum_norm_grad:{:6.4f}', sum(g)),
        ])
        # Tensors expose .numpy(); plain Python values fall through unchanged.
        print(' '.join(f).format(*[getattr(x_, 'numpy', lambda: x_)()
                                   for x_ in x]))
        sys.stdout.flush()
        dur_sec = dur_num = 0
# if i % 1000 == 0 or i == maxiter - 1:
#   bnn.save('/tmp/bnn.npz')
```
### 6 Evaluate
```
#@title More Eval Helpers
@tfn.util.tfcompile
def compute_log_probs_bnn(x, num_inferences):
    """Log of mean / std / avg-OVR-std class probabilities over `num_inferences` passes."""
    lp = tf.map_fn(lambda _: tf.math.log_softmax(bnn_eval(x).logits, axis=-1),
                   elems=tf.range(num_inferences),
                   dtype=tf.float32)
    log_mean_prob = tfp.math.reduce_logmeanexp(lp, axis=0)
    # ovr = "one vs rest"
    # NOTE(review): `tf.math.log1p(-lp)` computes log(1 - log p), not log(1 - p);
    # log(1 - p) would be `tf.math.log1p(-tf.math.exp(lp))`. Possibly a bug in
    # this Bernoulli-variance-style term -- confirm intent before relying on it.
    log_avg_std_ovr_prob = tfp.math.reduce_logmeanexp(lp + tf.math.log1p(-lp), axis=0)
    #log_std_prob = 0.5 * tfp.math.log_sub_exp(log_mean2_prob, log_mean_prob * 2.)
    tiny_ = np.finfo(lp.dtype.as_numpy_dtype).tiny
    # The `tiny_` nudge presumably keeps log_sub_exp away from equal arguments
    # (which would produce -inf) -- TODO confirm.
    log_std_prob = 0.5 * tfp.math.reduce_logmeanexp(
        2 * tfp.math.log_sub_exp(lp + tiny_, log_mean_prob),
        axis=0)
    return log_mean_prob, log_std_prob, log_avg_std_ovr_prob
num_inferences = 50
num_chunks = 10
eval_iter_bnn = iter(eval_dataset.batch(eval_size // num_chunks))
@tfn.util.tfcompile
def all_eval_labels_and_log_probs_bnn():
    """Run the BNN uncertainty stats over the whole eval set, `num_chunks` batches at a time.

    Returns flattened `(x, y, log_probs, log_std_probs, log_avg_std_ovr_prob)`
    covering every eval example.
    """
    def _inner(_):
        # Each map_fn step consumes the next batch from eval_iter_bnn.
        x, y = next(eval_iter_bnn)
        return x, y, compute_log_probs_bnn(x, num_inferences)
    x, y, (log_probs, log_std_probs, log_avg_std_ovr_prob) = tf.map_fn(
        _inner,
        elems=tf.range(num_chunks),
        dtype=(tf.float32, tf.int32,) + ((tf.float32,) * 3,))
    # Collapse the chunk dimension back into one flat batch.
    return (
        tf.reshape(x, (-1,) + image_shape),
        tf.reshape(y, [-1]),
        tf.reshape(log_probs, [-1, num_classes]),
        tf.reshape(log_std_probs, [-1, num_classes]),
        tf.reshape(log_avg_std_ovr_prob, [-1, num_classes]),
    )
#@title Run Eval
x, y, log_probs, log_std_probs, log_avg_std_ovr_prob = (
x_, y_, log_probs_, log_std_probs_, log_avg_std_ovr_prob_)
yhat = tf.argmax(log_probs, axis=-1)
max_log_probs = tf.gather(log_probs, yhat, batch_dims=1)
max_log_std_probs = tf.gather(log_std_probs, yhat, batch_dims=1)
max_log_avg_std_ovr_prob = tf.gather(log_avg_std_ovr_prob, yhat, batch_dims=1)
# Sort by ascending confidence.
score = max_log_probs # Mean
#score = -max_log_std_probs # 1 / Sigma
#score = max_log_probs - max_log_std_probs # Mean / Sigma
#score = abs(tf.math.expm1(max_log_std_probs - (max_log_probs + tf.math.log1p(-max_log_probs))))
idx = tf.argsort(score)
score = tf.gather(score, idx)
x = tf.gather(x, idx)
y = tf.gather(y, idx)
yhat = tf.gather(yhat, idx)
hit = tf.cast(tf.equal(y, tf.cast(yhat,y.dtype)), tf.int32)
log_probs = tf.gather(log_probs, idx)
max_log_probs = tf.gather(max_log_probs, idx)
log_std_probs = tf.gather(log_std_probs, idx)
max_log_std_probs = tf.gather(max_log_std_probs, idx)
log_avg_std_ovr_prob = tf.gather(log_avg_std_ovr_prob, idx)
max_log_avg_std_ovr_prob = tf.gather(max_log_avg_std_ovr_prob, idx)
d = tfd.Categorical(logits=log_probs)
max_log_probs = tf.reduce_max(log_probs, axis=-1)
keep = tf.range(500,eval_size)
#threshold = 0.95;
# keep = tf.where(max_log_probs > tf.math.log(threshold))[..., 0]
x_keep = tf.gather(x, keep)
y_keep = tf.gather(y, keep)
log_probs_keep = tf.gather(log_probs, keep)
yhat_keep = tf.gather(yhat, keep)
d_keep = tfd.Categorical(logits=log_probs_keep)
(
avg_acc, ece,
(acc, conf, cnt, edges, bucket),
) = tfn.util.tfcompile(lambda: compute_eval_stats(y, d))()
(
avg_acc_keep, ece_keep,
(acc_keep, conf_keep, cnt_keep, edges_keep, bucket_keep),
) = tfn.util.tfcompile(lambda: compute_eval_stats(y_keep, d_keep))()
print('Accurary (all) : {}'.format(avg_acc))
print('Accurary (certain) : {}'.format(avg_acc_keep))
print('ECE (all) : {}'.format(ece))
print('ECE (certain) : {}'.format(ece_keep))
print('Number undecided: {}'.format(eval_size - tf.size(keep)))
print('Most uncertain:')
ss = (6,12); n = np.prod(ss); s = ss+image_shape
tfn.util.display_imgs(
tf.reshape(x[:n], s),
yhuman[tf.reshape(y[:n], ss).numpy()])
print(tf.reshape(hit[:n], ss).numpy())
print(yhuman[tf.reshape(yhat[:n], ss).numpy()])
print('Least uncertain:')
tfn.util.display_imgs(
tf.reshape(x[-n:], s),
yhuman[tf.reshape(y[-n:], ss).numpy()])
print(tf.reshape(hit[-n:], ss).numpy())
print(yhuman[tf.reshape(yhat[-n:], ss).numpy()])
a = tf.math.exp(max_log_probs)
b = tf.math.exp(max_log_std_probs)
plt.plot(a, b, '.', label='observed');
#sns.jointplot(a.numpy(), b.numpy())
plt.xlabel('mean');
plt.ylabel('std');
p = tf.linspace(0.,1,100)
plt.plot(p, tf.math.sqrt(p * (1 - p)), label='theoretical');
b = max_log_probs
# b = tf.boolean_mask(b, b < 0.)
sns.distplot(tf.math.exp(b).numpy(), bins=20);
plt.xlabel('Posterior Mean Pred Prob');
plt.ylabel('Freq');
b = max_log_std_probs
tiny_ = np.finfo(b.dtype.as_numpy_dtype).tiny
b = tf.boolean_mask(b, b > tf.math.log(tiny_))
sns.distplot(tf.math.exp(b).numpy(), bins=20);
plt.xlabel('Posterior Std. Pred Prob');
plt.ylabel('Freq');
b = max_log_avg_std_ovr_prob
sns.distplot(tf.math.exp(b).numpy(), bins=20);
plt.xlabel('Posterior Avg Std. Pred Prob (OVR)');
plt.ylabel('Freq');
#@title Avg One-vs-Rest AUC
try:
bnn_auc = sklearn_metrics.roc_auc_score(
y_keep,
log_probs_keep,
average='macro',
multi_class='ovr')
print('Avg per class AUC:\n{}'.format(bnn_auc))
except TypeError:
bnn_auc = np.array([
sklearn_metrics.roc_auc_score(tf.equal(y_keep, i), log_probs_keep[:, i])
for i in range(num_classes)])
print('Avg per class AUC:\n{}'.format(bnn_auc.mean()))
```
### 7 Appendix: Compare against DNN
```
max_pool = tf.keras.layers.MaxPooling2D( # Has no tf.Variables.
pool_size=(2, 2),
strides=(2, 2),
padding='SAME',
data_format='channels_last')
maybe_batchnorm = batchnorm(axis=[-4, -3, -2])
# maybe_batchnorm = lambda x: x
# Deterministic counterpart of the BNN, with matching architecture/layer names.
dnn = tfn.Sequential([
    lambda x: 2. * tf.cast(x, tf.float32) - 1.,  # Center.
    tfn.Convolution(
        input_size=1,
        output_size=8,
        filter_shape=5,
        padding='SAME',
        init_kernel_fn=tf.initializers.he_uniform(),
        name='conv1'),
    maybe_batchnorm,
    tf.nn.leaky_relu,
    tfn.Convolution(
        input_size=8,
        output_size=16,
        filter_shape=5,
        padding='SAME',
        init_kernel_fn=tf.initializers.he_uniform(),
        # Was duplicated as 'conv1' (clashing with the first layer); renamed to
        # match the BNN's conv1/conv2/conv3 naming.
        name='conv2'),
    maybe_batchnorm,
    tf.nn.leaky_relu,
    max_pool,  # [28, 28, 16] -> [14, 14, 16]
    tfn.Convolution(
        input_size=16,
        output_size=32,
        filter_shape=5,
        padding='SAME',
        init_kernel_fn=tf.initializers.he_uniform(),
        name='conv3'),  # Was 'conv2'; renamed for the same reason.
    maybe_batchnorm,
    tf.nn.leaky_relu,
    max_pool,  # [14, 14, 32] -> [7, 7, 32]
    tfn.util.flatten_rightmost(ndims=3),
    tfn.Affine(
        input_size=7 * 7 * 32,
        output_size=num_classes - 1,  # Last logit is pinned to 0 by tfb.Pad below.
        name='affine1'),
    tfb.Pad(),  # Appends the fixed 0 logit for the reference class.
    lambda x: tfd.Categorical(logits=x, dtype=tf.int32),
], name='DNN')
# dnn_eval = tfn.Sequential([l for l in dnn.layers if l is not maybe_batchnorm],
# name='dnn_eval')
dnn_eval = dnn
print(dnn.summary())
def compute_loss_dnn(x, y, is_eval=False):
    """Mean negative log-likelihood of labels `y` under the DNN's prediction for `x`.

    Returns `(nll, d)` where `d` is the predictive distribution.
    """
    if is_eval:
        d = dnn_eval(x)
    else:
        d = dnn(x)
    log_likelihoods = d.log_prob(y)
    return -tf.reduce_mean(log_likelihoods, axis=-1), d
train_iter_dnn = iter(train_dataset)
def train_loss_dnn():
    """Loss closure for make_fit_op: NLL on the next training batch (no KL term)."""
    x, y = next(train_iter_dnn)
    nll, _ = compute_loss_dnn(x, y)
    return nll, None  # Second slot mirrors the BNN's (nll, kl) extras; unused here.
opt_dnn = tf.optimizers.Adam(learning_rate=0.003)
fit_dnn = tfn.util.make_fit_op(
train_loss_dnn,
opt_dnn,
dnn.trainable_variables,
grad_summary_fn=lambda gs: tf.nest.map_structure(tf.norm, gs))
eval_iter_dnn = iter(eval_dataset.batch(2000).repeat())
@tfn.util.tfcompile
def eval_dnn(threshold=None):
    """One DNN evaluation step: test-batch loss plus accuracy/ECE summary stats."""
    x, y = next(eval_iter_dnn)
    loss, d = compute_loss_dnn(x, y, is_eval=True)
    avg_acc, avg_calibration_error, _ = compute_eval_stats(
        y, d, threshold=threshold)
    return loss, (avg_acc, avg_calibration_error)
num_train_epochs = 2.  # @param { isTemplate: true}
num_evals = 25  # @param { isTemplate: true}
dur_sec = dur_num = 0  # Wall-clock accumulators for the ms/it readout.
num_train_steps = int(num_train_epochs * train_size)
# Guard against num_evals > num_train_steps (int division would yield 0 and
# make the modulus below raise).
eval_every = max(1, num_train_steps // num_evals)
for i in range(num_train_steps):
    start = time.time()
    trn_loss, _, g = fit_dnn()
    stop = time.time()
    dur_sec += stop - start
    dur_num += 1
    # Print eval metrics ~num_evals times over the run, plus on the final step.
    if i % eval_every == 0 or i == num_train_steps - 1:
        tst_loss, (tst_acc, tst_ece) = eval_dnn()
        # NOTE: eval_dnn reports no NLL/KL split, so the former
        # 'tst_nll'/'tst_kl' entries (which printed stale values left over
        # from the BNN loop above) are dropped.
        f, x = zip(*[
            ('it:{:5}', opt_dnn.iterations),
            ('ms/it:{:6.4f}', dur_sec / max(1., dur_num) * 1000.),
            ('tst_acc:{:6.4f}', tst_acc),
            ('tst_ece:{:6.4f}', tst_ece),
            ('trn_loss:{:6.4f}', trn_loss),
            ('tst_loss:{:6.4f}', tst_loss),
            ('sum_norm_grad:{:6.4f}', sum(g)),
        ])
        # Tensors expose .numpy(); plain Python values fall through unchanged.
        print(' '.join(f).format(*[getattr(x_, 'numpy', lambda: x_)()
                                   for x_ in x]))
        sys.stdout.flush()
        dur_sec = dur_num = 0
# if i % 1000 == 0 or i == maxiter - 1:
#   dnn.save('/tmp/dnn.npz')
#@title Run Eval
eval_iter_dnn = iter(eval_dataset.batch(eval_size))
@tfn.util.tfcompile
def compute_log_probs_dnn():
    """Return (x, y, per-class log-probs) for the entire eval set in one batch."""
    x, y = next(eval_iter_dnn)
    lp = tf.math.log_softmax(dnn_eval(x).logits, axis=-1)
    return x, y, lp
x, y, log_probs = compute_log_probs_dnn()
max_log_probs = tf.reduce_max(log_probs, axis=-1)
idx = tf.argsort(max_log_probs)
x = tf.gather(x, idx)
y = tf.gather(y, idx)
log_probs = tf.gather(log_probs, idx)
max_log_probs = tf.gather(max_log_probs, idx)
yhat = tf.argmax(log_probs, axis=-1)
d = tfd.Categorical(logits=log_probs)
hit = tf.cast(tf.equal(y, tf.cast(yhat, y.dtype)), tf.int32)
#threshold = 1.-1e-5
#keep = tf.where(max_log_probs >= np.log(threshold))[..., 0]
keep = tf.range(500, eval_size)
x_keep = tf.gather(x, keep)
y_keep = tf.gather(y, keep)
yhat_keep = tf.gather(yhat, keep)
log_probs_keep = tf.gather(log_probs, keep)
max_log_probs_keep = tf.gather(max_log_probs, keep)
hit_keep = tf.gather(hit, keep)
d_keep = tfd.Categorical(logits=log_probs_keep)
(
avg_acc, ece,
(acc, conf, cnt, edges, bucket),
) = tfn.util.tfcompile(lambda: compute_eval_stats(y, d))()
(
avg_acc_keep, ece_keep,
(acc_keep, conf_keep, cnt_keep, edges_keep, bucket_keep),
) = tfn.util.tfcompile(lambda: compute_eval_stats(y_keep, d_keep))()
print('Number of examples undecided: {}'.format(eval_size - tf.size(keep)))
print('Accurary before excluding undecided ones: {}'.format(avg_acc))
print('Accurary after excluding undecided ones: {}'.format(avg_acc_keep))
print('ECE before/after.', ece.numpy(), ece_keep.numpy())
print('Most uncertain:')
ss = (6,12); n = np.prod(ss); s = ss+image_shape
tfn.util.display_imgs(
tf.reshape(x[:n], s),
yhuman[tf.reshape(y[:n], ss).numpy()])
print(tf.reshape(hit[:n], ss).numpy())
print(yhuman[tf.reshape(yhat[:n], ss).numpy()])
print('Least uncertain:')
tfn.util.display_imgs(
tf.reshape(x[-n:], s),
yhuman[tf.reshape(y[-n:], ss).numpy()])
print(tf.reshape(hit[-n:], ss).numpy())
print(yhuman[tf.reshape(yhat[-n:], ss).numpy()])
b = max_log_probs + tf.math.log1p(-max_log_probs); b=tf.boolean_mask(b,b<-1e-12)
sns.distplot(tf.math.exp(b).numpy(), bins=20);
#@title Avg One-vs-Rest AUC
try:
dnn_auc = sklearn_metrics.roc_auc_score(
y_keep,
log_probs_keep,
average='macro',
multi_class='ovr')
print('Avg per class AUC:\n{}'.format(dnn_auc))
except TypeError:
dnn_auc = np.array([
sklearn_metrics.roc_auc_score(tf.equal(y_keep, i), log_probs_keep[:, i])
for i in range(num_classes)])
print('Avg per class AUC:\n{}'.format(dnn_auc.mean()))
```
| github_jupyter |
# Structured Dataset Profiling with Lens
### Find the code
This notebook can be found on [github](https://github.com/credo-ai/credoai_lens/blob/develop/docs/notebooks/lens_demos/dataset_profiling.ipynb).
## Contents
1. [What is Covered](#What-is-Covered)
2. [Introduction](#Introduction)
3. [Dataset](#Dataset)
4. [Running Lens](#Running-Lens)
## What is Covered <a name="What-is-Covered"></a>
* **Domain:**
* Applications that rely on structured datasets.
* **ML task:**
* Exploratory data analysis for model training, validation, and testing with structured datasets.
## Introduction <a name="Introduction"></a>
Structured data conforms to a tabular format with relationship between the different rows and columns. Many machine learning models are trained, validated, and tested on structured datasets.
Exploratory analysis of a structured dataset provides insights for a more informed assessment of the ML model. Lens Dataset Profiling module uses pandas_profiling to enable this analysis through generating data profiles.
## Dataset <a name="Dataset"></a>
The [Census Adult Dataset](https://archive.ics.uci.edu/ml/datasets/adult) is from the Census Bureau; the label is whether a given adult makes more than $50K a year, based on attributes such as sex and education.
The dataset provides 13 input variables that are a mixture of categorical, ordinal, and numerical data types. The complete list of variables is as follows:
Age, Workclass, Education, Education Number of Years, Marital-status, Occupation, Relationship, Race, Sex, Capital-gain, Capital-loss, Hours-per-week, and Native-country.
```
import numpy as np
# Imports for demo data
from credoai.data import fetch_censusincome
# Base Lens imports
import credoai.lens as cl
import credoai.assessment as assess
cl.set_logging_level('info')
# set default format for image displays. Change to 'png' if 'svg' is failing
%config InlineBackend.figure_formats = ['svg']
data = fetch_censusincome()
df = data['data'].copy()
df['target'] = data['target']
df.head(3)
```
Prepare missing values
```
df = df.replace("\\?", np.nan, regex=True)
```
## Running Lens <a name="Running-Lens"></a>
The first step is creating a Lens CredoData artifact. This will hold the structured dataset and the meta information needed for doing the assessment. CredoData has the following parameters:
`name` : an arbitrary name that you want to assign to the object (str)
`data` : dataset dataframe that includes all features and labels (pd.DataFrame)
`label_key` : name of the label column in your data, like "label" (`str`)
```
label_key = 'target'
categorical_features_keys = ['workclass', 'education', 'marital.status', 'occupation', 'relationship', 'race', 'sex', 'native.country']
# Set up the data artifact
credo_data = cl.CredoData(name='census-income',
data=df,
label_key=label_key)
lens = cl.Lens(data=credo_data, assessments=[assess.DatasetProfilingAssessment])
results = lens.run_assessments().display_results()
```
| github_jupyter |
```
import time
```
- There is a data dir which contains two txt files
- Use your GoogleFu to figure out how to use Python to open these files
- Save "books_published_last_two_years.txt" in the variable "recent_books"
- Save "all_coding_books.txt" in the variable "coding_books"
```
# Your code here
import os
notebook_path = os.path.abspath("CodeChallenge.ipynb")
recent_books_path = os.path.join(os.path.dirname(notebook_path), "data\\books_published_last_two_years.txt")
all_coding_books_path = os.path.join(os.path.dirname(notebook_path), "data\\all_coding_books.txt")
recent_books_path
all_coding_books_path
with open(recent_books_path, 'r') as f_open:
recent_books = f_open.read()
with open(all_coding_books_path, 'r') as f_open:
all_coding_books = f_open.read()
```
Print how many books each file has
```
# Count the books listed in each file (one title per line).
with open(recent_books_path, 'r') as f_open:
    lines = len(f_open.readlines())
print(lines)
with open(all_coding_books_path, 'r') as f_open:
    lines = len(f_open.readlines())
    # The original called f_open.readlines() a second time here and printed the
    # result, which is always [] because the first call exhausted the file.
print(lines)
```
Problem: Using a loop, find which books "recent_books" and "coding_books" have in common (time how long it takes).
```
# Starter code
start = time.time()  # This allows you to time the code
# Your code here
# Read each file as a list of titles (one per line).
with open(recent_books_path, 'r') as f_open:
    list1 = f_open.read().split("\n")
with open(all_coding_books_path, 'r') as f_open:
    list2 = f_open.read().split("\n")
# Set intersection runs in roughly linear time -- far faster than a nested loop.
final_list = list(set(list1) & set(list2))
#print(final_list)
print(len(final_list))
print(f"Execution time: {time.time() - start}")  # This prints how long it took to run your code
# print(final_list)
```
Problem: Can you think of a way to make this code run faster? Anything YOU (emphasis on you!) can think of is fair game (time your code).
```
start = time.time()
# Your code here
import threading

exitFlag = 0

class myThread (threading.Thread):
    """Reads one book-list file on its own thread; titles land in self.result."""
    def __init__(self, threadID, name, counter, booklist):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.counter = counter
        self.booklist = booklist
        self.result = []  # Filled by run(); empty until the thread finishes.
    def run(self):
        with open(self.booklist, 'r') as f_open:
            # Store on the instance: the original bound a *local* variable,
            # so the file contents were silently discarded.
            self.result = f_open.read().split("\n")

# Create new threads
thread1 = myThread(1, "Thread-1", 1, recent_books_path)
thread2 = myThread(2, "Thread-2", 2, all_coding_books_path)
# Start new Threads
thread1.start()
thread2.start()
# Wait for both reads to finish before intersecting. The original never
# joined, and intersected `list1`/`list2` left over from the previous cell,
# so the threads' work was never actually used.
thread1.join()
thread2.join()
final_list = list(set(thread1.result) & set(thread2.result))
print(len(final_list))
#print(final_list)
print(f"Execution time: {time.time() - start}")
```
| github_jupyter |
# Training a Linear Model to Predict Length of Stay
The [Population Health Management Solution](https://github.com/Azure/cortana-intelligence-population-health-management/tree/master/Azure%20Data%20Lake) uses U-SQL queries in Data Lake Analytics to apply trained models to input data. The solution copies pre-trained models to an Azure Data Lake Store account (under the folder `forphmdeploymentbyadf`) for this purpose. In this notebook, we provide the code demonstrating how xgboost models can be created for this purpose. (See the notebook named "Length Of Stay Models -- lm" for the code used to create the linear models employed by the solution.)
Running this notebook will create the models used in this solution and store them in a folder named `xgboostLOSmodelsfolder` in your current working directory.
## Outline <a id="BackToTop"></a>
* [Problem Description](#probdesc)
* [Get Data](#getdata)
* [Data Description](#datadesc)
* [Data Exploration](#dataexp)
* [Feature Engineering](#featureeng)
* [Create the Model](#model)
* [Results](#results)
## Problem Description <a id="probdesc"></a>
### Why predict the length of a hospital stay?
Recent legislative changes have standardized payments for procedures performed, regardless of the number of days a patient actually spends in the hospital. Hospitals are therefore strongly incentivized to use resources more efficiently and find ways to accommodate more patients with the same volume of resources. An accurate prediction of each patient's length of stay can help hospitals:
1. Manage bed occupancy
2. Effectively schedule elective admissions
3. Improve patient satisfaction during their hospital stay
Extended lengths of stay cost hospitals millions of dollars a year. By identifying patients at risk for an extended stay, hospitals can take proactive measures to formulate a treatment plan that reduces the expected length of stay.
### When should the prediction be used?
Hospitals want to predict the length of each patient's stay at the time of admission and provide this information to the admitting nurse or staff. Our model is trained using encounter-level records for a million or so patients from 23 hospitals (obtained from the Healthcare Cost and Utilization Project, or [HCUP](https://www.hcup-us.ahrq.gov/)) and is suitable for use on similar patient populations, though we recommend that hospitals retrain the model using their own historical patient data for best results. To be applied to newly-admitted patients, the model must be trained using only features that are available for each patient at the time of their admission.
[Back To Top](#BackToTop)
<a id="getdata"></a>
## Get Data
Here, we download copies of the training data (~2 GB) from the web to your current working directory. This will likely take a few minutes, depending on your bandwidth:
```
# clear workspace and collect garbage
rm(list=ls())
gc()
Sys.time()
url1 <- "https://phm.blob.core.windows.net/models/core_data.csv"
url2 <- "https://phm.blob.core.windows.net/models/charges_data.csv"
url3 <- "https://phm.blob.core.windows.net/models/severity_data.csv"
url4 <- "https://phm.blob.core.windows.net/models/dxpr_data.csv"
pathd <- getwd()
dest1 <- paste(pathd, "/core_data.csv", sep='')
dest2 <- paste(pathd, "/charges_data.csv", sep='')
dest3 <- paste(pathd, "/severity_data.csv", sep='')
dest4 <- paste(pathd, "/dxpr_data.csv", sep='')
download.file(url1, dest1)
download.file(url2, dest2)
download.file(url3, dest3)
download.file(url4, dest4)
Sys.time()
```
We now read the input files into memory (this will likely take a few minutes, given the combined size of the files):
```
Sys.time()
dat_core <- read.csv(dest1)
dat_chrg <- read.csv(dest2)
dat_sevr <- read.csv(dest3)
dat_dxpr <- read.csv(dest4)
Sys.time()
```
Confirm that the input files were downloaded and read fully by checking the data dimensions:
```
dim(dat_core) # expected: 1103172 x 248
dim(dat_chrg) # expected: 1103172 x 155
dim(dat_sevr) # expected: 1103172 x 30
dim(dat_dxpr) # expected: 1103172 x 180
```
Merge the input files and delete the intermediate variables:
```
dat <- merge(merge(merge(dat_sevr, dat_chrg, by="KEY"), dat_core, by="KEY"), dat_dxpr, by="KEY")
dim(dat) # expected: 1103172 x 610
rm(dat_core, dat_chrg, dat_sevr, dat_dxpr)
```
[Back To Top](#BackToTop)
<a id="datadesc"></a>
## Data Description
We list the columns contained in the dataset below. A full description of the columns in this dataset can be found in the [data dictionary](https://www.hcup-us.ahrq.gov/db/state/siddist/siddistvarnote2013.jsp).
```
names(dat)
```
We perform a few sanity checks on the integrity of the downloaded data, ensuring that the month column contains the values for months and so on:
```
unique(dat$AYEAR)
unique(dat$AMONTH)
length(unique(dat$AMONTH))
unique(dat$FEMALE)
```
The dataset currently contains 610 columns, but some are not suitable for use with our model because they are not available at the time of a patient's admission (when the length-of-stay prediction will be made). We will use just the following 135 columns for building the length-of-stay model:
```
cols_demographic <- c('AGE', 'FEMALE', 'RACE', 'MEDINCSTQ', 'PSTATE', 'ZIP', 'HOSPST', 'PAY1', 'PAY2', 'PAY3')
cols_admitinfo <- c('KEY', 'VisitLink', 'DSHOSPID', 'ATYPE', 'AMONTH', 'PointOfOriginUB04', 'TRAN_IN')
cols_Diagnosis_present_on_admission <- grep('DXPOA', names(dat), value=T)
cols_ECode_present_on_admission <- grep('E_POA', names(dat), value=T)
cols_ICD9_CM_Chronic_Condition_Indicators <- grep('^CHRON[0-9]', names(dat), value=T)
cols_Chronic_Condition_Indicators_BodySystem <- grep('^CHRONB', names(dat), value=T)
cols_comorbidity_measure_ICD9_CMcodes <- grep('^CM_', names(dat), value=T)
cols_primaryDiagnosis <- c('DX1', 'DXCCS1', 'DXMCCS1')
cols_counts <- c('NDX', 'NCHRONIC')
cols_Target <- c('LOS')
cols4los <- c(cols_demographic, cols_admitinfo, cols_Diagnosis_present_on_admission, cols_ECode_present_on_admission,
cols_ICD9_CM_Chronic_Condition_Indicators, cols_Chronic_Condition_Indicators_BodySystem,
cols_comorbidity_measure_ICD9_CMcodes, cols_primaryDiagnosis, cols_counts, cols_Target)
length(cols4los)
```
We now reduce the dataset to just the columns of interest:
```
dat4los <- dat[,cols4los]
dim(dat4los)
```
[Back To Top](#BackToTop)
## Data Exploration <a id="dataexp"></a>
### Categorical features
The dataset contains 13 categorical features.
'DSHOSPID', 'FEMALE', 'RACE', 'ATYPE', 'AMONTH', 'PointOfOriginUB04', 'TRAN_IN', 'MEDINCSTQ', 'PSTATE', 'PAY1', 'DXCCS1', 'DXMCCS1', 'ZIP'
Below, we enumerate some of these features and show their possible values:
```
cat_cols <- c('DSHOSPID', 'FEMALE', 'RACE', 'ATYPE', 'AMONTH', 'PointOfOriginUB04', 'TRAN_IN',
'MEDINCSTQ', 'PSTATE', 'PAY1', 'DXMCCS1')
apply(dat4los[, cat_cols], 2, FUN=function(x){length(unique(x))})
apply(dat4los[, cat_cols], 2, FUN=function(x){unique(x)})
```
### Distribution of Length Of Stay
Below, we plot the distribution of values for our model's prediction target, `LOS` (length of stay):
```
losbreaks <- c(0, 2, 4, 6, 10, 20, 365)
loslabels <- c('vshort', 'short', 'medium', 'long', 'vlong', 'extreme')
losdist <- data.frame(table(cut(as.numeric(dat4los$LOS), breaks=losbreaks, labels=loslabels)))
options(repr.plot.width=7, repr.plot.height=6)
bp <- barplot(losdist$Freq, names.arg=losdist$Var1, main="Length of Stay")
bp
legend("topright",
fill=c("grey"),
c('vshort (0-2d)', 'short (2-4d)', 'medium (4-6d)', 'long (6-10d)', 'vlong (10-20d)', 'extreme (20-365d)'))
```
[Back To Top](#BackToTop)
<a id="featureeng"></a>
## Feature Engineering
Create some additional features from raw data.
### Count diagnoses present on admission
Find the columns named DXPOA1 - DXPOA25, convert them from string to integer values, perform the count, then delete the original DXPOA columns:
```
indDXPOA <- grep('DXPOA', names(dat4los))
dat4los[, indDXPOA] <- apply(dat4los[, indDXPOA], 2, FUN=function(x){ifelse(x=='Y', 1, x)})
dat4los[, indDXPOA] <- apply(dat4los[, indDXPOA], 2, FUN=function(x){ifelse(x==1, x, 0)})
dat4los$num_DXPOA <- apply(dat4los[, indDXPOA], 1, FUN=function(x){length(x[x=='1'])})
dat4los <- dat4los[, -indDXPOA]
```
### Count external causes of injury present on admission
Delete the original binary indicators.
```
indE_POA <- grep('E_POA', names(dat4los))
dat4los[, indE_POA] <- apply(dat4los[, indE_POA], 2, FUN=function(x){ifelse(x=='Y', 1, x)})
dat4los[, indE_POA] <- apply(dat4los[, indE_POA], 2, FUN=function(x){ifelse(x==1, x, 0)})
dat4los$num_E_POA <- apply(dat4los[, indE_POA], 1, FUN=function(x){length(x[x=='1'])})
dat4los <- dat4los[, -indE_POA]
```
### Count the number of body systems affected by chronic conditions
Delete the original binary indicators (both at the body system and individual condition levels).
```
indchronB <- grep('^CHRONB[0-9]', names(dat4los))
dat4los$num_uCHRONB <- apply(dat4los[indchronB], 1, FUN=function(x){length(unique(x[!is.na(x)]))})
dat4los <- dat4los[, -indchronB]
# Won't use these for anything either
indCHRON <- grep('^CHRON[0-9]', names(dat4los))
dat4los <- dat4los[, -indCHRON]
```
### Count number of payers (medicare, medicaid, private insurance, ...)
Delete the mostly-missing columns `PAY2` and `PAY3` when done.
```
indPAY <- grep('PAY', names(dat4los), value=T)
dat4los$num_PAY <- apply(dat4los[, grep('PAY', names(dat4los), value=T)],
1,
FUN=function(x){length(x[!is.na(x)])})
dat4los$PAY2 <- NULL
dat4los$PAY3 <- NULL
```
### Count pre-existing (comorbid) conditions
Delete the original binary indicators when done.
```
indCM <- grep('CM_', names(dat4los))
dat4los$num_CM <- apply(dat4los[, indCM], 1, FUN=function(x){(length(x[x==1]))})
dat4los <- dat4los[, -indCM]
```
### Remove rows with invalid point of origin values
```
indgood <- grep('[0-9A-Za-z]', dat4los$PointOfOriginUB04)
dat4los <- dat4los[indgood, ]
```
### Remove `HOSPT` and `DX1` columns
We will apply the model to simulated data where there is only one state, so we will derive no benefit from including the hospital state (`HOSPST`) column. We will also remove the `DX1` column (we'll use the more specific `DXCCS1` feature instead).
```
dat4los <- dat4los[, !names(dat4los) %in% c('HOSPST')]
dat4los <- dat4los[, !names(dat4los) %in% c('DX1')]
```
### Keep just the first three digits of the zip code
Allows us to group hospitals by larger geographical regions.
```
dat4los$ZIP3 <- substr(dat4los$ZIP, 1, 3)
dat4los$ZIP <- NULL
```
After this, we are left with only 24 columns for modeling:
```
dim(dat4los)
```
## Cast features and save the cleaned data
Now we ensure that the data types for these columns are properly defined as categorical or numeric:
```
# make these columns categorical
cat_cols <- c('DSHOSPID', 'FEMALE', 'RACE', 'ATYPE', 'AMONTH', 'PointOfOriginUB04', 'TRAN_IN',
'MEDINCSTQ', 'PSTATE', 'PAY1', 'DXCCS1', 'DXMCCS1', 'ZIP3')
makecatg <- sapply(dat4los[, cat_cols], FUN=function(x){as.factor(x)})
makecatg <- as.data.frame(makecatg)
dat4los[, cat_cols] <- makecatg
# make these columns numeric
cat_num <- c('AGE', 'LOS', 'NDX', 'NCHRONIC', 'num_DXPOA', 'num_E_POA', 'num_uCHRONB', 'num_PAY', 'num_CM')
makenum <- sapply(dat4los[, cat_num], FUN=function(x){as.numeric(x)})
makenum <- as.data.frame(makenum)
dat4los[, cat_num] <- makenum
data_mod <- dat4los
str(data_mod)
```
As a sanity check, we enumerate the possible values of the categorical features specifically:
```
levelinfo <- sapply(dat4los[, cat_cols], FUN=function(x){unique(x)})
str(levelinfo)
levelinfo[['FEMALE']]
levelinfo[['RACE']]
save(levelinfo, file='listoflevels.Rdata')
```
### Define function to evaluate model performance
Computes and returns the following common regression evaluation metrics:
- Mean Absolute Error (MAE)
- Root Mean Squared Error (RMSE)
- Coefficient of determination (Rsquare), as a percentage
- Relative Absolute Error (RAE)
- Relative Squared Error (RSE)
```
# Compute common regression metrics for a vector of targets and the
# corresponding predictions: MAE, RMSE, RAE, RSE, and R-squared (as a
# percentage). Returned as a one-row data frame.
regression_res <- function(Target, Prediction){
    n <- length(Target)
    abs_err <- abs(Target - Prediction)
    MAE <- sum(abs_err) / n
    RMSE <- sqrt(sum(abs_err ^ 2) / n)
    Rsquare <- (cor(Target, Prediction) ^ 2) * 100
    # Relative errors are normalized by a predict-the-mean baseline.
    baseline_abs <- sum(abs(Target - mean(Target)))
    baseline_sq <- sum(abs(Target - mean(Target)) ^ 2)
    RAE <- sum(abs_err) / baseline_abs
    RSE <- sum(abs_err ^ 2) / baseline_sq
    data.frame(MAE=MAE, RMSE=RMSE, RAE=RAE, RSE=RSE, Rsq=Rsquare)
}
# Create empty data frames (correct column names/types, zero rows) for
# accumulating training and testing metrics via rbind() later: build a
# one-row template, then drop that row.
res_train <- data.frame(model_name='hosp_X_LOSmodel', MAE=0, RMSE=0, RAE=0, RSE=0, Rsq=0)
res_train <- res_train[-1,]
res_test <- data.frame(model_name='hosp_X_LOSmodel', MAE=0, RMSE=0, RAE=0, RSE=0, Rsq=0)
res_test <- res_test[-1,]
```
[Back To Top](#BackToTop)
## Create the Models <a id="model"></a>
We will create 10 models for 10 individual hospitals and an additional model for all the other hospitals.
```
# The ten hospitals that get individual models; every other hospital id
# is pooled into one additional "allotherhosp" model.
selected_hosp <- c('hosp_1', 'hosp_2', 'hosp_3', 'hosp_4', 'hosp_5', 'hosp_6', 'hosp_7',
                   'hosp_8', 'hosp_9', 'hosp_10')
allotherhosp <- unique(data_mod$DSHOSPID)[!unique(data_mod$DSHOSPID) %in% selected_hosp]
allotherhosp <- as.character(allotherhosp)
allotherhosp
```
For each model, we will split the available data into training and test sets with the following approach:
- Attempt to evenly split the data in each level of `DXCCS1` (the categorical feature with the most levels) to create an initial train/test partitioning.
- Because some categorical levels are rare, we may find that the training dataset does not contain some levels that are present in the test set. Remedy this by moving any such rows from the test set to the training set.
- Ensure that no patients are present in both the training and test sets (by transferring half of any offending patients to the training set, and the other half to the test set)
- Drop columns that we do not want to use for testing or training:
- `myrownum`, `KEY`, and `VisitLink`, because they are uninformative and could result in overfitting
- `DSHOSPID`, because most of our models will be trained on data from a single hospital
```
# Create a folder in current working directory to save the trained models.
# Create a folder in the current working directory to save the trained models.
wrdir <- getwd()
modeldir <- 'xgboostLOSmodelsfolder'
dir.create(file.path(wrdir, modeldir), showWarnings=FALSE)
modelsLocation <- paste(wrdir, modeldir, sep='/')
modelsLocation <- paste0(modelsLocation, '/')
# (re)create the empty data frames for storing training and testing metrics
res_train <- data.frame(model_name='hosp_X_LOSmodel', MAE=0, RMSE=0, RAE=0, RSE=0, Rsq=0)
res_train <- res_train[-1,]
res_test <- data.frame(model_name='hosp_X_LOSmodel', MAE=0, RMSE=0, RAE=0, RSE=0, Rsq=0)
res_test <- res_test[-1,]
# Load caret and xgboost.  Use the machine-specific library path only when it
# exists; previously library() was called unconditionally with this hardcoded
# Windows path, which aborts on any other machine before the require/install
# fallback below could ever run.
shaheen_lib_path <- 'C:/dsvm/notebooks/HealthcareSolution/shaheen_lib'
if (dir.exists(shaheen_lib_path)) {
  library("caret", lib.loc=shaheen_lib_path)
  library("xgboost", lib.loc=shaheen_lib_path)
}
if(!require(xgboost)) {
  install.packages("xgboost")
  library(xgboost)
}
if(!require(caret)) {
  install.packages("caret")
  library(caret)
}
```
Train all of the models and store data on their performance:
```
# Train one xgboost LOS model per selected hospital, plus a pooled model for
# all remaining hospitals.  For each model: one-hot encode the categoricals,
# split ~60/40 into train/test, force each patient (VisitLink) into exactly
# one split, drop identifier columns, fit, save the model to disk, and append
# train/test regression metrics to res_train / res_test.
for (h in 1:(length(selected_hosp)+1)){
# subset data for the hospital to build a model for
cat('h=',h,'\n')
if(h==(length(selected_hosp)+1)){
# final iteration: pool every hospital that is not individually modeled
cat('allotherhosp',allotherhosp,'\n')
sub_data_mod <- subset(data_mod,data_mod$DSHOSPID %in% allotherhosp)
cat(unique(as.character(sub_data_mod$DSHOSPID)),'\n')
model_name <- paste('allotherhosp','_LOSmodel',sep='')
model_name <- paste0(modelsLocation, model_name)
cat('model_name =',model_name,'\n')
} else {
cat('selected_hosp[h]',selected_hosp[h],'\n')
sub_data_mod <- subset(data_mod,data_mod$DSHOSPID %in% selected_hosp[h])
cat(unique(as.character(sub_data_mod$DSHOSPID)),'\n')
model_name <- paste(unique(as.character(sub_data_mod$DSHOSPID)),'_LOSmodel',sep='')
model_name <- paste0(modelsLocation, model_name)
cat('model_name =',model_name,'\n')
}
dim(sub_data_mod)
# sub_data_mod contains data for a hospital, build LOS model for this hospital and save the model with name model_name
sub_data_mod <- sub_data_mod[complete.cases(sub_data_mod),] # ensure all rows are complete
# convert categorical variable into numeric vector using One Hot Encoding
# --- one-hot-encoding categorical features
ohe_feats <- cat_cols
oheformula <- as.formula(paste('~',paste(cat_cols,collapse=' + '),sep=' '))
dummies <- dummyVars(oheformula, data = sub_data_mod)
df_all_ohe <- as.data.frame(predict(dummies, newdata = sub_data_mod)) # takes time
df_all_combined <- cbind(sub_data_mod[,-c(which(colnames(sub_data_mod) %in% ohe_feats))],df_all_ohe)
# split df_all_combined into train and test
# 60% of the sample size for training
smp_size <- floor(0.6 * nrow(df_all_combined))
set.seed(18) ## set the seed for a reproducible split
split.index <- sample(seq_len(nrow(df_all_combined)), size = smp_size,replace=F)
train_xgb <- df_all_combined[split.index, ]
test_xgb <- df_all_combined[-split.index, ]
#now have the data split into training and testing ~60% training and 40% testing
#==================================================================
#ensuring unique patients in train and test
#==================================================================
# patient ids that occur in both train and test; move half of those patients
# entirely into train and the other half entirely into test
vk <- unique(test_xgb$VisitLink)[unique(test_xgb$VisitLink) %in% unique(train_xgb$VisitLink)]
vk1 <- vk[1:round(length(vk)/2)]
vk2 <- vk[(round(length(vk)/2)+1) : length(vk)]
torm4mtest_xgb <- which(test_xgb$VisitLink %in% vk1) # patient ids to remove from test
train_xgb <- rbind(train_xgb,test_xgb[torm4mtest_xgb,]) # append rows with these patients to train
test_xgb <- test_xgb[-torm4mtest_xgb,] # remove these patient rows from test
torm4mtrain_xgb <- which(train_xgb$VisitLink %in% vk2) # patient ids to remove from train
test_xgb <- rbind(test_xgb,train_xgb[torm4mtrain_xgb,]) # append rows with these patients to test
train_xgb <- train_xgb[-torm4mtrain_xgb,] # remove these patient rows from train
#-----------------------------------------
# remove identifier columns that are uninformative / could cause overfitting
train_xgb$myrownum <- NULL
test_xgb$myrownum <- NULL
train_xgb$KEY <- NULL
test_xgb$KEY <- NULL
train_xgb$VisitLink <- NULL
test_xgb$VisitLink <- NULL
train_xgb$DSHOSPID <- NULL
test_xgb$DSHOSPID <- NULL
#---------------------------------------------
labels_xgb <- train_xgb$LOS
# remove the LOS target column from the training feature matrix
train_xgb_2 <- train_xgb[-grep('LOS',names(train_xgb))]
#----------------------------------------------------------------
#Tune and Run the model
# guard the rm() so the first iteration does not emit a warning
if (exists('xgb_LOSmod')) rm(xgb_LOSmod)
set.seed(18)
xgb_LOSmod <- xgboost(data = data.matrix(train_xgb_2), label = labels_xgb,
                      booster = "gbtree",
                      eta = 0.1,
                      gamma = 0,
                      max_depth = 3,
                      nround=25,
                      subsample = 0.5,
                      colsample_bytree = 0.5,
                      eval_metric = "rmse",
                      objective = "reg:linear"
)
# save model to binary local file
xgb.save(xgb_LOSmod, model_name) # should return TRUE
# To load the binary model back into R: xgb.load(model_name)
# check how the model does on test data - save in res_test
# BUGFIX: drop the LOS target column before predicting.  The model was fit
# on train_xgb_2 (LOS removed) and xgboost matches features by position, so
# predicting on a matrix that still contains LOS misaligns every feature
# column (and leaks the target into the features).
test_xgb_2 <- test_xgb[-grep('LOS',names(test_xgb))]
y_pred <- predict(xgb_LOSmod, data.matrix(test_xgb_2))
Target <- test_xgb$LOS
res_test_xgboost <- regression_res(Target,y_pred)
tst <- data.frame(model_name=as.character(as.data.frame(strsplit(model_name,split = '/xgboostLOSmodelsfolder/'))[2,1]))
tst <- cbind(tst,res_test_xgboost)
res_test <- rbind(res_test, tst)
# model results - training data - save in res_train
# BUGFIX: likewise, predict on the same LOS-free feature matrix the model
# was trained on (train_xgb_2), not on train_xgb which still contains LOS.
y_predTr <- predict(xgb_LOSmod, data.matrix(train_xgb_2))
TargetTr <- train_xgb$LOS
res_train_xgboost <- regression_res(TargetTr,y_predTr)
trn <- data.frame(model_name=as.character(as.data.frame(strsplit(model_name,split = '/xgboostLOSmodelsfolder/'))[2,1]))
trn <- cbind(trn,res_train_xgboost)
res_train <- rbind(res_train, trn)
}
```
[Back To Top](#BackToTop)
## Model Performance <a id="results"></a>
```
# Display the accumulated metrics for all trained models.
cat('Performance on training data\n')
res_train
cat('Performance on test data\n')
res_test
```
| github_jupyter |
<a href="https://colab.research.google.com/github/EmilSkaaning/DeepStruc/blob/main/DeepStruc.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# DeepStruc
**Github:** https://github.com/EmilSkaaning/DeepStruc
**Paper:** DeepStruc: Towards structure solution from pair distribution function data using deep generative models
**Questions:** andy@chem.ku.dk or etsk@chem.ku.dk
Welcome to DeepStruc, a Deep Generative Model (DGM) that learns the relation between a PDF and its atomic structure and can thereby solve a structure from a PDF!
This script guides you through a simple example of how to use DeepStruc to predict a structure on a given PDF.
Afterwards, you can upload a PDF and use DeepStruc to predict the structure.
# First install requirements for DeepStruc (this step takes 5 - 10 minutes)
```
%%capture
# Clone the DeepStruc repository and install its pinned, CPU-only
# dependencies (torch 1.10.1+cpu with the matching torch-geometric /
# torch-scatter / torch-sparse wheels).
!git clone https://github.com/EmilSkaaning/DeepStruc
!pip3 install torch==1.10.1+cpu torchvision==0.11.2+cpu torchaudio==0.10.1+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html
!pip install pytorch_lightning torch-geometric==1.7.2 torch-scatter
!pip3 install torch-sparse -f https://data.pyg.org/whl/torch-1.10.1+cpu.html
!pip install matplotlib==3.4.3 ase nglview ipywidgets
from google.colab import output, files
from ase.io import read
from ase.visualize import view
from IPython.display import Image
import shutil
import os
# run the rest of the notebook from inside the cloned repository
os.chdir("DeepStruc")
```
# Example of how to use DeepStruc on a simulated dataset
We here provide an example of how to use DeepStruc on simulated data. The script can both take a single PDF or a directory of PDFs as input.
Be aware that the PDF(s) will be made to have an r-range between 2 - 30 Å in steps of 0.01 Å (2800 points PDF). Any data outside this range will not be used. Check the dataformat of our datasets (often made with PDFGui) if in doubt.
```
# Input PDF(s) and sampling settings passed to predict.py below.
# NOTE(review): this is an absolute path ("/data/..."), but the notebook has
# already chdir'ed into the cloned repo — the repo-relative
# "data/PDFs_simulated/..." may be what is intended; confirm against how
# predict.py resolves the '-d' argument.
PDFFile = "/data/PDFs_simulated/FCC_h_3_k_6_l_7.gr" # Path to PDF(s).
Nstructures = 10 # Number of samples/structures generated for each unique PDF
structure = 0 # Which of the Nstructures to visualize. (Goes from 0 to Nstructures - 1)
sigma = 3 # Sample to '-s' sigma in the normal distribution
plot = True # Plots sampled structures on top of DeepStruc training data.
```
**Outcomment the following line to use DeepStruc on experimental PDF(s) from your local computer.** <br>
Some browsers do not support this upload option. Use Google Chrome, or simply upload the file manually via the file menu on the left into the 'DeepStruc' folder.
```
#PDFFile = list(files.upload())[0] # Upload PDF(s) from local computer
```
## Predict with DeepStruc
```
# Use DeepStruc on the uploaded PDF(s)
!python predict.py -d $PDFFile -n $Nstructures -s $sigma -p $plot -i $structure
# Get the latest results: predict.py writes its output into a new
# subdirectory, so the most recently modified one holds this run's output
all_subdirs = [d for d in os.listdir('.') if os.path.isdir(d)]
latest_subdir = max(all_subdirs, key=os.path.getmtime)
# Show the input PDF plot (raw and normalised); the latent-space plot
# (ls.png) is displayed in a later cell
Image(latest_subdir + '/PDFs.png', width = 480, height = 360)
```
**The raw input PDF and the normalised PDF.** The raw input PDF is normalised to have the highest peak at G(r) = 1 and to be in between r = 2 Å and 30 Å.
## Visualization of the two-dimensional latent space (compressed feature space of the structures)
```
# Display the two-dimensional latent-space figure written by predict.py
Image(latest_subdir + '/ls.png', width = 900, height = 360)
```
**The two-dimensional latent space with location of the input.** The size of the points relates to the size of the embedded structure. Each point is coloured after its structure type, FCC (light blue), octahedral (dark grey), decahedral (orange), BCC (green), icosahedral (dark blue), HCP (pink) and SC (red). Each point in the latent space corresponds to a structure based on its simulated PDF. Test data point are plotted on top of the training and validation data, which is made semi-transparent. The latent space locations of the reconstructed structures from the input are shown with black markers and the specific reconstructed structure that is shown in the next box is shown with a black and white marker.
## Visualization of a reconstructed structure
```
# Get folder of structures
subfolder = [f.path for f in os.scandir(latest_subdir) if f.is_dir()]
# Define which structure to plot and plot it
output.enable_custom_widget_manager()
# read the (structure)-th reconstructed structure file from the first
# output subfolder and show it in the interactive NGL viewer
view(read(subfolder[0] + "/" + os.listdir(subfolder[0])[structure]) , viewer='ngl')
```
**The reconstructed structure from the input.** The reconstructed structure is indicated at the latent space above using a black and white marker.
**Be aware** that DeepStruc is only designed to predict mono-metallic nanoparticles (MMNPs) of up to 200 atoms. If the PDF is not from an MMNP, it is highly likely that DeepStruc will not output a meaningful structure.
## Download the latest results
```
# Zip the latest results directory and download it to the local machine
shutil.make_archive(latest_subdir, 'zip', latest_subdir)
files.download(latest_subdir + ".zip")
```
# Cite
If you use DeepStruc, please consider citing our paper. Thanks in advance!
```
@article{kjær2022DeepStruc,
title={DeepStruc: Towards structure solution from pair distribution function data using deep generative models},
author={Emil T. S. Kjær, Andy S. Anker, Marcus N. Weng, Simon J. L. Billinge, Raghavendra Selvan, Kirsten M. Ø. Jensen},
year={2022}}
```
# LICENSE
This project is licensed under the Apache License Version 2.0, January 2004 - see the LICENSE file at https://github.com/EmilSkaaning/DeepStruc/blob/main/LICENSE.md for details.
| github_jupyter |
# chapter 1 introducing pandas objects
有三种基本的数据结构:Series,DataFrame,Index
## Series
```
import pandas as pd
import numpy as np
data = pd.Series([0.25,0.5,0.75,2.0])
#we can access with the values and index attributes
# .values is a NumPy array
data.values
# .index is a pd.Index
data.index
#data[1]
#data[1:3]
# More flexible than a NumPy array: the index may be strings
data = pd.Series([0.25,0.5,0.75,1.0],index=['a','b','c','d'])
# The index also does not have to be contiguous
data = pd.Series([0.25,0.5,0.75,1.0],index=[2,3,5,7])
#Series as specialized dictionary
# A Series can also be treated as a dictionary, but is more flexible and efficient
population_dict = {'California': 38332521,
                   'Texas': 26448193,
                   'New York': 19651127,
                   'Florida': 19552860,
                   'Illinois': 12882135}
population = pd.Series(population_dict)
#Unlike a dictionary, though, the Series also supports array-style operations such as slicing
population['California':'Illinois']
# entries are ordered by the dictionary keys (older pandas sorts them;
# newer versions preserve insertion order — verify for your version)
population
```
## DataFrame
```
#DataFrame as a generalized NumPy array
area_dict = {'California': 423967, 'Texas': 695662, 'New York': 141297,
             'Florida': 170312, 'Illinois': 149995}
area = pd.Series(area_dict)
states = pd.DataFrame({'population':population,'area':area})
#DataFrame has an index(common with Series) and columns(values in Series) attributes
states.index,states.columns
#DataFrame as specialized dictionary
#construct dataframe
#1. From a single Series object
pd.DataFrame(population,columns=['population'])
#2. From a list of dicts
data = [{'a':i,'b':i*2} for i in range(3)]# list comprehension producing dicts
pd.DataFrame(data)
# the dicts need not have identical keys; missing entries are filled with NaN
pd.DataFrame([{'a': 1, 'b': 2}, {'b': 3, 'c': 4}])
#3. From a dictionary of Series objects
#just like before
#4. From a two-dimensional NumPy array
pd.DataFrame(np.random.rand(3, 2),
             columns=['foo', 'bar'],
             index=['a', 'b', 'c'])
```
## Index
```
ind = pd.Index([2,3,5,6,7])
ind
#Index as immutable array
ind[::2]
#ind[1]=0 #will be wrong,cause it's immutable
# Index as ordered set: pandas relies on set algebra for many alignment
# operations, and Index supports them.  Use the explicit set methods —
# the &, | and ^ operators on Index were deprecated in pandas 1.2 and now
# perform elementwise logical operations rather than set operations.
indA = pd.Index([1, 3, 5, 7, 9])
indB = pd.Index([1, 2, 5, 7, 9])
indA.intersection(indB), indA.union(indB), indA.symmetric_difference(indB)
```
# Data Indexing and Selection
```
#indexing(arr[2,1]),slicing(arr[:,1:5]),masking(arr[arr>0]),fancy indexing(arr[0,[1,5]]),combinations thereof(arr[:,[1,5])
# For a Series, remember it behaves like both a NumPy array and a dictionary;
# the operations above therefore apply to Series as well.
#dictionary like
data = pd.Series([0.25, 0.5, 0.75, 1.0],index=['a', 'b', 'c', 'd'])
data['b'],'a' in data,data.keys(),list(data.items())
# like a dictionary, assigning to a new key extends the Series
data['e'] = 1.25
#array like
#slicing by implicit index
```
| github_jupyter |
# Project: Identify Customer Segments
In this project, I will apply unsupervised learning techniques to identify segments of the population that form the core customer base for a mail-order sales company in Germany. These segments can then be used to direct marketing campaigns towards audiences that will have the highest expected rate of returns. The data that I will use has been provided by our partners at Bertelsmann Arvato Analytics, and represents a real-life data science task. In real-life tasks, there may be many valid ways to approach an analysis task. One of the most important things you can do is clearly document your approach so that other scientists can understand the decisions you've made.
```
# importing libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# magic word for producing visualizations in notebook
%matplotlib inline
```
### Step 0: Load the Data
There are four files associated with this project (not including this one):
- `Udacity_AZDIAS_Subset.csv`: Demographics data for the general population of Germany; 891211 persons (rows) x 85 features (columns).
- `Udacity_CUSTOMERS_Subset.csv`: Demographics data for customers of a mail-order company; 191652 persons (rows) x 85 features (columns).
- `Data_Dictionary.md`: Detailed information file about the features in the provided datasets.
- `AZDIAS_Feature_Summary.csv`: Summary of feature attributes for demographics data; 85 features (rows) x 4 columns
Each row of the demographics files represents a single person, but also includes information outside of individuals, including information about their household, building, and neighborhood. I will use this information to cluster the general population into groups with similar demographic properties. Then, I will see how the people in the customers dataset fit into those created clusters. The hope here is that certain clusters are over-represented in the customers data, as compared to the general population; those over-represented clusters will be assumed to be part of the core userbase. This information can then be used for further applications, such as targeting for a marketing campaign.
To start off with, I will load in the demographics data for the general population into a pandas DataFrame, and do the same for the feature attributes summary. Note for all of the `.csv` data files in this project: they're semicolon (`;`) delimited, so I'll need an additional argument in your [`read_csv()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html) call to read in the data properly. Also, considering the size of the main dataset, it may take some time for it to load completely.
Once the dataset is loaded, it's recommended that you take a little bit of time just browsing the general structure of the dataset and feature summary file. I'll be getting deep into the innards of the cleaning in the first major step of the project, so gaining some general familiarity can help you get your bearings.
```
# Load in the general demographics data.
# (all project CSVs are semicolon-delimited)
azdias = pd.read_csv('Udacity_AZDIAS_Subset.csv', sep = ';')
# Load in the feature summary file.
feat_info = pd.read_csv('AZDIAS_Feature_Summary.csv', sep = ';')
# quick structural overview of both tables
azdias.shape
azdias.head()
azdias.describe()
feat_info.shape
feat_info.head()
feat_info.describe()
```
## Step 1: Preprocessing
### Step 1.1: Assess Missing Data
The feature summary file contains a summary of properties for each demographics data column. I will use this file to help you make cleaning decisions during this stage of the project. First of all, I should assess the demographics data in terms of missing data.
#### Step 1.1.1: Convert Missing Value Codes to NaNs
The fourth column of the feature attributes summary (loaded in above as `feat_info`) documents the codes from the data dictionary that indicate missing or unknown data. While the file encodes this as a list (e.g. `[-1,0]`), this will get read in as a string object. I'll need to do a little bit of parsing to make use of it to identify and clean the data. I will convert data that matches a 'missing' or 'unknown' value code into a numpy NaN value.
```
azdias.info()
# Identifying missing or unknown data values and converting them to NaNs.
# count NaNs already present in the raw file, for later comparison
naturally_missing = azdias.isnull().sum().sum()
naturally_missing
feat_info['missing_or_unknown'].unique()
# show each column's dtype (object columns need string comparison below)
for i in range(azdias.shape[1]):
    print(azdias[azdias.columns[i]].dtype)
%%time
# Parse each feature's missing-value code list (a string like "[-1,0]") and
# replace those codes with np.nan in the matching azdias column.
# NOTE(review): this pairs feat_info row i with azdias column i positionally —
# it assumes both files list the features in the same order; verify.
for i in range(feat_info.shape[0]):
    cols=azdias.columns[i]
    var = feat_info['missing_or_unknown'][i]
    var_list = var.replace('[','').replace(']','').split(',')
    if var_list != ['']:
        for j in var_list:
            if azdias[cols].dtype =='object':
                azdias.loc[azdias[cols]==j,cols]=np.nan
            else:
                # numeric columns: compare against the integer code
                j=int(j)
                azdias.loc[azdias[cols]==j,cols]=np.nan
# total NaNs after conversion, and how many were added by the codes
coded_missing = azdias.isnull().sum().sum()
coded_missing
coded_missing - naturally_missing
```
#### Step 1.1.2: Assess Missing Data in Each Column
How much missing data is present in each column? There are a few columns that are outliers in terms of the proportion of values that are missing. I will want to use matplotlib's [`hist()`](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.hist.html) function to visualize the distribution of missing value counts to find these columns. I will identify and document these columns. While some of these columns might have justifications for keeping or re-encoding the data, for this project I should just remove them from the dataframe.
For the remaining features, are there any patterns in which columns have, or share, missing data?
```
# Performing an assessment of how much missing data there is in each column of the
# dataset (as a percentage of rows).
missing_data_columns = (azdias.isnull().sum()/azdias.shape[0])*100
missing_data_columns
# Investigating patterns in the amount of missing data in each column.
plt.hist(missing_data_columns, bins = 101);
plt.axvline(x=20, color='red');  # chosen outlier threshold: >20% missing
# Removing the outlier columns from the dataset.
# Vectorised boolean indexing replaces the former manual index loop and
# yields the same column names in the same order.
outliers_columns = missing_data_columns[missing_data_columns > 20].index.tolist()
print(outliers_columns)
azdias = azdias.drop(outliers_columns, axis=1)
azdias.shape[1]
```
#### Discussion 1.1.2: Assess Missing Data in Each Column
I removed all columns to the right of the red vertical line in the histogram of the ratio of missing data per column. The columns removed were missing more than 20% of data.
#### Step 1.1.3: Assess Missing Data in Each Row
Now, I'll perform a similar assessment for the rows of the dataset. How much data is missing in each row? I will dividing the data into two subsets: one for data points that are above some threshold for missing values, and a second subset for points below that threshold.
In order to know what to do with the outlier rows, we should see if the distribution of data values on columns that are not missing data (or are missing very little data) are similar or different between the two groups. I will select at least five of these columns and compare the distribution of values.
If the distributions of non-missing features look similar between the data with many missing values and the data with few or no missing values, then we could argue that simply dropping those points from the analysis won't present a major issue. On the other hand, if the data with many missing values looks very different from the data with few or no missing values, then we should make a note on those data as special. We'll revisit these data later on.
```
# How much data is missing in each row of the dataset?
missing_data_rows = azdias.isnull().sum(axis=1)
plt.hist(missing_data_rows, bins=50);
plt.axvline(x = 10, color = 'red');  # chosen per-row threshold: 10 missing values
missing_data_rows[:5]
len(missing_data_rows) == azdias.shape[0]  # sanity check: one count per row
# attach the per-row count as a helper column so rows can be filtered on it
azdias['missing_data_rows'] = missing_data_rows
azdias.head()
# Writing code to divide the data into two subsets based on the number of missing
# values in each row.
azdias_1 = azdias[azdias['missing_data_rows'] < 10]   # few missing values
azdias_2 = azdias[azdias['missing_data_rows'] >= 10]  # many missing values
print(len(azdias_1))
print(len(azdias_2))
print(len(azdias_1)-len(azdias_2))
# Comparing the distribution of values for at least five columns where there are
# no or few missing values, between the two subsets.
def compare(num_col=5, df_few=None, df_many=None):
    """Plot side-by-side histograms of the first ``num_col`` columns of two
    dataframes.

    Parameters
    ----------
    num_col : int
        Number of leading columns to compare.
    df_few, df_many : pandas.DataFrame, optional
        The two subsets to compare.  They default to the module-level
        ``azdias_1`` (rows with few missing values) and ``azdias_2``
        (rows with many missing values), so existing ``compare(5)`` calls
        behave exactly as before.
    """
    if df_few is None:
        df_few = azdias_1
    if df_many is None:
        df_many = azdias_2
    for name in df_few.columns[:num_col]:
        figure, axes = plt.subplots(nrows=1, ncols=2)
        # left panel: subset with few missing values
        df_few[name].hist(ax=axes[0])
        axes[0].set_title(name+' azdias_1')
        axes[0].set_xlabel('Value')
        axes[0].set_ylabel('Count')
        # right panel: subset with many missing values
        df_many[name].hist(ax=axes[1])
        axes[1].set_title(name+' azdias_2')
        axes[1].set_xlabel('Value')
        axes[1].set_ylabel('Count')
        figure.tight_layout(pad=0.1)
compare(5)
```
#### Discussion 1.1.3: Assess Missing Data in Each Row
For 3 of the 5 plots we see the datasets agree on the most frequent value. For the dataset with lots of missing values we can see from the last two plots that one value is much more common than the others. The dataset with fewer missing values gives us a better range of values and generally agrees with the conclusions drawn from the dataset with lots of missing values. For these reasons the analysis should continue with the dataset with few missing values.
### Step 1.2: Select and Re-Encode Features
Checking for missing data isn't the only way in which you can prepare a dataset for analysis. Since the unsupervised learning techniques to be used will only work on data that is encoded numerically, I need to make a few encoding changes or additional assumptions to be able to make progress. In addition, while almost all of the values in the dataset are encoded using numbers, not all of them represent numeric values. Check the third column of the feature summary (`feat_info`) for a summary of types of measurement.
- For numeric and interval data, these features can be kept without changes.
- Most of the variables in the dataset are ordinal in nature. While ordinal values may technically be non-linear in spacing, make the simplifying assumption that the ordinal variables can be treated as being interval in nature (that is, kept without any changes).
- Special handling may be necessary for the remaining two variable types: categorical, and 'mixed'.
In the first two parts of this sub-step, I will perform an investigation of the categorical and mixed-type features and make a decision on each of them, whether I will keep, drop, or re-encode each. Then, in the last part, you will create a new data frame with only the selected and engineered columns.
Data wrangling is often the trickiest part of the data analysis process, and there's a lot of it to be done here.
```
# How many features are there of each data type?
# ('type' distinguishes ordinal, categorical, numeric, interval and mixed
#  features — the counts guide the re-encoding strategy below)
feat_info.type.value_counts()
```
#### Step 1.2.1: Re-Encode Categorical Features
For categorical data, you would ordinarily need to encode the levels as dummy variables. Depending on the number of categories, perform one of the following:
- For binary (two-level) categoricals that take numeric values, you can keep them without needing to do anything.
- There is one binary variable that takes on non-numeric values. For this one, you need to re-encode the values as numbers or create a dummy variable.
- For multi-level categoricals (three or more values), you can choose to encode the values using multiple dummy variables (e.g. via [OneHotEncoder](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html)), or (to keep things straightforward) just drop them from the analysis.
```
# Assess categorical variables
feat_info[feat_info['type'] == 'categorical']
cat_col = feat_info[feat_info['type'] == 'categorical'].attribute
# Partition the categorical features still present in azdias_1 into
# binary and multi-level groups.  The cutoff is <= 3 unique values,
# presumably because NaN counts as an extra "value" in columns with
# missing data — confirm against the column listings printed below.
binary_col = []
mixed_col = []
for name in cat_col:
    if name in azdias_1.columns:
        print(azdias_1[name].unique())
        if len(azdias_1[name].unique()) <= 3:
            binary_col.append(name)
        else:
            mixed_col.append(name)
print(binary_col)
print(mixed_col)
# Re-encoding categorical variable(s) to be kept in the analysis.
# OST_WEST_KZ is the one non-numeric binary: map O -> 0.0, W -> 1.0
azdias_1['OST_WEST_KZ'] = azdias_1['OST_WEST_KZ'].replace({'O': 0.0,'W': 1.0});
azdias_1['OST_WEST_KZ'].unique()
# One-hot encode each multi-level categorical and drop the original column
for name in mixed_col:
    azdias_1= pd.concat([azdias_1.drop(name, axis=1),
                         pd.get_dummies(data = azdias_1[name], columns = name, prefix = name)], axis=1)
```
#### Discussion 1.2.1: Re-Encode Categorical Features
I re-encoded the OST_WEST_KZ binary column for integer values 0 and 1. For the multi level columns I added dummy variables. I decided to keep the multilevel variables because we haven't used any machine learning yet and I'm not sure on their importance.
#### Step 1.2.2: Engineer Mixed-Type Features
There are a handful of features that are marked as "mixed" in the feature summary that require special treatment in order to be included in the analysis. There are two in particular that deserve attention:
- "PRAEGENDE_JUGENDJAHRE" combines information on three dimensions: generation by decade, movement (mainstream vs. avantgarde), and nation (east vs. west). While there aren't enough levels to disentangle east from west, you should create two new variables to capture the other two dimensions: an interval-type variable for decade, and a binary variable for movement.
- "CAMEO_INTL_2015" combines information on two axes: wealth and life stage. Break up the two-digit codes by their 'tens'-place and 'ones'-place digits into two new ordinal variables (which, for the purposes of this project, is equivalent to just treating them as their raw numeric values).
Be sure to check `Data_Dictionary.md` for the details.
```
# Investigating "PRAEGENDE_JUGENDJAHRE" and engineering two new variables.
azdias_1['PRAEGENDE_JUGENDJAHRE'].unique()
azdias_1['PRAEGENDE_JUGENDJAHRE'].value_counts()
azdias_1['MAINTSTREAM'] = azdias_1['PRAEGENDE_JUGENDJAHRE'].isin([1,3,5,8,10,12,14]).astype(int)
# I couldve seperated it into one variable but would have used one hot encoding anyways
azdias_1['EAST'] = azdias_1['PRAEGENDE_JUGENDJAHRE'].isin([7,12,13]).astype(int)
azdias_1['WEST'] = azdias_1['PRAEGENDE_JUGENDJAHRE'].isin([6,10,11]).astype(int)
azdias_1['EAST + WEST'] = azdias_1['PRAEGENDE_JUGENDJAHRE'].isin([1,2,3,4,5,8,9,14,15]).astype(int)
# Investigating "CAMEO_INTL_2015" and engineering two new variables.
azdias_1['CAMEO_INTL_2015'].unique()
azdias_1['CAMEO_INTL_2015'].value_counts()
wealth_encoding = {'11': 1.0,'12': 1.0,'13': 1.0,'14': 1.0,'15': 1.0,'21': 2.0,'22': 2.0,'23': 2.0,
'24': 2.0,'25': 2.0,'31': 3.0,'32': 3.0,'33': 3.0,'34': 3.0,'35': 3.0,'41': 4.0,
'42': 4.0,'43': 4.0,'44': 4.0,'45': 4.0,'51': 5.0,'52': 5.0,'53': 5.0,'54': 5.0,
'55': 5.0}
azdias_1['WEALTH'] = azdias_1['CAMEO_INTL_2015'].replace(wealth_encoding)
azdias_1= pd.concat([azdias_1.drop('WEALTH', axis=1),
pd.get_dummies(data = azdias_1['WEALTH'], columns = 'WEALTH', prefix = 'WEALTH')], axis=1)
life_style_encoding = {'11': 1.0,'12': 2.0,'13': 3.0,'14': 4.0,'15': 5.0,'21': 1.0,'22': 2.0,'23': 3.0,
'24': 4.0,'25': 5.0,'31': 1.0,'32': 2.0,'33': 3.0,'34': 4.0, '35': 5.0,'41': 1.0,
'42': 2.0,'43': 3.0,'44': 4.0,'45': 5.0,'51': 1.0,'52': 2.0,'53': 3.0,'54': 4.0,
'55': 5.0}
azdias_1['LIFE_STYLE_TOPOLOGY'] = azdias_1['CAMEO_INTL_2015'].replace(life_style_encoding)
azdias_1= pd.concat([azdias_1.drop('LIFE_STYLE_TOPOLOGY', axis=1),
pd.get_dummies(data = azdias_1['LIFE_STYLE_TOPOLOGY'],
columns = 'LIFE_STYLE_TOPOLOGY', prefix = 'LIFE_STYLE_TOPOLOGY')], axis=1)
```
#### Discussion 1.2.2: Engineer Mixed-Type Features
PRAEGENDE_JUGENDJAHRE had 15 unique values, I split them based on Data_Dictionary.md creating a new column for the variable 'MAINSTREAM' which describes the dominant movement of a person's life. It is encoded using a 1 or 0 for mainstream or avantgarde respectively. CAMEO_INTL_2015 had 15 unique values, I split them based on Data_Dictionary.md creating a new column for the variable 'WEALTH' which encodes how wealthy the person is and used dummy variables for each level. I also created 'LIFE_STYLE_TOPOLOGY' from CAMEO_INTL_2015 I split them based on Data_Dictionary.md creating a new column which describes the type of family, I used dummy variables for each level.
#### Step 1.2.3: Complete Feature Selection
In order to finish this step up, I need to make sure that my data frame now only has the columns that I want to keep. To summarize, the dataframe should consist of the following:
- All numeric, interval, and ordinal type columns from the original dataset.
- Binary categorical features (all numerically-encoded).
- Engineered features from other multi-level categorical features and mixed features.
I need to make sure that for any new columns that I have engineered, that I've excluded the original columns from the final dataset. Otherwise, their values will interfere with the analysis later on the project. For example, I should not keep "PRAEGENDE_JUGENDJAHRE", since its values won't be useful for the algorithm: only the values derived from it in the engineered features I created should be retained.
```
# Re-apply the full cleaning pipeline to the complete azdias dataset in one
# pass (repeating the row filtering and re-encoding steps developed above).
missing_data_rows = azdias.isnull().sum(axis=1)
azdias['missing_data_rows'] = missing_data_rows
# keep only rows with fewer than 10 missing values
azdias = azdias[azdias['missing_data_rows'] < 10]
# split the surviving categorical features into binary vs. multi-level
cat_col = feat_info[feat_info['type'] == 'categorical'].attribute
binary_col = []
mixed_col = []
for name in cat_col:
    if name in azdias.columns:
        if len(azdias[name].unique()) <= 3:
            binary_col.append(name)
        else:
            mixed_col.append(name)
# re-encode the non-numeric binary
azdias['OST_WEST_KZ'] = azdias['OST_WEST_KZ'].replace({'O': 0.0,'W': 1.0});
azdias['OST_WEST_KZ'].unique()
# engineered features from PRAEGENDE_JUGENDJAHRE (see earlier discussion)
azdias['MAINTSTREAM'] = azdias['PRAEGENDE_JUGENDJAHRE'].isin([1,3,5,8,10,12,14]).astype(int)
azdias['EAST'] = azdias['PRAEGENDE_JUGENDJAHRE'].isin([7,12,13]).astype(int)
azdias['WEST'] = azdias['PRAEGENDE_JUGENDJAHRE'].isin([6,10,11]).astype(int)
azdias['EAST + WEST'] = azdias['PRAEGENDE_JUGENDJAHRE'].isin([1,2,3,4,5,8,9,14,15]).astype(int)
# engineered features from CAMEO_INTL_2015: tens digit -> wealth
wealth_encoding = {'11': 1.0,'12': 1.0,'13': 1.0,'14': 1.0,'15': 1.0,'21': 2.0,'22': 2.0,'23': 2.0,
                   '24': 2.0,'25': 2.0,'31': 3.0,'32': 3.0,'33': 3.0,'34': 3.0,'35': 3.0,'41': 4.0,
                   '42': 4.0,'43': 4.0,'44': 4.0,'45': 4.0,'51': 5.0,'52': 5.0,'53': 5.0,'54': 5.0,
                   '55': 5.0}
azdias['WEALTH'] = azdias['CAMEO_INTL_2015'].replace(wealth_encoding)
# ones digit -> life stage / family type
life_style_encoding = {'11': 1.0,'12': 2.0,'13': 3.0,'14': 4.0,'15': 5.0,'21': 1.0,'22': 2.0,'23': 3.0,
                       '24': 4.0,'25': 5.0,'31': 1.0,'32': 2.0,'33': 3.0,'34': 4.0, '35': 5.0,'41': 1.0,
                       '42': 2.0,'43': 3.0,'44': 4.0,'45': 5.0,'51': 1.0,'52': 2.0,'53': 3.0,'54': 4.0,
                       '55': 5.0}
azdias['LIFE_STYLE_TOPOLOGY'] = azdias['CAMEO_INTL_2015'].replace(life_style_encoding)
# Do whatever you need to in order to ensure that the dataframe only contains
# the columns that should be passed to the algorithm functions:
# one-hot encode every multi-level categorical and the two engineered
# features, dropping the original columns as we go.
for name in mixed_col:
    azdias = pd.concat([azdias.drop(name, axis=1),
                        pd.get_dummies(data = azdias[name], columns = name, prefix = name)], axis=1)
azdias = pd.concat([azdias.drop('WEALTH', axis=1),
                    pd.get_dummies(data = azdias['WEALTH'], columns = 'WEALTH', prefix = 'WEALTH')], axis=1)
azdias = pd.concat([azdias.drop('LIFE_STYLE_TOPOLOGY', axis=1),
                    pd.get_dummies(data = azdias['LIFE_STYLE_TOPOLOGY'],
                                   columns = 'LIFE_STYLE_TOPOLOGY', prefix = 'LIFE_STYLE_TOPOLOGY')], axis=1)
```
### Step 1.3: Create a Cleaning Function
Even though we've finished cleaning up the general population demographics data, it's important to look ahead to the future and realize that you'll need to perform the same cleaning steps on the customer demographics data. In this substep, I will complete the function below to execute the main feature selection, encoding, and re-engineering steps I performed above. Then, when it comes to looking at the customer data in Step 3, I can just run this function on that DataFrame to get the trimmed dataset in a single step.
```
azdias = pd.read_csv('Udacity_AZDIAS_Subset.csv', sep = ';')
def clean_data(df):
    """
    Perform feature trimming, re-encoding, and engineering for demographics
    data
    INPUT: Demographics DataFrame
    OUTPUT: Trimmed and cleaned demographics DataFrame

    Steps (mirroring the exploratory cells above):
      1. replace coded missing/unknown values with NaN,
      2. drop columns with >20% missing and rows with >=10 missing values,
      3. one-hot encode multi-level categoricals, re-encode OST_WEST_KZ,
         and engineer features from PRAEGENDE_JUGENDJAHRE / CAMEO_INTL_2015.

    NOTE(review): step 1 mutates the frame passed in (NaN assignment happens
    in place before `df` is rebound). The helper column `missing_data_rows`
    and the source columns PRAEGENDE_JUGENDJAHRE / CAMEO_INTL_2015 are never
    dropped, so they remain in the returned frame.
    """
    feat_info = pd.read_csv('AZDIAS_Feature_Summary.csv', sep = ';')
    # Convert missing value codes into NaNs, ...
    # NOTE(review): assumes feat_info row i describes df.columns[i]
    # (purely positional alignment) -- confirm the two files stay in sync.
    for i in range(feat_info.shape[0]):
        cols = df.columns[i]
        var = feat_info['missing_or_unknown'][i]
        # missing_or_unknown is a string like "[-1,0]" -> list of code strings
        var_list = var.replace('[','').replace(']','').split(',')
        if var_list != ['']:
            for j in var_list:
                if df[cols].dtype =='object':
                    df.loc[df[cols]==j,cols]=np.nan
                else:
                    j=int(j)
                    df.loc[df[cols]==j,cols]=np.nan
    # remove selected columns and rows, ...
    # Percentage of missing values per column; drop any column above 20%.
    missing_data_columns = (df.isnull().sum()/df.shape[0])*100
    outliers_columns = []
    for i in range(df.shape[1]):
        if missing_data_columns[i] > 20:
            outliers_columns.append(df.columns[i])
    df = df.drop(outliers_columns, axis=1)
    # Keep only rows with fewer than 10 missing values.
    missing_data_rows = df.isnull().sum(axis=1)
    df['missing_data_rows'] = missing_data_rows
    df = df[df['missing_data_rows'] < 10]
    # select, re-encode, and engineer column values.
    cat_col = feat_info[feat_info['type'] == 'categorical'].attribute
    binary_col = []
    mixed_col = []
    # Split categoricals into binary (<=3 uniques, NaN included) vs multi-level.
    for name in cat_col:
        if name in df.columns:
            if len(df[name].unique()) <= 3:
                binary_col.append(name)
            else:
                mixed_col.append(name)
    # One-hot encode the multi-level categoricals.
    for name in mixed_col:
        df = pd.concat([df.drop(name, axis=1),
                        pd.get_dummies(data = df[name], columns = name, prefix = name)], axis=1)
    # Binary re-encode the East/West flag.
    df['OST_WEST_KZ'] = df['OST_WEST_KZ'].replace({'O': 0.0,'W': 1.0});
    df['OST_WEST_KZ'].unique()
    # Engineered flags from PRAEGENDE_JUGENDJAHRE; code groupings assumed
    # from the data dictionary -- TODO confirm.
    df['MAINTSTREAM'] = df['PRAEGENDE_JUGENDJAHRE'].isin([1,3,5,8,10,12,14]).astype(int)
    df['EAST'] = df['PRAEGENDE_JUGENDJAHRE'].isin([7,12,13]).astype(int)
    df['WEST'] = df['PRAEGENDE_JUGENDJAHRE'].isin([6,10,11]).astype(int)
    df['EAST + WEST'] = df['PRAEGENDE_JUGENDJAHRE'].isin([1,2,3,4,5,8,9,14,15]).astype(int)
    # CAMEO_INTL_2015: tens digit -> wealth scale, ones digit -> life stage.
    wealth_encoding = {'11': 1.0,'12': 1.0,'13': 1.0,'14': 1.0,'15': 1.0,'21': 2.0,'22': 2.0,'23': 2.0,
                       '24': 2.0,'25': 2.0,'31': 3.0,'32': 3.0,'33': 3.0,'34': 3.0,'35': 3.0,'41': 4.0,
                       '42': 4.0,'43': 4.0,'44': 4.0,'45': 4.0,'51': 5.0,'52': 5.0,'53': 5.0,'54': 5.0,
                       '55': 5.0}
    df['WEALTH'] = df['CAMEO_INTL_2015'].replace(wealth_encoding)
    df = pd.concat([df.drop('WEALTH', axis=1),
                    pd.get_dummies(data = df['WEALTH'], columns = 'WEALTH', prefix = 'WEALTH')], axis=1)
    life_style_encoding = {'11': 1.0,'12': 2.0,'13': 3.0,'14': 4.0,'15': 5.0,'21': 1.0,'22': 2.0,'23': 3.0,
                           '24': 4.0,'25': 5.0,'31': 1.0,'32': 2.0,'33': 3.0,'34': 4.0, '35': 5.0,'41': 1.0,
                           '42': 2.0,'43': 3.0,'44': 4.0,'45': 5.0,'51': 1.0,'52': 2.0,'53': 3.0,'54': 4.0,
                           '55': 5.0}
    df['LIFE_STYLE_TOPOLOGY'] = df['CAMEO_INTL_2015'].replace(life_style_encoding)
    df = pd.concat([df.drop('LIFE_STYLE_TOPOLOGY', axis=1),
                    pd.get_dummies(data = df['LIFE_STYLE_TOPOLOGY'],
                                   columns = 'LIFE_STYLE_TOPOLOGY', prefix = 'LIFE_STYLE_TOPOLOGY')], axis=1)
    # Return the cleaned dataframe.
    return df
azdias = clean_data(azdias);
```
## Step 2: Feature Transformation
### Step 2.1: Apply Feature Scaling
Before I apply dimensionality reduction techniques to the data, we need to perform feature scaling so that the principal component vectors are not influenced by the natural differences in scale for features. Starting from this part of the project, you'll want to keep an eye on the [API reference page for sklearn](http://scikit-learn.org/stable/modules/classes.html) to help you navigate to all of the classes and functions that we'll need. In this substep, I'll need to check the following:
- sklearn requires that data not have missing values in order for its estimators to work properly. So, before applying the scaler to our data, we need to make sure that we've cleaned the DataFrame of the remaining missing values. This can be as simple as just removing all data points with missing data, or applying an [Imputer](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.Imputer.html) to replace all missing values. we could also try a more complicated procedure where we temporarily remove missing values in order to compute the scaling parameters before re-introducing those missing values and applying imputation. Think about how much missing data we have and what possible effects each approach might have on our analysis.
- For the actual scaling function, a [StandardScaler](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html) instance is suggested, scaling each feature to mean 0 and standard deviation 1.
- For these classes, we can make use of the `.fit_transform()` method to both fit a procedure to the data as well as apply the transformation to the data at the same time.
```
# Snapshot the column names: the sklearn transformers below return plain
# numpy arrays, so the names are re-attached afterwards.
columns = azdias.columns
azdias.isnull().sum(axis=0)
from sklearn.impute import SimpleImputer
# Impute remaining NaNs with each column's most frequent value
# (mean/median would fail on the non-numeric columns).
imp = SimpleImputer(missing_values=np.nan , strategy='most_frequent')
azdias = imp.fit_transform(azdias)
# Applying feature scaling to the general population demographics data.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
azdias = scaler.fit_transform(azdias)
# Restore DataFrame form with the original column names.
azdias = pd.DataFrame(azdias)
azdias.columns = columns
azdias.head()
# Sanity check: no missing values should remain after imputation.
azdias.isnull().sum().sum()
```
### Discussion 2.1: Apply Feature Scaling
I imputed the missing values using the most frequent count because not all variables were floats or integers which prevented them from being filled using mean or median.
### Step 2.2: Perform Dimensionality Reduction
On our scaled data, we are now ready to apply dimensionality reduction techniques.
- I will use sklearn's [PCA](http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html) class to apply principal component analysis on the data, thus finding the vectors of maximal variance in the data. To start, we should not set any parameters (so all components are computed) or set a number of components that is at least half the number of features (so there's enough features to see the general trend in variability).
- I will check out the ratio of variance explained by each principal component as well as the cumulative variance explained. I will try plotting the cumulative or sequential values using matplotlib's [`plot()`](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.plot.html) function. Based on what I find, I can select a value for the number of transformed features I'll retain for the clustering part of the project.
- Once I've made a choice for the number of components to keep, I will make sure we re-fit a PCA instance to perform the decided-on transformation.
```
# Applying PCA to the data.
from sklearn.decomposition import PCA
# No n_components set: keep every component so the full variance curve
# can be inspected before choosing how many to retain.
pca = PCA()
azdias_pca = pca.fit_transform(azdias)
# Investigating the variance accounted for by each principal component.
def scree_plot(pca):
    '''
    Draw a scree plot for a fitted PCA: per-component explained-variance
    bars plus the cumulative-variance curve, with percentage labels.

    INPUT: pca - a fitted scikit-learn PCA instance
    OUTPUT:
    None (renders a matplotlib figure)
    '''
    ratios = pca.explained_variance_ratio_
    n_comp = len(ratios)
    xs = np.arange(n_comp)
    plt.figure(figsize=(10, 6))
    ax = plt.subplot(111)
    ax.bar(xs, ratios)
    ax.plot(xs, np.cumsum(ratios))
    # Label each bar with its variance share, truncated to 4 characters.
    for idx in range(n_comp):
        label = r"%s%%" % ((str(ratios[idx]*100)[:4]))
        ax.annotate(label, (xs[idx]+0.2, ratios[idx]), va="bottom", ha="center", fontsize=12)
    ax.xaxis.set_tick_params(width=0)
    ax.yaxis.set_tick_params(width=2, length=12)
    ax.set_xlabel("Principal Component")
    ax.set_ylabel("Variance Explained (%)")
    plt.title('Explained Variance Per Principal Component')
scree_plot(pca)
# Re-applying PCA to the data while selecting for number of components to retain.
# 120 components ~= 90% cumulative explained variance (read off the scree plot).
pca = PCA(120)
azdias_pca = pca.fit_transform(azdias)
```
### Discussion 2.2: Perform Dimensionality Reduction
I chose to keep 120 components to achieve a cumulative explained variance of approximately 90% for the next step in my analysis.
### Step 2.3: Interpret Principal Components
Now that we have our transformed principal components, it's a nice idea to check out the weight of each variable on the first few components to see if they can be interpreted in some fashion.
As a reminder, each principal component is a unit vector that points in the direction of highest variance (after accounting for the variance captured by earlier principal components). The further a weight is from zero, the more the principal component is in the direction of the corresponding feature. If two features have large weights of the same sign (both positive or both negative), then increases in one can be expected to be associated with increases in the other. By contrast, features with weights of opposite signs can be expected to show a negative correlation: increases in one variable should be associated with a decrease in the other.
- To investigate the features, we should map each weight to their corresponding feature name, then sort the features according to weight. The most interesting features for each principal component, then, will be those at the beginning and end of the sorted list. Using the data dictionary document to help I can understand these most prominent features, their relationships, and what a positive or negative value on the principal component might indicate.
- We should investigate and interpret feature associations from the first three principal components in this substep. To help facilitate this, I should write a function that you can call at any time to print the sorted list of feature weights, for the *i*-th principal component. This might come in handy in the next step of the project, when we interpret the tendencies of the discovered clusters.
```
# Mapping weights for the first principal component to corresponding feature names
# and then print the linked values, sorted by weight.
def pca_components(component):
    """
    Map the weights of one principal component to feature names.

    INPUT: component - 0-based index into the fitted `pca.components_`
    OUTPUT: one-column DataFrame ('PC'), indexed by feature name and
            sorted by weight, descending.
    Relies on the module-level `pca` (fitted PCA) and `azdias` (the
    scaled DataFrame whose columns fed the PCA).
    """
    # Use a distinct local name: the original bound `pca_components`
    # inside the body, shadowing the function itself.
    weights = pd.DataFrame(pca.components_[component])
    weights.index = azdias.columns
    weights.columns = ['PC']
    weights = weights.sort_values(by=['PC'], axis=0, ascending=False)
    return weights
# Display the sorted weights for the first three PCs (0-indexed).
pca_components(0)
# Mapping weights for the second principal component to corresponding feature names
# and then print the linked values, sorted by weight.
pca_components(1)
# Mapping weights for the third principal component to corresponding feature names
# and then print the linked values, sorted by weight.
pca_components(2)
```
### Discussion 2.3: Interpret Principal Components
The top 5 positive and negative weights for each component are:
>PC 1(Positive Weights): LP_STATUS_GROB_1.0, CAMEO_INTL_2015, PLZ8_ANTG3, HH_EINKOMMEN_SCORE, PLZ8_ANTG4
>PC 1(Negative Weights): MOBI_REGIO, PLZ8_ANTG1, KBA05_ANTG1, KBA05_GBZ, FINANZ_MINIMALIST
>PC 2(Positive Weights): ALTERSKATEGORIE_GROB, FINANZ_VORSORGER, ZABEOTYP_3.0, SEMIO_ERL, SEMIO_LUST
>PC 2(Negative Weights): PRAEGENDE_JUGENDJAHRE, FINANZ_SPARER, SEMIO_REL, FINANZ_UNAUFFAELLIGER, SEMIO_PFLICHT
>PC 3(Positive Weights): SEMIO_VERT, SEMIO_SOZ, SEMIO_FAM, SEMIO_KULT, FINANZTYP_5.0
>PC 3(Negative Weights): ANREDE_KZ, SEMIO_KAEM, SEMIO_DOM, SEMIO_KRIT, SEMIO_ERL
Using DataDictionary.md we can conclude that the components must be related by:
>PC 1: The positive features are about wealth, family size and family size in the neighbourhood. The negative features are about movement, family size and number of buildings. We can conclude this component is about wealth, family size, and the type of buildings in the neighbourhood.
>PC 2: The positive features are about age, whether you are a saver, and energy consumption. The negative features are also about whether you are a saver, plus other features about your personality. We can conclude this component is about age.
>PC 3: The positive features are about personality types. The negative features are also about personality types. We can conclude this component is about personality.
## Step 3: Clustering
### Step 3.1: Apply Clustering to General Population
I've assessed and cleaned the demographics data, then scaled and transformed them. Now, it's time to see how the data clusters in the principal components space. In this substep, I will apply k-means clustering to the dataset and use the average within-cluster distances from each point to their assigned cluster's centroid to decide on a number of clusters to keep.
- Using sklearn's [KMeans](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html#sklearn.cluster.KMeans) class to perform k-means clustering on the PCA-transformed data.
- Then, I compute the average difference from each point to its assigned cluster's center. The KMeans object's `.score()` method might be useful here, but note that in sklearn, scores tend to be defined so that larger is better.
- Performing the above two steps for a number of different cluster counts, we can then see how the average distance decreases with an increasing number of clusters. However, each additional cluster provides a smaller net benefit. Using this fact we can select a final number of clusters in which to group the data. **Warning**: because of the large size of the dataset, it can take a long time for the algorithm to resolve. The more clusters to fit, the longer the algorithm will take.
- Once we've selected a final number of clusters to use, I will re-fit a KMeans instance to perform the clustering operation.
```
# Over a number of different cluster counts...
# running k-means clustering on the data and...
from sklearn.cluster import KMeans
def get_kmeans_score(data, center):
    """Fit KMeans with `center` clusters and return the absolute value of
    its score on `data` (an SSE-like quantity; larger = worse fit)."""
    fitted = KMeans(n_clusters=center).fit(data)
    return np.abs(fitted.score(data))
scores = []
centers = list(range(1,11))
# computing the average within-cluster distances.
# NOTE(review): KMeans is run without a random_state, so the elbow curve
# can shift slightly between runs.
for center in centers:
    scores.append(get_kmeans_score(azdias_pca, center))
# Investigating the change in within-cluster distance across number of clusters.
plt.plot(centers, scores, linestyle='--', marker='o', color='b');
plt.xlabel('K');
plt.ylabel('SSE');
plt.title('SSE vs. K');
# Re-fitting the k-means model with the selected number of clusters and obtain
# cluster predictions for the general population demographics data.
kmeans = KMeans(n_clusters = 5)
model = kmeans.fit(azdias_pca)
azdias_labels = model.predict(azdias_pca)
```
### Discussion 3.1: Apply Clustering to General Population
I chose to use 5 clusters for KMeans. After observing the SSE-vs-K (elbow) plot we can identify two elbows, at 2 and at 5. I opted for 5 clusters because it had a lower score.
### Step 3.2: Apply All Steps to the Customer Data
Now that we have clusters and cluster centers for the general population, it's time to see how the customer data maps on to those clusters. Take care to not confuse this for re-fitting all of the models to the customer data. Instead, I'm going to use the fits from the general population to clean, transform, and cluster the customer data. In the last step of the project, I will interpret how the general population fits apply to the customer data.
- Using the sklearn objects from the general demographics data, and applying their transformations to the customers data. That is, we should not be using a `.fit()` or `.fit_transform()` method to re-fit the old objects, nor should we be creating new sklearn objects! We should carry the data through the feature scaling, PCA, and clustering steps, obtaining cluster assignments for all of the data in the customer demographics data.
```
# Loading in the customer demographics data.
customers = pd.read_csv('Udacity_CUSTOMERS_Subset.csv', sep=';')
print(customers.shape)
customers.head()
# Applying preprocessing, feature transformation, and clustering from the general
# demographics onto the customer data, obtaining cluster predictions for the
# customer demographics data.
# NOTE(review): `feat_info` is not defined in this cell; it relies on a
# global loaded in an earlier cell (clean_data's copy was function-local).
# Re-code missing/unknown values to NaN (same positional loop as clean_data).
for i in range(feat_info.shape[0]):
    cols = customers.columns[i]
    var = feat_info['missing_or_unknown'][i]
    var_list = var.replace('[','').replace(']','').split(',')
    if var_list != ['']:
        for j in var_list:
            if customers[cols].dtype =='object':
                customers.loc[customers[cols]==j,cols]=np.nan
            else:
                j=int(j)
                customers.loc[customers[cols]==j,cols]=np.nan
# Drop the same high-missingness columns identified on the general population.
outliers_columns = ['AGER_TYP', 'GEBURTSJAHR', 'TITEL_KZ', 'ALTER_HH', 'KK_KUNDENTYP', 'KBA05_BAUMAX']
customers = customers.drop(outliers_columns, axis=1)
# Drop rows with 10+ missing values (same threshold as the population data).
missing_data_rows = customers.isnull().sum(axis=1)
customers['missing_data_rows'] = missing_data_rows
customers = customers[customers['missing_data_rows'] < 10]
# Re-encode categoricals exactly as was done for azdias.
cat_col = feat_info[feat_info['type'] == 'categorical'].attribute
binary_col = []
mixed_col = []
for name in cat_col:
    if name in customers.columns:
        if len(customers[name].unique()) <= 3:
            binary_col.append(name)
        else:
            mixed_col.append(name)
for name in mixed_col:
    customers = pd.concat([customers.drop(name, axis=1),
                           pd.get_dummies(data = customers[name], columns = name, prefix = name)], axis=1)
customers['OST_WEST_KZ'] = customers['OST_WEST_KZ'].replace({'O': 0.0,'W': 1.0});
# Engineered PRAEGENDE_JUGENDJAHRE flags (same code groupings as clean_data).
customers['MAINTSTREAM'] = customers['PRAEGENDE_JUGENDJAHRE'].isin([1,3,5,8,10,12,14]).astype(int)
customers['EAST'] = customers['PRAEGENDE_JUGENDJAHRE'].isin([7,12,13]).astype(int)
customers['WEST'] = customers['PRAEGENDE_JUGENDJAHRE'].isin([6,10,11]).astype(int)
customers['EAST + WEST'] = customers['PRAEGENDE_JUGENDJAHRE'].isin([1,2,3,4,5,8,9,14,15]).astype(int)
# CAMEO_INTL_2015: tens digit -> wealth scale, ones digit -> life stage.
wealth_encoding = {'11': 1.0,'12': 1.0,'13': 1.0,'14': 1.0,'15': 1.0,'21': 2.0,'22': 2.0,'23': 2.0,
                   '24': 2.0,'25': 2.0,'31': 3.0,'32': 3.0,'33': 3.0,'34': 3.0,'35': 3.0,'41': 4.0,
                   '42': 4.0,'43': 4.0,'44': 4.0,'45': 4.0,'51': 5.0,'52': 5.0,'53': 5.0,'54': 5.0,
                   '55': 5.0}
customers['WEALTH'] = customers['CAMEO_INTL_2015'].replace(wealth_encoding)
customers = pd.concat([customers.drop('WEALTH', axis=1),
                       pd.get_dummies(data = customers['WEALTH'], columns = 'WEALTH', prefix = 'WEALTH')], axis=1)
life_style_encoding = {'11': 1.0,'12': 2.0,'13': 3.0,'14': 4.0,'15': 5.0,'21': 1.0,'22': 2.0,'23': 3.0,
                       '24': 4.0,'25': 5.0,'31': 1.0,'32': 2.0,'33': 3.0,'34': 4.0, '35': 5.0,'41': 1.0,
                       '42': 2.0,'43': 3.0,'44': 4.0,'45': 5.0,'51': 1.0,'52': 2.0,'53': 3.0,'54': 4.0,
                       '55': 5.0}
customers['LIFE_STYLE_TOPOLOGY'] = customers['CAMEO_INTL_2015'].replace(life_style_encoding)
customers = pd.concat([customers.drop('LIFE_STYLE_TOPOLOGY', axis=1),
                       pd.get_dummies(data = customers['LIFE_STYLE_TOPOLOGY'],
                                      columns = 'LIFE_STYLE_TOPOLOGY', prefix = 'LIFE_STYLE_TOPOLOGY')], axis=1)
# Find the first column-name mismatch between azdias and customers.
for i in range(208):
    if azdias.columns[i] != customers.columns[i]:
        print(azdias.columns[i])
        print(customers.columns[i])
        break
# Dummy level absent from the customer data; add it as all-zero.
# NOTE(review): it is appended at the END, so the customer column ORDER now
# differs from azdias; the positional rename below (customers.columns =
# columns) may mislabel columns -- verify before trusting downstream results.
customers['GEBAEUDETYP_5.0'] = 0
len(azdias.columns)
len(customers.columns)
# NOTE(review): the markdown above says to reuse the population fits, but
# new SimpleImputer/StandardScaler/PCA objects are created and re-fit on the
# customer data here (fit_transform). Only the KMeans `model` is reused.
imp = SimpleImputer(missing_values = np.nan , strategy='most_frequent')
customers = imp.fit_transform(customers)
scaler = StandardScaler()
customers = scaler.fit_transform(customers)
customers = pd.DataFrame(customers)
customers.columns = columns
pca = PCA(120)
customers_pca = pca.fit_transform(customers)
customers_labels = model.predict(customers_pca)
```
### Step 3.3: Compare Customer Data to Demographics Data
At this point, we have clustered data based on demographics of the general population of Germany, and seen how the customer data for a mail-order sales company maps onto those demographic clusters. In this final substep, I will compare the two cluster distributions to see where the strongest customer base for the company is.
Consider the proportion of persons in each cluster for the general population, and the proportions for the customers. If we think the company's customer base to be universal, then the cluster assignment proportions should be fairly similar between the two. If there are only particular segments of the population that are interested in the company's products, then we should see a mismatch from one to the other. If there is a higher proportion of persons in a cluster for the customer data compared to the general population (e.g. 5% of persons are assigned to a cluster for the general population, but 15% of the customer data is closest to that cluster's centroid) then that suggests the people in that cluster to be a target audience for the company. On the other hand, the proportion of the data in a cluster being larger in the general population than the customer data (e.g. only 2% of customers closest to a population centroid that captures 6% of the data) suggests that group of persons to be outside of the target demographics.
Take a look at the following points in this step:
- I will compute the proportion of data points in each cluster for the general population and the customer data. Visualizations will be useful here: both for the individual dataset proportions, but also to visualize the ratios in cluster representation between groups. Seaborn's [`countplot()`](https://seaborn.pydata.org/generated/seaborn.countplot.html) or [`barplot()`](https://seaborn.pydata.org/generated/seaborn.barplot.html) function could be handy.
- Recall the analysis we performed in step 1.1.3 of the project, where I separated out certain data points from the dataset if they had more than a specified threshold of missing values. If we found that this group was qualitatively different from the main bulk of the data, we should treat this as an additional data cluster in this analysis. Make sure that you account for the number of data points in this subset, for both the general population and customer datasets, when making your computations!
- Which cluster or clusters are overrepresented in the customer dataset compared to the general population? We can select at least one such cluster and infer what kind of people might be represented by that cluster. Using the principal component interpretations from step 2.3 or looking at additional components to help me make this inference. Alternatively, I can use the `.inverse_transform()` method of the PCA and StandardScaler objects to transform centroids back to the original data space and interpret the retrieved values directly.
- Performing a similar investigation for the underrepresented clusters. Which cluster or clusters are underrepresented in the customer dataset compared to the general population, and what kinds of people are typified by these clusters?
```
# Comparing the proportion of data in each cluster for the customer data to the
# proportion of data in each cluster for the general population.
azdias_labels = pd.DataFrame(azdias_labels)
azdias_labels.columns = ['clusters']
customers_labels = pd.DataFrame(customers_labels)
customers_labels.columns = ['clusters']
# Side-by-side proportion histograms; `weights` normalises counts to fractions.
figure, axes = plt.subplots(nrows=1, ncols=2, figsize=(15,8), sharey=True)
azdias_labels['clusters'].hist(ax=axes[0],
                               weights=np.ones(len(azdias_labels['clusters'])) / len(azdias_labels['clusters']),
                               align='right',
                               bins = range(0,6),
                               rwidth=0.5)
axes[0].set_title('General Population')
axes[0].set_xlabel('Cluster')
axes[0].set_ylabel('Proportion')
customers_labels['clusters'].hist(ax=axes[1],
                                  weights=np.ones(len(customers_labels['clusters'])) / len(customers_labels['clusters']),
                                  align='right',
                                  bins = range(0,6),
                                  rwidth=0.5)
axes[1].set_title('Customer Data')
axes[1].set_xlabel('Cluster')
axes[1].set_ylabel('Proportion')
figure.tight_layout(pad=1)
# Attach the cluster labels to the scaled azdias frame.
# NOTE(review): this is a positional alignment (label row i <-> azdias row i);
# it holds only if both came out of the same post-cleaning row order.
azdias_labels.index = azdias.index
azdias = pd.concat([azdias,azdias_labels],axis=1)
# Re-clean the raw data so cluster profiles can be read in original units
# (azdias itself is standard-scaled and hard to interpret directly).
original_data = pd.read_csv('Udacity_AZDIAS_Subset.csv', sep = ';')
original_data = clean_data(original_data)
columns_original_data = original_data.columns
imp_test = SimpleImputer(missing_values = np.nan , strategy='most_frequent')
original_data = imp_test.fit_transform(original_data)
original_data = pd.DataFrame(original_data)
original_data.columns = columns_original_data
# NOTE(review): concat aligns on index -- original_data has a fresh
# RangeIndex while azdias['clusters'] kept azdias's index; confirm they match.
results = pd.concat([original_data, azdias['clusters']], axis = 1)
# What kinds of people are part of a cluster that is overrepresented in the
# customer data compared to the general population?
cluster_overrepresented = results[results['clusters'] == 4]
for i in range(cluster_overrepresented.shape[1]):
    print(cluster_overrepresented[cluster_overrepresented.columns[i]].value_counts())
# What kinds of people are part of a cluster that is underrepresented in the
# customer data compared to the general population?
cluster_underrepresented = results[results['clusters'] == 0]
for i in range(cluster_underrepresented.shape[1]):
    print(cluster_underrepresented[cluster_underrepresented.columns[i]].value_counts())
```
### Discussion 3.3: Compare Customer Data to Demographics Data
The underrepresented data shows the target audience for the company. From cluster 5 we can learn about the demographic. The target audience is mainly male (ANREDE_KZ: 1.0), with a high income (FINANZ_: 1.0-2.0), who are single or are a couple (LP_LEBENSPHASE_FEIN: 20.0, 13.0).
The overrepresented data shows audience which does not appeal to the company. From cluster 1 we can learn about the demographic. The unpopular audience is mainly male/female (ANREDE_KZ: 1.0, 2.0), with a lower income (FINANZ_: 5.0-2.0), who are of retirement or late age (LP_LEBENSPHASE_FEIN: 11.0, 12.0).
| github_jupyter |
```
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# sns.set_context('paper', font_scale=2)
def get_talon_nov_colors(samples=None, how='normal'):
    """Return (color_dict, order) for TALON transcript-novelty categories.

    samples: optional collection of category names to keep; when given (and
        non-empty), both the palette and the order are filtered to it.
    how: palette variant -- 'normal' (default), 'light_40', or 'light_20'.
    """
    palettes = {
        'normal': {'Known': '#009E73',
                   'ISM': '#0072B2',
                   'NIC': '#D55E00',
                   'NNC': '#E69F00',
                   'Antisense': '#000000',
                   'Intergenic': '#CC79A7',
                   'Genomic': '#F0E442'},
        'light_40': {'Known': '#66c5ab',
                     'ISM': '#66aad1',
                     'NIC': '#e69e66',
                     'NNC': '#f0c566',
                     'Antisense': '#666666',
                     'Intergenic': '#e0afca',
                     'Genomic': '#f6ef8e'},
        'light_20': {'Known': '#33b18f',
                     'ISM': '#338ec1',
                     'NIC': '#dd7e33',
                     'NNC': '#ebb233',
                     'Antisense': '#333333',
                     'Intergenic': '#d694b9',
                     'Genomic': '#f3e968'},
    }
    # Unknown `how` values fall back to the normal palette, as before.
    c_dict = dict(palettes.get(how, palettes['normal']))
    order = ['Known', 'ISM', 'NIC', 'NNC', 'Antisense', 'Intergenic', 'Genomic']
    if samples:
        c_dict = {cat: color for cat, color in c_dict.items() if cat in samples}
        order = [cat for cat in order if cat in samples]
    return c_dict, order
def compute_prop_support(sj_file, ab_file, opref):
    """
    Violin-plot, per transcript-novelty category, the percentage of each
    transcript's splice junctions (SJs) supported by Illumina short reads,
    then print the per-category mean and median.

    sj_file: TSV with one row per (tid, sj_id) pair, including an
             `illumina_support` column
    ab_file: TALON filtered abundance TSV (provides transcript_novelty)
    opref:   output prefix; figure saved to '<opref>_illumina_sj_support.pdf'
    """
    sj_df = pd.read_csv(sj_file, sep='\t')
    ab = pd.read_csv(ab_file, sep='\t')
    # merge with ab to get novelty info
    ab = ab[['annot_transcript_id', 'transcript_novelty']]
    sj_df = sj_df.merge(ab, how='left', left_on='tid', right_on='annot_transcript_id')
    # count up splice junctions per transcript
    sjs_per_t = sj_df[['tid', 'sj_id']].groupby('tid').count()
    sjs_per_t.reset_index(inplace=True)
    sjs_per_t.rename({'sj_id':'total_sjs'}, axis=1, inplace=True)
    # groupby transcript id and illumina support
    # -> one row per (tid, novelty, support flag), SJ count in n_sjs
    sj_df = sj_df[['tid', 'transcript_novelty', 'illumina_support', 'sj_id']].groupby(['tid', 'transcript_novelty', 'illumina_support']).count()
    sj_df.reset_index(inplace=True)
    sj_df.rename({'sj_id':'n_sjs'}, axis=1, inplace=True)
    # merge with total sjs and calc % supported
    sj_df = sj_df.merge(sjs_per_t, how='left', on='tid')
    sj_df['perc_supported'] = (sj_df.n_sjs/sj_df.total_sjs)*100
    # remove unsupported bois
    # (keep only the supported rows, so perc_supported now reads as
    # "% of this transcript's SJs that Illumina supports")
    sj_df = sj_df.loc[sj_df.illumina_support == True]
    # drop antisense, intergenic, and genomic cause they bad
    novs = ['Known', 'ISM', 'NIC', 'NNC']
    sj_df = sj_df.loc[sj_df.transcript_novelty.isin(novs)]
    # plot plot
    plt.figure(figsize=(8.5,8.5))
    # sns.set(font_scale=1.50)
    # font sizes
    plt.rc('font', size=14)
    c_dict, order = get_talon_nov_colors(novs)
    ax = sns.violinplot(data=sj_df, x='transcript_novelty', y='perc_supported', order=order, palette=c_dict, saturation=1, linewidth=0.5)
    ax.set_ylabel('% SJs in transcript supported by Illumina')
    ax.set_xlabel('Isoform Novelty')
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    fname = '{}_illumina_sj_support.pdf'.format(opref)
    plt.savefig(fname)
    # average and median of each category
    for n in novs:
        temp = sj_df.loc[sj_df.transcript_novelty==n]
        print()
        print('Mean % Illumina supported SJs for {}: {}'.format(n, temp.perc_supported.mean()))
        print('Median % Illumina supported SJs for {}: {}'.format(n, temp.perc_supported.median()))
def plot_unsupp_sjs_by_nov(sj_file, sj_nov_file, oprefix):
    """
    Bar-plot the number of Illumina-unsupported splice junctions (SJs),
    broken down by SJ novelty category.

    sj_file:     TSV with per-(tid, sj_id) rows incl. `illumina_support`
    sj_nov_file: headerless TSV; cols 0-3 locate the SJ, col 9 is novelty
    oprefix:     output prefix; saves '<oprefix>_ill_unsupported_sj_novelty.pdf'
    """
    sj_df = pd.read_csv(sj_file, sep='\t')
    nov_df = pd.read_csv(sj_nov_file, sep='\t', header=None,
                         usecols=[0,1,2,3,9], names=['chrom', 'start', 'stop', 'strand', 'novelty'])
    # Rebuild the same sj_id key used in sj_file: chrom_start_stop_strand.
    nov_df['sj_id'] = nov_df.chrom+'_'+nov_df.start.astype(str)+'_'+nov_df.stop.astype(str)+'_'+nov_df.strand.astype(str)
    nov_df = nov_df[['sj_id', 'novelty']]
    # limit to just sjs with no illumina support
    sj_df = sj_df.loc[sj_df.illumina_support == False]
    # merge with novelty of splice junctions
    sj_df = sj_df.merge(nov_df, how='left', on='sj_id')
    sj_df = sj_df[['sj_id', 'novelty']]
    sj_df.drop_duplicates(inplace=True)
    # groupby and count # of Illumina-unsupported SJs per SJ novelty category
    sj_df = sj_df.groupby('novelty').count()
    sj_df.reset_index(inplace=True)
    sj_df.rename({'sj_id':'sj_count'}, axis=1, inplace=True)
    # plot plot
    plt.figure(figsize=(8.5,8.5))
    plt.rc('font', size=14)
    # sns.set(font_scale=1.50)
    c_dict, order = get_talon_nov_colors(['Known', 'NIC', 'NNC'])
    # BUG FIX: the original passed `hue_order=order`, which seaborn ignores
    # when no `hue` is set; `order=order` actually orders the bars,
    # matching the sibling compute_prop_support violinplot call.
    ax = sns.barplot(data=sj_df, x='novelty', y='sj_count', palette=c_dict, order=order, saturation=1)
    ax.set_ylabel('Number of SJs unsupported by Illumina')
    ax.set_xlabel('SJ Novelty')
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    add_n(ax, sj_df, 'sj_count')
    fname = '{}_ill_unsupported_sj_novelty.pdf'.format(oprefix)
    plt.savefig(fname)
```
## PacBio
```
def add_n(ax, data, feature):
    """
    Annotate each bar in `ax` with its height (thousands-separated count).

    `data` and `feature` are kept for call-site compatibility but are not
    used: the counts come straight from the bar patches. (The original
    computed an unused `total = data[feature].sum()`; removed.)
    """
    ylim = ax.get_ylim()[1]
    for p in ax.patches:
        label = '{:,.0f}'.format(p.get_height())
        # Shift left proportionally to label length to roughly center it.
        x = p.get_x() + p.get_width() / 2 - 0.03*len(label)
        # Place the label just above the bar (1% of the y-range).
        y = p.get_y() + p.get_height() + ylim*0.01
        ax.annotate(label, (x, y), size = 15)
# --- PacBio GM12878: SJ-support violin plot + unsupported-SJ bar plot ---
sj_file = 'pb_GM12878_sj_tid_support.tsv'
ab_file = 'pb_ont_talon_abundance_filtered.tsv'
compute_prop_support(sj_file, ab_file, 'figures/PB_GM12878')
sj_file = 'pb_GM12878_sj_tid_support.tsv'
sj_nov_file = 'pb_talon_GM12878_sjs_novelty.tab'
plot_unsupp_sjs_by_nov(sj_file, sj_nov_file, 'figures/PB_GM12878')
```
## ONT
```
# --- ONT GM12878: same two plots ---
sj_file = 'ont_GM12878_sj_tid_support.tsv'
ab_file = 'pb_ont_talon_abundance_filtered.tsv'
compute_prop_support(sj_file, ab_file, 'figures/ONT_GM12878')
sj_file = 'ont_GM12878_sj_tid_support.tsv'
sj_nov_file = 'ont_talon_GM12878_sjs_novelty.tab'
plot_unsupp_sjs_by_nov(sj_file, sj_nov_file, 'figures/ONT_GM12878')
# NOTE(review): `comput_suport` (sic) and `plot_plot` are called below, but
# `comput_suport` is only defined in a LATER cell and `plot_plot` is not
# visible in this file chunk at all -- in top-to-bottom execution these
# lines raise NameError; confirm intended cell order and spelling.
sj_support = 'pb_GM12878_sj_tid_support.tsv'
ab = 'pb_ont_talon_abundance_filtered.tsv'
sj_nov = 'pb_talon_GM12878_sjs_novelty.tab'
nov = comput_suport(sj_support, ab, sj_nov)
plot_plot(nov, 'pb_GM12878')
sj_support = 'ont_GM12878_sj_tid_support.tsv'
ab = 'pb_ont_talon_abundance_filtered.tsv'
sj_nov = 'ont_talon_GM12878_sjs_novelty.tab'
nov = comput_suport(sj_support, ab, sj_nov)
plot_plot(nov, 'ont_GM12878')
def comput_suport(sj_support, ab, sj_nov):
    """Summarise, per transcript-novelty category, how many isoforms have all of
    their splice junctions (SJs) supported under three increasingly permissive
    criteria: Illumina only; Illumina or Known SJs; Illumina, Known or NIC SJs.

    Parameters
    ----------
    sj_support : str
        Path to a TSV with one row per (tid, sj_id) pair including an
        `illumina_support` column.
    ab : str
        Path to the TALON filtered abundance TSV; supplies
        `annot_transcript_id` and `transcript_novelty`.
    sj_nov : str
        Path to a headerless TAB file of SJs; columns 0-3 are
        chrom/start/stop/strand and column 9 is the SJ novelty label.

    Returns
    -------
    pandas.DataFrame
        One row per transcript novelty with `n_transcripts`, the three support
        counts (`ill_support`, `ill_known_support`, `ill_known_nic_support`),
        the matching `*_perc` percentages, and a constant `total_percent`
        column (100) used as the background layer when plotting.
    """
    df = pd.read_csv(sj_support, sep='\t')

    # Attach transcript novelty to every (tid, sj) row.
    ab = pd.read_csv(ab, sep='\t')
    ab = ab[['annot_transcript_id', 'transcript_novelty']]
    df = df.merge(ab, how='left', left_on='tid', right_on='annot_transcript_id')

    # NOTE(review): support_df is only printed as a sanity check (unique tid
    # count) - nothing below reads it.
    support_df = df.loc[~df.tid.duplicated()]
    print(len(support_df.index))
    support_df = support_df[['tid', 'transcript_novelty']].groupby('transcript_novelty').count()
    support_df.reset_index(inplace=True)
    support_df.rename({'tid':'n_tids'}, axis=1, inplace=True)

    # Build sj_id keys for the SJ-novelty table so it can be merged below.
    nov_df = pd.read_csv(sj_nov, sep='\t', header=None,
        usecols=[0,1,2,3,9], names=['chrom', 'start', 'stop', 'strand', 'sj_novelty'])
    nov_df['sj_id'] = nov_df.chrom+'_'+nov_df.start.astype(str)+'_'+nov_df.stop.astype(str)+'_'+nov_df.strand.astype(str)
    nov_df = nov_df[['sj_id', 'sj_novelty']]

    # merge with novelty of splice junctions
    df = df.merge(nov_df, how='left', on='sj_id')

    # count sjs per transcript
    sjs_per_t = df[['tid', 'transcript_novelty', 'sj_id']].groupby(['tid', 'transcript_novelty']).count()
    sjs_per_t.reset_index(inplace=True)
    sjs_per_t.rename({'sj_id':'total_sjs'}, axis=1, inplace=True)

    # count, per transcript, the SJs supported by Illumina
    temp = df[['tid', 'transcript_novelty', 'illumina_support', 'sj_id']].groupby(['tid', 'transcript_novelty', 'illumina_support']).count()
    temp.reset_index(inplace=True)
    temp.rename({'sj_id':'n_sjs_illumina'}, axis=1, inplace=True)
    temp = temp.loc[temp.illumina_support == True]
    # merge in with sjs per t
    sjs_per_t = sjs_per_t.merge(temp[['tid', 'n_sjs_illumina']], how='left', on='tid')

    # count the number of sjs per transcript that are supported by illumina or gencode
    df['ill_or_known'] = (df.illumina_support==True)|(df.sj_novelty=='Known')
    temp = df[['tid', 'ill_or_known', 'sj_id']].groupby(['tid', 'ill_or_known']).count()
    temp.reset_index(inplace=True)
    temp.rename({'sj_id':'n_sjs_ill_known'}, axis=1, inplace=True)
    temp = temp.loc[temp.ill_or_known == True]
    # merge in with sjs per t
    sjs_per_t = sjs_per_t.merge(temp[['tid', 'n_sjs_ill_known']], how='left', on='tid')

    # count the number of sjs per transcript supported by illumina, Known or NIC
    df['ill_known_nic'] = (df.illumina_support==True)|(df.sj_novelty.isin(['NIC', 'Known']))
    temp = df[['tid', 'ill_known_nic', 'sj_id']].groupby(['tid', 'ill_known_nic']).count()
    temp.reset_index(inplace=True)
    temp.rename({'sj_id':'n_sjs_ill_known_nic'}, axis=1, inplace=True)
    temp = temp.loc[temp.ill_known_nic == True]
    # merge in with sjs per t
    sjs_per_t = sjs_per_t.merge(temp[['tid', 'n_sjs_ill_known_nic']], how='left', on='tid')

    # fill nans with 0 (transcripts with zero supported SJs under a criterion)
    sjs_per_t.fillna(0, inplace=True)
    sjs_per_t['total_percent'] = 100
    # sjs_per_t['ill_percent'] = (sjs_per_t.n_sjs_illumina/sjs_per_t.total_sjs)*100
    # sjs_per_t['ill_known_percent'] = (sjs_per_t.n_sjs_ill_known/sjs_per_t.total_sjs)*100
    # sjs_per_t['ill_known_nic_percent'] = (sjs_per_t.n_sjs_ill_known_nic/sjs_per_t.total_sjs)*100

    # Flag transcripts where every SJ is supported under each criterion.
    sjs_per_t['full_ill_support'] = sjs_per_t.total_sjs == sjs_per_t.n_sjs_illumina
    sjs_per_t['full_ill_known_support'] = sjs_per_t.total_sjs == sjs_per_t.n_sjs_ill_known
    sjs_per_t['full_ill_known_nic_support'] = sjs_per_t.total_sjs == sjs_per_t.n_sjs_ill_known_nic
    sjs_per_t.head()

    # Total transcripts per novelty category.
    nov = sjs_per_t[['tid', 'transcript_novelty']].groupby('transcript_novelty').count()
    nov.reset_index(inplace=True)
    nov.rename({'tid': 'n_transcripts'},axis=1, inplace=True)

    # Count fully-supported transcripts per novelty, one criterion at a time.
    temp = sjs_per_t[['tid', 'transcript_novelty', 'full_ill_support']].groupby(['transcript_novelty', 'full_ill_support']).count()
    temp.reset_index(inplace=True)
    temp = temp.loc[temp.full_ill_support == True]
    temp.rename({'tid': 'ill_support'}, axis=1, inplace=True)
    temp = temp[['transcript_novelty', 'ill_support']]
    nov = nov.merge(temp, how='left', on='transcript_novelty')

    temp = sjs_per_t[['tid', 'transcript_novelty', 'full_ill_known_support']].groupby(['transcript_novelty', 'full_ill_known_support']).count()
    temp.reset_index(inplace=True)
    temp = temp.loc[temp.full_ill_known_support == True]
    temp.rename({'tid': 'ill_known_support'}, axis=1, inplace=True)
    temp = temp[['transcript_novelty', 'ill_known_support']]
    nov = nov.merge(temp, how='left', on='transcript_novelty')

    temp = sjs_per_t[['tid', 'transcript_novelty', 'full_ill_known_nic_support']].groupby(['transcript_novelty', 'full_ill_known_nic_support']).count()
    temp.reset_index(inplace=True)
    temp = temp.loc[temp.full_ill_known_nic_support == True]
    temp.rename({'tid': 'ill_known_nic_support'}, axis=1, inplace=True)
    temp = temp[['transcript_novelty', 'ill_known_nic_support']]
    nov = nov.merge(temp, how='left', on='transcript_novelty')

    # Convert each support count into a percentage of transcripts in that category.
    cols = ['ill', 'ill_known', 'ill_known_nic']
    for c in cols:
        nov['{}_perc'.format(c)] = (nov['{}_support'.format(c)]/nov['n_transcripts'])*100
    nov['total_percent'] =100
    return nov
# ---------------------------------------------------------------------------
# Exploratory, cell-by-cell version of the comput_suport() pipeline, run on
# the PacBio GM12878 data so intermediate frames can be inspected in the
# notebook (the bare .head() / nov expressions display output).
# ---------------------------------------------------------------------------
df = pd.read_csv('pb_GM12878_sj_tid_support.tsv', sep='\t')
ab = pd.read_csv('pb_ont_talon_abundance_filtered.tsv', sep='\t')
ab = ab[['annot_transcript_id', 'transcript_novelty']]
df = df.merge(ab, how='left', left_on='tid', right_on='annot_transcript_id')
# Sanity check: number of unique transcript ids.
support_df = df.loc[~df.tid.duplicated()]
print(len(support_df.index))
support_df = support_df[['tid', 'transcript_novelty']].groupby('transcript_novelty').count()
support_df.reset_index(inplace=True)
support_df.rename({'tid':'n_tids'}, axis=1, inplace=True)
# merge with information about sj support
nov_df = pd.read_csv('pb_talon_GM12878_sjs_novelty.tab', sep='\t', header=None,
    usecols=[0,1,2,3,9], names=['chrom', 'start', 'stop', 'strand', 'sj_novelty'])
nov_df['sj_id'] = nov_df.chrom+'_'+nov_df.start.astype(str)+'_'+nov_df.stop.astype(str)+'_'+nov_df.strand.astype(str)
nov_df = nov_df[['sj_id', 'sj_novelty']]
# merge with novelty of splice junctions
df = df.merge(nov_df, how='left', on='sj_id')
# count sjs per transcript
sjs_per_t = df[['tid', 'transcript_novelty', 'sj_id']].groupby(['tid', 'transcript_novelty']).count()
sjs_per_t.reset_index(inplace=True)
sjs_per_t.rename({'sj_id':'total_sjs'}, axis=1, inplace=True)
print(len(sjs_per_t.index))
# count, per transcript, the SJs supported by Illumina
temp = df[['tid', 'transcript_novelty', 'illumina_support', 'sj_id']].groupby(['tid', 'transcript_novelty', 'illumina_support']).count()
temp.reset_index(inplace=True)
temp.rename({'sj_id':'n_sjs_illumina'}, axis=1, inplace=True)
temp = temp.loc[temp.illumina_support == True]
# merge in with sjs per t
sjs_per_t = sjs_per_t.merge(temp[['tid', 'n_sjs_illumina']], how='left', on='tid')
sjs_per_t.head()
# count the number of sjs per transcript that are supported by illumina or gencode
df['ill_or_known'] = (df.illumina_support==True)|(df.sj_novelty=='Known')
temp = df[['tid', 'ill_or_known', 'sj_id']].groupby(['tid', 'ill_or_known']).count()
temp.reset_index(inplace=True)
temp.rename({'sj_id':'n_sjs_ill_known'}, axis=1, inplace=True)
temp = temp.loc[temp.ill_or_known == True]
# merge in with sjs per t
sjs_per_t = sjs_per_t.merge(temp[['tid', 'n_sjs_ill_known']], how='left', on='tid')
# count the number of sjs per transcript supported by illumina, Known or NIC
df['ill_known_nic'] = (df.illumina_support==True)|(df.sj_novelty.isin(['NIC', 'Known']))
temp = df[['tid', 'ill_known_nic', 'sj_id']].groupby(['tid', 'ill_known_nic']).count()
temp.reset_index(inplace=True)
temp.rename({'sj_id':'n_sjs_ill_known_nic'}, axis=1, inplace=True)
temp = temp.loc[temp.ill_known_nic == True]
# merge in with sjs per t
sjs_per_t = sjs_per_t.merge(temp[['tid', 'n_sjs_ill_known_nic']], how='left', on='tid')
# fill nans with 0
sjs_per_t.fillna(0, inplace=True)
sjs_per_t['total_percent'] = 100
# sjs_per_t['ill_percent'] = (sjs_per_t.n_sjs_illumina/sjs_per_t.total_sjs)*100
# sjs_per_t['ill_known_percent'] = (sjs_per_t.n_sjs_ill_known/sjs_per_t.total_sjs)*100
# sjs_per_t['ill_known_nic_percent'] = (sjs_per_t.n_sjs_ill_known_nic/sjs_per_t.total_sjs)*100
# Flag transcripts where every SJ is supported under each criterion.
sjs_per_t['full_ill_support'] = sjs_per_t.total_sjs == sjs_per_t.n_sjs_illumina
sjs_per_t['full_ill_known_support'] = sjs_per_t.total_sjs == sjs_per_t.n_sjs_ill_known
sjs_per_t['full_ill_known_nic_support'] = sjs_per_t.total_sjs == sjs_per_t.n_sjs_ill_known_nic
sjs_per_t.head()
# Totals per novelty category, then merge in fully-supported counts.
nov = sjs_per_t[['tid', 'transcript_novelty']].groupby('transcript_novelty').count()
nov.reset_index(inplace=True)
nov.rename({'tid': 'n_transcripts'},axis=1, inplace=True)
nov.head()
temp = sjs_per_t[['tid', 'transcript_novelty', 'full_ill_support']].groupby(['transcript_novelty', 'full_ill_support']).count()
temp.reset_index(inplace=True)
temp = temp.loc[temp.full_ill_support == True]
temp.rename({'tid': 'ill_support'}, axis=1, inplace=True)
temp = temp[['transcript_novelty', 'ill_support']]
nov = nov.merge(temp, how='left', on='transcript_novelty')
temp = sjs_per_t[['tid', 'transcript_novelty', 'full_ill_known_support']].groupby(['transcript_novelty', 'full_ill_known_support']).count()
temp.reset_index(inplace=True)
temp = temp.loc[temp.full_ill_known_support == True]
temp.rename({'tid': 'ill_known_support'}, axis=1, inplace=True)
temp = temp[['transcript_novelty', 'ill_known_support']]
nov = nov.merge(temp, how='left', on='transcript_novelty')
temp = sjs_per_t[['tid', 'transcript_novelty', 'full_ill_known_nic_support']].groupby(['transcript_novelty', 'full_ill_known_nic_support']).count()
temp.reset_index(inplace=True)
temp = temp.loc[temp.full_ill_known_nic_support == True]
temp.rename({'tid': 'ill_known_nic_support'}, axis=1, inplace=True)
temp = temp[['transcript_novelty', 'ill_known_nic_support']]
nov = nov.merge(temp, how='left', on='transcript_novelty')
nov
# Support counts -> percentages of transcripts per novelty category.
cols = ['ill', 'ill_known', 'ill_known_nic']
for c in cols:
    nov['{}_perc'.format(c)] = (nov['{}_support'.format(c)]/nov['n_transcripts'])*100
nov['total_percent'] =100
nov
def plot_plot(nov, opref):
    """Draw the per-novelty SJ-support bar chart and save it as a PDF.

    Bars are drawn back-to-front so each layer partially covers the previous:
    a white 100% background (`total_percent`), then `ill_known_nic_perc`
    (light_40 palette), then `ill_known_perc` (light_20), then `ill_perc`
    (full colour) on top.

    Parameters
    ----------
    nov : pandas.DataFrame
        Output of comput_suport(); must contain transcript_novelty,
        total_percent, ill_perc, ill_known_perc, ill_known_nic_perc and
        n_transcripts columns.
    opref : str
        Output filename prefix for the saved PDF.
    """
    c_dict, order = get_talon_nov_colors(['Known', 'ISM', 'NIC', 'NNC', 'Intergenic', 'Antisense'])
    c_dict_40, order = get_talon_nov_colors(['Known', 'ISM', 'NIC', 'NNC', 'Intergenic', 'Antisense'], how='light_40')
    c_dict_20, order = get_talon_nov_colors(['Known', 'ISM', 'NIC', 'NNC', 'Intergenic', 'Antisense'], how='light_20')

    # plotting
    plt.figure(figsize=(8.5,8.5))
    sns.set(font_scale=1.5, style="whitegrid")

    # font sizes
    plt.rc('font', size=14)

    # Overlaid bar layers (see docstring for the draw order).
    top_plot = sns.barplot(x='transcript_novelty', y='total_percent', data=nov,
                           color='white', order=order, edgecolor='black')
    p2 = sns.barplot(x='transcript_novelty', y='ill_known_nic_perc',
                     data=nov, palette=c_dict_40, order=order,
                     edgecolor='black', saturation=1)
    p2 = sns.barplot(x='transcript_novelty', y='ill_known_perc',
                     data=nov, palette=c_dict_20, saturation=1, order=order,
                     edgecolor='black')
    bottom_plot = sns.barplot(x='transcript_novelty', y='ill_perc',
                              data=nov, palette=c_dict, saturation=1, order=order,
                              edgecolor='black')

    # NOTE(review): these legend proxy patches are created but never attached
    # to a legend - presumably leftover from a removed plt.legend() call.
    topbar = plt.Rectangle((0,0),1,1,fc='white', edgecolor='black')
    bottombar = plt.Rectangle((0,0),1,1,fc='#0000A3', edgecolor='black')

    # plt.title('{} SJ Support by Isoform Novelty'.format(args.sample_name))
    plt.xlabel('')
    bottom_plot.set_ylabel("Percent of Isoforms with 100% SJ Support")

    # Annotate each bar with the number of transcripts in its category.
    for ntype, p in zip(order, bottom_plot.patches):
        height = p.get_height()
        bottom_plot.text(p.get_x()+p.get_width()/2.,
                         height + .3,
                         'n={}'.format(nov.loc[nov['transcript_novelty']==ntype]['n_transcripts'].values[0]),
                         ha="center")

    # bottom_plot.set_xticklabels(bottom_plot.get_xticklabels(), fontsize=14) # fontsize of the x and y labels
    fname = '{}_sj_support_isoform.pdf'.format(opref)
    plt.savefig(fname)
plot_plot(nov, 'PB_GM12878')

# ---------------------------------------------------------------------------
# Ad-hoc spot checks of individual transcripts (debugging the support counts).
# ---------------------------------------------------------------------------
temp.loc[temp.tid == 'ENCODEHT000217262']
df.loc[df.tid == 'ENCODEHT000209674']
sjs_per_t.head()
# Which novelty categories have transcripts whose known-SJ and known+NIC
# support counts differ? (ISM showing up here was unexpected.)
sjs_per_t.loc[sjs_per_t.n_sjs_ill_known != sjs_per_t.n_sjs_ill_known_nic].transcript_novelty.unique()
sjs_per_t.loc[sjs_per_t.tid == 'ENCODEHT000274449']
df.loc[df.tid == 'ENCODEHT000274449']
sjs_per_t.loc[(sjs_per_t.n_sjs_ill_known != sjs_per_t.n_sjs_ill_known_nic)&(sjs_per_t.transcript_novelty == 'ISM')]
df.loc[df.tid == 'ENCODEHT000230837']
sjs_per_t.loc[sjs_per_t.n_sjs_illumina != sjs_per_t.n_sjs_ill_known]
sjs_per_t.loc[sjs_per_t.tid == 'ENCODEHT000274449']
```
| github_jupyter |
```
import seaborn as sns
import scipy as sp
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from collections import Counter
from imblearn.over_sampling import RandomOverSampler
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.inspection import permutation_importance
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
# --- Data preparation -------------------------------------------------------
wines = pd.read_csv('winequality-white.csv', sep=';')
wines.describe()
# Reducing the number of quality classes to 2 to make classification easier:
# quality >= 6 -> 1, quality <= 5 -> 0
wines_copy = wines.copy()
conditions = [
    wines_copy["quality"]>=6,
    (wines_copy["quality"]<=5)
]
values = [1,0]
wines_copy["simple_quality"] = np.select(conditions, values)
wines_copy.drop("quality", axis = 1, inplace = True)
# Balancing classes by random oversampling of the minority class
X = wines_copy.drop("simple_quality", axis = 1)
y = wines_copy["simple_quality"]
ros = RandomOverSampler(random_state= 42)
X_res, y_res = ros.fit_resample(X, y)
X_new = pd.concat([X_res,y_res],axis = 1)
# --- SVM (linear kernel) inside a scaling pipeline --------------------------
X_train, X_test, y_train, y_test = train_test_split(X_res, y_res, test_size=0.33)
# svm = SVC(kernel='linear')
pipe = Pipeline([('scaler',StandardScaler()),('SVM',SVC(kernel='linear'))])
pipe.fit(X_train, y_train)
# Predict once and reuse (the original re-ran pipe.predict(X_test) for every metric).
y_pred = pipe.predict(X_test)
print('Accuracy Score : ')
print(accuracy_score(y_test, y_pred))
print('*' * 120)
print('Confusion Matrix : ')
print(confusion_matrix(y_test, y_pred))
print('*' * 120)
print('Classification Report : ')
# BUG FIX: the header above was printed but the report itself was missing
# (classification_report is imported at the top of the notebook but was never called).
print(classification_report(y_test, y_pred))
print('*' * 120)
sns.heatmap(confusion_matrix(y_test, y_pred), annot=True)
# --- Feature importance from the linear SVM coefficients --------------------
feature_names = [f"{i}" for i in wines_copy.columns[:-1]]
svm_importances = pd.Series(pipe[1].coef_[0], index=feature_names)
featureImp= []
for feat, importance in zip(X_train.columns, pipe[1].coef_[0]):
    featureImp.append([feat, importance*100])
fT_df = pd.DataFrame(featureImp, columns = ['Feature', 'Importance'])
print(fT_df.sort_values('Importance', ascending = False))
svm_importances.plot.bar(figsize = (10,5))
plt.title("SVM Feature Importances")
plt.ylabel("Percentage")
plt.axhline(y = 0, linestyle = '-', color = 'black', linewidth ='.5')
plt.show()
# Graph for distribution of the original wine quality scores
colors = ["red","pink","cyan","blue","green","purple"]
series = wines["quality"].value_counts()
sq = series.sort_index()
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.bar(sq.index,sq.values, color = colors)
ax.set_title("Wine quality")
plt.show()
# Graph of simplified (binary) quality
colors1 = ["red","green"]
series = wines_copy["simple_quality"].value_counts()
sq = series.sort_index()
fig = plt.figure()
qua = ["0","1"]
ax = fig.add_axes([0,0,1,1])
ax.bar(qua, sq.values, color = colors1)
ax.set_title("Simplified wine quality")
# Graph of balanced simplified quality (after oversampling)
series = X_new["simple_quality"].value_counts()
sq = series.sort_index()
fig = plt.figure()
qua = ["0","1"]
ax = fig.add_axes([0,0,1,1])
ax.bar(qua, sq.values, color = colors1)
ax.set_title("Balanced simplified wine quality")
plt.show()
# Pairplot of the 3 most influential features, coloured by class
sns.pairplot(X_new[["alcohol","volatile acidity","sulphates", "simple_quality"]], hue = "simple_quality", palette = colors1, height = 4, markers = ["o","D"])
plt.show()
| github_jupyter |
CER001 - Generate a Root CA certificate
=======================================
If a Certificate Authority certificate for the test environment has
never been generated, generate one using this notebook.
If a Certificate Authority has been generated in another cluster, and you
want to reuse the same CA for multiple clusters, then use CER002/CER003 to
download and upload the already generated Root CA.
- [CER002 - Download existing Root CA
certificate](../cert-management/cer002-download-existing-root-ca.ipynb)
- [CER003 - Upload existing Root CA
certificate](../cert-management/cer003-upload-existing-root-ca.ipynb)
Consider using one Root CA certificate for all non-production clusters
in each environment, as this reduces the number of Root CA certificates
that need to be uploaded to clients connecting to these clusters.
Steps
-----
### Parameters
```
import getpass

# Subject (DN) fields for the generated Root CA certificate.
common_name = "SQL Server Big Data Clusters Test CA"
country_name = "US"
state_or_province_name = "Illinois"
locality_name = "Chicago"
organization_name = "Contoso"
organizational_unit_name = "Finance"
email_address = f"{getpass.getuser()}@contoso.com"

days = "825" # Max supported validity period on MacOS 10.15+ 'Catalina' (https://support.apple.com/en-us/HT210176)

# Directory inside the controller pod where the test certificates are staged.
test_cert_store_root = "/var/opt/secrets/test-certificates"
```
### Common functions
Define helper functions used in this notebook.
```
# Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows
import sys
import os
import re
import json
import platform
import shlex
import shutil
import datetime
from subprocess import Popen, PIPE
from IPython.display import Markdown
retry_hints = {}  # Output in stderr known to be transient, therefore automatically retry
error_hints = {}  # Output in stderr where a known SOP/TSG exists which will be HINTed for further help
install_hint = {} # The SOP to help install the executable if it cannot be found

first_run = True      # `run` lazily loads the expert rules on its first invocation
rules = None          # Expert rules loaded from this notebook's metadata (see load_rules)
debug_logging = False # Verbose tracing of expert-rule evaluation in apply_expert_rules
def run(cmd, return_output=False, no_output=False, retry_count=0):
    """Run shell command, stream stdout, print stderr and optionally return output.

    Args:
        cmd: command line to execute (split with shlex.split per platform).
        return_output: when True, accumulate stdout and return it as a string
            instead of printing it.
        no_output: when True, do not capture stdout/stderr at all (needed for
            tools that draw scrolling progress bars, which hang Jupyter).
        retry_count: internal recursion counter for automatic retries of
            transient faults (bounded by MAX_RETRIES).

    Raises:
        FileNotFoundError: if the executable cannot be resolved on PATH.
        SystemExit: if the command returns a non-zero exit code.

    NOTES:

        1. Commands that need this kind of ' quoting on Windows e.g.:

               kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name}

           Need to actually pass in as '"':

               kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name}

           The ' quote approach, although correct when pasting into Windows cmd, will hang at the line:

               `iter(p.stdout.readline, b'')`

           The shlex.split call does the right thing for each platform, just use the '"' pattern for a '
    """
    MAX_RETRIES = 5
    output = ""
    retry = False  # NOTE(review): assigned but never read

    global first_run
    global rules

    # Load the expert rules from the notebook metadata once, on first use.
    if first_run:
        first_run = False
        rules = load_rules()

    # When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see:
    #
    # ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')
    #
    if platform.system() == "Windows" and cmd.startswith("azdata sql query"):
        cmd = cmd.replace("\n", " ")

    # shlex.split is required on bash and for Windows paths with spaces
    #
    cmd_actual = shlex.split(cmd)

    # Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
    #
    user_provided_exe_name = cmd_actual[0].lower()

    # When running python, use the python in the ADS sandbox ({sys.executable})
    #
    if cmd.startswith("python "):
        cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)

        # On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
        # with:
        #
        # UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
        #
        # Setting it to a default value of "en_US.UTF-8" enables pip install to complete
        #
        if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
            os.environ["LC_ALL"] = "en_US.UTF-8"

    # When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc`
    #
    if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ:
        cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc")

    # To aid supportabilty, determine which binary file will actually be executed on the machine
    #
    which_binary = None

    # Special case for CURL on Windows. The version of CURL in Windows System32 does not work to
    # get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance
    # of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost
    # always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
    # look for the 2nd installation of CURL in the path)
    if platform.system() == "Windows" and cmd.startswith("curl "):
        path = os.getenv('PATH')
        for p in path.split(os.path.pathsep):
            p = os.path.join(p, "curl.exe")
            if os.path.exists(p) and os.access(p, os.X_OK):
                if p.lower().find("system32") == -1:
                    cmd_actual[0] = p
                    which_binary = p
                    break

    # Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
    # seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound)
    #
    # NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
    #
    if which_binary == None:
        which_binary = shutil.which(cmd_actual[0])

    if which_binary == None:
        if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
            display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))
        raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
    else:
        cmd_actual[0] = which_binary

    start_time = datetime.datetime.now().replace(microsecond=0)

    print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
    print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
    print(f" cwd: {os.getcwd()}")

    # Command-line tools such as CURL and AZDATA HDFS commands output
    # scrolling progress bars, which causes Jupyter to hang forever, to
    # workaround this, use no_output=True
    #
    # Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait
    #
    wait = True

    try:
        if no_output:
            p = Popen(cmd_actual)
        else:
            p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
            with p.stdout:
                for line in iter(p.stdout.readline, b''):
                    line = line.decode()
                    if return_output:
                        output = output + line
                    else:
                        if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
                            regex = re.compile(' "(.*)"\: "(.*)"')
                            match = regex.match(line)
                            if match:
                                if match.group(1).find("HTML") != -1:
                                    display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
                                else:
                                    display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))

                                    wait = False
                                    break # otherwise infinite hang, have not worked out why yet.
                        else:
                            print(line, end='')
                            if rules is not None:
                                apply_expert_rules(line)
        if wait:
            p.wait()
    except FileNotFoundError as e:
        if install_hint is not None:
            display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))
        raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e

    exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()

    if not no_output:
        for line in iter(p.stderr.readline, b''):
            try:
                line_decoded = line.decode()
            except UnicodeDecodeError:
                # NOTE: Sometimes we get characters back that cannot be decoded(), e.g.
                #
                # \xa0
                #
                # For example see this in the response from `az group create`:
                #
                # ERROR: Get Token request returned http error: 400 and server
                # response: {"error":"invalid_grant",# "error_description":"AADSTS700082:
                # The refresh token has expired due to inactivity.\xa0The token was
                # issued on 2018-10-25T23:35:11.9832872Z
                #
                # which generates the exception:
                #
                # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte
                #
                print("WARNING: Unable to decode stderr line, printing raw bytes:")
                print(line)
                line_decoded = ""
                pass
            else:
                # azdata emits a single empty line to stderr when doing an hdfs cp, don't
                # print this empty "ERR:" as it confuses.
                #
                if line_decoded == "":
                    continue

                print(f"STDERR: {line_decoded}", end='')

                if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
                    exit_code_workaround = 1

                # inject HINTs to next TSG/SOP based on output in stderr
                #
                if user_provided_exe_name in error_hints:
                    for error_hint in error_hints[user_provided_exe_name]:
                        if line_decoded.find(error_hint[0]) != -1:
                            display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))

                # apply expert rules (to run follow-on notebooks), based on output
                #
                if rules is not None:
                    apply_expert_rules(line_decoded)

                # Verify if a transient error, if so automatically retry (recursive)
                #
                if user_provided_exe_name in retry_hints:
                    for retry_hint in retry_hints[user_provided_exe_name]:
                        if line_decoded.find(retry_hint) != -1:
                            if retry_count < MAX_RETRIES:
                                print(f"RETRY: {retry_count} (due to: {retry_hint})")
                                retry_count = retry_count + 1
                                output = run(cmd, return_output=return_output, retry_count=retry_count)

                                if return_output:
                                    return output
                                else:
                                    return

    elapsed = datetime.datetime.now().replace(microsecond=0) - start_time

    # WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
    # don't wait here, if success known above
    #
    if wait:
        if p.returncode != 0:
            raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
    else:
        if exit_code_workaround !=0 :
            raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')

    print(f'\nSUCCESS: {elapsed}s elapsed.\n')

    if return_output:
        return output
def load_json(filename):
    """Read the JSON document stored in *filename* and return the parsed object."""
    with open(filename, encoding="utf8") as handle:
        contents = json.load(handle)
    return contents
def load_rules():
    """Load any 'expert rules' from the metadata of this notebook (.ipynb) that should be applied to the stderr of the running executable.

    Returns the sorted rules list, or (implicitly) None when the notebook file
    cannot be read or contains no expert-rule metadata.
    """
    try:
        # Load this notebook as json to get access to the expert rules in the notebook metadata.
        #
        j = load_json("cer001-create-root-ca.ipynb")
    except:
        pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename?
    else:
        if "metadata" in j and \
           "azdata" in j["metadata"] and \
           "expert" in j["metadata"]["azdata"] and \
           "rules" in j["metadata"]["azdata"]["expert"]:
            rules = j["metadata"]["azdata"]["expert"]["rules"]
            rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first.
            # print (f"EXPERT: There are {len(rules)} rules to evaluate.")
            return rules
def apply_expert_rules(line):
    """Determine if the stderr line passed in matches the regular expressions for any of the 'expert rules', if so
    inject a 'HINT' to the follow-on SOP/TSG to run.

    Args:
        line: a single decoded line of stderr output from `run`.

    Side effects:
        Displays a Markdown HINT linking to the follow-on notebook for every
        9-element rule whose regular expression matches `line`.
    """
    global rules

    for rule in rules:
        # rules that have 9 elements are the injected (output) rules (the ones we want). Rules
        # with only 8 elements are the source (input) rules, which are not expanded (i.e. TSG029,
        # not ../repair/tsg029-nb-name.ipynb)
        if len(rule) == 9:
            notebook = rule[1]
            cell_type = rule[2]
            output_type = rule[3] # i.e. stream or error
            output_type_name = rule[4] # i.e. ename or name
            output_type_value = rule[5] # i.e. SystemExit or stdout
            details_name = rule[6]  # i.e. evalue or text
            expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it!

            if debug_logging:
                print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.")

            if re.match(expression, line, re.DOTALL):
                if debug_logging:
                    # BUG FIX: the original format string referenced positional argument {4},
                    # but only four arguments (indices 0-3) are supplied, which raised
                    # IndexError whenever debug_logging was enabled and a rule matched.
                    print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{3}'".format(output_type_name, output_type_value, expression, notebook))

                display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.'))
print('Common functions defined successfully.')

# Hints for binary (transient fault) retry, (known) error and install guide
#
# retry_hints:  stderr substrings that indicate a transient fault -> `run` retries automatically
# error_hints:  [stderr substring, hint title, notebook path] triples for known failures
# install_hint: [SOP title, notebook path] used when the executable is not found on PATH
retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond']}
error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['no such host', 'TSG011 - Restart sparkhistory server', '../repair/tsg011-restart-sparkhistory-server.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']]}
install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb']}
```
### Get the Kubernetes namespace for the big data cluster
Get the namespace of the Big Data Cluster using the kubectl command-line
interface.
**NOTE:**
If there is more than one Big Data Cluster in the target Kubernetes
cluster, then either:
- set \[0\] to the correct value for the big data cluster.
- set the environment variable AZDATA\_NAMESPACE, before starting
Azure Data Studio.
```
# Place Kubernetes namespace name for BDC into 'namespace' variable
if "AZDATA_NAMESPACE" in os.environ:
namespace = os.environ["AZDATA_NAMESPACE"]
else:
try:
namespace = run(f'kubectl get namespace --selector=MSSQL_CLUSTER -o jsonpath={{.items[0].metadata.name}}', return_output=True)
except:
from IPython.display import Markdown
print(f"ERROR: Unable to find a Kubernetes namespace with label 'MSSQL_CLUSTER'. SQL Server Big Data Cluster Kubernetes namespaces contain the label 'MSSQL_CLUSTER'.")
display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
raise
print(f'The SQL Server Big Data Cluster Kubernetes namespace is: {namespace}')
```
### Create a temporary directory to stage files
```
# Create a temporary directory to hold configuration files
import tempfile

# Staging area for files copied into the controller pod; deleted again at the
# end of this notebook.
temp_dir = tempfile.mkdtemp()
print(f"Temporary directory created: {temp_dir}")
```
### Helper function to save configuration files to disk
```
# Define helper function 'save_file' to save configuration files to the temporary directory created above
import os
import io

def save_file(filename, contents):
    """Write *contents* to *filename* inside the staging directory `temp_dir` (UTF-8)."""
    # newline='\n' forces Unix line endings so the file is valid once copied
    # into the Linux controller pod.
    with io.open(os.path.join(temp_dir, filename), "w", encoding='utf8', newline='\n') as text_file:
        text_file.write(contents)

    print("File saved: " + os.path.join(temp_dir, filename))

print("Function `save_file` defined successfully.")
```
### Certificate configuration file
```
# OpenSSL configuration used to generate the Root CA. Parameter values set in
# the 'Parameters' cell above are interpolated into the f-string.
certificate = f"""
[ ca ]
default_ca = CA_default # The default ca section
[ CA_default ]
default_days = 1000 # How long to certify for
default_crl_days = 30 # How long before next CRL
default_md = sha256 # Use public key default MD
preserve = no # Keep passed DN ordering
x509_extensions = ca_extensions # The extensions to add to the cert
email_in_dn = no # Don't concat the email in the DN
copy_extensions = copy # Required to copy SANs from CSR to cert
[ req ]
default_bits = 2048
default_keyfile = {test_cert_store_root}/cakey.pem
distinguished_name = ca_distinguished_name
x509_extensions = ca_extensions
string_mask = utf8only
[ ca_distinguished_name ]
countryName = Country Name (2 letter code)
countryName_default = {country_name}
stateOrProvinceName = State or Province Name (full name)
stateOrProvinceName_default = {state_or_province_name}
localityName = Locality Name (eg, city)
localityName_default = {locality_name}
organizationName = Organization Name (eg, company)
organizationName_default = {organization_name}
organizationalUnitName = Organizational Unit (eg, division)
organizationalUnitName_default = {organizational_unit_name}
commonName = Common Name (e.g. server FQDN or YOUR name)
commonName_default = {common_name}
emailAddress = Email Address
emailAddress_default = {email_address}
[ ca_extensions ]
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always, issuer
basicConstraints = critical, CA:true
keyUsage = keyCertSign, cRLSign
"""

# Stage the config file so it can be copied into the controller pod.
save_file("ca.openssl.cnf", certificate)
```
### Get name of the ‘Running’ `controller` `pod`
```
# Place the name of the 'Running' controller pod in variable `controller`
# (the field-selector excludes evicted/terminating pods).
controller = run(f'kubectl get pod --selector=app=controller -n {namespace} -o jsonpath={{.items[0].metadata.name}} --field-selector=status.phase=Running', return_output=True)
print(f"Controller pod name: {controller}")
```
### Create folder on controller to hold Test Certificates
```
# Create the folder inside the controller container that will hold the test certificates.
run(f'kubectl exec {controller} -n {namespace} -c controller -- bash -c "mkdir -p {test_cert_store_root}" ')
```
### Copy certificate configuration to `controller` `pod`
```
import os
cwd = os.getcwd()
os.chdir(temp_dir) # Workaround kubectl bug on Windows, can't put c:\ on kubectl cp cmd line
run(f'kubectl cp ca.openssl.cnf {controller}:{test_cert_store_root}/ca.openssl.cnf -c controller -n {namespace}')
os.chdir(cwd)
```
### Generate certificate
```
cmd = f"openssl req -x509 -config {test_cert_store_root}/ca.openssl.cnf -newkey rsa:2048 -sha256 -nodes -days {days} -out {test_cert_store_root}/cacert.pem -outform PEM -subj '/C={country_name}/ST={state_or_province_name}/L={locality_name}/O={organization_name}/OU={organizational_unit_name}/CN={common_name}'"
run(f'kubectl exec {controller} -c controller -n {namespace} -- bash -c "{cmd}"')
```
### Clean up temporary directory for staging configuration files
```
# Delete the temporary directory used to hold configuration files
import shutil
shutil.rmtree(temp_dir)
print(f'Temporary directory deleted: {temp_dir}')
print('Notebook execution complete.')
```
Related
-------
- [CER002 - Download existing Root CA
certificate](../cert-management/cer002-download-existing-root-ca.ipynb)
- [CER003 - Upload existing Root CA
certificate](../cert-management/cer003-upload-existing-root-ca.ipynb)
- [CER010 - Install generated Root CA
locally](../cert-management/cer010-install-generated-root-ca-locally.ipynb)
| github_jupyter |
# Feature Layer
Load environment variable from .env file with python-dotenv package
```
import os
from dotenv import load_dotenv # add this line
load_dotenv() # add this line
arcgis_user = os.getenv("ARCGIS_USER")
arcgis_pass = os.getenv("ARCGIS_PASS")
arcgis_portal = os.getenv("ARCGIS_PORTAL")
```
Make GIS object
```
from arcgis.gis import GIS
gis = GIS(arcgis_portal, arcgis_user, arcgis_pass)
gis
```
## Load Published Layer by Id
Load jatim layer item by id
```
jatim_id = '7a84aef70d1d401f82abfb8324066d54'
jatim = gis.content.get(jatim_id)
jatim
```
## Check Available Layers
```
jatim.layers
for lyr in jatim.layers:
print(lyr.properties.name)
```
## Layer Capabilities
What operations can be done for the layer?
By default, a layer only has the Query capability. If it does not have the editing (update) capability, it cannot be edited. You can turn on the edit capability in the layer's settings — of course, you need permission from the owner to do so.
```
jatim_layer = jatim.layers[0]
jatim_layer.properties.capabilities
```
## Layer Fields
What fields does it have?
```
jatim_layer.properties.fields
```
## Add Layer to Map
Show jatim in map
```
map = gis.map("Jawa Timur", zoomlevel=8)
map.add_layer(jatim)
map
```
## Querying Layer Features
You can query the layer
```
query_result1 = jatim_layer.query(out_fields='Tanggal', return_geometry=True, result_record_count=1)
len(query_result1)
query_result1
query_result1.sdf
f = query_result1.features[0]
f.geometry
f
list(f.attributes.keys())
```
To access the feature data, you must do a query; it can be empty.
```
jatim_fset = jatim_layer.query()
jatim_fset.sdf
```
## Feature and Its Attributes
Get the Surabaya feature. You could also use a query here, but a list comprehension works just as well.
```
jatim_fs = jatim_fset.features
surabaya_f = [f for f in jatim_fs if "SURABAYA" in f.attributes["Kabupaten_"]][0]
surabaya_f.attributes
surabaya_fset = jatim_layer.query(where="FID=17", out_fields='FID,ID,Provinsi,Kabupaten_,ODP')
surabaya_fset.sdf
```
let's see the current ODP count of Surabaya then add 1 to it, or set it to 12 if for some reason it's not an int.
```
surabaya_f.attributes["ODP"]
#will this clone it?
surabaya_f_edit = surabaya_f
if isinstance(surabaya_f_edit.attributes["ODP"], int):
surabaya_f_edit.attributes["ODP"] += 1
else:
surabaya_f_edit.attributes["ODP"] = 12
surabaya_f_edit.attributes["ODP"]
#of course it wouldn't.
surabaya_f.attributes["ODP"]
```
## Updating Feature
I'll use the original feature var to emphasize that they're the same.
Use updates parameter to update feature.
Use adds parameter to add new feature.
Use deletes parameter to delete existing feature.
Strangely enough, only deletes use string instead of list. It's the string of a feature's objectid.
```
update_result = jatim_layer.edit_features(updates=[surabaya_f])
update_result
surabaya_fset_edit = jatim_layer.query(where="FID=17", out_fields='FID,ID,Provinsi,Kabupaten_,ODP')
surabaya_fset_edit.sdf
```
It should be reflected in the original jatim_layer object. But is it reflected in the full feature layer?
```
map1 = gis.map("Surabaya", zoomlevel=12)
map1.add_layer(jatim)
map1
```
So it is reflected in the original full feature layer.
Is it reflected in the published feature layer though?
```
jatim_pub = gis.content.get(jatim_id)
map2 = gis.map("Surabaya", zoomlevel=12)
map2.add_layer(jatim_pub)
map2
```
Heck it does. So the changes made are automatically published too.
What if I only want it locally? Can I?
```
surabaya_f.attributes["ODP"]
surabaya_f.attributes["ODP"] += 1
surabaya_f.attributes["ODP"]
surabaya_fset_edit = jatim_layer.query(where="FID=17", out_fields='FID,ID,Provinsi,Kabupaten_,ODP')
surabaya_fset_edit.sdf
```
It doesn't work. So changes must be published.
| github_jupyter |
# New start......
```
%cd /content/PyHelpers
!ls -a
!git add .
!git commit -m 'commit 1 from colabs'
# !cat '/content/PyHelpers/Libs/OptimalPrime.ipynb'
# !git clone https://github.com/bxck75/PyHelpers.git
import os
import subprocess
from IPython.display import clear_output
!python /content/PyHelpers/__main__.py
# clear_output()
# help('__main__')
%%writefile /content/PyHelpers/__main__.py
import os
import subprocess
from Libs import BigHelp
global _ROOT_FOLDER_
global _LIB_
_ROOT_FOLDER_='/content/PyHelpers/'
_LIB_=_ROOT_FOLDER_+'Libs/'
import subprocess
import importlib.util as impylib
# print(len(H.Me(['globx',_LIB_,'*.py'])))
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dev', defaults=False, help='foo help')
args = parser.parse_args()
class Stimpy_Imp:
    """Dynamic module loader for the PyHelpers Colab project.

    Loads helper modules from .py files on disk via importlib and installs
    them into globals(), optionally including an experimental module set
    when dev=True.
    """
    def __init__(self,dev=False):
        # dev=True additionally loads the experimental module list.
        self.dev = dev
        # NOTE(review): system_cmd is declared as system_cmd(cmd, args, vals)
        # with no `self`, so this zero-argument call raises TypeError — confirm
        # whether this line is dead/unfinished code.
        self.cmd_com = self.system_cmd()
        # main helper loading
        self.RootHelperFile ='BigHelp'
        self.H = self.load_lib(self.RootHelperFile)
        # module list loading
        # NOTE(review): each call rebinds self.H, so only the last loaded
        # module is kept on self.H — verify this is intentional.
        self.H = self.load_lib('live_list', _ROOT_FOLDER_)
        self.H = self.load_lib('experimental_list', _ROOT_FOLDER_)
        # Live modules list
        self.libs_list = [
            'ZipUp',
            'RepCoList',
            'send_mail'
        ]
        # experimental modules list
        self.libs_experimental_list = self.libs_list +[
            'GdriveD',
            'RamGpu',
            'GitAid',
            'Fileview',
            'custom_functions',
            'FiFyFo',
        ]
        self.list_to_load =self.libs_list
        if self.dev == True:
            self.list_to_load = self.libs_experimental_list
        # load the modules into the globals
        for lib in self.list_to_load:
            globals()[lib] = self.load_lib(str(lib))

    def load_lib(self,lib_file,folder=_LIB_+'/'):
        '''
        1Get the specs from the file
        2Make a module outof the specs
        3init the module into the globals()[name] (same as import...)
        '''
        # NOTE: the `folder` default is evaluated once at definition time from
        # the module-level _LIB_ constant (and adds a second trailing '/').
        module_path = folder+lib_file+'.py'
        print('Module : '+ lib_file +' Loaded!')
        if self.dev==True:
            print(module_path)
        # Build a module object directly from the file path and execute it.
        dummy = impylib.spec_from_file_location("module.name",module_path )
        Imp = impylib.module_from_spec(dummy)
        dummy.loader.exec_module(Imp)
        return Imp

    def system_cmd(cmd,args,vals):
        # NOTE(review): missing `self` parameter, and the hard-coded list below
        # immediately shadows the `cmd` parameter; Popen(['', ...]) cannot
        # succeed — this method looks unfinished, confirm intent.
        cmd = ['', '--arg', 'value']
        proc_out=[]
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        for line in p.stdout:
            proc_out.append(line)
            # print(line)
        p.wait()
        print(p.returncode)
        print(proc_out)
# #Big Helpers Loading
# from pathlib import Path
# import os, inspect
# try:
# os.system('rm -r /content/sample_data')
# os.system('rm -r /content/ProjectPrimer.py')
# except:
# print('No default garbage to remove')
# # lib_file=Path('/content/lib/Helpers.py')
# # if not lib_file.is_file():
# # os.system('wget https://raw.githubusercontent.com/bxck75/PyHelpers/master/BigHelp.py -O /content/lib/BigHelp.py')
# # os.chdir('/content/')
# # import the biggest help
# from Libs.BigHelp import Helpers
# # installing done......bring in the helpers!
# global H
# H=Helpers()
# # bring in the helpers!
# H.Me(['cml','echo "Pull in the helpers!"'])
# H.prime=['bxck75/PyHelpers']
# H.Me(['inst_reps',H.prime,'/content/lib',True,True])
# H.Me(['cml','echo "All Done!"'])
# # os.chdir('/content/')
# os.system('rm -r /content/lib/BigHelp.py')
# check the new module
Stimpy_Imp()
# print(dir(experimental))
# help(sys.argv[2])
print(SI)
print(experimental_list)
%%writefile /content/PyHelpers/live_list.py
live_mods=[
'ZipUp',
'RepCoList',
'send_mail'
]
%%writefile /content/PyHelpers/experimental_list.py
x_mods=[
'GdriveD',
'RamGpu',
'GitAid',
'Fileview',
'custom_functions',
'FiFyFo',
]
'''Own functions list'''
def get_gdrive_dataset(pack, DS_root='datasets',GD_root='datasets'):
    # Mount Google Drive, copy the dataset archive `pack` from the Drive
    # folder GD_root into the project dataset folder DS_root, unzip it there,
    # then delete the archive.
    # NOTE(review): relies on the module-level helper `H` (BigHelp) and its
    # gdrive_root/pix_root attributes being set beforehand — Colab-only.
    import google
    from google.colab import drive
    drive.mount('/content/drive', force_remount=True)
    H.GD_ROOT=GD_root+'/'
    H.DS_ROOT=DS_root+'/'
    os.chdir(H.gdrive_root+H.GD_ROOT)
    H.Me(['mkd',[DS_root,'models'],H.pix_root])
    H.Me(['cml','cp -r '+pack+' '+H.pix_root+DS_root])
    os.chdir(H.pix_root+DS_root)
    H.Me(['cml','unzip -q '+pack])
    H.Me(['cml','rm -r '+pack])
    os.chdir(H.pix_root)
def MethHelp(libs):
    # Build (lib, method) pairs from the introspection output of
    # H.Me(['vdir', libs]); depends on the module-level helper `H`.
    os_help=H.Me(['vdir',libs])
    #make a list containing libs values of os_help
    listOfLibs = [x[0] for x in os_help]
    #make a list containing libs method values of os_help
    listOfMethods= [x[1] for x in os_help]
    # Create a zipped list of tuples from above lists
    # NOTE(review): only the first five methods are paired ([0:5]) — confirm
    # the truncation is intentional.
    zippedList = list(zip(listOfLibs, listOfMethods[0:5]))
    zippedList
    # request help on method from list
    return zippedList
def loadTboard():
    '''load tensorboard

    Loads (or reloads) the TensorBoard notebook extension.  The IPython
    line magics below mean this only works inside a notebook cell.
    '''
    import datetime, os
    # install tensorboard
    # H.Me(['cml','pip install -q tensorflow'])
    # Load the TensorBoard notebook extension
    try:
        %load_ext tensorboard
    except:
        # already loaded once in this kernel — reload instead
        %reload_ext tensorboard
# !wget https://raw.githubusercontent.com/bxck75/PyHelpers/master/ProjectPrimer.py
# !python ProjectPrimer.py
from lib.PyHelpers.ProjectPrimer import H
from IPython.display import clear_output
from lib import PyHelpers
from lib.PyHelpers import RepCoList,RamGpu,GdriveD,ZipUp
H.Me(['vdir',[ZipUp]])
clear_output()
from lib import PyHelpers
from lib.PyHelpers import RepCoList,RamGpu,GdriveD,ZipUp,BigHelp
H.zip_to_drive = ZipUp.ZipUp
H.Me(['vdir',[H.zip_to_drive]])
sheit_to = H.zip_to_drive('sample_data', # name of zipfile
'/content/drive/My Drive', # folder to push the zip to
'/content/sample_data') # folder to zip
print(sheit_to.ZipUp)
H.Me(['vdir',[H.zip_to_drive]])
# H.Me(['vdir',[BigHelp,GdriveD]])
%cd /content/
import os
os.system('pip install -U -q PyDrive')
from google.colab import files
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
import zipfile
import os
import sys
class ZipUp:
    """Zip a local folder and push the resulting archive to Google Drive.

    Workflow (driven by the ZipUp property): zip `target_dir` into
    `<zipname>.zip` in the current directory, authenticate with PyDrive,
    upload the archive, and return a status string ending with the Drive
    file id.
    """
    def __init__(self, zipname, foldername, target_dir):
        ''' init the details for the zip and push'''
        self.zipname = zipname        # archive name, without the .zip extension
        self.foldername = foldername  # Drive folder the archive is pushed to
        self.target_dir = target_dir  # local folder tree to zip up
    @property
    def ZipUp(self):
        ''' define the zip_n_push property'''
        # Runs the whole zip-authenticate-upload pipeline; returns None when
        # any of the three settings is empty (unchanged original contract).
        if( self.zipname !='' and self.foldername != '' and self.target_dir != '' ):
            self.zipfolder()
            self.g_login()
            self.make_push()
            self.status = self.status + self.get_id()
            return self.status
    def zipfolder(self):
        ''' zip the selected folder into <zipname>.zip in the cwd '''
        # Archive paths are stored relative to target_dir (rootlen strips the
        # leading "<target_dir>/" prefix).
        rootlen = len(self.target_dir) + 1
        # FIX: use a context manager so the archive is flushed and closed;
        # previously the ZipFile handle was never closed (leak, and a
        # potentially truncated/invalid zip on some platforms).
        with zipfile.ZipFile(self.zipname + '.zip', 'w', zipfile.ZIP_DEFLATED) as zipobj:
            for base, dirs, files in os.walk(self.target_dir):
                for file in files:
                    fn = os.path.join(base, file)
                    zipobj.write(fn, fn[rootlen:])
        self.status='zipped '
    def g_login(self):
        ''' Authenticate and create the PyDrive client.'''
        auth.authenticate_user()
        gauth = GoogleAuth()
        gauth.credentials = GoogleCredentials.get_application_default()
        self.drive = GoogleDrive(gauth)
        self.status= self.status+'and '
    def make_push(self):
        ''' Create & upload a file text file.'''
        # NOTE(review): get_id() is called before the upload, so it returns the
        # id of a previously uploaded archive (or None on first run) — confirm.
        file1 = self.drive.CreateFile({'id':self.get_id()})
        file1.SetContentFile(self.zipname+".zip")
        file1.Upload()
        self.status= self.status+'pushed! (id) '
    def get_id(self):
        # Return the Drive file id of the first non-trashed file named
        # "<zipname>.zip" (None when no match is found).
        query = "title = '"+self.zipname+".zip'"
        file_list = self.drive.ListFile({'q': query}).GetList()
        for file in file_list:
            if file['labels']['trashed'] ==False:
                return file['id']
if __name__ == "__main__":
item_to = ZipUp('sample_data','/content/drive/My Drive','/content/lib/PyHelpers')
print(item_to.ZipUp)
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
def g_login():
    ''' Authenticate and create the PyDrive client.'''
    # Colab user auth first, then wire the application-default credentials
    # into a fresh GoogleAuth instance.
    auth.authenticate_user()
    default_credentials = GoogleCredentials.get_application_default()
    gauth = GoogleAuth()
    gauth.credentials = default_credentials
    return GoogleDrive(gauth)
drive = g_login()
query = "title = 'sample_data.zip'"
file_list = drive.ListFile({'q': query}).GetList()
for file in file_list:
if file['labels']['trashed'] ==False:
print('-' * 10)
print(file['id'])
# print(file['downloadUrl'])
# print('-' * 10)
# first parent id
parent_id = file['parents'][0]['id']
print(parent_id)
# print('-' * 10)
# x = drive.CreateFile({'id': parent_id})
# x.FetchMetadata()
# print(x)
%cd /content/lib/PyHelpers
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
# Install Keras with pip
!pip install -q keras
import keras
# >>> Using TensorFlow backend.
# Install GraphViz with apt
!apt-get install graphviz -y
# Here's the easiest way to do so, IMO, with a little direction from here.
# In a 3 step process, first invoke a file selector within your notebook with this:
from google.colab import files
uploaded = files.upload()
# After your file(s) is/are selected, use the following to iterate the uploaded files in order to find their key names, using:
for fn in uploaded.keys():
print('User uploaded file "{name}" with length {length} bytes'.format(name=fn, length=len(uploaded[fn])))
import os
os.chdir('/content/lib')
from lib.ProjectPrimer import H as Tickle
import importlib.util
# print(len(H.Me(['globx','/content/lib/PyHelpers','*.py'])))
# Tickle.Me(['vdir',[importlib]])
# for mod in range(len(H.Me(['globx','/content/lib/PyHelpers','*.py']))):
# print(mod)
# Get the specs from the file
spec = importlib.util.spec_from_file_location("module.name", "/content/lib/PyHelpers/ZipUp.py")
# Make a module outof the specs
ZipDrive = importlib.util.module_from_spec(spec)
# Load the new module(same as import...)
spec.loader.exec_module(ZipDrive)
# check the new module
Tickle.Me(['vdir',[Zip2Drive]])
# # item_to = foo.ZipUp('sample_data','/content/drive/My Drive','/content/lib/PyHelpers')
# # print(item_to.ZipUp)
# # foo.ZipUp()
# help(foo)
# print(foo.__dict__)
# foo.DICT=foo.__dict__
# __builtins__.locals()
H.Me
```
| github_jupyter |
TSG075 - FailedCreatePodSandBox due to NetworkPlugin cni failed to set up pod
=============================================================================
Description
-----------
> Error: Warning FailedCreatePodSandBox 58m kubelet,
> rasha-virtual-machine Failed create pod sandbox: rpc error: code =
> Unknown desc = failed to set up sandbox container
> “b76dc0446642bf06ef91b331be55814795410d58807eeffddf1fe3b5c9c572c0”
> network for pod “mssql-controller-hfvxr”: NetworkPlugin cni failed to
> set up pod “mssql-controller-hfvxr\_test” network: open
> /run/flannel/subnet.env: no such file or directory Normal
> SandboxChanged 34m (x325 over 59m) kubelet, virtual-machine Pod
> sandbox changed, it will be killed and re-created. Warning
> FailedCreatePodSandBox 4m5s (x831 over 58m) kubelet, virtual-machine
> (combined from similar events): Failed create pod sandbox: rpc error:
> code = Unknown desc = failed to set up sandbox container
> “bee7d4eb0a74a4937de687a31676887b0c324e88a528639180a10bdbc33ce008”
> network for pod “mssql-controller-hfvxr”: NetworkPlugin cni failed to
> set up pod “mssql-controller-hfvxr\_test” network: open
> /run/flannel/subnet.env: no such file or directory
### Instantiate Kubernetes client
```
# Instantiate the Python Kubernetes client into 'api' variable
import os

try:
    # FIX: Markdown was only imported in the ImportError handler below, so the
    # kube-config HINT raised NameError before this fix; import it up front.
    from IPython.display import Markdown

    from kubernetes import client, config
    from kubernetes.stream import stream

    # In-cluster pods expose these env vars; otherwise fall back to ~/.kube/config.
    if "KUBERNETES_SERVICE_PORT" in os.environ and "KUBERNETES_SERVICE_HOST" in os.environ:
        config.load_incluster_config()
    else:
        try:
            config.load_kube_config()
        except:
            display(Markdown(f'HINT: Use [TSG118 - Configure Kubernetes config](../repair/tsg118-configure-kube-config.ipynb) to resolve this issue.'))
            raise
    api = client.CoreV1Api()

    print('Kubernetes client instantiated')
except ImportError:
    from IPython.display import Markdown
    display(Markdown(f'HINT: Use [SOP059 - Install Kubernetes Python module](../install/sop059-install-kubernetes-module.ipynb) to resolve this issue.'))
    raise
```
### Common functions
Define helper functions used in this notebook.
```
# Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows
import sys
import os
import re
import json
import platform
import shlex
import shutil
import datetime
from subprocess import Popen, PIPE
from IPython.display import Markdown
retry_hints = {} # Output in stderr known to be transient, therefore automatically retry
error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help
install_hint = {} # The SOP to help install the executable if it cannot be found
first_run = True
rules = None
debug_logging = False
def run(cmd, return_output=False, no_output=False, retry_count=0):
    """Run shell command, stream stdout, print stderr and optionally return output

    Args:
        cmd: full command line to run; split per-platform with shlex.split.
        return_output: when True, accumulate stdout and return it as a string
            instead of printing it.
        no_output: when True, do not capture stdout/stderr at all (for tools
            whose scrolling progress bars hang Jupyter).
        retry_count: internal recursion counter used when a known-transient
            error (see retry_hints) triggers an automatic retry.

    Raises:
        FileNotFoundError: when the executable cannot be located.
        SystemExit: when the command returns a non-zero exit code.

    NOTES:

    1.  Commands that need this kind of ' quoting on Windows e.g.:

            kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name}

        Need to actually pass in as '"':

            kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name}

        The ' quote approach, although correct when pasting into Windows cmd, will hang at the line:

            `iter(p.stdout.readline, b'')`

        The shlex.split call does the right thing for each platform, just use the '"' pattern for a '
    """
    MAX_RETRIES = 5
    output = ""
    retry = False

    # Load the notebook's 'expert rules' once, on the first call, into the
    # module global `rules`.
    global first_run
    global rules

    if first_run:
        first_run = False
        rules = load_rules()

    # When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see:
    #
    #    ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')
    #
    if platform.system() == "Windows" and cmd.startswith("azdata sql query"):
        cmd = cmd.replace("\n", " ")

    # shlex.split is required on bash and for Windows paths with spaces
    #
    cmd_actual = shlex.split(cmd)

    # Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
    #
    user_provided_exe_name = cmd_actual[0].lower()

    # When running python, use the python in the ADS sandbox ({sys.executable})
    #
    if cmd.startswith("python "):
        cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)

        # On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
        # with:
        #
        #    UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
        #
        # Setting it to a default value of "en_US.UTF-8" enables pip install to complete
        #
        if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
            os.environ["LC_ALL"] = "en_US.UTF-8"

    # When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc`
    #
    if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ:
        cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc")

    # To aid supportabilty, determine which binary file will actually be executed on the machine
    #
    which_binary = None

    # Special case for CURL on Windows.  The version of CURL in Windows System32 does not work to
    # get JWT tokens, it returns "(56) Failure when receiving data from the peer".  If another instance
    # of CURL exists on the machine use that one.  (Unfortunately the curl.exe in System32 is almost
    # always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
    # look for the 2nd installation of CURL in the path)
    if platform.system() == "Windows" and cmd.startswith("curl "):
        path = os.getenv('PATH')
        for p in path.split(os.path.pathsep):
            p = os.path.join(p, "curl.exe")
            if os.path.exists(p) and os.access(p, os.X_OK):
                if p.lower().find("system32") == -1:
                    cmd_actual[0] = p
                    which_binary = p
                    break

    # Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
    # seems to be required for .msi installs of azdata.cmd/az.cmd.  (otherwise Popen returns FileNotFound)
    #
    # NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
    #
    if which_binary == None:
        which_binary = shutil.which(cmd_actual[0])

    if which_binary == None:
        if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
            display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))

        raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
    else:
        cmd_actual[0] = which_binary

    start_time = datetime.datetime.now().replace(microsecond=0)

    print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
    print(f"       using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
    print(f"       cwd: {os.getcwd()}")

    # Command-line tools such as CURL and AZDATA HDFS commands output
    # scrolling progress bars, which causes Jupyter to hang forever, to
    # workaround this, use no_output=True
    #
    # Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait
    #
    wait = True

    try:
        if no_output:
            p = Popen(cmd_actual)
        else:
            p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
            with p.stdout:
                for line in iter(p.stdout.readline, b''):
                    line = line.decode()
                    if return_output:
                        output = output + line
                    else:
                        if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
                            regex = re.compile('  "(.*)"\: "(.*)"')
                            match = regex.match(line)
                            if match:
                                if match.group(1).find("HTML") != -1:
                                    display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
                                else:
                                    display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))

                                    wait = False
                                    break # otherwise infinite hang, have not worked out why yet.
                        else:
                            print(line, end='')
                            if rules is not None:
                                apply_expert_rules(line)

        if wait:
            p.wait()
    except FileNotFoundError as e:
        if install_hint is not None:
            display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))

        raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e

    exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()

    if not no_output:
        for line in iter(p.stderr.readline, b''):
            try:
                line_decoded = line.decode()
            except UnicodeDecodeError:
                # NOTE: Sometimes we get characters back that cannot be decoded(), e.g.
                #
                #   \xa0
                #
                # For example see this in the response from `az group create`:
                #
                # ERROR: Get Token request returned http error: 400 and server
                # response: {"error":"invalid_grant",# "error_description":"AADSTS700082:
                # The refresh token has expired due to inactivity.\xa0The token was
                # issued on 2018-10-25T23:35:11.9832872Z
                #
                # which generates the exception:
                #
                # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte
                #
                print("WARNING: Unable to decode stderr line, printing raw bytes:")
                print(line)
                line_decoded = ""
                pass
            else:

                # azdata emits a single empty line to stderr when doing an hdfs cp, don't
                # print this empty "ERR:" as it confuses.
                #
                if line_decoded == "":
                    continue

                print(f"STDERR: {line_decoded}", end='')

                if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
                    exit_code_workaround = 1

                # inject HINTs to next TSG/SOP based on output in stderr
                #
                if user_provided_exe_name in error_hints:
                    for error_hint in error_hints[user_provided_exe_name]:
                        if line_decoded.find(error_hint[0]) != -1:
                            display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))

                # apply expert rules (to run follow-on notebooks), based on output
                #
                if rules is not None:
                    apply_expert_rules(line_decoded)

                # Verify if a transient error, if so automatically retry (recursive)
                #
                if user_provided_exe_name in retry_hints:
                    for retry_hint in retry_hints[user_provided_exe_name]:
                        if line_decoded.find(retry_hint) != -1:
                            if retry_count < MAX_RETRIES:
                                print(f"RETRY: {retry_count} (due to: {retry_hint})")
                                retry_count = retry_count + 1
                                # Recursive retry of the whole command; the
                                # recursion depth is bounded by MAX_RETRIES.
                                output = run(cmd, return_output=return_output, retry_count=retry_count)

                                if return_output:
                                    return output
                                else:
                                    return

    elapsed = datetime.datetime.now().replace(microsecond=0) - start_time

    # WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
    # don't wait here, if success known above
    #
    if wait:
        if p.returncode != 0:
            raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
    else:
        if exit_code_workaround !=0 :
            raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')

    print(f'\nSUCCESS: {elapsed}s elapsed.\n')

    if return_output:
        return output
def load_json(filename):
    """Read the UTF-8 file at *filename* and return its parsed JSON contents."""
    with open(filename, encoding="utf8") as handle:
        contents = json.load(handle)
    return contents
def load_rules():
    """Load any 'expert rules' from the metadata of this notebook (.ipynb) that should be applied to the stderr of the running executable.

    Returns:
        The sorted list of rules, or None when the notebook file cannot be
        found/parsed (e.g. the user renamed the notebook) or carries no rules.
    """
    # FIX: initialize `rules` so the failure path below returns None instead
    # of raising UnboundLocalError at `return rules`.
    rules = None

    # Load this notebook as json to get access to the expert rules in the notebook metadata.
    #
    try:
        j = load_json("tsg075-networkplugin-cni-failed-to-setup-pod.ipynb")
    except:
        pass # If the user has renamed the book, we can't load ourself.  NOTE: Is there a way in Jupyter, to know your own filename?
    else:
        if "metadata" in j and \
           "azdata" in j["metadata"] and \
           "expert" in j["metadata"]["azdata"] and \
           "expanded_rules" in j["metadata"]["azdata"]["expert"]:
            rules = j["metadata"]["azdata"]["expert"]["expanded_rules"]
            rules.sort() # Sort rules, so they run in priority order (the [0] element).  Lowest value first.
            # print (f"EXPERT: There are {len(rules)} rules to evaluate.")

    return rules
def apply_expert_rules(line):
    """Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so
    inject a 'HINT' to the follow-on SOP/TSG to run.

    Args:
        line: one decoded line of output from the running executable.
    """
    global rules

    for rule in rules:
        notebook = rule[1]
        cell_type = rule[2]
        output_type = rule[3] # i.e. stream or error
        output_type_name = rule[4] # i.e. ename or name
        output_type_value = rule[5] # i.e. SystemExit or stdout
        details_name = rule[6]  # i.e. evalue or text
        expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it!

        if debug_logging:
            print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.")

        if re.match(expression, line, re.DOTALL):
            if debug_logging:
                # FIX: the format string referenced index {4} with only four
                # arguments (IndexError when debug_logging was enabled); the
                # notebook hint is argument {3}.  Also dropped the unused
                # `match_found` flag.
                print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{3}'".format(output_type_name, output_type_value, expression, notebook))

            display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.'))
print('Common functions defined successfully.')
# Hints for binary (transient fault) retry, (known) error and install guide
#
retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond']}
error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']]}
install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb']}
```
### Resolution
This issue has been seen on single node kubeadm installations when the
host machine has been rebooted.
To resolve the issue, delete the kube-flannel and coredns pods. The
higher-level Kubernetes objects will re-create these pods.
The following code cells will do this for you:
### Verify there are flannel and coredns pods in this kubernetes cluster
```
run(f"kubectl get pods -n kube-system")
```
### Delete them, so they can be re-created by the higher level Kubernetes objects
```
pod_list = api.list_namespaced_pod("kube-system")
for pod in pod_list.items:
if pod.metadata.name.find("kube-flannel-ds") != -1:
print(f"Deleting pod: {pod.metadata.name}")
run(f"kubectl delete pod/{pod.metadata.name} -n kube-system")
if pod.metadata.name.find("coredns-") != -1:
print(f"Deleting pod: {pod.metadata.name}")
run(f"kubectl delete pod/{pod.metadata.name} -n kube-system")
```
### Verify the flannel and coredns pods have been re-created
```
run(f"kubectl get pods -n kube-system")
print('Notebook execution complete.')
```
| github_jupyter |
# Robot Class
In this project, we'll be localizing a robot in a 2D grid world. The basis for simultaneous localization and mapping (SLAM) is to gather information from a robot's sensors and motions over time, and then use information about measurements and motion to re-construct a map of the world.
### Uncertainty
As you've learned, robot motion and sensors have some uncertainty associated with them. For example, imagine a car driving up hill and down hill; the speedometer reading will likely overestimate the speed of the car going up hill and underestimate the speed of the car going down hill because it cannot perfectly account for gravity. Similarly, we cannot perfectly predict the *motion* of a robot. A robot is likely to slightly overshoot or undershoot a target location.
In this notebook, we'll look at the `robot` class that is *partially* given to you for the upcoming SLAM notebook. First, we'll create a robot and move it around a 2D grid world. Then, **you'll be tasked with defining a `sense` function for this robot that allows it to sense landmarks in a given world**! It's important that you understand how this robot moves, senses, and how it keeps track of different landmarks that it sees in a 2D grid world, so that you can work with it's movement and sensor data.
---
Before we start analyzing robot motion, let's load in our resources and define the `robot` class. You can see that this class initializes the robot's position and adds measures of uncertainty for motion. You'll also see a `sense()` function which is not yet implemented, and you will learn more about that later in this notebook.
```
# import some resources
import numpy as np
import matplotlib.pyplot as plt
import random
%matplotlib inline
# the robot class
class robot:
# --------
# init:
# creates a robot with the specified parameters and initializes
# the location (self.x, self.y) to the center of the world
#
def __init__(self, world_size = 100.0, measurement_range = 30.0,
motion_noise = 1.0, measurement_noise = 1.0):
self.measurement_noise = 0.0
self.world_size = world_size
self.measurement_range = measurement_range
self.x = world_size / 2.0
self.y = world_size / 2.0
self.motion_noise = motion_noise
self.measurement_noise = measurement_noise
self.landmarks = []
self.num_landmarks = 0
# returns a uniform random float in [-1.0, 1.0)
def rand(self):
    """Return a uniform random float in the half-open interval [-1.0, 1.0)."""
    return 2.0 * random.random() - 1.0
# --------
# move: attempts to move robot by dx, dy. If outside world
# boundary, then the move does nothing and instead returns failure
#
def move(self, dx, dy):
    """Attempt to move the robot by (dx, dy) with motion noise.

    The commanded displacement is perturbed by uniform noise scaled by
    self.motion_noise.  If the noisy target lies outside the world the
    position is left unchanged and False is returned; otherwise the
    position is updated and True is returned.
    """
    new_x = self.x + dx + self.rand() * self.motion_noise
    new_y = self.y + dy + self.rand() * self.motion_noise

    inside = (0.0 <= new_x <= self.world_size) and (0.0 <= new_y <= self.world_size)
    if not inside:
        return False

    self.x = new_x
    self.y = new_y
    return True
# --------
# sense: returns x- and y- distances to landmarks within visibility range
# because not all landmarks may be in this range, the list of measurements
# is of variable length. Set measurement_range to -1 if you want all
# landmarks to be visible at all times
#
## TODO: complete the sense function
def sense(self):
''' This function does not take in any parameters, instead it references internal variables
(such as self.landamrks) to measure the distance between the robot and any landmarks
that the robot can see (that are within its measurement range).
This function returns a list of landmark indices, and the measured distances (dx, dy)
between the robot's position and said landmarks.
This function should account for measurement_noise and measurement_range.
One item in the returned list should be in the form: [landmark_index, dx, dy].
'''
measurements = []
## TODO: iterate through all of the landmarks in a world
## TODO: For each landmark
## 1. compute dx and dy, the distances between the robot and the landmark
## 2. account for measurement noise by *adding* a noise component to dx and dy
## - The noise component should be a random value between [-1.0, 1.0)*measurement_noise
## - Feel free to use the function self.rand() to help calculate this noise component
## 3. If either of the distances, dx or dy, fall outside of the internal var, measurement_range
## then we cannot record them; if they do fall in the range, then add them to the measurements list
## as list.append([index, dx, dy]), this format is important for data creation done later
for idx, landmark in enumerate(self.landmarks):
dx = landmark[0] - self.x
dy = landmark[1] - self.y
noise = self.rand()*self.measurement_noise
dx += noise
dy += noise
if - self.measurement_range < dx < self.measurement_range and - self.measurement_range < dy <self.measurement_range:
measurements.append([idx,dx,dy])
## TODO: return the final, complete list of measurements
return measurements
# --------
# make_landmarks:
# make random landmarks located in the world
#
def make_landmarks(self, num_landmarks):
self.landmarks = []
for i in range(num_landmarks):
self.landmarks.append([round(random.random() * self.world_size),
round(random.random() * self.world_size)])
self.num_landmarks = num_landmarks
# called when print(robot) is called; prints the robot's location
def __repr__(self):
return 'Robot: [x=%.5f y=%.5f]' % (self.x, self.y)
```
## Define a world and a robot
Next, let's instantiate a robot object. As you can see in `__init__` above, the robot class takes in a number of parameters including a world size and some values that indicate the sensing and movement capabilities of the robot.
In the next example, we define a small 10x10 square world, a measurement range that is half that of the world and small values for motion and measurement noise. These values will typically be about 10 times larger, but we just want to demonstrate this behavior on a small scale. You are also free to change these values and note what happens as your robot moves!
```
world_size = 10.0 # size of world (square)
measurement_range = 5.0 # range at which we can sense landmarks
motion_noise = 0.2 # noise in robot motion
measurement_noise = 0.2 # noise in the measurements
# instantiate a robot, r
r = robot(world_size, measurement_range, motion_noise, measurement_noise)
# print out the location of r
print(r)
```
## Visualizing the World
In the given example, we can see/print out that the robot is in the middle of the 10x10 world at (x, y) = (5.0, 5.0), which is exactly what we expect!
However, it's kind of hard to imagine this robot in the center of a world, without visualizing the grid itself, and so in the next cell we provide a helper visualization function, `display_world`, that will display a grid world in a plot and draw a red `o` at the location of our robot, `r`. The details of how this function works can be found in the `helpers.py` file in the home directory; you do not have to change anything in this `helpers.py` file.
```
# import helper function
from helpers import display_world
# define figure size
plt.rcParams["figure.figsize"] = (5,5)
# call display_world and display the robot in it's grid world
print(r)
display_world(int(world_size), [r.x, r.y])
```
## Movement
Now you can really picture where the robot is in the world! Next, let's call the robot's `move` function. We'll ask it to move some distance `(dx, dy)` and we'll see that this motion is not perfect by the placement of our robot `o` and by the printed out position of `r`.
Try changing the values of `dx` and `dy` and/or running this cell multiple times; see how the robot moves and how the uncertainty in robot motion accumulates over multiple movements.
#### For a `dx` = 1, does the robot move *exactly* one spot to the right? What about `dx` = -1? What happens if you try to move the robot past the boundaries of the world?
```
# choose values of dx and dy (negative works, too)
dx = 1
dy = 2
r.move(dx, dy)
# print out the exact location
print(r)
# display the world after movement, not that this is the same call as before
# the robot tracks its own movement
display_world(int(world_size), [r.x, r.y])
```
## Landmarks
Next, let's create landmarks, which are measurable features in the map. You can think of landmarks as things like notable buildings, or something smaller such as a tree, rock, or other feature.
The robot class has a function `make_landmarks` which randomly generates locations for the number of specified landmarks. Try changing `num_landmarks` or running this cell multiple times to see where these landmarks appear. We have to pass these locations as a third argument to the `display_world` function and the list of landmark locations is accessed similar to how we find the robot position `r.landmarks`.
Each landmark is displayed as a purple `x` in the grid world, and we also print out the exact `[x, y]` locations of these landmarks at the end of this cell.
```
# create any number of landmarks
num_landmarks = 3
r.make_landmarks(num_landmarks)
# print out our robot's exact location
print(r)
# display the world including these landmarks
display_world(int(world_size), [r.x, r.y], r.landmarks)
# print the locations of the landmarks
print('Landmark locations [x,y]: ', r.landmarks)
```
## Sense
Once we have some landmarks to sense, we need to be able to tell our robot to *try* to sense how far they are away from it. It will be up to you to code the `sense` function in our robot class.
The `sense` function uses only internal class parameters and returns a list of the measured/sensed x and y distances to the landmarks it senses within the specified `measurement_range`.
### TODO: Implement the `sense` function
Follow the `##TODO's` in the class code above to complete the `sense` function for the robot class. Once you have tested out your code, please **copy your complete `sense` code to the `robot_class.py` file in the home directory**. By placing this complete code in the `robot_class` Python file, we will be able to reference this class in a later notebook.
The measurements have the format, `[i, dx, dy]` where `i` is the landmark index (0, 1, 2, ...) and `dx` and `dy` are the measured distance between the robot's location (x, y) and the landmark's location (x, y). This distance will not be perfect since our sense function has some associated `measurement noise`.
---
In the example in the following cell, we have given our robot a range of `5.0`, so any landmarks that are within that range of our robot's location should appear in a list of measurements. Not all landmarks are guaranteed to be in our visibility range, so this list will be variable in length.
*Note: the robot's location is often called the **pose** or `[Pxi, Pyi]` and the landmark locations are often written as `[Lxi, Lyi]`. You'll see this notation in the next notebook.*
```
# try to sense any surrounding landmarks
measurements = r.sense()
# this will print out an empty list if `sense` has not been implemented
print(measurements)
```
**Refer back to the grid map above. Do these measurements make sense to you? Are all the landmarks captured in this list (why/why not)?**
---
## Data
#### Putting it all together
To perform SLAM, we'll collect a series of robot sensor measurements and motions, in that order, over a defined period of time. Then we'll use only this data to re-construct the map of the world with the robot and landmark locations. You can think of SLAM as performing what we've done in this notebook, only backwards. Instead of defining a world and robot and creating movement and sensor data, it will be up to you to use movement and sensor measurements to reconstruct the world!
In the next notebook, you'll see this list of movements and measurements (which you'll use to re-construct the world) listed in a structure called `data`. This is an array that holds sensor measurements and movements in a specific order, which will be useful to call upon when you have to extract this data and form constraint matrices and vectors.
`data` is constructed over a series of time steps as follows:
```
data = []
# after a robot first senses, then moves (one time step)
# that data is appended like so:
data.append([measurements, [dx, dy]])
# for our example movement and measurement
print(data)
# in this example, we have only created one time step (0)
time_step = 0
# so you can access robot measurements:
print('Measurements: ', data[time_step][0])
# and its motion for a given time step:
print('Motion: ', data[time_step][1])
```
### Final robot class
Before moving on to the last notebook in this series, please make sure that you have copied your final, completed `sense` function into the `robot_class.py` file in the home directory. We will be using this file in the final implementation of slam!
| github_jupyter |
# 12. 직접 만들어보는 OCR
**Text recognition 모델을 구현, 학습하고 Text detection 모델과 연결하여 OCR을 구현한다.**
## 12-1. 들어가며
## 12-2. Overall structure of OCR
## 12-3. Dataset for OCR
```
import os
path = os.path.join(os.getenv('HOME'),'aiffel/ocr')
os.chdir(path)
print(path)
```
## 12-4. Recognition model (1)
```
NUMBERS = "0123456789"
ENG_CHAR_UPPER = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
TARGET_CHARACTERS = ENG_CHAR_UPPER + NUMBERS
print(f"The total number of characters is {len(TARGET_CHARACTERS)}")
import re
import six
import math
import lmdb
import os
import numpy as np
import tensorflow as tf
from PIL import Image
from tensorflow.keras import layers
from tensorflow.keras.models import Model
from tensorflow.keras.utils import Sequence
from tensorflow.keras import backend as K
from tensorflow.keras.models import load_model
BATCH_SIZE = 128
HOME_DIR = os.getenv('HOME')+'/aiffel/ocr'
TRAIN_DATA_PATH = HOME_DIR+'/data/MJ/MJ_train'
VALID_DATA_PATH = HOME_DIR+'/data/MJ/MJ_valid'
TEST_DATA_PATH = HOME_DIR+'/data/MJ/MJ_test'
print(TRAIN_DATA_PATH)
```
## 12-5. Recognition model (2) Input Image
```
from IPython.display import display
# env에 데이터를 불러올게요
# lmdb에서 데이터를 불러올 때 env라는 변수명을 사용하는게 일반적이에요
env = lmdb.open(TRAIN_DATA_PATH,
max_readers=32,
readonly=True,
lock=False,
readahead=False,
meminit=False)
# 불러온 데이터를 txn(transaction)이라는 변수를 통해 엽니다
# 이제 txn변수를 통해 직접 데이터에 접근 할 수 있어요
with env.begin(write=False) as txn:
for index in range(1, 5):
# index를 이용해서 라벨 키와 이미지 키를 만들면
# txn에서 라벨과 이미지를 읽어올 수 있어요
label_key = 'label-%09d'.encode() % index
label = txn.get(label_key).decode('utf-8')
img_key = 'image-%09d'.encode() % index
imgbuf = txn.get(img_key)
buf = six.BytesIO()
buf.write(imgbuf)
buf.seek(0)
# 이미지는 버퍼를 통해 읽어오기 때문에
# 버퍼에서 이미지로 변환하는 과정이 다시 필요해요
try:
img = Image.open(buf).convert('RGB')
except IOError:
img = Image.new('RGB', (100, 32))
label = '-'
# 원본 이미지 크기를 출력해 봅니다
width, height = img.size
print('original image width:{}, height:{}'.format(width, height))
# 이미지 비율을 유지하면서 높이를 32로 바꿀거에요
# 하지만 너비를 100보다는 작게하고 싶어요
target_width = min(int(width*32/height), 100)
target_img_size = (target_width,32)
print('target_img_size:{}'.format(target_img_size))
img = np.array(img.resize(target_img_size)).transpose(1,0,2)
# 이제 높이가 32로 일정한 이미지와 라벨을 함께 출력할 수 있어요
print('display img shape:{}'.format(img.shape))
print('label:{}'.format(label))
display(Image.fromarray(img.transpose(1,0,2).astype(np.uint8)))
class MJDatasetSequence(Sequence):
    """Keras Sequence streaming (image, label) batches from an MJSynth lmdb.

    Yields (inputs, outputs) pairs shaped for CTC training: `inputs` holds
    the images, encoded labels and length arrays; `outputs` is a dummy
    all-zero array consumed by the 'ctc' loss layer.
    """
    # When the object is constructed, open the lmdb and keep the handle in
    # self.env.  Also record how many samples the lmdb holds up front.
    def __init__(self,
                 dataset_path,
                 label_converter,
                 batch_size=1,
                 img_size=(100,32),
                 max_text_len=22,
                 is_train=False,
                 character='') :
        self.label_converter = label_converter
        self.batch_size = batch_size
        self.img_size = img_size
        self.max_text_len = max_text_len
        self.character = character
        self.is_train = is_train
        # Only 1/divide_length of the data is served per epoch (shortens runs).
        self.divide_length = 100

        self.env = lmdb.open(dataset_path, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)
        with self.env.begin(write=False) as txn:
            self.num_samples = int(txn.get('num-samples'.encode()))
            # lmdb record keys are 1-based, hence index + 1.
            self.index_list = [index + 1 for index in range(self.num_samples)]

    def __len__(self):
        return math.ceil(self.num_samples/self.batch_size/self.divide_length)

    # Reads the image and label for the given index.
    # Very similar to the exploratory cell earlier in the notebook;
    # the only difference is that the label is cleaned up a bit more.
    def _get_img_label(self, index):
        with self.env.begin(write=False) as txn:
            label_key = 'label-%09d'.encode() % index
            label = txn.get(label_key).decode('utf-8')
            img_key = 'image-%09d'.encode() % index
            imgbuf = txn.get(img_key)
            buf = six.BytesIO()
            buf.write(imgbuf)
            buf.seek(0)
            try:
                img = Image.open(buf).convert('RGB')
            except IOError:
                # Unreadable image: fall back to a blank image and '-' label.
                img = Image.new('RGB', self.img_size)
                label = '-'
            width, height = img.size
            # Resize to height img_size[1], keeping aspect ratio but capping
            # the width at img_size[0]; transpose to (width, height, channel).
            target_width = min(int(width*self.img_size[1]/height), self.img_size[0])
            target_img_size = (target_width, self.img_size[1])
            img = np.array(img.resize(target_img_size)).transpose(1,0,2)
            # Clean the label: uppercase, drop out-of-charset characters,
            # and truncate to max_text_len.
            label = label.upper()
            out_of_char = f'[^{self.character}]'
            label = re.sub(out_of_char, '', label)
            label = label[:self.max_text_len]
        return (img, label)

    # __getitem__ is a special method; implementing it makes the object
    # indexable/sliceable.  For details see:
    # https://docs.python.org/3/reference/datamodel.html#object.__getitem__
    #
    # 1. select the slice of index_list corresponding to idx
    # 2. load the images and labels
    # 3. return them as (inputs, outputs) dicts ready for training
    def __getitem__(self, idx):
        # 1.
        batch_indicies = self.index_list[
            idx*self.batch_size:
            (idx+1)*self.batch_size
        ]
        input_images = np.zeros([self.batch_size, *self.img_size, 3])
        labels = np.zeros([self.batch_size, self.max_text_len], dtype='int64')
        input_length = np.ones([self.batch_size], dtype='int64') * self.max_text_len
        label_length = np.ones([self.batch_size], dtype='int64')
        # 2.
        for i, index in enumerate(batch_indicies):
            img, label = self._get_img_label(index)
            encoded_label = self.label_converter.encode(label)
            # Encoding may insert '-' blanks, so the encoded label can exceed
            # max_text_len; such samples are skipped (their slots stay zero).
            if len(encoded_label) > self.max_text_len:
                continue
            width = img.shape[0]
            input_images[i,:width,:,:] = img
            labels[i,0:len(encoded_label)] = encoded_label
            label_length[i] = len(encoded_label)
        # 3.
        inputs = {
            'input_image': input_images,
            'label': labels,
            'input_length': input_length,
            'label_length': label_length,
        }
        outputs = {'ctc': np.zeros([self.batch_size, 1])}
        return inputs, outputs
print("슝~")
```
## 12-6. Recognition model (3) Encode
```
class LabelConverter(object):
def __init__(self, character):
self.character = "-" + character
self.label_map = dict()
for i, char in enumerate(self.character):
self.label_map[char] = i
def encode(self, text):
encoded_label = []
# [[YOUR CODE]]
for i, char in enumerate(text):
if i > 0 and char == text[i - 1]:
encoded_label.append(0) # 같은 문자 사이에 공백 문자 label을 삽입
encoded_label.append(self.label_map[char])
return np.array(encoded_label)
return np.array(encoded_label)
def decode(self, encoded_label):
target_characters = list(self.character)
decoded_label = ""
for encode in encoded_label:
decoded_label += self.character[encode]
return decoded_label
```
```python
# 정답 코드
class LabelConverter(object):
    """Maps text to/from integer label sequences for CTC training.

    Index 0 is the CTC blank character '-'; the remaining indices follow
    the order of the `character` string given to __init__.
    """
    def __init__(self, character):
        self.character = "-" + character
        self.label_map = dict()
        for i, char in enumerate(self.character):
            self.label_map[char] = i
    def encode(self, text):
        """Encode text, inserting a blank (0) between repeated characters."""
        encoded_label = []
        for i, char in enumerate(text):
            if i > 0 and char == text[i - 1]:
                encoded_label.append(0) # insert a blank label between identical consecutive characters
            encoded_label.append(self.label_map[char])
        return np.array(encoded_label)
    def decode(self, encoded_label):
        """Map each index back to its character (blanks decode as '-')."""
        target_characters = list(self.character)
        decoded_label = ""
        for encode in encoded_label:
            decoded_label += self.character[encode]
        return decoded_label
print("슝~")
```
```
label_converter = LabelConverter(TARGET_CHARACTERS)
encdoded_text = label_converter.encode('HELLO')
print("Encdoded_text: ", encdoded_text)
decoded_text = label_converter.decode(encdoded_text)
print("Decoded_text: ", decoded_text)
```
## 12-7. Recognition model (4) Build CRNN model
```
def ctc_lambda_func(args): # Lambda function used to compute the CTC loss
    """Unpack the Lambda-layer inputs and return the CTC batch cost.

    The unpacking order must match the list wired up in the model:
    [labels, y_pred, label_length, input_length].
    """
    labels, y_pred, label_length, input_length = args
    # Drop the first two time steps of the prediction (a common trick in
    # Keras CRNN examples: the earliest RNN outputs carry little signal).
    # NOTE(review): input_length is not reduced to match -- confirm it
    # already accounts for the two dropped steps.
    y_pred = y_pred[:, 2:, :]
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
print("슝~")
def build_crnn_model(input_shape=(100,32,3), characters=TARGET_CHARACTERS):
    """Build the CRNN training graph (CNN backbone + BiLSTM + CTC loss).

    Returns a Model taking [input_image, label, input_length, label_length]
    whose single output IS the CTC loss, so it must be compiled with a
    pass-through loss such as `lambda y_true, y_pred: y_pred`.
    """
    # +2 output classes beyond the raw charset -- presumably the CTC blank
    # plus one extra class; TODO confirm against the label encoding.
    num_chars = len(characters)+2
    image_input = layers.Input(shape=input_shape, dtype='float32', name='input_image')
    # Build CRNN model
    # [[YOUR CODE]]
    # CNN backbone: the later pools use pool_size=(1, 2) so only the height
    # axis shrinks, preserving a long "time" axis along the image width.
    conv = layers.Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(image_input)
    conv = layers.MaxPooling2D(pool_size=(2, 2))(conv)
    conv = layers.Conv2D(128, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv)
    conv = layers.MaxPooling2D(pool_size=(2, 2))(conv)
    conv = layers.Conv2D(256, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv)
    conv = layers.Conv2D(256, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv)
    conv = layers.MaxPooling2D(pool_size=(1, 2))(conv)
    conv = layers.Conv2D(512, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv)
    conv = layers.BatchNormalization()(conv)
    conv = layers.Conv2D(512, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv)
    conv = layers.BatchNormalization()(conv)
    conv = layers.MaxPooling2D(pool_size=(1, 2))(conv)
    feature = layers.Conv2D(512, (2, 2), activation='relu', kernel_initializer='he_normal')(conv)
    # Collapse the feature map into a 24-step sequence of 512-d vectors and
    # run two bidirectional LSTMs over it.
    sequnce = layers.Reshape(target_shape=(24, 512))(feature)
    sequnce = layers.Dense(64, activation='relu')(sequnce)
    sequnce = layers.Bidirectional(layers.LSTM(256, return_sequences=True))(sequnce)
    sequnce = layers.Bidirectional(layers.LSTM(256, return_sequences=True))(sequnce)
    # Per-time-step character probabilities.
    y_pred = layers.Dense(num_chars, activation='softmax', name='output')(sequnce)
    # Extra inputs needed only to compute the CTC loss at training time.
    labels = layers.Input(shape=[22], dtype='int64', name='label')
    input_length = layers.Input(shape=[1], dtype='int64', name='input_length')
    label_length = layers.Input(shape=[1], dtype='int64', name='label_length')
    loss_out = layers.Lambda(ctc_lambda_func, output_shape=(1,), name="ctc")(
        [labels, y_pred, label_length, input_length]
    )
    model_input = [image_input, labels, input_length, label_length]
    model = Model(
        inputs=model_input,
        outputs=loss_out
    )
    return model
```
```python
# 정답 코드
def build_crnn_model(input_shape=(100,32,3), characters=TARGET_CHARACTERS):
    """Build the CRNN training graph (CNN backbone + BiLSTM + CTC loss).

    Returns a Model taking [input_image, label, input_length, label_length]
    whose single output IS the CTC loss, so it must be compiled with a
    pass-through loss such as `lambda y_true, y_pred: y_pred`.
    """
    # +2 output classes beyond the raw charset -- presumably the CTC blank
    # plus one extra class; TODO confirm against the label encoding.
    num_chars = len(characters)+2
    image_input = layers.Input(shape=input_shape, dtype='float32', name='input_image')
    # Build CRNN model
    # CNN backbone: the later pools use pool_size=(1, 2) so only the height
    # axis shrinks, preserving a long "time" axis along the image width.
    conv = layers.Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(image_input)
    conv = layers.MaxPooling2D(pool_size=(2, 2))(conv)
    conv = layers.Conv2D(128, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv)
    conv = layers.MaxPooling2D(pool_size=(2, 2))(conv)
    conv = layers.Conv2D(256, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv)
    conv = layers.Conv2D(256, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv)
    conv = layers.MaxPooling2D(pool_size=(1, 2))(conv)
    conv = layers.Conv2D(512, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv)
    conv = layers.BatchNormalization()(conv)
    conv = layers.Conv2D(512, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv)
    conv = layers.BatchNormalization()(conv)
    conv = layers.MaxPooling2D(pool_size=(1, 2))(conv)
    feature = layers.Conv2D(512, (2, 2), activation='relu', kernel_initializer='he_normal')(conv)
    # Collapse the feature map into a 24-step sequence of 512-d vectors and
    # run two bidirectional LSTMs over it.
    sequnce = layers.Reshape(target_shape=(24, 512))(feature)
    sequnce = layers.Dense(64, activation='relu')(sequnce)
    sequnce = layers.Bidirectional(layers.LSTM(256, return_sequences=True))(sequnce)
    sequnce = layers.Bidirectional(layers.LSTM(256, return_sequences=True))(sequnce)
    # Per-time-step character probabilities.
    y_pred = layers.Dense(num_chars, activation='softmax', name='output')(sequnce)
    # Extra inputs needed only to compute the CTC loss at training time.
    labels = layers.Input(shape=[22], dtype='int64', name='label')
    input_length = layers.Input(shape=[1], dtype='int64', name='input_length')
    label_length = layers.Input(shape=[1], dtype='int64', name='label_length')
    loss_out = layers.Lambda(ctc_lambda_func, output_shape=(1,), name="ctc")(
        [labels, y_pred, label_length, input_length]
    )
    model_input = [image_input, labels, input_length, label_length]
    model = Model(
        inputs=model_input,
        outputs=loss_out
    )
    return model
print("슝~")
```
## 12-8. Recognition model (5) Train & Inference
```
# 데이터셋과 모델을 준비합니다
train_set = MJDatasetSequence(TRAIN_DATA_PATH, label_converter, batch_size=BATCH_SIZE, character=TARGET_CHARACTERS, is_train=True)
val_set = MJDatasetSequence(VALID_DATA_PATH, label_converter, batch_size=BATCH_SIZE, character=TARGET_CHARACTERS)
model = build_crnn_model()
# 모델을 컴파일 합니다
optimizer = tf.keras.optimizers.Adadelta(lr=0.1, clipnorm=5)
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=optimizer)
# 훈련이 빨리 끝날 수 있도록 ModelCheckPoint와 EarlyStopping을 사용합니다
checkpoint_path = HOME_DIR + '/model_checkpoint.hdf5'
ckp = tf.keras.callbacks.ModelCheckpoint(
checkpoint_path, monitor='val_loss',
verbose=1, save_best_only=True, save_weights_only=True
)
earlystop = tf.keras.callbacks.EarlyStopping(
monitor='val_loss', min_delta=0, patience=4, verbose=0, mode='min'
)
model.fit(train_set,
steps_per_epoch=len(train_set),
epochs=1,
validation_data=val_set,
validation_steps=len(val_set),
callbacks=[ckp, earlystop])
# 다음은 학습된 모델의 가중치가 저장된 경로입니다
checkpoint_path = HOME_DIR + '/data/model_checkpoint.hdf5'
# 데이터셋과 모델을 불러옵니다
test_set = MJDatasetSequence(TEST_DATA_PATH, label_converter, batch_size=BATCH_SIZE, character=TARGET_CHARACTERS)
model = build_crnn_model()
model.load_weights(checkpoint_path)
# crnn 모델은 입력이 복잡한 구조이므로 그대로 사용할 수가 없습니다
# 그래서 crnn 모델의 입력중 'input_image' 부분만 사용한 모델을 새로 만들겁니다
# inference 전용 모델이에요
input_data = model.get_layer('input_image').output
y_pred = model.get_layer('output').output
model_pred = Model(inputs=input_data, outputs=y_pred)
from IPython.display import display
# Converts the model's raw inference output back into text.
# The decoding call is dense; see the Keras ctc_decode docs for details.
def decode_predict_ctc(out, chars = TARGET_CHARACTERS):
    """Beam-search-decode a CTC output tensor into a list of strings.

    out: softmax output of shape (batch, time, num_classes).  Only the
    single best path of the first batch element is decoded (top_paths=1).
    """
    results = []
    # ctc_decode returns (decoded_paths, log_probs); take the best path of
    # the first sample and materialize it with K.get_value.
    indexes = K.get_value(
        K.ctc_decode(
            out, input_length=np.ones(out.shape[0]) * out.shape[1],
            greedy=False , beam_width=5, top_paths=1
        )[0][0]
    )[0]
    text = ""
    for index in indexes:
        text += chars[index]
    results.append(text)
    return results
# Runs inference given a model and a dataset:
# reads `index` samples, predicts with the model,
# then decodes the result and prints it next to its image.
def check_inference(model, dataset, index = 5):
    """Predict and display the first `index` samples of `dataset`."""
    for i in range(index):
        inputs, outputs = dataset[i]
        # Take just the first image of the batch (shape 1 x W x H x C).
        img = dataset[i][0]['input_image'][0:1,:,:,:]
        output = model.predict(img)
        # Prepend the blank '-' to the charset, then strip blanks from the text.
        result = decode_predict_ctc(output, chars="-"+TARGET_CHARACTERS)[0].replace('-','')
        print("Result: \t", result)
        display(Image.fromarray(img[0].transpose(1,0,2).astype(np.uint8)))
check_inference(model_pred, test_set, index=10)
```
## 12-9. 프로젝트: End-to-End OCR
```
import tensorflow as tf
import numpy as np
import PIL
import cv2
import keras_ocr
print(tf.__version__)
print(np.__version__)
print(PIL.__version__)
print(cv2.__version__)
print(keras_ocr.__version__)
from keras_ocr.detection import Detector
SAMPLE_IMG_PATH = HOME_DIR + '/data/sample.jpg'
detector = Detector()
def detect_text(img_path):
    """Detect word boxes in the image at img_path (exercise scaffold).

    NOTE(review): the TODO section below has not been filled in, so
    ocr_result, img_draw, img_pil and result_img are undefined here and
    calling this function raises NameError until the detection code is
    written.  Intended return: (annotated image, list of cropped word
    images).
    """
    # TODO
    # Expand dims to add a batch axis and rearrange to H,W,C to match
    # keras-ocr's expected input.
    # Keep only the first result of the batch.
    # Swap the x and y coordinates for visualization (the height axis comes
    # first, so the detector output is in (y, x) order).
    cropped_imgs = []
    for text_result in ocr_result:
        img_draw.polygon(text_result, outline='red')
        # Pad each detected word box by 5 px on every side before cropping.
        x_min = text_result[:,0].min() - 5
        x_max = text_result[:,0].max() + 5
        y_min = text_result[:,1].min() - 5
        y_max = text_result[:,1].max() + 5
        word_box = [x_min, y_min, x_max, y_max]
        cropped_imgs.append(img_pil.crop(word_box))
    return result_img, cropped_imgs
img_pil, cropped_img = detect_text(SAMPLE_IMG_PATH)
display(img_pil)
def recognize_img(pil_img, input_img_size=(100,32)):
    """TODO: recognize the text in a cropped word image (not yet implemented).

    NOTE(review): this docstring also serves as the function body; without
    it the empty def would be a SyntaxError.
    """
    # TODO: write the code that recognizes the cropped word image!
for _img in cropped_img:
recognize_img(_img)
```
>## **루브릭**
>
>|번호|평가문항|상세기준|
>|:---:|---|---|
>|1|Text recognition을 위해 특화된 데이터셋 구성이 체계적으로 진행되었다.|텍스트 이미지 리사이징, ctc loss 측정을 위한 라벨 인코딩, 배치처리 등이 적절히 수행되었다.|
>|2|CRNN 기반의 recognition 모델의 학습이 정상적으로 진행되었다.|학습결과 loss가 안정적으로 감소하고 대부분의 문자인식 추론 결과가 정확하다.|
>|3|keras-ocr detector와 CRNN recognizer를 엮어 원본 이미지 입력으로부터 text가 출력되는 OCR이 End-to-End로 구성되었다.|샘플 이미지를 원본으로 받아 OCR 수행 결과를 리턴하는 1개의 함수가 만들어졌다.|
| github_jupyter |
# Continuous training with TFX and Google Cloud AI Platform
## Learning Objectives
1. Use the TFX CLI to build a TFX pipeline.
2. Deploy a TFX pipeline version with tuning enabled to a hosted AI Platform Pipelines instance.
3. Create and monitor a TFX pipeline run using the TFX CLI and KFP UI.
In this lab, you will use the following tools and services to deploy and run a TFX pipeline on Google Cloud that automates the development and deployment of a TensorFlow 2.3 WideDeep Classifier to predict forest cover from cartographic data:
* The [**TFX CLI**](https://www.tensorflow.org/tfx/guide/cli) utility to build and deploy a TFX pipeline.
* A hosted [**AI Platform Pipeline instance (Kubeflow Pipelines)**](https://www.tensorflow.org/tfx/guide/kubeflow) for TFX pipeline orchestration.
* [**Dataflow**](https://cloud.google.com/dataflow) jobs for scalable, distributed data processing for TFX components.
* A [**AI Platform Training**](https://cloud.google.com/ai-platform/) job for model training and flock management of tuning trials.
* [**AI Platform Prediction**](https://cloud.google.com/ai-platform/), a model server destination for blessed pipeline model versions.
* [**CloudTuner**](https://www.tensorflow.org/tfx/guide/tuner#tuning_on_google_cloud_platform_gcp) (KerasTuner implementation) and [**AI Platform Vizier**](https://cloud.google.com/ai-platform/optimizer/docs/overview) for advanced model hyperparameter tuning using the Vizier algorithm.
You will then create and monitor pipeline runs using the TFX CLI as well as the KFP UI.
### Setup
#### Update lab environment PATH to include TFX CLI and skaffold
```
import yaml
# Set `PATH` to include the directory containing TFX CLI and skaffold.
PATH=%env PATH
%env PATH=/home/jupyter/.local/bin:{PATH}
```
#### Validate lab package version installation
```
!python -c "import tfx; print('TFX version: {}'.format(tfx.__version__))"
!python -c "import kfp; print('KFP version: {}'.format(kfp.__version__))"
```
**Note**: this lab was built and tested with the following package versions:
`TFX version: 0.25.0`
`KFP version: 1.0.4`
(Optional) If running the above command results in different package versions or you receive an import error, upgrade to the correct versions by running the cell below:
```
%pip install --upgrade --user tfx==0.25.0
%pip install --upgrade --user kfp==1.0.4
```
Note: you may need to restart the kernel to pick up the correct package versions.
#### Validate creation of AI Platform Pipelines cluster
Navigate to [AI Platform Pipelines](https://console.cloud.google.com/ai-platform/pipelines/clusters) page in the Google Cloud Console.
Note you may have already deployed an AI Pipelines instance during the Setup for the lab series. If so, you can proceed using that instance. If not:
**1. Create or select an existing Kubernetes cluster (GKE) and deploy AI Platform**. Make sure to select `"Allow access to the following Cloud APIs https://www.googleapis.com/auth/cloud-platform"` to allow for programmatic access to your pipeline by the Kubeflow SDK for the rest of the lab. Also, provide an `App instance name` such as "tfx" or "mlops".
Validate the deployment of your AI Platform Pipelines instance in the console before proceeding.
## Review: example TFX pipeline design pattern for Google Cloud
The pipeline source code can be found in the `pipeline` folder.
```
%cd pipeline
!ls -la
```
The `config.py` module configures the default values for the environment specific settings and the default values for the pipeline runtime parameters.
The default values can be overwritten at compile time by providing the updated values in a set of environment variables. You will set custom environment variables later on this lab.
The `pipeline.py` module contains the TFX DSL defining the workflow implemented by the pipeline.
The `preprocessing.py` module implements the data preprocessing logic the `Transform` component.
The `model.py` module implements the training, tuning, and model building logic for the `Trainer` and `Tuner` components.
The `runner.py` module configures and executes `KubeflowDagRunner`. At compile time, the `KubeflowDagRunner.run()` method converts the TFX DSL into the pipeline package in the [argo](https://argoproj.github.io/argo/) format for execution on your hosted AI Platform Pipelines instance.
The `features.py` module contains feature definitions common across `preprocessing.py` and `model.py`.
## Exercise: build your pipeline with the TFX CLI
You will use TFX CLI to compile and deploy the pipeline. As explained in the previous section, the environment specific settings can be provided through a set of environment variables and embedded into the pipeline package at compile time.
### Configure your environment resource settings
Update the below constants with the settings reflecting your lab environment.
- `GCP_REGION` - the compute region for AI Platform Training, Vizier, and Prediction.
- `ARTIFACT_STORE` - An existing GCS bucket. You can use any bucket or use the GCS bucket created during installation of AI Platform Pipelines. The default bucket name will contain the `kubeflowpipelines-` prefix.
```
# Use the following command to identify the GCS bucket for metadata and pipeline storage.
!gsutil ls
```
* `CUSTOM_SERVICE_ACCOUNT` - In the gcp console Click on the Navigation Menu. Navigate to `IAM & Admin`, then to `Service Accounts` and use the service account starting with prefix - `'tfx-tuner-caip-service-account'`. This enables CloudTuner and the Google Cloud AI Platform extensions Tuner component to work together and allows for distributed and parallel tuning backed by AI Platform Vizier's hyperparameter search algorithm. Please refer back to the lab `README` for setup instructions.
- `ENDPOINT` - set the `ENDPOINT` constant to the endpoint to your AI Platform Pipelines instance. The endpoint to the AI Platform Pipelines instance can be found on the [AI Platform Pipelines](https://console.cloud.google.com/ai-platform/pipelines/clusters) page in the Google Cloud Console. Open the *SETTINGS* for your instance and use the value of the `host` variable in the *Connect to this Kubeflow Pipelines instance from a Python client via Kubeflow Pipelines SKD* section of the *SETTINGS* window. The format is `'...pipelines.googleusercontent.com'`.
```
#TODO: Set your environment resource settings here for GCP_REGION, ARTIFACT_STORE_URI, ENDPOINT, and CUSTOM_SERVICE_ACCOUNT.
# Lab-specific resource settings; replace with the values for your own project.
GCP_REGION = 'us-central1'
ARTIFACT_STORE_URI = 'gs://dougkelly-sandbox-kubeflowpipelines-default'
ENDPOINT = '70811b42aef62be3-dot-us-central2.pipelines.googleusercontent.com'
CUSTOM_SERVICE_ACCOUNT = 'tfx-tuner-caip-service-account@dougkelly-sandbox.iam.gserviceaccount.com'
# `!(...)` shell magic returns a list of output lines; take the first line.
PROJECT_ID = !(gcloud config get-value core/project)
PROJECT_ID = PROJECT_ID[0]
# Set your resource settings as Python environment variables. These override the default values in pipeline/config.py.
%env GCP_REGION={GCP_REGION}
%env ARTIFACT_STORE_URI={ARTIFACT_STORE_URI}
%env CUSTOM_SERVICE_ACCOUNT={CUSTOM_SERVICE_ACCOUNT}
%env PROJECT_ID={PROJECT_ID}
```
### Create a pipeline version with hyperparameter tuning
Incorporating automatic model hyperparameter tuning into a continuous training TFX pipeline workflow enables faster experimentation, development, and deployment of a top performing model.
Default hyperparameter values in the search space are defined in `_get_hyperparameters()` in `model.py`, and these values are used to build a TensorFlow WideDeep Classifier model.
Let's deploy a pipeline version with the `Tuner` component added to the pipeline that calls out to the AI Platform Vizier service for hyperparameter tuning. The `Tuner` component `"best_hyperparameters"` artifact will be passed directly to your `Trainer` component to deploy the top performing model. Review `pipeline.py` to see how this environment variable changes the pipeline topology. Also, review the tuning function in `model.py` for configuring `CloudTuner`.
Note that you might not want to tune the hyperparameters every time you retrain your model due to the computational cost and diminishing performance returns. Once you have used `Tuner` to determine a good set of hyperparameters, you can remove `Tuner` from your pipeline and use model hyperparameters defined in your model code, or use an `ImporterNode` to import the `Tuner` `"best_hyperparameters"` artifact from a previous `Tuner` run to your model `Trainer`.
### Set the compile time settings
Default pipeline runtime environment values are configured in the pipeline folder `config.py`. You will set their values directly below:
* `PIPELINE_NAME` - the pipeline's globally unique name. For each subsequent pipeline update, each pipeline version uploaded to KFP will be reflected on the `Pipelines` tab in the `Pipeline name > Version name` dropdown in the format `PIPELINE_NAME_datetime.now()`.
* `MODEL_NAME` - the pipeline's unique model output name for AI Platform Prediction. For multiple pipeline runs, each pushed blessed model will create a new version with the format `'v{}'.format(int(time.time()))`.
* `DATA_ROOT_URI` - the URI for the raw lab dataset `gs://workshop-datasets/covertype/small`.
* `CUSTOM_TFX_IMAGE` - the image name of your pipeline container build by skaffold and published by `Cloud Build` to `Cloud Container Registry` in the format `'gcr.io/{}/{}'.format(PROJECT_ID, PIPELINE_NAME)`.
* `RUNTIME_VERSION` - the TensorFlow runtime version. This lab was built and tested using TensorFlow `2.3`.
* `PYTHON_VERSION` - the Python runtime version. This lab was built and tested using Python `3.7`.
* `USE_KFP_SA` - The pipeline can run using a security context of the GKE default node pool's service account or the service account defined in the `user-gcp-sa` secret of the Kubernetes namespace hosting Kubeflow Pipelines. If you want to use the `user-gcp-sa` service account you change the value of `USE_KFP_SA` to `True`. Note that the default AI Platform Pipelines configuration does not define the `user-gcp-sa` secret.
* `ENABLE_TUNING` - boolean value indicating whether to add the `Tuner` component to the pipeline or use hyperparameter defaults. See the `model.py` and `pipeline.py` files for details on how this changes the pipeline topology across pipeline versions. You will create pipeline versions without and with tuning enabled in the subsequent lab exercises for comparison.
```
PIPELINE_NAME = 'tfx_covertype_continuous_training'
MODEL_NAME = 'tfx_covertype_classifier'
DATA_ROOT_URI = 'gs://workshop-datasets/covertype/small'
CUSTOM_TFX_IMAGE = 'gcr.io/{}/{}'.format(PROJECT_ID, PIPELINE_NAME)
RUNTIME_VERSION = '2.3'
PYTHON_VERSION = '3.7'
USE_KFP_SA=False
ENABLE_TUNING=True
%env PIPELINE_NAME={PIPELINE_NAME}
%env MODEL_NAME={MODEL_NAME}
%env DATA_ROOT_URI={DATA_ROOT_URI}
%env KUBEFLOW_TFX_IMAGE={CUSTOM_TFX_IMAGE}
%env RUNTIME_VERSION={RUNTIME_VERSION}
%env PYTHON_VERIONS={PYTHON_VERSION}
%env USE_KFP_SA={USE_KFP_SA}
%env ENABLE_TUNING={ENABLE_TUNING}
```
### Compile your pipeline code
You can build and upload the pipeline to the AI Platform Pipelines instance in one step, using the `tfx pipeline create` command. The `tfx pipeline create` goes through the following steps:
- (Optional) Builds the custom image that provides a runtime environment for TFX components, or uses the latest image of the installed TFX version
- Compiles the pipeline code into a pipeline package
- Uploads the pipeline package via the `ENDPOINT` to the hosted AI Platform instance.
As you debug the pipeline DSL, you may prefer to first use the `tfx pipeline compile` command, which only executes the compilation step. After the DSL compiles successfully you can use `tfx pipeline create` to go through all steps.
```
!tfx pipeline compile --engine kubeflow --pipeline_path runner.py
```
Note: you should see a `{PIPELINE_NAME}.tar.gz` file appear in your current `/pipeline` directory.
## Exercise: deploy your pipeline container to AI Platform Pipelines with TFX CLI
After the pipeline code compiles without any errors you can use the `tfx pipeline create` command to perform the full build and deploy the pipeline. You will deploy your compiled pipeline container hosted on Google Container Registry e.g. `gcr.io/[PROJECT_ID]/[PIPELINE_NAME]` to run on AI Platform Pipelines with the TFX CLI.
```
# TODO: Your code here to use the TFX CLI to deploy your pipeline image to AI Platform Pipelines.
!tfx pipeline create \
--pipeline_path=runner.py \
--endpoint={ENDPOINT} \
--build_target_image={CUSTOM_TFX_IMAGE}
```
**Hint**: review the [TFX CLI documentation](https://www.tensorflow.org/tfx/guide/cli#create) on the "pipeline group" to create your pipeline. You will need to specify the `--pipeline_path` to point at the pipeline DSL and runner defined locally in `runner.py`, `--endpoint`, and `--build_target_image` arguments using the environment variables specified above.
Note: you should see a `build.yaml` file in your pipeline folder created by skaffold. The TFX CLI compile triggers a custom container to be built with skaffold using the instructions in the `Dockerfile`.
If you need to redeploy the pipeline you can first delete the previous version using `tfx pipeline delete` or you can update the pipeline in-place using `tfx pipeline update`.
To delete the pipeline:
`tfx pipeline delete --pipeline_name {PIPELINE_NAME} --endpoint {ENDPOINT}`
To update the pipeline:
`tfx pipeline update --pipeline_path runner.py --endpoint {ENDPOINT}`
```
!tfx pipeline update --pipeline_path runner.py --endpoint {ENDPOINT}
```
### Exercise: create a pipeline run with the TFX CLI
After the pipeline has been deployed, you can trigger and monitor pipeline runs using TFX CLI.
*Hint*: review the [TFX CLI documentation](https://www.tensorflow.org/tfx/guide/cli#run_group) on the "run group".
```
# TODO: your code here to trigger a pipeline run with the TFX CLI
!tfx run create --pipeline_name={PIPELINE_NAME} --endpoint={ENDPOINT}
```
### Exercise: monitor your pipeline runs with the TFX CLI
To view the status of existing pipeline runs:
```
!tfx run list --pipeline_name {PIPELINE_NAME} --endpoint {ENDPOINT}
```
To retrieve the status of a given run retrieved from the command above:
```
RUN_ID='[YOUR RUN ID]'
!tfx run status --pipeline_name {PIPELINE_NAME} --run_id {RUN_ID} --endpoint {ENDPOINT}
```
### Exercise: monitor your pipeline runs with the Kubeflow Pipelines UI
On the [AI Platform Pipelines](https://console.cloud.google.com/ai-platform/pipelines/clusters) page, click `OPEN PIPELINES DASHBOARD`. A new browser tab will open. Select the `Pipelines` tab to the left where you see the `PIPELINE_NAME` pipeline you deployed previously.
Click on the most recent pipeline version which will open up a window with a visualization of your TFX pipeline directed graph. Pipeline components are represented as named boxes with direct arrows representing artifact dependencies and the execution order of your ML workflow.
Next, click the `Experiments` tab. You will see your pipeline name under `Experiment name` with a downward arrow that allows you to view all active and previous runs. Click on the pipeline run that you triggered with the step above. You can follow your pipeline's run progress by viewing your pipeline graph get built on the screen and drill into individual components to view artifacts and logs.
### Important
A full pipeline run with tuning enabled will take about 50 minutes to complete. You can view the run's progress using the TFX CLI commands above and in the KFP UI.
Take the time to review the pipeline metadata artifacts created in the GCS artifact repository for each component including data splits, your Tensorflow SavedModel, model evaluation results, etc. as the pipeline executes. In the GCP console, you can also view the Dataflow jobs for pipeline data processing as well as the AI Platform Training jobs for model training and tuning.
## Next Steps
In this lab, you learned how to build and deploy a TFX pipeline with the TFX CLI and then update, build and deploy a new continuous training pipeline with automatic hyperparameter tuning.
As next steps, try leveraging a CI/CD tool like [Cloud Build](https://cloud.google.com/build) to layer in additional automation during the building and deployment of the pipeline code in this lab.
## License
<font size=-1>Licensed under the Apache License, Version 2.0 (the \"License\");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.</font>
| github_jupyter |
```
import functools
import os
import numpy as np
import time
from absl import app
from absl import flags
from absl import logging
import mnist_model # ADDED
# REMOVED from experimental.mimo import cifar_model # local file import
import robustness_metrics as rm
import tensorflow as tf
import tensorflow_datasets as tfds
import uncertainty_baselines as ub
import baselines.utils_new as utils # ADDED this!
# from uncertainty_baselines.baselines.cifar import utils
import uncertainty_metrics as um
import numpy as np
import os
import random
from collections import defaultdict
import matplotlib.pyplot as plt
from scipy.stats import entropy
import seaborn as sns
# Build a MIMO wide-ResNet with a single ensemble member (M=1 baseline) and
# restore its weights from the latest checkpoint in `output_dir`.
ensemble_size = 1
output_dir="/Users/benna/Desktop/DLA/OOD-detection-using-MIMO/MNIST_NN/M"+str(ensemble_size)+"/"
image_shape=[28,28,1]  # MNIST: 28x28 grayscale
width_multiplier=2
num_classes=10
# Input carries one copy of the image per ensemble member: (M, H, W, C).
model = mnist_model.wide_resnet(
    input_shape=[ensemble_size] +
    image_shape,
    depth=28,
    width_multiplier=width_multiplier,
    num_classes=num_classes,
    ensemble_size=ensemble_size)
# Optimizer state is needed only so the checkpoint restores cleanly; no
# training happens in this notebook.
optimizer = tf.keras.optimizers.SGD( momentum=0.9, nesterov=True)
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
latest_checkpoint = tf.train.latest_checkpoint(output_dir)
checkpoint.restore(latest_checkpoint)
def load_not_mnist(im_root, batch_size):
    """Build a batched tf.data.Dataset from a notMNIST-style image folder.

    Each subdirectory of `im_root` is one class; its enumeration index is
    used as the integer label. Two known-corrupt PNG files are skipped.
    Images are decoded, normalized with the MNIST train mean/std, and
    batched with the remainder dropped.
    """
    # Two files in notMNIST_small that cannot be decoded.
    bad_files = (
        "RGVtb2NyYXRpY2FCb2xkT2xkc3R5bGUgQm9sZC50dGY=.png",
        "Q3Jvc3NvdmVyIEJvbGRPYmxpcXVlLnR0Zg==.png",
    )
    label_dict = defaultdict()  # idx -> directory name (kept for inspection)
    filenames, labels = [], []
    for idx, dr in enumerate(os.listdir(im_root)):
        label_dict[idx] = dr
        ims = os.listdir(os.path.join(im_root, dr))
        random.shuffle(ims)
        for im in ims:
            if im in bad_files:
                continue
            filenames.append(os.path.join(im_root, dr, im))
            labels.append(idx)

    dataset = tf.data.Dataset.from_tensor_slices(
        (tf.constant(filenames), tf.constant(labels)))

    def _parse_function(filename, label):
        """Read and decode one image, then normalize with the MNIST stats."""
        dtype = tf.float32
        image = tf.io.decode_image(tf.io.read_file(filename), dtype=dtype)
        # Convention: mean = np.mean(train_images, axis=(0,1,2)) and
        # std = np.std(train_images, axis=(0,1,2)) over the MNIST train set.
        mean = tf.constant([0.1307], dtype=dtype)
        std = tf.constant([0.3081], dtype=dtype)
        image = (image - mean) / std
        return image, tf.cast(label, dtype)

    dataset = dataset.map(_parse_function)
    return dataset.batch(batch_size, drop_remainder=True)
### LOAD DATASETS
# Assemble the evaluation suite: clean MNIST test split, all MNIST-C
# corruptions at every intensity, and notMNIST as the OOD set.
test_datasets={}
test_batch_size=500
N_mnist = 10000     # size of the MNIST test split
N_NOTmnist = 18710  # size of notMNIST_small (after skipping corrupt files)
dataset_builder_class = ub.datasets.MnistDataset
clean_test_dataset_builder = dataset_builder_class(split=tfds.Split.TEST)
clean_test_dataset = clean_test_dataset_builder.load(batch_size=test_batch_size)
test_datasets = {'clean': clean_test_dataset,}
load_c_dataset = utils.load_mnist_c
corruption_types, max_intensity = utils.load_corrupted_test_info("mnist")
# One dataset entry per (corruption, intensity) pair, keyed "corruption_intensity".
for corruption in corruption_types[:]:
    for intensity in range(1, max_intensity + 1):
        dataset = load_c_dataset(
            corruption_name=corruption,
            corruption_intensity=intensity,
            batch_size=test_batch_size,
            use_bfloat16=False)
        test_datasets['{0}_{1}'.format(corruption, intensity)] = (
            # strategy.experimental_distribute_dataset(dataset))
            dataset)
not_minst_root = '/Users/benna/Desktop/DLA/dataset/notMNIST_small'
not_mnist_dataset= load_not_mnist(not_minst_root,test_batch_size)
test_datasets['notMNIST'] = not_mnist_dataset
# acc=tf.keras.metrics.SparseCategoricalAccuracy()
#@tf.function
def test_step(test_iterator, dataset_name):
    """Run one batch through the MIMO model; return per-example entropy.

    Relies on the module-level `model` and `ensemble_size`. The 'clean'
    TFDS dataset yields dict batches, so those are unpacked via .values().
    """
    batch = next(test_iterator)
    if dataset_name == 'clean':
        images, label = batch.values()
    else:
        images, label = batch
    # Feed the same image to every ensemble member: (B, M, H, W, C).
    images = tf.tile(tf.expand_dims(images, 1), [1, ensemble_size, 1, 1, 1])
    probs = tf.nn.softmax(model(images, training=False))
    probs = tf.math.reduce_mean(probs, axis=1)  # marginalize over members
    # Predictive entropy (base 10) per example, over the class axis.
    return entropy(probs, base=10, axis=1)
### TEST
# Rebuild and restore the M=1 model (same setup as above) so this cell can
# be run independently of the earlier one.
ensemble_size = 1
output_dir="/Users/benna/Desktop/DLA/OOD-detection-using-MIMO/MNIST_NN/M"+str(ensemble_size)+"/"
image_shape=[28,28,1]
width_multiplier=2
num_classes=10
model = mnist_model.wide_resnet(
    input_shape=[ensemble_size] +
    image_shape,
    depth=28,
    width_multiplier=width_multiplier,
    num_classes=num_classes,
    ensemble_size=ensemble_size)
optimizer = tf.keras.optimizers.SGD( momentum=0.9, nesterov=True)
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
latest_checkpoint = tf.train.latest_checkpoint(output_dir)
checkpoint.restore(latest_checkpoint)
# Collect per-example predictive entropies for every test dataset with the
# restored M=1 model, keyed "M{ensemble_size}_{dataset_name}".
Entropies=defaultdict()
datasets_to_evaluate = test_datasets
for dataset_name, test_dataset in datasets_to_evaluate.items():
    entropy_test=[]
    test_iterator = iter(test_dataset)
    # NOTE(review): print does not %-format; this prints the tuple
    # ('Testing on dataset %s', name) verbatim.
    print('Testing on dataset %s', dataset_name)
    # notMNIST is larger than the MNIST test split, so pick the matching size.
    if dataset_name == "notMNIST":
        N_images = N_NOTmnist
    else:
        N_images = N_mnist
    entropy_test=[]
    for _ in range(N_images//test_batch_size):
        entropy_test.append(test_step(test_iterator, dataset_name))
    # Flatten the list of per-batch entropy arrays into a single column vector.
    entropy_test = np.reshape(entropy_test, (-1,1))
    Entropies['M{0}_{1}'.format(ensemble_size, dataset_name)] = entropy_test
# Rebuild and restore the model with two ensemble members (M=2); same
# architecture, checkpoint loaded from the M2 directory.
ensemble_size = 2
output_dir="/Users/benna/Desktop/DLA/OOD-detection-using-MIMO/MNIST_NN/M"+str(ensemble_size)+"/"
image_shape=[28,28,1]
width_multiplier=2
num_classes=10
model = mnist_model.wide_resnet(
    input_shape=[ensemble_size] +
    image_shape,
    depth=28,
    width_multiplier=width_multiplier,
    num_classes=num_classes,
    ensemble_size=ensemble_size)
optimizer = tf.keras.optimizers.SGD( momentum=0.9, nesterov=True)
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
latest_checkpoint = tf.train.latest_checkpoint(output_dir)
checkpoint.restore(latest_checkpoint)
# Same entropy sweep as above, now with the M=2 model; results are added to
# the same `Entropies` dict under "M2_*" keys.
for dataset_name, test_dataset in datasets_to_evaluate.items():
    entropy_test=[]
    test_iterator = iter(test_dataset)
    # NOTE(review): print does not %-format; prints the tuple verbatim.
    print('Testing on dataset %s', dataset_name)
    if dataset_name == "notMNIST":
        N_images = N_NOTmnist
    else:
        N_images = N_mnist
    entropy_test=[]
    for _ in range(N_images//test_batch_size):
        entropy_test.append(test_step(test_iterator, dataset_name))
    entropy_test = np.reshape(entropy_test, (-1,1))
    Entropies['M{0}_{1}'.format(ensemble_size, dataset_name)] = entropy_test
import pickle as pkl
import numpy as np
# Load previously pickled entropy results for several ensemble sizes and plot
# a KDE of the entropy distribution per dataset, one curve per M.
M=[1, 2, 4, 5, 10]
N_mnist = 10000
# NOTE(review): 18500 here differs from the 18710 used above — presumably the
# drop_remainder batching truncates notMNIST to 18500; confirm.
N_NOTmnist = 18500
corruption_types, max_intensity = utils.load_corrupted_test_info("mnist")
# Keep only the highest corruption intensity for each corruption type.
corruption_types = [c+'_'+str(max_intensity)for c in corruption_types]
corruption_types.append("clean")
# corruption_types.extend(["notMNIST", "clean"])
# corruption_types
# E[dataset] is a (len(M), n_examples) matrix: one row per ensemble size.
E={}
for c in corruption_types:
    E[c]= np.zeros((len(M),N_mnist))
E["notMNIST"] = np.zeros((len(M),N_NOTmnist))
for i,ensemble_size in enumerate(M):#np.append(np.arange (1,7), 10):
    Entropies = pkl.load( open( "Entropies_M"+str(ensemble_size)+".pkl", "rb" ) )
    for dataset_name, entr in Entropies.items():
        # Strip the "M{size}_" prefix: 4 chars for "M10_", 3 for "M1_".."M5_".
        name=dataset_name[4:] if ensemble_size ==10 else dataset_name[3:]
        E[name][i]=entr.flatten()
# One KDE figure per dataset (identity corruption excluded), saved to PNG.
for dataset_name, entr in E.items():
    if dataset_name!= "identity_1":
        # Drop the trailing "_{intensity}" from corruption names for the title.
        name = dataset_name if dataset_name== "clean" or dataset_name == 'notMNIST' else dataset_name[:-2]
        title= "O-O-D -" + name if name != "clean" else "known classes"
        plt.figure()
        sns.kdeplot(data=E[dataset_name].T, palette="Reds")
        plt.legend(np.flip(M))
        plt.xlabel("entropy values")
        plt.title(title)
        plt.plot()
        plt.savefig(title+'.png', dpi=1000)
# if dataset_name == 'M'+str(ensemble_size)+'':
# for i in range(len(M)):
#     sns.kdeplot(data=E["clean"][i])
#     (E["clean"][i], hist=False, kde=True, bins=np.arange(0,2,1), color = 'red', kde_kws={'linewidth': 1 })
# sns.kdeplot(data=E["clean"][:2,:100])
# plt.plot()
import functools
import os
import numpy as np
import time
from absl import app
from absl import flags
from absl import logging
import mnist_model # ADDED
# REMOVED from experimental.mimo import cifar_model # local file import
import robustness_metrics as rm
import tensorflow as tf
import tensorflow_datasets as tfds
import uncertainty_baselines as ub
import baselines.utils_new as utils # ADDED this!
# from uncertainty_baselines.baselines.cifar import utils
import uncertainty_metrics as um
import numpy as np
import os
import random
from collections import defaultdict
import matplotlib.pyplot as plt
from scipy.stats import entropy
import seaborn as sns
import pickle as pkl
# import argparse
# parser = argparse.ArgumentParser()
# parser.add_argument("--ensemble", help="ensemble size.", default=1,type=int)
# args = parser.parse_args()
# ensemble_size = args.ensemble
# Ensemble size for this run (the commented argparse above was the CLI way).
ensemble_size=3
def load_not_mnist(im_root,batch_size):
    """Build a batched tf.data.Dataset from the notMNIST image folder.

    Each subdirectory of `im_root` is one class; its enumeration index is
    the integer label. Two known-corrupt PNGs are skipped. Images are
    normalized with the MNIST train mean/std; the last partial batch is dropped.
    """
    dirs = os.listdir(im_root)
    label_dict = defaultdict()  # idx -> directory name (not used downstream)
    filenames=[]
    labels=[]
    for idx, dr in enumerate(dirs):
        label_dict[idx] = dr
        ims = os.listdir(os.path.join(im_root, dr))
        random.shuffle(ims)
        for im in (ims):
            # Skip two files in notMNIST_small that cannot be decoded.
            if im=="RGVtb2NyYXRpY2FCb2xkT2xkc3R5bGUgQm9sZC50dGY=.png" or im== "Q3Jvc3NvdmVyIEJvbGRPYmxpcXVlLnR0Zg==.png":
                continue
            else:
                filenames.append(os.path.join(im_root, dr, im))
                labels.append(idx)
    filenames = tf.constant(filenames)
    labels = tf.constant(labels)
    dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
    def _parse_function(filename, label):
        """Read and decode one image, then normalize with the MNIST stats."""
        normalize=True
        dtype = tf.float32
        image_string = tf.io.read_file(filename)
        image = tf.io.decode_image(image_string, dtype=dtype)
        # image = tf.image.convert_image_dtype(image_decoded, dtype)
        if normalize:
            # We use the convention of mean = np.mean(train_images, axis=(0,1,2))
            # and std = np.std(train_images, axis=(0,1,2)).
            mean = tf.constant([0.1307], dtype=dtype)
            std = tf.constant([0.3081], dtype=dtype)
            # Previously, std = np.mean(np.std(train_images, axis=(1, 2)), axis=0)
            # which gave std = tf.constant([0.2023, 0.1994, 0.2010], dtype=dtype).
            # However, we change convention to use the std over the entire training
            # set instead.
            image = (image - mean) / std
        label = tf.cast(label, dtype)
        return image, label
    dataset = dataset.map(_parse_function)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    return dataset
### LOAD DATASETS
# Evaluation datasets for the confidence analysis: clean MNIST and notMNIST
# only (the MNIST-C corruption sweep is disabled below).
test_datasets={}
test_batch_size=500
N_mnist = 10000
N_NOTmnist = 18710
dataset_builder_class = ub.datasets.MnistDataset
clean_test_dataset_builder = dataset_builder_class(split=tfds.Split.TEST)
clean_test_dataset = clean_test_dataset_builder.load(batch_size=test_batch_size)
test_datasets = {'clean': clean_test_dataset,}
# load_c_dataset = utils.load_mnist_c
# corruption_types, max_intensity = utils.load_corrupted_test_info("mnist")
# for corruption in corruption_types[:]:
#     for intensity in range(1, max_intensity + 1):
#         dataset = load_c_dataset(
#             corruption_name=corruption,
#             corruption_intensity=intensity,
#             batch_size=test_batch_size,
#             use_bfloat16=False)
#         test_datasets['{0}_{1}'.format(corruption, intensity)] = (
#             # strategy.experimental_distribute_dataset(dataset))
#             dataset)
not_minst_root = '/Users/benna/Desktop/DLA/dataset/notMNIST_small'
not_mnist_dataset= load_not_mnist(not_minst_root,test_batch_size)
test_datasets['notMNIST'] = not_mnist_dataset
# Accuracy metric (declared here but not updated in this notebook version).
acc=tf.keras.metrics.SparseCategoricalAccuracy()
def test_step(test_iterator, dataset_name):
    """Run one batch through the MIMO model.

    Returns (predicted class, confidence = max softmax prob, labels) as
    numpy arrays. Relies on the module-level `model` and `ensemble_size`.
    The 'clean' TFDS dataset yields dict batches, unpacked via .values().
    """
    batch = next(test_iterator)
    if dataset_name == 'clean':
        images, label = batch.values()
    else:
        images, label = batch
    # Feed the same image to every ensemble member: (B, M, H, W, C).
    images = tf.tile(tf.expand_dims(images, 1), [1, ensemble_size, 1, 1, 1])
    probs = tf.nn.softmax(model(images, training=False))
    probs = tf.math.reduce_mean(probs, axis=1)  # marginalize over members
    prediction = np.argmax(probs, axis=1)
    confidence = np.max(probs, axis=1)
    return prediction, confidence, np.array(label)
### TEST
# Build and restore the model for the configured `ensemble_size` (M=3 here).
output_dir="/Users/benna/Desktop/DLA/OOD-detection-using-MIMO/MNIST_NN/M"+str(ensemble_size)+"/"
image_shape=[28,28,1]
width_multiplier=2
num_classes=10
model = mnist_model.wide_resnet(
    input_shape=[ensemble_size] +
    image_shape,
    depth=28,
    width_multiplier=width_multiplier,
    num_classes=num_classes,
    ensemble_size=ensemble_size)
optimizer = tf.keras.optimizers.SGD( momentum=0.9, nesterov=True)
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
latest_checkpoint = tf.train.latest_checkpoint(output_dir)
checkpoint.restore(latest_checkpoint)
# Entropies=defaultdict()
# Evaluate every test dataset with the restored model and record, per example,
# the predicted class, the confidence (max softmax probability) and the true
# label, then pickle them for the confidence-threshold analysis below.
datasets_to_evaluate = test_datasets
prediction_list, confidence_list, label_list = [], [], []
for dataset_name, test_dataset in datasets_to_evaluate.items():
    test_iterator = iter(test_dataset)
    # FIX: use %-formatting explicitly; print() does not format like logging.
    print('Testing on dataset %s' % dataset_name)
    # NOTE(review): every dataset is truncated to N_mnist examples here,
    # including notMNIST (which has more) — confirm this subsampling is intended.
    N_images = N_mnist
    for _ in range(N_images // test_batch_size):
        prediction, confidence, label = test_step(test_iterator, dataset_name)
        prediction_list.extend(prediction)
        confidence_list.extend(confidence)
        # BUG FIX: this previously extended label_list with `confidence`,
        # corrupting the saved ground truth that the accuracy-vs-threshold
        # analysis below compares predictions against.
        label_list.extend(label)
Confidence = {"prediction": prediction_list,
              "confidence": confidence_list,
              "label": label_list}
# Use a context manager so the file handle is closed after writing.
with open("Confidence_M" + str(ensemble_size) + ".pkl", "wb") as f:
    pkl.dump(Confidence, f)
# For each saved ensemble run, compute accuracy restricted to examples whose
# confidence exceeds a threshold tau, for tau in {0.0, 0.1, ..., 0.9}.
Ensembles = ['M1', 'M2', 'M3', 'M4', 'M5', 'M6']
N_mnist = 10000
# E={}
# for c in ["prediction", "label", "confidence"]:
#     E[c]= np.zeros((len(Ensembles),N_mnist*2))
# prediction = np.zeros((len(Ensembles),N_mnist*2))
# label = np.zeros((len(Ensembles),N_mnist*2))
# confidence = np.zeros((len(Ensembles),N_mnist*2))
# accuracy[i, j]: accuracy of ensemble i at threshold index j (10 thresholds).
accuracy=np.zeros((len(Ensembles), 10))
for i,ensemble_size in enumerate(Ensembles):#np.append(np.arange (1,7), 10):
    # `ensemble_size` is the string tag ('M1', ...), matching the pickle names.
    Confidence = pkl.load( open( "Confidence_"+(ensemble_size)+".pkl", "rb" ) )
    prediction = np.array(Confidence["prediction"])
    label = np.array(Confidence["label"])
    confidence = np.array(Confidence["confidence"])
    for j,tau in enumerate(np.arange(0,1,0.1)):
        # Keep only examples the model is at least tau-confident about.
        above_confidence= np.where(confidence>=tau)[0]
        filtered_predictions= prediction[above_confidence]
        filtered_labels = label[above_confidence]
        # print(filtered_predictions)
        binary_accuracy = np.where(filtered_predictions == filtered_labels,1,0)
        # print(filtered_predictions == filtered_labels)
        acc = binary_accuracy.sum()/len(binary_accuracy)
        accuracy[i,j]=acc
        # print(len(above_confidence))
# print(np.arange(0,1,0.1))
# # print(name)
# for dataset_name, entr in E.items():
#     if dataset_name!= "identity_1":
#         name = dataset_name if dataset_name== "clean" or dataset_name == 'notMNIST' else dataset_name[:-2]
#         title= "O-O-D -" + name if name != "clean" else "known classes"
#         plt.figure()
#         sns.kdeplot(data=E[dataset_name].T, palette="Reds")
#         plt.legend(np.flip(M))
#         plt.xlabel("entropy values")
#         plt.title(title)
#         plt.plot()
#         plt.savefig(title+'.png', dpi=1000)
# sns.scatterplot(np.arange(0,1,0.1),accuracy[1])
# plt.plot(np.arange(0,1,0.1),accuracy[0], '.r-')
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
plt.plot(np.tile(np.arange(0,1,0.1),reps=(len(Ensembles),1)).T, accuracy.T, '.-')
plt.legend(Ensembles)
grid_x_ticks = np.arange(0, 1, 0.1)
grid_y_ticks = np.arange(.5, 1, 0.1)
ax.set_xticks(grid_x_ticks , minor=True)
ax.set_yticks(grid_y_ticks , minor=True)
ax.grid(which='both')
plt.ylabel("Accuracy on examples p(y|x)> τ")
plt.xlabel("Confidence Threshold τ" )
plt.title("Accuracy for different confidence levels")
# plt.savefig(title+'.png', dpi=1000)
above_confidence= np.where(confidence>0.5)[0]
filtered_predictions= prediction[above_confidence]
filtered_labels = label[above_confidence]
binary_accuracy = np.where(filtered_predictions == filtered_labels,1,0)
accuracy = binary_accuracy.sum()/len(binary_accuracy)
len(filtered_predictions),accuracy,binary_accuracy.sum(), len(np.arange(0,1,0.1))
```
| github_jupyter |
# Table of Contents
<div class="toc" style="margin-top: 1em;"><ul class="toc-item" id="toc-level0"><li><span><a href="http://localhost:8888/notebooks/ia898/master/tutorial_numpy_1_3.ipynb#Fatiamento-no-ndarray-bidimensional" data-toc-modified-id="Fatiamento-no-ndarray-bidimensional-1"><span class="toc-item-num">1 </span>Fatiamento no ndarray bidimensional</a></span><ul class="toc-item"><li><span><a href="http://localhost:8888/notebooks/ia898/master/tutorial_numpy_1_3.ipynb#Inicializando-um-array-e-mudando-o-seu-shape" data-toc-modified-id="Inicializando-um-array-e-mudando-o-seu-shape-1.1"><span class="toc-item-num">1.1 </span>Inicializando um array e mudando o seu shape</a></span></li><li><span><a href="http://localhost:8888/notebooks/ia898/master/tutorial_numpy_1_3.ipynb#Fatiamento-de-linhas-e-colunas-de-um-array" data-toc-modified-id="Fatiamento-de-linhas-e-colunas-de-um-array-1.2"><span class="toc-item-num">1.2 </span>Fatiamento de linhas e colunas de um array</a></span></li><li><span><a href="http://localhost:8888/notebooks/ia898/master/tutorial_numpy_1_3.ipynb#Fatiamento-de-elementos-específicos-de-um-array" data-toc-modified-id="Fatiamento-de-elementos-específicos-de-um-array-1.3"><span class="toc-item-num">1.3 </span>Fatiamento de elementos específicos de um array</a></span></li><li><span><a href="http://localhost:8888/notebooks/ia898/master/tutorial_numpy_1_3.ipynb#Fatiamento-com-índices-invertidos" data-toc-modified-id="Fatiamento-com-índices-invertidos-1.4"><span class="toc-item-num">1.4 </span>Fatiamento com índices invertidos</a></span></li></ul></li><li><span><a href="http://localhost:8888/notebooks/ia898/master/tutorial_numpy_1_3.ipynb#Documentação-Oficial-Numpy" data-toc-modified-id="Documentação-Oficial-Numpy-2"><span class="toc-item-num">2 </span>Documentação Oficial Numpy</a></span></li><li><span><a href="http://localhost:8888/notebooks/ia898/master/tutorial_numpy_1_3.ipynb#Links-Interessantes" data-toc-modified-id="Links-Interessantes-3"><span class="toc-item-num">3 
</span>Links Interessantes</a></span></li></ul></div>
# Fatiamento no ndarray bidimensional
Um recurso importante do numpy é o fatiamento no qual é possível acessar partes do array de diversas formas, como pode ser visto abaixo:
## Inicializando um array e mudando o seu shape
```
%matplotlib inline
import numpy as np
from PIL import Image
a = np.arange(20) # a is a one-dimensional vector of 20 elements
print(a)
a = a.reshape(4,5) # a is now a 4x5 array (4 rows by 5 columns)
print('a.reshape(4,5) = \n', a)
```
## Fatiamento de linhas e colunas de um array
O operador : indica que todos os elementos naquela dimensão devem ser acessados.
```
print('A segunda linha do array: \n', a[1,:]) # The second row is index 1
print(' A primeira coluna do array: \n', a[:,0]) # The first column corresponds to index 0
```
## Fatiamento de elementos específicos de um array
```
# Every 2nd row starting at row 0.
print('Acessando as linhas do array de 2 em 2 começando pelo índice 0: \n',
      a[0::2,:])
# Every 2nd row starting at row 0 AND every 2nd column starting at column 1.
print(' Acessando as linhas e colunas do array de 2 em 2 começando pela linha 0 e coluna 1: \n',
      a[0::2,1::2])
## Fatiamento com índices invertidos
```
# Last two rows in reverse order (negative start/stop with step -1).
print("Acesso as duas últimas linhas do array em ordem reversa:\n",
      a[-1:-3:-1,:])
# Element at the last row and last column.
print("Acesso elemento na última linha e coluna do array:\n",
      a[-1,-1])
# All rows, reversed.
print("Invertendo a ordem das linhas do array:\n",
      a[::-1,:])
```
# Documentação Oficial Numpy
[Scipy.org Princípios básicos de indexação de arrays](https://docs.scipy.org/doc/numpy/user/basics.indexing.html)
# Links Interessantes
[Scipy-lectures: operações avançadas com fatiamento](http://scipy-lectures.github.io/intro/numpy/array_object.html#fancy-indexing)
| github_jupyter |
# Explorando y analizando DataFrames con Pandas
__[Pandas](https://pandas.pydata.org/pandas-docs/stable/index.html)__ es un paquete construido sobre la base de NumPy, incluye la implementación de la estructura **DataFrame**. Un DataFrame es, en esencia, un arreglo bidimensional con etiquetas para filas y columnas, típicamente las columnas contienen tipo de datos diferentes.
1. [Series](#1)
2. [DataFrame](#2)
3. [Índices](#3)
4. [Obtención de datos](#4)
5. [Se utiliza *(&, |)* en lugar de *(and, or)*](#8)
6. [Modificación de datos](#5)
7. [Apply](#6)
8. [One Hot Encoding](#7)
```
import numpy as np
import pandas as pd
```
## Series
<a id="1"></a>
Un objeto de tipo *Series* es un arreglo de datos, parecido a un *Array* de *numpy*, que consta de índices y valores.
Aquí algunos enlaces de referencia:
- https://pandas.pydata.org/pandas-docs/stable/reference/series.html#computations-descriptive-stats
- https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.html
**Una serie tiene varios métodos, como `min, mean, std`, entre muchos otros. Para crear una serie:**
```
# Build a Series from a plain list; pandas assigns a default RangeIndex 0..3.
valores = [0.25, 0.5, 0.75, 1.0]
serie = pd.Series(valores)
print(serie)
# A Series exposes NumPy-style reductions such as std(), min(), mean().
print('Desviación estándar: ', serie.std())
```
**Una serie tiene valores e índices:**
```
# .values exposes the underlying NumPy array; .index the label object.
print('Valores: ', serie.values)
print('Índices: ', serie.index)
```
**Filtrado de datos, retorna una Serie de valores booleanos:**
```
# Elementwise comparison yields a boolean Series of the same length.
serie > 0.5
# isnull() flags missing (NaN) entries, also as a boolean Series.
serie.isnull()
```
**A diferencia de los arreglos de *Numpy*, a una Serie se le puede asignar un índice de manera explícita:**
```
# An explicit string index replaces the default integer labels.
serie = pd.Series([0.25, 0.5, 0.75, 1.0], index=list('abcd'))
# Label-based slicing is INCLUSIVE of the end label ('c' is included).
print(serie['a':'c'])
```
**Se puede crear una Serie a partir de un diccionario (clave -> indice)**
```
# Department -> inhabitants mapping; the dict keys become the Series index.
# NOTE(review): 26 448 193 for La Paz looks far too large (department is
# roughly 2.9M inhabitants) — verify the source data.
poblacion_dict = {
    'Chuquisaca': 626000,
    'La Paz': 26448193,
    'Cochabamba': 2883000,
    'Oruro': 538000,
    'Potosí': 887000,
    'Tarija': 563000,
    'Santa Cruz': 3225000,
    'Beni': 468000,
    'Pando': 144000,
}
poblacion = pd.Series(poblacion_dict)
poblacion
```
**Otros ejemplos de creación de Series**
```
# A scalar value is broadcast to every entry of the given index.
serie = pd.Series(5, index=[100, 200, 300])
serie
```
**Selección de claves del diccionario (solo se crea un serie con una parte del diccionario)**
```
# When both a dict and an index are given, only the listed keys are kept,
# in index order: here 3 -> 'c' then 2 -> 'a'; key 1 is dropped.
serie = pd.Series({2:'a', 1:'b', 3:'c'}, index=[3, 2])
serie
```
## Dataframes
<a id="2"></a>
Un *DataFrame* es un arreglo bi-dimensional formado por una secuencia de Series con la misma cantidad de elementos y con el mismo índice. Es decir: es como un diccionario de Series del mismo tamaño y con los mismos índices. Un *DataFrame* permite asignar nombres a las columnas.
```
# Surface area (km^2) per department. 'Pando' is deliberately absent,
# which demonstrates NaN-producing index alignment in later cells.
extension_departamentos_Bolivia_dict = {
    'Chuquisaca': 51514,
    'La Paz': 133985,
    'Cochabamba': 55631,
    'Oruro': 55588,
    'Potosí': 117218,
    'Tarija': 37623,
    'Santa Cruz': 370621,
    'Beni': 213564,
}
extension_departamentos_Serie = pd.Series(extension_departamentos_Bolivia_dict)
extension_departamentos_Serie
```
**Creación a partir de dos Series que tiene el mismo index (aunque los indices no estén en el mismo order o incluso falten datos en algunas de las Series)**
```
# Column-wise construction from two Series: rows are aligned by label, so
# 'Pando' (missing from the extension Series) gets NaN in that column.
datos_bolivia = pd.DataFrame({'poblacion': poblacion, 'extension': extension_departamentos_Serie})
datos_bolivia
```
**Tanto las filas como las columnas tienen asociado un índice**
```
# Row labels live in .index, column labels in .columns.
print(datos_bolivia.index)
print(datos_bolivia.columns)
```
**Se puede ver a un DataFrame como un diccionario de Series (columnas)**
```
# Column access by name returns the underlying Series.
datos_bolivia['poblacion']
```
**Otras maneras de crear un DataFrame: si no se provee un índice se crea una secuencia de numeros que empieza en 0.**
```
# Without an explicit index, rows receive the default RangeIndex 0..3.
filas = [[1, 45], [87, 96], [125, 13], [135, 789]]
data = pd.DataFrame(filas, columns=['a', 'b'])
data
```
**Lista de diccionarios (las claves son los nombres de las columnas)**
```
# From a list of dicts: keys become columns; missing keys become NaN.
data = pd.DataFrame([{'a': 1, 'b': 2}, {'b': 3, 'c': 4}])
data
```
**Información general de un *DataFrame***
```
# Quick inspection helpers: dimensions, first/last rows, element count.
datos_bolivia.shape
datos_bolivia.head(5)
datos_bolivia.tail(5)
datos_bolivia.size
datos_bolivia.info()
# describe() returns a DataFrame of summary statistics for each numeric column
datos_bolivia.describe()
```
# Indices
<a id="3"></a>
Un *Index* es el mecanismo para referenciar datos en las Series y los DataFrames. Un Index object es un **conjunto** ordenado de valores
```
# Index objects behave like ordered, immutable sets.
indA = pd.Index([1, 3, 5, 7, 9])
indB = pd.Index([2, 3, 5, 7, 11])
print(indA.union(indB))  # elements in either index
print(indA.intersection(indB))  # elements in both
print(indA.difference(indB))  # elements in indA only
```
# Extracción de datos
<a id="4"></a>
Extraer datos de un DataFrame o una serie.
```
# Rebuild the same DataFrame for the extraction examples that follow.
datos_bolivia = pd.DataFrame(data={'poblacion':poblacion, 'extension':extension_departamentos_Serie})
datos_bolivia
```
**Un *DataFrame* es como diccionario de Series (columnas) en el cual se puede extraer y modificar datos**
```
# Column selection and creation (dict-of-Series view of a DataFrame).
datos_bolivia['poblacion']  # single column -> Series
datos_bolivia[['poblacion','extension']]  # list of columns -> DataFrame
datos_bolivia['constante'] = 1  # scalar broadcast into a new column
datos_bolivia['densidad'] = datos_bolivia['poblacion'] / datos_bolivia['extension']
datos_bolivia
# New column from a Series: values are aligned by row label.
# NOTE(review): 'Murillo' is a province, not the capital city of La Paz —
# verify the data (cannot change the runtime value in a doc-only pass).
datos_bolivia['capital'] = pd.Series(
    {'Chuquisaca': 'Sucre',
    'La Paz': 'Murillo',
    'Cochabamba': 'Cercado',
    'Oruro': 'Cercado',
    'Potosí': 'Potosí',
    'Tarija': 'Tarija',
    'Santa Cruz': 'Santa Cruz de la Sierra',
    'Pando': 'Cobija',
    'Beni': 'Trinidad' })
datos_bolivia
```
**Un DataFrame es también como un arreglo bidimensional (una matriz de Series)
Soporta indices, slicing, filtering empleando los indices explicitos (iloc usa indices numéricos implicitos).
El primer valor de la matriz hace referencia a las filas**
- https://railsware.com/blog/python-for-machine-learning-indexing-and-slicing-for-lists-tuples-strings-and-other--sequential-types/
```
# .loc uses the explicit (label) index; label slices are END-INCLUSIVE.
datos_bolivia.loc['Beni']  # one row -> Series
datos_bolivia.loc['Beni':'Oruro']  # rows from 'Beni' through 'Oruro' in index order
datos_bolivia['poblacion'] > 2000000  # boolean mask Series
datos_bolivia['extension'].isnull()  # True where extension is NaN (Pando)
```
## Se utiliza *(&, |)* en lugar de *(and, or)*
<a id="8"></a>
```
# Combine boolean masks element-wise with & / | (plain and/or would raise).
# Each condition needs parentheses because & binds tighter than >.
datos_bolivia.loc[(datos_bolivia['poblacion'] > 2000000) & (datos_bolivia['extension']> 60000.0), ['poblacion','densidad'] ]
datos_bolivia
```
# Modificación de datos
<a id="5"></a>
**Elimina todos los datos de una columna**
```
# inplace=True mutates this DataFrame instead of returning a new one.
datos_bolivia.drop(columns=['constante'], inplace=True)
datos_bolivia
```
**Eliminar los datos faltantes (los que son NaN)**
```
# dropna returns a NEW frame without NaN rows; the result is not assigned,
# so datos_bolivia itself is unchanged here.
datos_bolivia.dropna(how='any')
# Fill Pando's missing density by hand with scalar .loc access.
# NOTE(review): Pando's 'extension' is NaN (absent from the source dict),
# so this division still yields NaN — verify the intent.
datos_bolivia.loc['Pando', 'densidad'] = datos_bolivia.loc['Pando', 'poblacion'] / datos_bolivia.loc['Pando', 'extension']
datos_bolivia
```
## Apply
<a id="6"></a>
Apply aplica una función que recibe como argumento cada una de las columnas (o filas) de un DataFrame y devuelve un nuevo objeto con los resultados; no modifica el DataFrame existente.
**axis=0 es la opción por defecto, significa que se recorrerá el DataFrame por las columnas (similar a recorrer una matriz por columas). Si axis=1 el DataFrame se recorrerá por sus filas.**
```
# apply() with axis=1 passes each ROW to the function and returns a NEW
# Series with one value per row; datos_bolivia itself is not modified.
datos_bolivia_extension_reducida_a_la_mitad = datos_bolivia.apply(lambda x: x['extension']/2, axis=1)
datos_bolivia_extension_reducida_a_la_mitad
```
## One Hot Encoding
<a id="7"></a>
Conversión de valores numéricos y nominales en categorías y luego las categorías en valores numéricos.
Necesario cuando el algoritmo de aprendizaje automático no es capaz de trabajar con valores nominales o contínuos
**Obtener los códigos de una variable nominal**
```
# Cast to category dtype and expose each value's integer category code.
datos_bolivia['capital'].astype('category').cat.codes
```
**Obtener el vector One Hot Encoding**
```
# One 0/1 indicator column per distinct value of 'capital' (one-hot).
pd.get_dummies(datos_bolivia,columns=['capital'])
```
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Dogs vs Cats Image Classification With Image Augmentation
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l05c02_dogs_vs_cats_with_augmentation.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l05c02_dogs_vs_cats_with_augmentation.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
In this tutorial, we will discuss how to classify images into pictures of cats or pictures of dogs. We'll build an image classifier using `tf.keras.Sequential` model and load data using `tf.keras.preprocessing.image.ImageDataGenerator`.
## Specific concepts that will be covered:
In the process, we will build practical experience and develop intuition around the following concepts
* Building _data input pipelines_ using the `tf.keras.preprocessing.image.ImageDataGenerator`class — How can we efficiently work with data on disk to interface with our model?
* _Overfitting_ - what is it, how to identify it, and how can we prevent it?
* _Data Augmentation_ and _Dropout_ - Key techniques to fight overfitting in computer vision tasks that we will incorporate into our data pipeline and image classifier model.
## We will follow the general machine learning workflow:
1. Examine and understand data
2. Build an input pipeline
3. Build our model
4. Train our model
5. Test our model
6. Improve our model/Repeat the process
<hr>
**Before you begin**
Before running the code in this notebook, reset the runtime by going to **Runtime -> Reset all runtimes** in the menu above. If you have been working through several notebooks, this will help you avoid reaching Colab's memory limits.
# Importing packages
Let's start by importing required packages:
* os — to read files and directory structure
* numpy — for some matrix math outside of TensorFlow
* matplotlib.pyplot — to plot the graph and display images in our training and validation data
```
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import numpy as np
import matplotlib.pyplot as plt
```
For the TensorFlow imports, we directly specify Keras symbols (Sequential, Dense, etc.). This enables us to refer to these names directly in our code without having to qualify their full names (for example, `Dense` instead of `tf.keras.layer.Dense`).
```
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
```
# Data Loading
To build our image classifier, we begin by downloading the dataset. The dataset we are using is a filtered version of <a href="https://www.kaggle.com/c/dogs-vs-cats/data" target="_blank">Dogs vs. Cats</a> dataset from Kaggle (ultimately, this dataset is provided by Microsoft Research).
In previous Colabs, we've used <a href="https://www.tensorflow.org/datasets" target="_blank">TensorFlow Datasets</a>, which is a very easy and convenient way to use datasets. In this Colab however, we will make use of the class `tf.keras.preprocessing.image.ImageDataGenerator` which will read data from disk. We therefore need to directly download *Dogs vs. Cats* from a URL and unzip it to the Colab filesystem.
```
# Download and unpack the filtered Dogs vs. Cats archive into the Keras
# cache (~/.keras/datasets); get_file returns the local path to the zip.
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
zip_dir = tf.keras.utils.get_file('cats_and_dogs_filterted.zip', origin=_URL, extract=True)
```
The dataset we have downloaded has following directory structure.
<pre style="font-size: 10.0pt; font-family: Arial; line-height: 2; letter-spacing: 1.0pt;" >
<b>cats_and_dogs_filtered</b>
|__ <b>train</b>
|______ <b>cats</b>: [cat.0.jpg, cat.1.jpg, cat.2.jpg ....]
|______ <b>dogs</b>: [dog.0.jpg, dog.1.jpg, dog.2.jpg ...]
|__ <b>validation</b>
|______ <b>cats</b>: [cat.2000.jpg, cat.2001.jpg, cat.2002.jpg ....]
|______ <b>dogs</b>: [dog.2000.jpg, dog.2001.jpg, dog.2002.jpg ...]
</pre>
We'll now assign variables with the proper file path for the training and validation sets.
```
# Build paths for the train/validation class sub-directories of the unzipped dataset.
base_dir = os.path.join(os.path.dirname(zip_dir), 'cats_and_dogs_filtered')
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')

train_cats_dir = os.path.join(train_dir, 'cats')  # directory with our training cat pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')  # directory with our training dog pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')  # directory with our validation cat pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')  # directory with our validation dog pictures
```
### Understanding our data
Let's look at how many cats and dogs images we have in our training and validation directory
```
# Count the image files in each class directory.
num_cats_tr = len(os.listdir(train_cats_dir))
num_dogs_tr = len(os.listdir(train_dogs_dir))
num_cats_val = len(os.listdir(validation_cats_dir))
num_dogs_val = len(os.listdir(validation_dogs_dir))

total_train = num_cats_tr + num_dogs_tr
total_val = num_cats_val + num_dogs_val

# Report per-class counts, then the overall totals.
for label, count in (('total training cat images:', num_cats_tr),
                     ('total training dog images:', num_dogs_tr),
                     ('total validation cat images:', num_cats_val),
                     ('total validation dog images:', num_dogs_val)):
    print(label, count)
print("--")
print("Total training images:", total_train)
print("Total validation images:", total_val)
```
# Setting Model Parameters
For convenience, let us set up variables that will be used later while pre-processing our dataset and training our network.
```
BATCH_SIZE = 100  # number of images per training/validation batch
IMG_SHAPE  = 150  # Our training data consists of images with width of 150 pixels and height of 150 pixels
```
After defining our generators for training and validation images, **flow_from_directory** method will load images from the disk and will apply rescaling and will resize them into required dimensions using single line of code.
# Data Augmentation
Overfitting often occurs when we have a small number of training examples. One way to fix this problem is to augment our dataset so that it has sufficient number and variety of training examples. Data augmentation takes the approach of generating more training data from existing training samples, by augmenting the samples through random transformations that yield believable-looking images. The goal is that at training time, your model will never see the exact same picture twice. This exposes the model to more aspects of the data, allowing it to generalize better.
In **tf.keras** we can implement this using the same **ImageDataGenerator** class we used before. We can simply pass different transformations we would want to our dataset as a form of arguments and it will take care of applying it to the dataset during our training process.
To start off, let's define a function that can display an image, so we can see the type of augmentation that has been performed. Then, we'll look at specific augmentations that we'll use during training.
```
# Display helper: renders images side by side in a 1x5 grid.
def plotImages(images_arr):
    """Show up to five images in a single-row grid of subplots."""
    fig, axes = plt.subplots(1, 5, figsize=(20, 20))
    for img, ax in zip(images_arr, axes.flatten()):
        ax.imshow(img)
    plt.tight_layout()
    plt.show()
```
### Flipping the image horizontally
We can begin by randomly applying horizontal flip augmentation to our dataset and seeing how individual images will look after the transformation. This is achieved by passing `horizontal_flip=True` as an argument to the `ImageDataGenerator` class.
```
# Rescale pixel values to [0, 1] and randomly mirror images left-right.
image_gen = ImageDataGenerator(rescale=1./255, horizontal_flip=True)

train_data_gen = image_gen.flow_from_directory(batch_size=BATCH_SIZE,
                                               directory=train_dir,
                                               shuffle=True,
                                               target_size=(IMG_SHAPE,IMG_SHAPE))
```
To see the transformation in action, let's take one sample image from our training set and repeat it five times. The augmentation will be randomly applied (or not) to each repetition.
```
# train_data_gen[0][0][0]: first image of the first batch; each indexing
# re-applies the random augmentation, so the five copies can differ.
augmented_images = [train_data_gen[0][0][0] for i in range(5)]
plotImages(augmented_images)
```
### Rotating the image
The rotation augmentation will randomly rotate the image up to a specified number of degrees. Here, we'll set it to 45.
```
# Random rotations of up to +/-45 degrees, plus [0, 1] rescaling.
image_gen = ImageDataGenerator(rescale=1./255, rotation_range=45)

train_data_gen = image_gen.flow_from_directory(batch_size=BATCH_SIZE,
                                               directory=train_dir,
                                               shuffle=True,
                                               target_size=(IMG_SHAPE, IMG_SHAPE))
```
To see the transformation in action, let's once again take a sample image from our training set and repeat it. The augmentation will be randomly applied (or not) to each repetition.
```
# Five randomly rotated views of the same underlying image.
augmented_images = [train_data_gen[0][0][0] for i in range(5)]
plotImages(augmented_images)
```
### Applying Zoom
We can also apply Zoom augmentation to our dataset, zooming images up to 50% randomly.
```
# zoom_range=0.5 zooms randomly by up to 50%, plus [0, 1] rescaling.
image_gen = ImageDataGenerator(rescale=1./255, zoom_range=0.5)

train_data_gen = image_gen.flow_from_directory(batch_size=BATCH_SIZE,
                                               directory=train_dir,
                                               shuffle=True,
                                               target_size=(IMG_SHAPE, IMG_SHAPE))
```
One more time, take a sample image from our training set and repeat it. The augmentation will be randomly applied (or not) to each repetition.
```
# Five randomly zoomed views of the same underlying image.
augmented_images = [train_data_gen[0][0][0] for i in range(5)]
plotImages(augmented_images)
```
### Putting it all together
We can apply all these augmentations, and even others, with just one line of code, by passing the augmentations as arguments with proper values.
Here, we have applied rescale, rotation of 40 degrees, width shift, height shift, shear, zoom, and horizontal flip augmentation to our training images.
```
# All training-time augmentations combined. Note rotation_range here is 40
# degrees (the isolated rotation example earlier used 45).
image_gen_train = ImageDataGenerator(
      rescale=1./255,
      rotation_range=40,
      width_shift_range=0.2,
      height_shift_range=0.2,
      shear_range=0.2,
      zoom_range=0.2,
      horizontal_flip=True,
      fill_mode='nearest')  # how to fill pixels exposed by shifts/rotations

# class_mode='binary' yields integer 0/1 labels alongside the images.
train_data_gen = image_gen_train.flow_from_directory(batch_size=BATCH_SIZE,
                                                     directory=train_dir,
                                                     shuffle=True,
                                                     target_size=(IMG_SHAPE,IMG_SHAPE),
                                                     class_mode='binary')
```
Let's visualize how a single image would look like five different times, when we pass these augmentations randomly to our dataset.
```
# Five views of one image with the full augmentation pipeline applied.
augmented_images = [train_data_gen[0][0][0] for i in range(5)]
plotImages(augmented_images)
```
### Creating Validation Data generator
Generally, we only apply data augmentation to our training examples, since the original images should be representative of what our model needs to manage. So, in this case we are only rescaling our validation images and converting them into batches using ImageDataGenerator.
```
# Validation pipeline: rescale only — augmentation is reserved for training.
image_gen_val = ImageDataGenerator(rescale=1./255)

val_data_gen = image_gen_val.flow_from_directory(batch_size=BATCH_SIZE,
                                                 directory=validation_dir,
                                                 target_size=(IMG_SHAPE, IMG_SHAPE),
                                                 class_mode='binary')
```
# Model Creation
## Define the model
The model consists of four convolution blocks with a max pool layer in each of them.
Before the final Dense layers, we're also applying a Dropout probability of 0.5. This means that 50% of the values coming into the Dropout layer will be set to zero. This helps to prevent overfitting.
Then we have a fully connected layer with 512 units, with a `relu` activation function. The model will output class probabilities for two classes — dogs and cats — using `softmax`.
```
# Four Conv/MaxPool blocks -> Dropout(0.5) -> Dense(512) -> 2-way softmax.
# Output is a probability per class (cat, dog).
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(150, 150, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),

    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),

    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),

    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),

    tf.keras.layers.Dropout(0.5),  # zero out 50% of activations to fight overfitting
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(2, activation='softmax')
])
```
### Compiling the model
As usual, we will use the `adam` optimizer. Since we output a softmax categorization, we'll use `sparse_categorical_crossentropy` as the loss function. We would also like to look at training and validation accuracy on each epoch as we train our network, so we are passing in the metrics argument.
```
# sparse_categorical_crossentropy pairs integer labels (from
# class_mode='binary') with the model's softmax probability output.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
```
### Model Summary
Let's look at all the layers of our network using **summary** method.
```
# Print per-layer output shapes and parameter counts.
model.summary()
```
### Train the model
It's time we train our network.
Since our batches are coming from a generator (`ImageDataGenerator`), we'll use `fit_generator` instead of `fit`.
```
epochs=100
# fit_generator is the TF1-era API for generator inputs; in TF2 model.fit
# accepts generators directly and fit_generator is deprecated.
history = model.fit_generator(
    train_data_gen,
    steps_per_epoch=int(np.ceil(total_train / float(BATCH_SIZE))),  # batches per epoch
    epochs=epochs,
    validation_data=val_data_gen,
    validation_steps=int(np.ceil(total_val / float(BATCH_SIZE)))
)
```
### Visualizing results of the training
We'll now visualize the results we get after training our network.
```
# Plot training/validation accuracy and loss per epoch.
# The History dict keys differ across Keras versions: TF1-era Keras records
# the 'accuracy' metric under 'acc'/'val_acc', while TF2 records it under
# 'accuracy'/'val_accuracy'. Look the keys up defensively so this cell
# works in either environment instead of raising KeyError on TF2.
hist = history.history
acc = hist['accuracy'] if 'accuracy' in hist else hist['acc']
val_acc = hist['val_accuracy'] if 'val_accuracy' in hist else hist['val_acc']

loss = hist['loss']
val_loss = hist['val_loss']

epochs_range = range(epochs)

plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
```
| github_jupyter |
# Data preparation for calibration benchmarks
```
import os
os.getcwd()
import sys
# Machine-specific path to the cta-benchmarks checkout (hard-coded).
sys.path.append('/Users/moralejo/CTA/ctasoft/cta-benchmarks/')
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from copy import deepcopy, copy
import os
from ctapipe.utils import get_dataset_path
from ctapipe.io import event_source
from ctapipe.calib import CameraCalibrator
from ctapipe.visualization import CameraDisplay
from ctapipe.io import HDF5TableWriter
from ctapipe.io.containers import Container, Field
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:95% !important; }</style>"))
from ctabench.dl1 import true_pe_cleaning, noise_calibrated_image, signal_calibrated_image
# infile = get_dataset_path('gamma_test_large.simtel.gz')
# Local simtel file with simulated gamma events (machine-specific path).
infile = '/Users/moralejo/Desktop/gamma_20deg_0deg_run100___cta-prod3-lapalma3-2147m-LaPalma_cone10.simtel.gz'
source = event_source(infile, max_events=None)
# Charge-extraction algorithms to compare, and the camera types of interest.
integrators = {0: 'NeighbourPeakIntegrator', 1: 'LocalPeakIntegrator', 2: 'FullIntegrator'}
cam_ids = {'LSTCam':0, 'NectarCam':1, 'FlashCam':2, 'SCTCam':3}
# Pre-build nested accumulators: one empty list per (integrator, camera).
reco2true_ratio = []
reco = []
true = []
for ii, integrator in integrators.items():
    reco2true_ratio.append([])
    reco.append([])
    true.append([])
    for cam_id in cam_ids.items():
        reco2true_ratio[ii].append([])
        reco[ii].append([])
        true[ii].append([])
# i.e. list[integrator][camera][entry]
# For each integrator, calibrate every event and collect, per camera type,
# the reconstructed charge, true photo-electron count, and their ratio for
# pixels with at least one true photo-electron.
# NOTE(review): `source` is iterated once per integrator; if event_source
# returns a one-shot iterator, the 2nd and 3rd passes would see no events —
# verify that this ctapipe source supports re-iteration.
for ii, integrator in integrators.items():
    print(integrator)
    cal = CameraCalibrator(r1_product='HESSIOR1Calibrator',
                           extractor_product=integrator)
    for event in source:
        cal.calibrate(event)
        diff_sum = 0  # NOTE(review): never used afterwards
        for tel_id in event.r0.tels_with_data:
            cam_id = cam_ids[event.inst.subarray.tel[tel_id].camera.cam_id]
            true_image = event.mc.tel[tel_id].photo_electron_image
            true_image_nozeroes = true_image[true_image>0]  # keep pixels with true signal
            calibrated_image = event.dl1.tel[tel_id].image[0]
            calibrated_image_nozeroes = calibrated_image[true_image>0]
            ratio = np.divide(calibrated_image_nozeroes,true_image_nozeroes)
            reco2true_ratio[ii][cam_id].extend(ratio.tolist())
            reco[ii][cam_id].extend(calibrated_image_nozeroes.tolist())
            true[ii][cam_id].extend(true_image_nozeroes.tolist())
# Plot reco/true charge ratio vs log10(true p.e.) for camera 0 (LSTCam),
# one figure per integrator.
cam_id = 0
integrator_id = 1  # NOTE(review): unused — the loop below covers all integrators
for ii, integrator in integrators.items():
    ratio = np.array(reco2true_ratio[ii][cam_id])
    r = np.array(reco[ii][cam_id])
    t = np.array(true[ii][cam_id])
    _ = plt.plot(np.log10(t),r/t,'ro',markersize=0.1)
    plt.axis([-0.1, 3., -3., 8.])
    plt.show()
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import time, os
import pickle
from collections import Counter
import re
import string
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
from spacy.lang.en import English
nlp = spacy.load("en_core_web_sm")
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from src.features.nlp_utilities import load_clean_nlp, spacy_tagging_demo, new_stop_words
from src.features.nlp_utilities import text_process,display_topics,count_vectorizer_display, tfidf_vectorizer_display
from src.features.nlp_utilities import tsne_viz, pca_viz, get_recommends
# Project data directories, relative to the repository root.
raw_data_dir = "data/raw/"
# Fixed: was "/data/interim/" — the leading slash made this an absolute
# filesystem path, inconsistent with the two sibling relative directories.
interim_data_dir = "data/interim/"
processed_data_dir = "data/processed/"
```
# Data Science Topic Modeling and Job Recommender
## Introduction
The term "Data Science" has come to be a catch-all for many different specializations that deal with data, from data analysts to machine learning engineers. Oftentimes, just looking at the title of a role is not sufficient to figure out what kind of skills/competencies the role requires.
This project will attempt to topic-model data science job listings scraped from Indeed.com into more useful categories, and then create a recommendation engine.
## Web Scraping
Please see the main webscraping script and scraping function imports [here](https://github.com/pdekhman/data-science-job-recommender/blob/master/src/scrape_main.py)
## Cleaning and Transforming Data
The scraped data was mostly clean. The only preprocessing needed was creating a dataframe and changing a few company names.
Please see [here](https://github.com/pdekhman/data-science-job-recommender/blob/master/src/features/nlp_utilities.py) for custom preprocessing and nlp functions
```
# Load the scraped listings, then normalise the two AWS company-name
# variants down to plain 'Amazon' (two sequential renames).
nlp_df = load_clean_nlp(raw_data_dir,"listings_final.pkl",'text')
nlp_df['company'] = np.where(nlp_df.company=='Amazon Web Services, Inc.','Amazon.com Services, Inc.',nlp_df.company)
nlp_df['company'] = np.where(nlp_df.company=='Amazon.com Services, Inc.','Amazon',nlp_df.company)
nlp_df.head()
```
## sPacy NLP Demo
In order to create useful topics, we need to break down the text of each job description further. The sPacy library allows us to break down each word a number of ways, including seeing its lemma (root), what part of speech it is (POS), and whether is a number/punctuation/stopword.
```
record = 2000  # demo row (a Data Engineer listing)
# Columns by position: 0=title, 1=company, 2=text, 3=city.
print("Title: " + nlp_df.iloc[record,0]+ "\n")
print("Company: " + nlp_df.iloc[record,1] + '\n')
print("City: " + (nlp_df.iloc[record,3] + '\n'))
print("Job Text: " + nlp_df.iloc[record,2] + '\n')
```
Lets take the job listing above, for Data Engineer at Google in San Francisco
```
# Run one job description through the spaCy tagging demo to show
# per-token lemma, part of speech, and stopword/punctuation flags.
example_text = nlp_df.iloc[record,2]
#pass through sacy parser
spacy_tagging_demo(example_text)
```
Once passed through the sPacy parser (tokenization, part-of speech tagging, etc), we can now see the individual words (tokens) and detailed information about each of them.
## Full NLP Pipeline
```
# Run every job description through the spaCy pipeline.
# NOTE(review): disable=['parser'] turns off the dependency parser only;
# the NER component ('ner') still runs — confirm that is intended.
spacy_text = list(nlp.pipe(nlp_df.text,disable=['parser']))
```
We now pass the full column of job description text through the sPacy pipeline (we have disabled the parser because we won't be using any named entity recognition in this analysis)
```
punctuation = string.punctuation  # characters stripped during cleaning
# Parts of speech passed to text_process.
# NOTE(review): whether `pos` acts as a keep-list or a drop-list depends on
# text_process's implementation — verify against src/features/nlp_utilities.
pos = ['VERB','ADV','ADJ','ADP','DET','NOUN']
processed_text = [text_process(i,new_stop_words,punctuation,pos) for i in spacy_text]
```
After the sPacy pipeline, the text is further processed to exclude stop words, punctuation and numbers. The text is also reduced to its lower-case lemma, and parts of speech that speak to concrete skills (Verbs, Adverbs, Adjectives) are removed as well.
```
# Inspect the cleaned tokens for the demo record.
processed_text[record]
```
The original text was whittled down to only the important words and skills
## Topic Modeling
Once we have our fully processed text, it's time to move to topic modeling by:
1. Employing a TFIDF Vectorizer to count word occurrences and produce a bag-of-words matrix. We chose TFIDF Vectorizer over the traditional Count Vectorizer because it gave us more meaningful topics
2. Feeding the transformed "document word matrix" into a Non-Negative Matrix Factorization Model (NMF) to produce our topics
```
# TF-IDF + NMF with 6 topics; ignore terms appearing in more than 80% of
# documents (max_o) or in fewer than 75 documents (min_o).
tfidf_doc_topic = tfidf_vectorizer_display(processed_text,topics=6,max_features=2000,max_o = .8,min_o = 75)
```
Classifying and naming topics is more of an art than a science; however, I think there are definitely clear divisions in the above topics:
* Topic 0: Generalist (catch-all topic)
* Topic 1: Machine Learning / AI
* Topic 2: Big Data
* Topic 3: Business Intelligence
* Topic 4: Cloud
* Topic 5: NLP
```
# Document-topic weight matrix with human-chosen topic names; the 'Topic'
# column records each listing's highest-weight topic (idxmax over columns).
# NOTE(review): passing a raw zip object as the index may require
# list(zip(...)) on newer pandas versions — verify.
topic_df = pd.DataFrame(tfidf_doc_topic.round(2),
                        index = (zip(nlp_df.index.values,nlp_df.title)),
                        columns = ['Generalist','ML/AI','Big Data','BI','Cloud','NLP'])
topic_df['Topic'] = topic_df.idxmax(axis=1)
topic_df
```
Appending the "Topic" with the highest weight to each job listings in our original dataframe, we can now see the job titles and topics together
## Topic Visualization
```
# 2-D t-SNE projection of the document-topic matrix, coloured by topic.
tsne_viz(tfidf_doc_topic,topic_df)
```
Using TSNE to reduce the document word matrix to two-dimensions, we see clear divisions between the different topics
```
# 2-D PCA projection of the same matrix for comparison with t-SNE.
pca_viz(tfidf_doc_topic,topic_df)
```
Dimensionality reduction using PCA shows more overlap, especially between "NLP" and "ML/AI", and the "Cloud" topic is all over the map
## Topic Breakdowns
```
# Overall distribution of listings per topic, most frequent first (the
# leading bar is highlighted in orange).
# Fixed: the original referenced `new_topic_df`, which is never defined in
# this notebook; `topic_df` (built above with the 'Topic' column) is the
# frame these counts come from.
plt.figure(figsize=[12,6])
ax = sns.countplot(x="Topic", data=topic_df, order=topic_df.Topic.value_counts().index,
                   palette=['orange','blue','blue','blue','blue','blue'])
plt.ylabel('Count',fontsize=16,fontweight='bold')
plt.xlabel("Topic",fontsize=16,fontweight='bold')
plt.xticks(fontsize = 14)
plt.yticks(fontsize = 14)
plt.title ('Data Science Jobs Across All Cities',fontsize=16,fontweight='bold')
```
The most prevalent type of role across all cities is "Business Intelligence"
```
# One topic-count plot per city, saved as an SVG per city.
# Fixed: `new_topic_df` was undefined — use `topic_df`. The mask is applied
# positionally (.values) because topic_df's (index, title) tuple index does
# not align with nlp_df's index. Also fixed savefig: the keyword is
# `format`, not `type` (the extension already implies SVG anyway).
for i in nlp_df.city.unique():
    city_mask = (nlp_df.city == i).values
    city_df = topic_df[city_mask]
    plt.figure(figsize=[12,6])
    ax = sns.countplot(x="Topic", data=city_df, order=city_df.Topic.value_counts().index,
                       palette=['orange','blue','blue','blue','blue','blue'])
    plt.ylabel('Count',fontsize=16,fontweight='bold')
    plt.xlabel("Topic",fontsize=16,fontweight='bold')
    plt.xticks(fontsize = 14)
    plt.yticks(fontsize = 14)
    plt.title (i,fontsize=16,fontweight='bold')
    plt.savefig(i+'.svg', format='svg')
```
Roles in San Francisco and Seattle tend to lean toward "Big Data" responsibilities, while Washington, D.C. roles seem to favor "Generalists"
```
# One topic-count plot per selected company.
# Fixed: `new_topic_df` was undefined — use `topic_df`, masked positionally
# (.values) since its tuple index does not align with nlp_df's index.
for i in ['Apple','Amazon','Facebook','Spotify']:
    company_mask = (nlp_df.company == i).values
    company_df = topic_df[company_mask]
    plt.figure(figsize=[12,6])
    ax = sns.countplot(x="Topic", data=company_df, order=company_df.Topic.value_counts().index,
                       palette=['orange','blue','blue','blue','blue','blue'])
    plt.ylabel('Count',fontsize=16,fontweight='bold')
    plt.xlabel("Topic",fontsize=16,fontweight='bold')
    plt.xticks(fontsize = 14)
    plt.yticks(fontsize = 14)
    plt.title (i,fontsize=16,fontweight='bold')
```
Apple has more "Big Data Roles", while other larger tech companies are split between "Generalists" and "Business Intelligence"
## Job Recommender
Now that we have our topics, we can create recommendations using the original TFIDF document word matrix and cosine distance.
```
# Top-3 most similar listings (cosine distance on the TF-IDF document-topic
# matrix) for the demo record, row 2000.
get_recommends(2000,tfidf_doc_topic,nlp_df,num_recom=3)
```
| github_jupyter |
```
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# plot style
sns.set_style('whitegrid')
# NOTE(review): calling set_style again with a dict REPLACES the style
# parameters rather than merging with 'whitegrid' — verify the intent.
sns.set_style({'font.family': 'Times New Roman'})
%matplotlib inline
# Tab-separated results of the manual journal data-policy survey.
df = pd.read_csv("data-policy-results.csv", sep='\t')
df.head()
len(df)
# Pie chart
# Left: donut chart of policy-type shares; right: count of sharing modes.
a = df['Policy type'].value_counts()
labels = a.index
sizes = a
#colors
colors1 = plt.cm.Set2(np.linspace(0,1,3))

fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 4.5))
axes[0].pie(sizes, colors = colors1, labels=labels, autopct='%1.1f%%', startangle=50, pctdistance=0.85)
# White circle on top of the pie turns it into a donut chart.
axes[0].add_patch(plt.Circle((0,0),0.70,fc='white'))
axes[0].axis('equal')  # equal aspect so the pie is a circle
axes[0].set_title("Data policy strictness of top 30 economics journals")

sns.countplot(x='Sharing mode', data=df, palette='Oranges', ax=axes[1])
axes[1].set_title("Recommended sharing mode per data policy")

plt.tight_layout()
plt.savefig('img/a.png', dpi=100)
# Two count plots: AEA-policy adoption overall, and split by policy type.
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 4.5))

sns.countplot(x='Adopt AEA', data=df, palette="Blues", ax=axes[0])
axes[0].set_title("Count of journals that adopt AEA data policy")
axes[0].set_xlabel("Adopt AEA data policy")
axes[0].set(ylim=(0, 22))
axes[0].set_yticks(np.arange(0, 22, 2))

sns.countplot(x='Adopt AEA', hue='Policy type', data=df, palette=colors1, ax=axes[1])
axes[1].set_title("Count of journals that adopt AEA data policy & strictness")
axes[1].set_xlabel("Adopt AEA data policy")
axes[1].set(ylim=(0, 22))
axes[1].set_yticks(np.arange(0, 22, 2))

#plt.legend(loc=1)
# Opaque legend background so it doesn't blend into the grid.
legend = plt.legend(frameon = 1)
frame = legend.get_frame()
frame.set_facecolor('white')
#frame.set_edgecolor('gray')
plt.tight_layout()
plt.savefig('img/b.png', dpi=100)
df = pd.read_csv("econ.csv")
a = df.sum()
df1 = a[['stata', 'julia', 'python','R','C','C++','matlab','fortran','sas']]
df1.values
from __future__ import division
pom = (df1.values * 100 / df1.values.sum())
l = pd.Series(pom, index=df1.index)
u = l.sort_values(ascending=False)
u
s = df[['stata', 'julia', 'python','R','C','C++','matlab','fortran','sas']]
no_sw = s.sum(axis=1).astype(int).value_counts(sort=False)
no_sw
tot=len(s)
no_ = no_sw.values*100/tot
no_ = pd.Series(no_, index=no_sw.index)
no_
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 4.5))
sns.barplot(x=no_.index, y=no_.values, palette="Blues", ax=axes[0])
axes[0].set_ylabel("Percentage [%]")
axes[0].set_xlabel("Number of software tools")
axes[0].set_title("Number of software tools (out of listed) used in publications")
sns.barplot(x=u.index, y=u.values, palette="deep", ax=axes[1])
axes[1].set_ylabel("Percentage [%]")
axes[1].set_title("Most used software in AER publications from 1999 to 2018")
plt.tight_layout()
plt.savefig('img/c.png', dpi=100)
```
| github_jupyter |
# Recommender System:
- The last thing to do is to use our saved models to recommend items to users:
### For the requested user:
- Calculate the score for every item.
- Sort the items based on the score and output the top results.
### Check which users exist on the test set
```
!pip install ipython-autotime
#### To measure all running time
# https://github.com/cpcloud/ipython-autotime
%load_ext autotime
import pandas as pd
import pickle
import pandas as pd
import numpy as np
import os
#Keras
from keras.models import load_model
from keras import backend as K
# Tensorflow
import tensorflow as tf
from sklearn.metrics import mean_squared_error
```
### Set and Check GPUs
```
def set_check_gpu():
    """Configure the Keras/TF session for multi-GPU use and report devices.

    NOTE(review): mixes TF1-style session config (K.tf.ConfigProto /
    K.tf.Session) with TF2-era `tf.config.experimental` calls — presumably
    written against TF 1.x with compat extensions; confirm against the
    installed TensorFlow version before relying on it.
    """
    cfg = K.tf.ConfigProto()
    cfg.gpu_options.per_process_gpu_memory_fraction =1 # allow all of the GPU memory to be allocated
    # for 8 GPUs
    cfg.gpu_options.visible_device_list = "0,1,2,3,4,5,6,7" # "0,1"
    # for 1 GPU
    # cfg.gpu_options.visible_device_list = "0"
    #cfg.gpu_options.allow_growth = True # # Don't pre-allocate memory; dynamically allocate the memory used on the GPU as-needed
    #cfg.log_device_placement = True # to log device placement (on which device the operation ran)
    sess = K.tf.Session(config=cfg)
    K.set_session(sess) # set this TensorFlow session as the default session for Keras
    print("* TF version: ", [tf.__version__, tf.test.is_gpu_available()])
    print("* List of GPU(s): ", tf.config.experimental.list_physical_devices() )
    print("* Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID";
    # set for 8 GPUs
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3,4,5,6,7";
    # set for 1 GPU
    # os.environ["CUDA_VISIBLE_DEVICES"] = "0";
    # Tf debugging option
    tf.debugging.set_log_device_placement(True)
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            # Currently, memory growth needs to be the same across GPUs
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
        except RuntimeError as e:
            # Memory growth must be set before GPUs have been initialized
            print(e)

# Top-level device report; the helper above is deliberately left uncalled.
# print(tf.config.list_logical_devices('GPU'))
print(tf.config.experimental.list_physical_devices('GPU'))
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
# set_check_gpu()
from sklearn.model_selection import train_test_split
review_data = pd.read_csv('../data/amazon_reviews_us_Shoes_v1_00_help_voted_And_cut_lognTail.csv')
review_data.rename(columns={ 'star_rating': 'score','customer_id': 'user_id', 'user': 'user_name'}, inplace=True)
items = review_data.product_id.unique()
item_map = {i:val for i,val in enumerate(items)}
inverse_item_map = {val:i for i,val in enumerate(items)}
review_data["old_item_id"] = review_data["product_id"] # copying for join with metadata
review_data["item_id"] = review_data["product_id"].map(inverse_item_map)
items = review_data.item_id.unique()
print ("We have %d unique items in metadata "%items.shape[0])
users = review_data.user_id.unique()
user_map = {i:val for i,val in enumerate(users)}
inverse_user_map = {val:i for i,val in enumerate(users)}
review_data["old_user_id"] = review_data["user_id"]
review_data["user_id"] = review_data["user_id"].map(inverse_user_map)
items_reviewed = review_data.product_id.unique()
review_data["old_item_id"] = review_data["product_id"] # copying for join with metadata
review_data["item_id"] = review_data["product_id"].map(inverse_item_map)
items_reviewed = review_data.item_id.unique()
users = review_data.user_id.unique()
helpful_votes = review_data.helpful_votes.unique()
ratings_train, ratings_test = train_test_split( review_data, test_size=0.1, random_state=0)
ratings_test.user_id.value_counts().sort_values(ascending=False).head(10)
```
### Create a recommendation example dataset of 100 users from the test set and all items for each, and predict recommendations for them
```
items = review_data.product_id.unique()
item_map = {i:val for i,val in enumerate(items)}
inverse_item_map = {val:i for i,val in enumerate(items)}
review_data["old_item_id"] = review_data["product_id"] # copying for join with metadata
review_data["item_id"] = review_data["product_id"].map(inverse_item_map)
items = review_data.item_id.unique()
print ("We have %d unique items in metadata "%items.shape[0])
# all_info['description'] = all_info['description'].fillna(all_info['title'].fillna('no_data'))
# all_info['title'] = all_info['title'].fillna(all_info['description'].fillna('no_data').apply(str).str[:20])
# all_info['image'] = all_info['image'].fillna('no_data')
# all_info['price'] = pd.to_numeric(all_info['price'],errors="coerce")
# all_info['price'] = all_info['price'].fillna(all_info['price'].median())
review_data.head(n=2)
type(review_data['product_id'].unique())
# creating metadata mappings
titles = review_data['product_title'].unique()
titles_map = {i:val for i,val in enumerate(titles)}
inverse_titles_map = {val:i for i,val in enumerate(titles)}
# price = review_data['price'].unique()
# price_map = {i:val for i,val in enumerate(price)}
# inverse_price_map = {val:i for i,val in enumerate(price)}
# print ("We have %d prices" %price.shape)
print ("We have %d titles" %titles.shape)
# all_info['price_id'] = all_info['price'].map(inverse_price_map)
review_data['title_id'] = review_data['product_title'].map(inverse_titles_map)
# creating dict from
item2prices = {}
# for val in review_data[['item_id','price_id']].dropna().drop_duplicates().iterrows():
# item2prices[val[1]["item_id"]] = val[1]["price_id"]
item2titles = {}
# for val in all_info[['item_id','title_id']].dropna().drop_duplicates().iterrows():
# item2titles[val[1]["item_id"]] = val[1]["title_id"]
# populating the rating dataset with item metadata info
# ratings_train["price_id"] = ratings_train["item_id"].map(lambda x : item2prices[x])
# ratings_train["title_id"] = ratings_train["item_id"].map(lambda x : item2titles[x])
# populating the test dataset with item metadata info
# ratings_test["price_id"] = ratings_test["item_id"].map(lambda x : item2prices[x])
# ratings_test["title_id"] = ratings_test["item_id"].map(lambda x : item2titles[x])
# ratings_test = pd.read_parquet('./data/ratings_test.parquet')
# ratings_train = pd.read_parquet('./data/ratings_train.parquet')
review_data.columns
```
### Select products
#### - use ALL product now.
```
items = review_data.item_id.unique()
df_items = pd.DataFrame(data=items.flatten(),columns=['item_id'])
df_items = pd.merge(df_items,review_data,how='left',left_on=('item_id'),right_on=('item_id'))
### use all products
# df_items= df_items.sample(100)
df_items['key'] = 1
print ("We have %d unique items "%df_items['item_id'].shape[0])
# df_items= df_items[['item_id', 'description', 'category', 'title', 'title_id', 'price', 'price_id', 'brand', 'key']]
df_items= df_items[['item_id', 'product_id', 'score', 'product_title', 'helpful_votes', 'old_item_id', 'old_user_id', 'title_id', 'key']]
print(df_items.shape)
df_items.head(2)
```
### Select 100 users
```
users = ratings_test.user_id.unique()
df_users = pd.DataFrame(data=users.flatten(),columns=['user_id'])
df_users = pd.merge(df_users,ratings_test,how='left',left_on=('user_id'),right_on=('user_id'))
### Select 100 users
df_users= df_users.sample(100)
df_users['key'] = 1
print ("We have %d unique users "%df_users['user_id'].shape[0])
df_users= df_users[['user_id', 'key']]
print(df_users.shape)
df_users.head(2)
```
## Merge users and item and items metadata
```
df_unseenData= pd.merge(df_users, df_items, on='key')
del df_unseenData['key']
print ("We have %d unique records in the recommendation example dataset "%df_unseenData.shape[0])
print(df_unseenData.shape)
df_unseenData.sample(10)
df_unseenData.columns
df_unseenData
from os import listdir
from os.path import isfile, join
mypath = '../models'
onlyfiles = [f.replace('.h5', '') for f in listdir(mypath) if isfile(join(mypath, f))]
onlyfiles
```
## Predict the ratings for the items and users in the recommendation example dataset:
### - dense_5_Multiply_50_embeddings_10_epochs_dropout
```
load_path = "../models/"
# models =['dense_1_Multiply_50_embeddings_4_epochs_dropout',
# 'dense_5_Multiply_50_embeddings_10_epochs_dropout',
# 'matrix_facto_10_embeddings_100_epochs',
# 'dense_1_Multiply_50_embeddings_100_epochs_dropout']
# select the best model
models =[
'dense_5_Multiply_50_embeddings_10_epochs_dropout'
]
# models_meta = [
# 'dense_5_Meta_Multiply_50_embeddings_10_epochs_dropout',
# ]
# for mod in models:
# model = load_model(load_path+mod+'.h5')
# df_unseenData['preds_' + mod] = model.predict([df_unseenData['user_id'],
# df_unseenData['item_id'],
# df_unseenData['price_id'],
# df_unseenData['title_id']])
for mod in models:
model = load_model(load_path+mod+'.h5')
df_unseenData['preds_' + mod] = model.predict([df_unseenData['user_id'],
df_unseenData['item_id']])
df_unseenData.head(2)
# df_unseenData.sort_values(by=['preds_dense_5_Multiply_50_embeddings_10_epochs_dropout', 'user_id'], ascending=False)
df_unseenData['user_id'].head(n=2)
df_unseenData.columns
df_unseenData.shape
```
## Check which users exist on the example set
```
# df_unseenData.T
df_unseenData.user_id.value_counts().sort_values(ascending=False).head(5)
df_unseenData[['user_id','preds_dense_5_Multiply_50_embeddings_10_epochs_dropout']].sort_values('preds_dense_5_Multiply_50_embeddings_10_epochs_dropout',ascending=True).head(5)
```
## A function that will return recommendation list for a given user
```
df_unseenData.tail(n=3)
load_path = "../models/"
def get_recommendations(userID , model_scr, df_Data):
    """Return the ranked recommendation list for one user.

    Parameters
    ----------
    userID : int
        Internal (dense, remapped) user id to look up in ``df_Data``.
    model_scr : str
        Name of the prediction column to rank by (one score column per model).
    df_Data : pandas.DataFrame
        Users x items prediction table; must contain 'user_id', 'item_id',
        'product_title', 'helpful_votes' and the ``model_scr`` column.

    Returns
    -------
    pandas.DataFrame
        This user's items, indexed by 'item_id', with the prediction column
        renamed to 'score' and sorted by 'score' descending.  If the user is
        absent, the original ``userID`` is returned unchanged (legacy
        behaviour preserved for existing callers).
    """
    if userID not in df_Data['user_id'].values:
        print("\nUser ID not found %d" %userID)
        return userID
    # Select this user's rows, then rank once by the model score.
    # (The original sorted twice by the same column; one sort is enough.)
    cols = ['item_id', 'product_title', 'helpful_votes', model_scr]
    df_output = (df_Data.loc[df_Data['user_id'] == userID, cols]
                 .set_index('item_id')
                 .rename(columns={model_scr: 'score'})
                 .sort_values(by='score', ascending=False))
    return df_output
```
### Recommend items to a given user
- Using dense_5_Multiply_50_embeddings_10_epochs_dropout
```
df_unseenData.columns
####### User ID: 63008
df_output = get_recommendations(userID=63008,
model_scr='preds_dense_5_Multiply_50_embeddings_10_epochs_dropout',
df_Data=df_unseenData)
print(df_output.shape)
df_output = df_output.drop_duplicates(subset='product_title')
print(df_output.shape)
df_output.head(10)
```
## Make predictions for another user using another model:
```
####### User ID
user_id = 26406
df_output = get_recommendations(userID=user_id,
model_scr='preds_dense_5_Multiply_50_embeddings_10_epochs_dropout',
df_Data=df_unseenData)
print(df_output.shape)
df_output = df_output.drop_duplicates(subset='product_title')
print(df_output.shape)
df_output.head(10)
# df_output.columns
df_output = df_output.reset_index()
df_output['user_id'] = user_id
df_output['asin'] = df_output['item_id'].apply(lambda x : item_map[x])
df_output['url'] = df_output['item_id'].apply(lambda x : 'https://www.amazon.com/dp/'+item_map[x])
df_output = df_output[['user_id','item_id', 'score', 'asin', 'url', 'product_title']]
df_output.head()
unseenUser_list = df_unseenData['user_id'].values
print(len(unseenUser_list))
len(unique_reviewer)
```
### select 100 users from unseen data
```
# De-duplicate the user ids present in the unseen (user x item) prediction set.
unique_reviewer = list(set(unseenUser_list.tolist()))
print("total number of users: ", len(unique_reviewer))
all_predicted_df = pd.DataFrame()
# Build a top-50 recommendation list per sampled user and stack them all.
for user_id in unique_reviewer:
    print("selected 100 user_id:", user_id)
    df_output = get_recommendations(userID=user_id ,model_scr='preds_dense_5_Multiply_50_embeddings_10_epochs_dropout',df_Data=df_unseenData)
    df_output = df_output.reset_index()
    df_output['user_id'] = user_id
    # Recover the original Amazon ASIN and a product URL from the dense item id.
    df_output['asin'] = df_output['item_id'].apply(lambda x : item_map[x])
    df_output['url'] = df_output['item_id'].apply(lambda x : 'https://www.amazon.com/dp/'+item_map[x])
    df_output = df_output[['user_id','item_id', 'score', 'asin', 'url', 'product_title']]
    df_output = df_output.sort_values(by=['score'], ascending=False)
    # print(df_output.shape)
    # Keep each product title once (duplicate listings can share a title).
    df_output = df_output.drop_duplicates(subset='product_title')
    # print(df_output.shape)
    ####### select top products per user
    df_output = df_output.head(n=50)
    #concat
    all_predicted_df = all_predicted_df.append(df_output)
# reset index
all_predicted_df = all_predicted_df.reset_index(drop=True)
# all_predicted_df
all_predicted_df.shape
all_predicted_df.columns
# all_predicted_df = all_predicted_df.drop_duplicates()
# all_predicted_df = all_predicted_df.reset_index()
# all_predicted_df.drop(columns=['index'])
all_predicted_df.shape
#Shoes_for_100_users_per_20_products_prediction_Ver2.csv
# all_predicted_df.to_csv('Shoes_for_100_users_per_20_products_prediction_Ver3.csv', header=True, index=False)
# Shoes_for_100_users_per_100_products_prediction_Ver2
# all_predicted_df.to_csv('Shoes_for_100_users_per_100_products_prediction_Ver3.csv', header=True, index=False)
#Shoes_for_100_users_per_50_products_prediction_Ver2.csv
all_predicted_df.to_csv('Shoes_for_100_users_per_50_products_prediction_Ver3.csv', header=True, index=False)
#Shoes_for_100_users_per_ALL_products_prediction_Ver2.csv
# all_predicted_df.to_csv('Shoes_for_100_users_per_ALL_products_prediction_Ver3.csv', header=True, index=False)
#Shoes_for_ALL_users_per_ALL_products_prediction_Ver2.csv
# all_predicted_df.to_csv('Shoes_for_ALL_users_per_ALL_products_prediction_Ver3.csv', header=True, index=False)
# !aws s3 cp Shoes_for_ALL_users_per_ALL_products_prediction_Ver2.csv s3://dse-cohort5-group1/3-Keras-DeepRecommender-for-Shoes/predictions/Shoes_for_ALL_users_per_ALL_products_prediction_Ver2.csv
```
| github_jupyter |
# About
Welcome to the functionality examples notebook. This notebook is only intended for local use: it's a place to try out and explore the `henchman` api without worrying about what will render in html on github or in the docs.
```
import pandas as pd
import featuretools as ft
es = ft.demo.load_retail()
cutoff_times = pd.read_csv('../../../../Downloads/predict_may_sales.csv')[['customer_id', 'cutoff_time', 'total']]
cutoff_times['cutoff_time'] = pd.to_datetime(cutoff_times['cutoff_time'])
fm, features = ft.dfs(entityset=es, target_entity='customers', cutoff_time=cutoff_times, verbose=True)
es
```
# Diagnostics
```
from henchman.diagnostics import overview, warnings, column_report, profile
overview(es['order_products'].df)
column_report(es['order_products'].df)
warnings(fm)
```
# Plotting
```
from henchman.plotting import show
from henchman.plotting import (feature_importances, histogram, piechart, scatter, timeseries)
show(piechart(es['orders'].df['cancelled']), title='Cancelled Orders')
show(piechart(es['orders'].df['country'], mergepast=10), height=400, width=500)
show(timeseries(es['customers'].df['first_orders_time'], es['customers'].df['customer_id'],
n_bins=20, aggregate='count'),
width=900, height=300)
show(timeseries(es['order_products'].df['order_date'], es['order_products'].df['total'],
aggregate='sum', n_bins=12),
width=900, height=300)
show(scatter(es['orders'].df['cancelled'], es['orders'].df['cancelled'],
agg=es['orders'].df['country'], hover=True, aggregate='mean'),
title='Cancelled by country', x_axis='Cancelled', y_axis='Cancelled', height=300, width=300)
```
# Selection
```
from henchman.selection import RandomSelect, Dendrogram
from henchman.learning import inplace_encoder
X = inplace_encoder(fm.copy())
y = X.pop('total')
y = y > 1000
selector_1 = RandomSelect(n_feats=10)
selector_1.fit(X)
selector_1.transform(X).head()
selector_2 = Dendrogram(X, max_threshes=500)
from henchman.plotting import dendrogram
show(dendrogram(selector_2))
selector_2._shuffle_all_representatives()
X_p = selector_2.transform(X, n_feats=80)
X_p.head()
warnings(X_p)
from henchman.learning import inplace_encoder, create_holdout, create_model
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
import numpy as np
splits = 5
scores, fit_model = create_model(X, y, RandomForestClassifier(), roc_auc_score, n_splits=splits)
print('Average score of {:.2f} over {} splits (stdev {:.3f})'.format(np.mean(scores), splits, np.std(scores)))
scores, fit_model2 = create_model(X_p, y, RandomForestClassifier(), roc_auc_score, n_splits=splits)
print('Average score of {:.2f} over {} splits (stdev {:.3f})'.format(np.mean(scores), splits, np.std(scores)))
show(feature_importances(X_p, fit_model2, n_feats=10), height=300)
show(histogram(X['MAX(orders.SUM(order_products.total))'], y, col_max=5000))
from henchman.plotting import roc_auc
show(roc_auc(X_p, y, RandomForestClassifier(), n_splits=splits), height=400, width=400)
from henchman.plotting import f1
show(f1(X_p, y, RandomForestClassifier(), n_splits=splits), height=400, width=400)
import numpy as np
np.__version__
pd.__version__
```
| github_jupyter |
# Training Neural Networks
The network we built in the previous part isn't so smart, it doesn't know anything about our handwritten digits. Neural networks with non-linear activations work like universal function approximators. There is some function that maps your input to the output. For example, images of handwritten digits to class probabilities. The power of neural networks is that we can train them to approximate this function, and basically any function given enough data and compute time.
<img src="assets/function_approx.png" width=500px>
At first the network is naive, it doesn't know the function mapping the inputs to the outputs. We train the network by showing it examples of real data, then adjusting the network parameters such that it approximates this function.
To find these parameters, we need to know how poorly the network is predicting the real outputs. For this we calculate a **loss function** (also called the cost), a measure of our prediction error. For example, the mean squared loss is often used in regression and binary classification problems
$$
\large \ell = \frac{1}{2n}\sum_i^n{\left(y_i - \hat{y}_i\right)^2}
$$
where $n$ is the number of training examples, $y_i$ are the true labels, and $\hat{y}_i$ are the predicted labels.
By minimizing this loss with respect to the network parameters, we can find configurations where the loss is at a minimum and the network is able to predict the correct labels with high accuracy. We find this minimum using a process called **gradient descent**. The gradient is the slope of the loss function and points in the direction of fastest change. To get to the minimum in the least amount of time, we then want to follow the gradient (downwards). You can think of this like descending a mountain by following the steepest slope to the base.
<img src='assets/gradient_descent.png' width=350px>
## Backpropagation
For single layer networks, gradient descent is straightforward to implement. However, it's more complicated for deeper, multilayer neural networks like the one we've built. Complicated enough that it took about 30 years before researchers figured out how to train multilayer networks.
Training multilayer networks is done through **backpropagation** which is really just an application of the chain rule from calculus. It's easiest to understand if we convert a two layer network into a graph representation.
<img src='assets/backprop_diagram.png' width=550px>
In the forward pass through the network, our data and operations go from bottom to top here. We pass the input $x$ through a linear transformation $L_1$ with weights $W_1$ and biases $b_1$. The output then goes through the sigmoid operation $S$ and another linear transformation $L_2$. Finally we calculate the loss $\ell$. We use the loss as a measure of how bad the network's predictions are. The goal then is to adjust the weights and biases to minimize the loss.
To train the weights with gradient descent, we propagate the gradient of the loss backwards through the network. Each operation has some gradient between the inputs and outputs. As we send the gradients backwards, we multiply the incoming gradient with the gradient for the operation. Mathematically, this is really just calculating the gradient of the loss with respect to the weights using the chain rule.
$$
\large \frac{\partial \ell}{\partial W_1} = \frac{\partial L_1}{\partial W_1} \frac{\partial S}{\partial L_1} \frac{\partial L_2}{\partial S} \frac{\partial \ell}{\partial L_2}
$$
**Note:** I'm glossing over a few details here that require some knowledge of vector calculus, but they aren't necessary to understand what's going on.
We update our weights using this gradient with some learning rate $\alpha$.
$$
\large W^\prime_1 = W_1 - \alpha \frac{\partial \ell}{\partial W_1}
$$
The learning rate $\alpha$ is set such that the weight update steps are small enough that the iterative method settles in a minimum.
## Losses in PyTorch
Let's start by seeing how we calculate the loss with PyTorch. Through the `nn` module, PyTorch provides losses such as the cross-entropy loss (`nn.CrossEntropyLoss`). You'll usually see the loss assigned to `criterion`. As noted in the last part, with a classification problem such as MNIST, we're using the softmax function to predict class probabilities. With a softmax output, you want to use cross-entropy as the loss. To actually calculate the loss, you first define the criterion then pass in the output of your network and the correct labels.
Something really important to note here. Looking at [the documentation for `nn.CrossEntropyLoss`](https://pytorch.org/docs/stable/nn.html#torch.nn.CrossEntropyLoss),
> This criterion combines `nn.LogSoftmax()` and `nn.NLLLoss()` in one single class.
>
> The input is expected to contain scores for each class.
This means we need to pass in the raw output of our network into the loss, not the output of the softmax function. This raw output is usually called the *logits* or *scores*. We use the logits because softmax gives you probabilities which will often be very close to zero or one but floating-point numbers can't accurately represent values near zero or one ([read more here](https://docs.python.org/3/tutorial/floatingpoint.html)). It's usually best to avoid doing calculations with probabilities, typically we use log-probabilities.
```
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import datasets, transforms
# Define a transform to normalize the data
# NOTE(review): MNIST tensors have a single channel, but Normalize is given
# 3-channel mean/std tuples; newer torchvision versions reject this —
# Normalize((0.5,), (0.5,)) is the 1-channel equivalent.  TODO confirm
# against the installed torchvision version.
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                              ])
# Download and load the training data
trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
```
### Note
If you haven't seen `nn.Sequential` yet, please finish the end of the Part 2 notebook.
```
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10))
# Define the loss
criterion = nn.CrossEntropyLoss()
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our logits
logits = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logits, labels)
print(loss)
```
In my experience it's more convenient to build the model with a log-softmax output using `nn.LogSoftmax` or `F.log_softmax` ([documentation](https://pytorch.org/docs/stable/nn.html#torch.nn.LogSoftmax)). Then you can get the actual probabilities by taking the exponential `torch.exp(output)`. With a log-softmax output, you want to use the negative log likelihood loss, `nn.NLLLoss` ([documentation](https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss)).
>**Exercise:** Build a model that returns the log-softmax as the output and calculate the loss using the negative log likelihood loss. Note that for `nn.LogSoftmax` and `F.log_softmax` you'll need to set the `dim` keyword argument appropriately. `dim=0` calculates softmax across the rows, so each column sums to 1, while `dim=1` calculates across the columns so each row sums to 1. Think about what you want the output to be and choose `dim` appropriately.
```
## Solution
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
# Define the loss
criterion = nn.NLLLoss()
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our log-probabilities
logps = model(images)
# Calculate the loss with the logps and the labels
loss = criterion(logps, labels)
print(loss)
```
## Autograd
Now that we know how to calculate a loss, how do we use it to perform backpropagation? Torch provides a module, `autograd`, for automatically calculating the gradients of tensors. We can use it to calculate the gradients of all our parameters with respect to the loss. Autograd works by keeping track of operations performed on tensors, then going backwards through those operations, calculating gradients along the way. To make sure PyTorch keeps track of operations on a tensor and calculates the gradients, you need to set `requires_grad = True` on a tensor. You can do this at creation with the `requires_grad` keyword, or at any time with `x.requires_grad_(True)`.
You can turn off gradients for a block of code with the `torch.no_grad()` content:
```python
x = torch.zeros(1, requires_grad=True)
>>> with torch.no_grad():
... y = x * 2
>>> y.requires_grad
False
```
Also, you can turn on or off gradients altogether with `torch.set_grad_enabled(True|False)`.
The gradients are computed with respect to some variable `z` with `z.backward()`. This does a backward pass through the operations that created `z`.
```
x = torch.randn(2,2, requires_grad=True)
print(x)
y = x**2
print(y)
```
Below we can see the operation that created `y`, a power operation `PowBackward0`.
```
## grad_fn shows the function that generated this variable
print(y.grad_fn)
```
The autograd module keeps track of these operations and knows how to calculate the gradient for each one. In this way, it's able to calculate the gradients for a chain of operations, with respect to any one tensor. Let's reduce the tensor `y` to a scalar value, the mean.
```
z = y.mean()
print(z)
```
You can check the gradients for `x` and `y` but they are empty currently.
```
print(x.grad)
```
To calculate the gradients, you need to run the `.backward` method on a Variable, `z` for example. This will calculate the gradient for `z` with respect to `x`
$$
\frac{\partial z}{\partial x} = \frac{\partial}{\partial x}\left[\frac{1}{n}\sum_i^n x_i^2\right] = \frac{x}{2}
$$
```
z.backward()
print(x.grad)
print(x/2)
```
These gradients calculations are particularly useful for neural networks. For training we need the gradients of the cost with respect to the weights. With PyTorch, we run data forward through the network to calculate the loss, then, go backwards to calculate the gradients with respect to the loss. Once we have the gradients we can make a gradient descent step.
## Loss and Autograd together
When we create a network with PyTorch, all of the parameters are initialized with `requires_grad = True`. This means that when we calculate the loss and call `loss.backward()`, the gradients for the parameters are calculated. These gradients are used to update the weights with gradient descent. Below you can see an example of calculating the gradients using a backwards pass.
```
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
images, labels = next(iter(trainloader))
images = images.view(images.shape[0], -1)
logits = model(images)
loss = criterion(logits, labels)
images.shape
print('Before backward pass: \n', model[0].weight.grad)
loss.backward()
print('After backward pass: \n', model[0].weight.grad)
```
## Training the network!
There's one last piece we need to start training, an optimizer that we'll use to update the weights with the gradients. We get these from PyTorch's [`optim` package](https://pytorch.org/docs/stable/optim.html). For example we can use stochastic gradient descent with `optim.SGD`. You can see how to define an optimizer below.
```
from torch import optim
# Optimizers require the parameters to optimize and a learning rate
optimizer = optim.SGD(model.parameters(), lr=0.01)
```
Now we know how to use all the individual parts so it's time to see how they work together. Let's consider just one learning step before looping through all the data. The general process with PyTorch:
* Make a forward pass through the network
* Use the network output to calculate the loss
* Perform a backward pass through the network with `loss.backward()` to calculate the gradients
* Take a step with the optimizer to update the weights
Below I'll go through one training step and print out the weights and gradients so you can see how it changes. Note that I have a line of code `optimizer.zero_grad()`. When you do multiple backwards passes with the same parameters, the gradients are accumulated. This means that you need to zero the gradients on each training pass or you'll retain gradients from previous training batches.
```
print('Initial weights - ', model[0].weight)
images, labels = next(iter(trainloader))
images.resize_(64, 784)
# Clear the gradients, do this because gradients are accumulated
optimizer.zero_grad()
# Forward pass, then backward pass, then update weights
output = model(images)
loss = criterion(output, labels)
loss.backward()
print('Gradient -', model[0].weight.grad)
# Take an update step and view the new weights
optimizer.step()
print('Updated weights - ', model[0].weight)
```
### Training for real
Now we'll put this algorithm into a loop so we can go through all the images. Some nomenclature, one pass through the entire dataset is called an *epoch*. So here we're going to loop through `trainloader` to get our training batches. For each batch, we'll do a training pass where we calculate the loss, do a backwards pass, and update the weights.
>**Exercise:** Implement the training pass for our network. If you implemented it correctly, you should see the training loss drop with each epoch.
```
## Your solution here
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.003)
epochs = 5
for e in range(epochs):
running_loss = 0
# batched GD
for images, labels in trainloader:
# Flatten MNIST images into a 784 long vector
images = images.view(images.shape[0], -1)
# TODO: Training pass
optimizer.zero_grad()
output = model(images)
loss = criterion(output, labels)
loss.backward()
# Take an optimizer step.
optimizer.step()
running_loss += loss.item()
else:
print(f"Training loss: {running_loss/len(trainloader)}")
```
With the network trained, we can check out its predictions.
```
%matplotlib inline
import helper

# Run one image through the trained network and visualise the class
# probabilities next to the image.
images, labels = next(iter(trainloader))
img = images[0].view(1, 784)
# Turn off gradients to speed up this part
with torch.no_grad():
    logps = model(img)

# Output of the network are log-probabilities, need to take exponential for probabilities
ps = torch.exp(logps)
helper.view_classify(img.view(1, 28, 28), ps)
```
Now our network is brilliant. It can accurately predict the digits in our images. Next up you'll write the code for training a neural network on a more complex dataset.
| github_jupyter |
```
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
DEVICE = "cpu"
# if torch.cuda.is_available():
# DEVICE = "cuda"
DEVICE
class Memory(nn.Module):
    """External NTM memory bank: N slots of width M, batched per sequence."""

    def __init__(self, N, M):
        super().__init__()
        self.N = N
        self.M = M
        self.size = [self.N, self.M]
        # Initial memory template; a buffer so it follows .to(device) and
        # is saved in state_dict, but receives no gradient.
        self.register_buffer("memory_bias", torch.Tensor(N, M))
        stdev = 1 / (np.sqrt(N + M))
        nn.init.uniform_(self.memory_bias, -stdev, stdev)

    def reset(self, batch_size=1):
        """Re-initialise the per-batch memory from the bias template."""
        self.batch_size = batch_size
        self.memory = self.memory_bias.clone().repeat(batch_size, 1, 1)

    def read(self, w):
        """Weighted read: (B, N) weights over (B, N, M) memory -> (B, M)."""
        data = torch.matmul(w.unsqueeze(1), self.memory).squeeze(1)
        return data

    def write(self, w, e_gate, a_gate):
        """Erase-then-add write (NTM paper eqs. 3-4).

        Bug fix: the original assigned an uninitialised ``torch.Tensor`` to
        ``self.memory`` and immediately overwrote it; the dead allocation
        has been removed.
        """
        self.flashback = self.memory  # previous contents, kept for inspection
        erase = torch.matmul(w.unsqueeze(-1), e_gate.unsqueeze(1))
        add = torch.matmul(w.unsqueeze(-1), a_gate.unsqueeze(1))
        self.memory = self.flashback * (1 - erase) + add

    def address(self, k, b, g, s, y, w_prev):
        """Addressing pipeline: content focus -> gate -> shift -> sharpen."""
        wc = self._similarity(k, b)
        wg = self._interpolate(w_prev, wc, g)
        w_ = self._shift(wg, s)
        w = self._sharpen(w_, y)
        return w

    def _similarity(self, k, b):
        """Content addressing: softmax over b-scaled cosine similarity with key k."""
        k = k.view(self.batch_size, 1, -1)
        # 1e-16 guards cosine_similarity against zero vectors.
        similarity = F.cosine_similarity(self.memory + 1e-16, k + 1e-16, dim=-1)
        return F.softmax(b * similarity, dim=1)

    def _interpolate(self, w_prev, wc, g):
        """Blend content weights with the previous weights via gate g in (0, 1)."""
        return g * wc + (1 - g) * w_prev

    def _shift(self, wg, s):
        """Apply each batch element's shift kernel s to its weights."""
        shift = torch.zeros(wg.size())
        for batch in range(self.batch_size):
            shift[batch] = _convolve(wg[batch], s[batch])
        return shift

    def _sharpen(self, w_, y):
        """Raise weights to power y (>= 1) and renormalise to sum to 1."""
        w = w_ ** y
        return torch.div(w, torch.sum(w, dim=1).view(-1, 1) + 1e-16)
def _convolve(w, s):
t = torch.cat([w[-1:], w, w[:1]])
c = F.conv1d(t.view(1, 1, -1), s.view(1, 1, -1)).view(-1)
return c
class ReadHead(nn.Module):
    """NTM read head: maps controller state to addressing params and reads memory."""

    def __init__(self, memory, controller_size):
        super().__init__()
        self.memory = memory
        self.N, self.M = self.memory.size
        self.controller_size = controller_size
        # Linear projections to the addressing parameters k, beta, g, s, gamma.
        self.key = nn.Linear(self.controller_size, self.M)
        self.key_strength = nn.Linear(self.controller_size, 1)
        self.interpolation_gate = nn.Linear(self.controller_size, 1)
        self.shift_weighting = nn.Linear(self.controller_size, 3)  # shifts -1/0/+1
        self.sharpen_factor = nn.Linear(self.controller_size, 1)
        self.is_read_head = True
        self.reset()

    def _address(self, k, b, g, s, y, w_prev):
        """Squash raw projections into valid ranges, then address memory."""
        k = k.clone()
        b = F.softplus(b)            # key strength > 0
        g = torch.sigmoid(g)         # interpolation gate in (0, 1)
        s = torch.softmax(s, dim=1)  # shift kernel sums to 1
        y = 1 + F.softplus(y)        # sharpening factor >= 1
        w = self.memory.address(k, b, g, s, y, w_prev)
        return w

    def forward(self, controller_state, w_prev):
        """Return (read vector, new address weights) for this timestep."""
        k = self.key(controller_state)
        b = self.key_strength(controller_state)
        g = self.interpolation_gate(controller_state)
        s = self.shift_weighting(controller_state)
        y = self.sharpen_factor(controller_state)
        w = self._address(k, b, g, s, y, w_prev)
        data = self.memory.read(w)
        return data, w

    def create_new_state(self, batch_size):
        # Initial address weights: all zeros.
        return torch.zeros(batch_size, self.N)

    def reset(self):
        """Xavier-init all projection weights; small-normal biases."""
        nn.init.xavier_uniform_(self.key.weight, gain=1.4)
        nn.init.xavier_uniform_(self.key_strength.weight, gain=1.4)
        nn.init.xavier_uniform_(self.interpolation_gate.weight, gain=1.4)
        nn.init.xavier_uniform_(self.shift_weighting.weight, gain=1.4)
        nn.init.xavier_uniform_(self.sharpen_factor.weight, gain=1.4)
        nn.init.normal_(self.key.bias, std=0.01)
        nn.init.normal_(self.key_strength.bias, std=0.01)
        nn.init.normal_(self.interpolation_gate.bias, std=0.01)
        nn.init.normal_(self.shift_weighting.bias, std=0.01)
        nn.init.normal_(self.sharpen_factor.bias, std=0.01)
class WriteHead(nn.Module):
    """NTM write head: addresses memory like ReadHead, then erase/add-writes it."""

    def __init__(self, memory, controller_size):
        super().__init__()
        self.memory = memory
        self.N, self.M = self.memory.size
        self.controller_size = controller_size
        # Addressing projections (identical set to ReadHead)...
        self.key = nn.Linear(self.controller_size, self.M)
        self.key_strength = nn.Linear(self.controller_size, 1)
        self.interpolation_gate = nn.Linear(self.controller_size, 1)
        self.shift_weighting = nn.Linear(self.controller_size, 3)
        self.sharpen_factor = nn.Linear(self.controller_size, 1)
        # ...plus erase/add vector projections for the write itself.
        self.erase = nn.Linear(self.controller_size, self.M)
        self.add = nn.Linear(self.controller_size, self.M)
        self.is_read_head = False
        self.reset()

    def _address(self, k, b, g, s, y, w_prev):
        """Squash raw projections into valid ranges, then address memory."""
        k = k.clone()
        b = F.softplus(b)            # key strength > 0
        g = torch.sigmoid(g)         # gate in (0, 1)
        s = torch.softmax(s, dim=1)  # shift kernel sums to 1
        y = 1 + F.softplus(y)        # sharpening >= 1
        w = self.memory.address(k, b, g, s, y, w_prev)
        return w

    def forward(self, controller_state, w_prev):
        """Address memory and write to it; returns the new address weights."""
        k = self.key(controller_state)
        b = self.key_strength(controller_state)
        g = self.interpolation_gate(controller_state)
        s = self.shift_weighting(controller_state)
        y = self.sharpen_factor(controller_state)
        e = self.erase(controller_state)
        a = self.add(controller_state)
        e = torch.sigmoid(e)  # erase gate must lie in (0, 1); add vector is unbounded
        w = self._address(k, b, g, s, y, w_prev)
        self.memory.write(w, e, a)
        return w

    def create_new_state(self, batch_size):
        # Initial address weights: all zeros.
        return torch.zeros(batch_size, self.N)

    def reset(self):
        """Xavier-init all projection weights; small-normal biases."""
        nn.init.xavier_uniform_(self.key.weight, gain=1.4)
        nn.init.xavier_uniform_(self.key_strength.weight, gain=1.4)
        nn.init.xavier_uniform_(self.interpolation_gate.weight, gain=1.4)
        nn.init.xavier_uniform_(self.shift_weighting.weight, gain=1.4)
        nn.init.xavier_uniform_(self.sharpen_factor.weight, gain=1.4)
        nn.init.xavier_uniform_(self.erase.weight, gain=1.4)
        nn.init.xavier_uniform_(self.add.weight, gain=1.4)
        nn.init.normal_(self.key.bias, std=0.01)
        nn.init.normal_(self.key_strength.bias, std=0.01)
        nn.init.normal_(self.interpolation_gate.bias, std=0.01)
        nn.init.normal_(self.shift_weighting.bias, std=0.01)
        nn.init.normal_(self.sharpen_factor.bias, std=0.01)
        nn.init.normal_(self.erase.bias, std=0.01)
        nn.init.normal_(self.add.bias, std=0.01)
class Controller(nn.Module):
    """LSTM controller for the NTM: one timestep at a time, with learnable
    initial hidden/cell states."""

    def __init__(self, no_input, no_output, no_layer):
        super().__init__()
        self.no_input = no_input
        self.no_output = no_output
        self.no_layer = no_layer
        self.size = [self.no_input, self.no_output]
        self.lstm = nn.LSTM(input_size=self.no_input,
                            hidden_size=self.no_output,
                            num_layers=self.no_layer)
        # Learnable initial states, broadcast across the batch at reset time.
        self.h_bias = nn.Parameter(torch.randn(self.no_layer, 1, self.no_output) * 0.05)
        self.c_bias = nn.Parameter(torch.randn(self.no_layer, 1, self.no_output) * 0.05)
        self.reset()

    def forward(self, data, prev_state):
        """Run a single timestep; `data` is (batch, features)."""
        step_out, new_state = self.lstm(data.unsqueeze(0), prev_state)
        return step_out.squeeze(0), new_state

    def create_new_state(self, batch_size):
        """Expand the learned state biases to the requested batch size."""
        hidden = self.h_bias.clone().repeat(1, batch_size, 1)
        cell = self.c_bias.clone().repeat(1, batch_size, 1)
        return hidden, cell

    def reset(self):
        """Zero the LSTM biases; uniformly initialise its weight matrices."""
        bound = 1 / (np.sqrt(self.no_input + self.no_output))
        for param in self.lstm.parameters():
            if param.dim() == 1:
                nn.init.constant_(param, 0)
            else:
                nn.init.uniform_(param, -bound, bound)
class NTM(nn.Module):
    """Neural Turing Machine: LSTM controller + paired read/write heads over
    an external memory, with a final linear + sigmoid output layer."""

    def __init__(self, no_input, no_output, controller_size, controller_layer, no_head, N, M):
        super().__init__()
        self.no_input = no_input
        self.no_output = no_output
        self.controller_size = controller_size
        self.controller_layer = controller_layer
        self.no_head = no_head
        self.N = N
        self.M = M
        self.memory = Memory(self.N, self.M)
        # Controller input = external input concatenated with the previous
        # read vectors (one M-vector per read head).
        self.controller = Controller(self.no_input + (self.M * self.no_head), self.controller_size, self.controller_layer)
        self.head = nn.ModuleList([])
        _, self.controller_size = self.controller.size
        # Each "head" slot is actually a read/write PAIR.
        for head_no in range(self.no_head):
            self.head += [
                ReadHead(self.memory, self.controller_size),
                WriteHead(self.memory, self.controller_size)
            ]
        self.no_read_head = 0
        self.read = []
        for head in self.head:
            if head.is_read_head:
                # Initial read vector per read head, registered as a buffer.
                # NOTE(review): self.read keeps the pre-registration tensor,
                # so .to(device) moves the buffer but not this list — verify
                # before running on GPU.
                read_bias = torch.randn(1, self.M) * 0.01
                self.register_buffer("read{}_bias".format(self.no_read_head), read_bias.data)
                self.read += [read_bias]
                self.no_read_head += 1
        self.fc = nn.Linear(self.controller_size + self.no_read_head * self.M, self.no_output)
        self.reset()

    def create_new_state(self, batch_size):
        """Fresh (read vectors, controller state, head states) for a sequence."""
        read = [r.clone().repeat(batch_size, 1) for r in self.read]
        controller_state = self.controller.create_new_state(batch_size)
        head_state = [head.create_new_state(batch_size) for head in self.head]
        return read, controller_state, head_state

    def init_sequence(self, batch_size):
        """Reset memory and recurrent state before feeding a new sequence."""
        self.batch_size = batch_size
        self.memory.reset(batch_size)
        self.previous_state = self.create_new_state(batch_size)

    def forward(self, x=None):
        """One timestep; x=None feeds zeros (used while reading the copy back)."""
        if x is None:
            x = torch.zeros(self.batch_size, self.no_input)
        prev_read, prev_controller_state, prev_head_state = self.previous_state
        inp = torch.cat([x] + prev_read, dim=1)
        controller_output, controller_state = self.controller(inp, prev_controller_state)
        reads = []
        head_state = []
        # The loop variable shadows prev_head_state; this is safe because zip
        # captured the original list before the first rebind.
        for head, prev_head_state in zip(self.head, prev_head_state):
            if head.is_read_head:
                r, h_state = head(controller_output, prev_head_state)
                reads += [r]
            else:
                h_state = head(controller_output, prev_head_state)
            head_state += [h_state]
        out = torch.cat([controller_output] + reads, dim=1)
        out = torch.sigmoid(self.fc(out))  # copy task targets are binary
        self.previous_state = (reads, controller_state, head_state)
        return out, self.previous_state

    def reset(self):
        """Initialise the output projection."""
        nn.init.xavier_uniform_(self.fc.weight, gain=1)
        nn.init.normal_(self.fc.bias, std=0.01)

    def no_param(self):
        """Total number of scalar parameters in the model."""
        no_param = 0
        for param in self.parameters():
            no_param += param.data.view(-1).size(0)
        return no_param
def dataloader(no_batch, batch_size, seq_width, min_len, max_len):
    """Yield (batch_no, input, target) batches for the NTM copy task.

    Each sequence is random binary of width `seq_width`; the input carries an
    extra delimiter channel that is set to 1 one step after the sequence ends.
    """
    for batch_no in range(no_batch):
        # Bug fix: np.random.randint excludes the upper bound, so sequences of
        # length `max_len` were never generated; sample from [min_len, max_len].
        seq_len = np.random.randint(min_len, max_len + 1)
        seq = np.random.binomial(1, 0.5, (seq_len, batch_size, seq_width))
        seq = torch.from_numpy(seq)
        inp = torch.zeros(seq_len + 1, batch_size, seq_width + 1)
        inp[:seq_len, :, :seq_width] = seq
        inp[seq_len, :, seq_width] = 1  # end-of-sequence delimiter
        out = seq.clone()
        yield batch_no + 1, inp.float(), out.float()
no_input = 9
no_output = 8
controller_size = 100
controller_layer = 1
no_head = 1
N = 128
M = 20
COPIER = NTM(no_input=no_input, no_output=no_output, controller_size=controller_size, controller_layer=controller_layer, no_head=no_head, N=N, M=M).to(DEVICE)
def progress_clean():
    """Erase the in-place progress-bar line by overwriting it with spaces."""
    blank = " " * 80
    print(f"\r{blank}", end='\r')
def progress_bar(batch_num, report_interval, last_loss):
    """Render an in-place 40-char progress bar for the current report window."""
    done = (((batch_num - 1) % report_interval) + 1) / report_interval
    filled = int(done * 40)
    bar = "=" * filled + " " * (40 - filled)
    print("\r[{}]: {} (Loss: {:.4f})".format(bar, batch_num, last_loss), end='')
def save_checkpoint(net, name, args, batch_num, losses, costs, seq_lengths):
    """Persist model weights and training history under args.checkpoint_path.

    Writes `<name>-<seed>-batch-<batch_num>.model` (state_dict) and a
    matching `.json` file with the loss/cost/sequence-length history.
    """
    import json  # bug fix: json was never imported at module level

    progress_clean()
    basename = "{}/{}-{}-batch-{}".format(args.checkpoint_path, name, args.seed, batch_num)

    model_fname = basename + ".model"
    LOGGER.info("Saving model checkpoint to: '%s'", model_fname)
    torch.save(net.state_dict(), model_fname)

    # Save the training history
    train_fname = basename + ".json"
    LOGGER.info("Saving model training history to '%s'", train_fname)
    content = {
        "loss": losses,
        "cost": costs,
        "seq_lengths": seq_lengths
    }
    # Bug fix: the original leaked the file handle; close it deterministically.
    with open(train_fname, 'wt') as f:
        f.write(json.dumps(content))
def clip_grads(net):
    """Clamp every parameter gradient to the range [-10, 10] in place.

    (The original docstring said "[10, 10]"; the code clamps to [-10, 10].)
    """
    for p in net.parameters():
        if p.grad is not None:
            p.grad.data.clamp_(-10, 10)
import logging
import time
LOGGER = logging.getLogger(__name__)
def get_ms():
    """Return the current wall-clock time in milliseconds."""
    return 1000 * time.time()
# Copy-task training: feed the input sequence, then read the copy back with
# zero inputs and score it bitwise with BCE.
no_batch = 50000
batch_size = 1
loss = nn.BCELoss()
optimizer = torch.optim.RMSprop(COPIER.parameters(), momentum=0.9, alpha=0.95, lr=1e-4)

# Running history of loss / bit-error cost / sequence lengths.
errors = []
costs = []
seq_length = []
start_ms = get_ms()

for batch_no, x, y in dataloader(no_batch=no_batch, batch_size=batch_size, seq_width=8, min_len=1, max_len=20):
    optimizer.zero_grad()
    inp_seq_len = x.size(0)
    out_seq_len = y.size(0)
    # NOTE(review): this logs the same message on every batch; it was
    # probably meant to run once before the loop.
    LOGGER.info("Training model for %d batches (batch_size=%d)...",
                no_batch, batch_size)
    # Reset memory/controller state, then feed the whole input sequence.
    COPIER.init_sequence(batch_size)
    for i in range(inp_seq_len):
        COPIER(x[i])
    # Read the copy back with no input (zero vectors).
    y_ = torch.zeros(y.size())
    for i in range(out_seq_len):
        y_[i], _ = COPIER()
    error = loss(y_, y)
    error.backward()
    clip_grads(COPIER)
    optimizer.step()
    # Cost = number of wrong bits after thresholding predictions at 0.5.
    y_binarized = y_.clone().data
    y_binarized.apply_(lambda x: 0 if x < 0.5 else 1)
    cost = torch.sum(torch.abs(y_binarized - y.data))
    errors.append(error.item())
    costs.append(cost.item()/batch_size)
    seq_length += [y.size(0)]
    progress_bar(batch_no, 200, error)
    # Report
    if batch_no % 200 == 0:
        mean_loss = np.array(errors[-200:]).mean()
        mean_cost = np.array(costs[-200:]).mean()
        mean_time = int(((get_ms() - start_ms) / 200) / batch_size)
        progress_clean()
        print("Mean Time: {} ms".format(mean_time))
        print("Mean Cost: {}".format(mean_cost))
        print("Mean Loss: {}".format(mean_loss))
        print("=====================================")
        LOGGER.info("Batch %d Loss: %.6f Cost: %.2f Time: %d ms/sequence",
                    batch_no, mean_loss, mean_cost, mean_time)
        start_ms = get_ms()
    # # Checkpoint
    # if (1000 != 0) and (batch_no % 1000 == 0):
    #     save_checkpoint(copier, "copier"+str(batch_no), args,
    #                     batch_0, losses, costs, seq_lengths)
```
# Network has successfully learnt to copy memory elements
| github_jupyter |
<a href="https://colab.research.google.com/github/reic/colab_python/blob/main/crawler.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# 網路爬蟲與多執行緒練習
未穩定的版本
```
#@title UU看書 專用(多執行緖)
#@markdown 還在修正的程式,可以直接從這一個區塊執行
import requests
import os,re
from bs4 import BeautifulSoup
import concurrent.futures
try:
os.mkdir("/content/tmp")
except:
print("目錄已存在")
os.chdir("/content/tmp")
os.system("rm -fr *")
def get_html(urls):
    """Download one chapter page and save its cleaned text as <chapter_id>.txt.

    `urls` is a [title, article_url] pair; the chapter id is the trailing
    path segment of the URL (extension stripped).
    """
    [title, art_url] = urls
    art_id = art_url[art_url.rfind("/")+1:-5]
    soup = BeautifulSoup(requests.get(art_url).text)
    content = soup.find(name="div", id='contentbox')
    print(art_id)
    text = f"{title}\n\n"
    # Flatten the HTML to one line, strip non-breaking-space indents and ad
    # blocks, turn <br>/<p> into newlines, then drop all remaining tags.
    context = str(content).replace("\n", "").replace("\r", "")
    context = context.replace("\xa0\xa0\xa0\xa0", "")
    context = re.sub('<div class="ad_content".*?</div>', '', context)
    context = context.replace("<br/>", "\n").replace("</p>", "\n")
    context = re.sub('<.*?>', "", context).split("\n")
    context = [itm.strip() for itm in context if len(itm) > 0]
    text += "\n\n".join(context) + "\n\n"
    with open(f"{art_id}.txt", mode="w", encoding="utf-8") as f:
        f.write(text)
#@markdown 書籍目錄網址
url="https://tw.uukanshu.com/b/107806/" #@param {type:'string'}

# Scrape the table of contents, fetch all chapters in parallel via
# get_html, then merge the numbered chapter files into one book file.
sites = url[:url.find("/", 8)]  # scheme + host prefix
reg = requests.get(url)
# soup=BeautifulSoup(reg.text,"html.parser")
soup = BeautifulSoup(reg.text)
output_name = soup.find("h2").getText()  # book title
articles = soup.find(name="ul", id="chapterList").find_all("a")
links = []
# len(articles)
for i in articles:
    href = i.get("href")
    links.append([i.get("title"), f"{sites}{href}"])
links.sort(key=lambda x: x[1])

# Create and run 10 worker threads at once.
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
    executor.map(get_html, links)

output_name = soup.find("h2").getText()
files_text = os.listdir()
files_text = [file for file in files_text if file.endswith(".txt")]
# Sort files numerically: names vary in length, so sort on the integer stem.
files_text.sort(key=lambda x: int(x[:-4]))
with open(f"../{output_name}.txt", "w", encoding='utf-8') as f:
    for file in files_text:
        with open(file, "r") as f2:
            f.write(f2.read())

from google.colab import files
files.download('../{}.txt'.format(output_name))
```
# 效率
透過下述的方法合併檔案,因為輸出檔需要被反覆開啟太多次,且隨著檔案大小逐漸增加,效能會下跌。
```python
for file in files:
os.system("cat {}>> ../{}.txt".format(file,output_name))
```
若改用下述的方法, output 檔,只需要開啟一次。可以大大縮短時間。
```python
with open(f"../{output_name}.txt","w",encoding='utf-8') as f:
for file in files_text:
with open(file,"r") as f2:
f.write(f2.read())
```
```
#@title 品書閣 專用(多執行緖)
#@markdown 還在修正的程式,可以直接從這一個區塊執行
import requests
import os,re
from bs4 import BeautifulSoup
import concurrent.futures
try:
os.mkdir("/content/tmp")
except:
print("目錄已存在")
os.chdir("/content/tmp")
os.system("rm -fr *")
stop_word="請大家收藏"
def get_html(arr):
    """Download one chapter (plus its continuation page, if any) to <id>.txt.

    `arr` is a [title, url] pair. Ad lines are skipped; extraction stops at
    the module-level `stop_word` marker.
    """
    [titles, links] = arr
    art_id = links[links.rfind("/")+1:-5]
    soup = BeautifulSoup(requests.get(links).text)
    content = soup.find(name="div", id='content').find_all("p")
    text = f"{titles}\n"
    for itm in content[1:]:
        txt = itm.getText()
        if "一秒記住" in txt:  # inline ad line — skip
            continue
        if "點下一章繼續閱讀" in txt:  # chapter continues on the next page
            text += f"{txt}\n"
            break
        if stop_word in txt:  # everything after this marker is boilerplate
            break
        # text+="%s \n" %txt
        text += f"{txt}\n"
    # print(art_id)
    # Follow the "next chapter" link; if it still carries this chapter's id,
    # the chapter was split and the second page is appended too.
    next_article = BeautifulSoup(requests.get(links).text).find("a", text="下一章").get("href")
    if art_id in next_article:
        soup2 = BeautifulSoup(requests.get(f"{sites}{next_article}").text)
        content2 = soup2.find(name="div", id="content").find_all("p")
        print("***", next_article)
        for itm2 in content2:
            txt2 = itm2.getText()
            if "一秒記住" in txt2:
                continue
            if stop_word in txt2:
                break
            text += f"{txt2}\n"
    with open(f"{art_id}.txt", mode="w", encoding="utf-8") as f:
        f.write(text)
#@markdown 書籍目錄網址
url="https://tw.pinsuge.com/index/85767.html" #@param {type:'string'}
sites=url[:url.find("/",8)]
req = requests.get(url)
soup=BeautifulSoup(req.text)
articles=soup.find_all("a")
links=[]
titles=[]
for itm in articles[1:]:
if not itm.get("title"):
continue
links.append([itm.get("title",),"{}{}".format(sites,itm.get("href"))])
# 同時建立及啟用10個執行緒
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
executor.map(get_html, links)
files_text=os.listdir()
files_text=[file for file in files_text if file.endswith(".txt")]
# 檔案排序,需要考慮 檔案名稱長短不一的問題,問前是透過數字的處理
files_text.sort(key=lambda x:int(x[:-4]))
with open(f"../{output_name}.txt","w",encoding='utf-8') as f:
for file in files_text:
with open(file,"r") as f2:
f.write(f2.read())
from google.colab import files
files.download('../{}.txt'.format(output_name))
```
# 參考資料
```
#@title BeauttifulSoup 練習
#@markdown extract, decompose 的練習
html_doc = """<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were <script>.lkjlkjlj\n\r
我是\n</script> <i> hello world</i>
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
from bs4 import BeautifulSoup
soup=BeautifulSoup(html_doc,"lxml")
txt=soup.find("p","story")
print(str(txt))
print("".center(100,"-"))
txt.script.decompose()
txt.a.extract()
txt.a.extract()
print(str(txt))
#@title 品書閣 (舊) 單執行緖版本
#@markdown 查看目錄=>章節列表
import requests
import os,re,time
from bs4 import BeautifulSoup
import concurrent.futures
try:
os.mkdir("/content/tmp")
except:
print("目錄已存在")
os.chdir("/content/tmp")
os.system("rm -fr *")
url="https://tw.pinsuge.com/index/85767.html" #@param {type:'string'}
sites=url[:url.find("/",8)]
req = requests.get(url)
#@markdown 在 stop_word 之後為相關的廣告內容
stop_word="請大家收藏" #@param {type:'string'}
#@markdown 若章節分成兩個頁面的處理字元
next_word="點下一章繼續" #@param {type:'string'}
soup=BeautifulSoup(req.text)
soup
articles=soup.find_all(name="a")
output_name=soup.find("h1").getText()
links=[]
for itm in articles[1:]:
if not itm.get("title"):
continue
links.append([itm.get("title",),"{}{}".format(sites,itm.get("href"))])
def get_content(content):
    """Extract chapter text from a list of <p> tags, skipping ad lines.

    Stops at the paragraph containing the module-level `stop_word`; the
    final paragraph is always dropped (content[:-1]).
    """
    to_text = ''
    for itm in content[:-1]:
        txt = itm.getText()
        if "一秒記住" in txt:  # inline ad line — skip
            continue
        if stop_word in txt:  # boilerplate marker — stop here
            break
        to_text += "%s \n" % txt
    return to_text
def get_soup(url):
    """Fetch a URL and parse it with BeautifulSoup (default parser)."""
    return BeautifulSoup(requests.get(url).text)

def cont_chapter_check(soup, text):
    """True if any <p> mentions `text`, i.e. the chapter continues on another page."""
    return (len(soup.find_all(name="p", string=re.compile(text))) > 0)

def cont_chapter_link(soup):
    """Return the href of the next-chapter link."""
    return soup.find("a", text="下一章").get("href")
index = 1
star = time.time()

# Download every chapter sequentially and save each as a numbered text file.
for link in links:
    soup = get_soup(link[1])
    to_text = ''
    to_text += "%s \n" % link[0]
    print(link[0])
    to_text += get_content(soup.find(name="div", id="content").find_all(name="p"))
    # Some chapters span two pages; fetch and append the continuation.
    if cont_chapter_check(soup, next_word):
        reg2 = requests.get("{}{}".format(sites, cont_chapter_link(soup)))
        to_text += get_content(BeautifulSoup(reg2.text).find(name="div", id="content").find_all(name="p"))
    # print(to_text)
    with open("%03d.txt" % index, mode="w", encoding="utf-8") as f:
        f.write(to_text)
    index += 1

files = os.listdir()
files = [file for file in files if file.endswith(".txt")]
files.sort()
files

# Concatenate the chapter files into a single book file.
for file in files:
    os.system("cat {}>> ../{}.txt".format(file, output_name))

from google.colab import files
files.download('../{}.txt'.format(output_name))

end = time.time()
# Bug fix: the original printed the literal text "{end-time}" (missing the
# f-prefix) and referenced the `time` module instead of the start time `star`.
print(f"經過了 {end-star} 秒")
#@title 多執行序參考程式範例
from bs4 import BeautifulSoup
import concurrent.futures
import requests
import time
def scrape(urls):
response = requests.get(urls)
soup = BeautifulSoup(response.content, "lxml")
# 爬取文章標題
titles = soup.find_all("h3", {"class": "post_title"})
for title in titles:
print(title.getText().strip())
time.sleep(2)
base_url = "https://www.inside.com.tw/tag/AI"
urls = [f"{base_url}?page={page}" for page in range(1, 6)] # 1~5頁的網址清單
print(urls)
start_time = time.time() # 開始時間
# scrape(urls)
# 同時建立及啟用10個執行緒
# with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
# executor.map(scrape, urls)
end_time = time.time()
print(f"{end_time - start_time} 秒爬取 {len(urls)} 頁的文章")
```
| github_jupyter |
# Test tensorflow gpu #
```
import tensorflow as tf
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
```
# Dataset #
```
import os
import codecs
data = {}
classes_file = '../Dataset/classes.txt'
with codecs.open(classes_file, 'r', encoding='utf-8') as cF:
data = cF.read().split('\r\n')
len(data)
import os
from PIL import Image, ImageDraw, ImageFont
text_source = '../Dataset/source.txt'
fonts_path = '../Dataset/Fonts'
fonts = [f'{fonts_path}/{f}' for f in os.listdir(fonts_path)]
fonts
dataset = []
sequence_len = 20
import matplotlib.pyplot as plt
import numpy as np
import random
import cv2
def draw_img(img):
plt.imshow(np.asarray(img), cmap='gray', vmin=0, vmax=255)
plt.show()
def load_img(img):
return cv2.imread(img, cv2.IMREAD_GRAYSCALE)
def dilate_img(img):
return cv2.dilate(img, np.ones((2,2), np.uint8))
def otsu_thresholding(img):
norm_img = np.zeros(img.shape)
img = cv2.normalize(img, norm_img, 0, 255, cv2.NORM_MINMAX)
blur = cv2.GaussianBlur(img, (3,3), 0)
_, img = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
img = dilate_img(img)
return img
```
## Load dataset ##
```
# Build the label dataset: keep only characters present in `data` (the class
# list) and pack words into sequences of up to `sequence_len` words / ~85 chars.
with open(text_source) as txt:
    word_count = 0
    sequence = ''
    dataset = []
    for line in txt:
        for single_word in line.split(' '):
            word = ''.join([c for c in single_word if c in data])
            word.replace('\n', ' ')  # NOTE(review): no-op — str.replace returns a new string
            if len(word) < 1:
                continue
            if len(word) > 30:
                # Bug fix: the original used start = i * split_count, stepping
                # by the chunk COUNT instead of the chunk SIZE, producing
                # overlapping near-duplicate fragments. Slice into <=30-char chunks.
                for start in range(0, len(word), 30):
                    dataset.append(word[start:start + 30])
                continue
            sequence = sequence + word + ' '
            word_count = (word_count + 1) % sequence_len
            if word_count == 0 or len(sequence) > 85:
                dataset.append(sequence[:-1])  # drop the trailing space
                sequence = ''
    dataset = list(set(dataset))  # deduplicate

len(dataset)
```
## Shuffle dataset ##
```
sorted_data = sorted(dataset, key=len)
longest_label = len(sorted_data[-1])
longest_label
import random
random.seed = 1234567
random.shuffle(dataset)
# dataset = dataset[:20000]
dataset
```
# Split data #
```
train_split = int(0.9 * len(dataset))
val_split = int(train_split + 0.09 * len(dataset))
# test_split = int(train_split + 0.1 * len(dataset))
train_labels = dataset[:train_split]
val_labels = dataset[train_split:val_split]
test_labels = dataset[val_split:]
# val_labels = dataset[train_split:val_split]
# test_labels = dataset[val_split:]
print('Len train: ' + str(len(train_labels)))
print('Len val: ' + str(len(val_labels)))
print('Len test: ' + str(len(test_labels)))
```
# Model #
```
timesteps = 256
width = 4096
height = 64
max_label_len = longest_label + 2
max_label_len
from tensorflow.keras import applications, backend as K
from tensorflow.keras import models, losses, optimizers, Model, utils
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, MaxPooling2D, Dropout
from tensorflow.keras.layers import Flatten, Dense, Lambda, Reshape, Bidirectional, LSTM, GRU
from tensorflow.keras.layers import Activation, add, Concatenate, Attention, Embedding
from keras_self_attention import SeqSelfAttention
def ctc_lambda_func(args):
    """Keras Lambda body computing CTC loss from (y_pred, labels, input_len, label_len)."""
    y_pred, labels, input_length, label_length = args
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
def build_model(num_classes=94, timesteps=timesteps, max_label_len=max_label_len, input_shape=(4096, 64, 1), training=False):
    """CRNN OCR model: VGG-style CNN -> BiLSTM stack -> self-attention -> CTC.

    Returns (training_model, y_func) when training=True — the training model
    outputs the CTC loss directly — otherwise the inference model mapping
    images to per-timestep class probabilities.
    """
    inputs = Input(name='the_inputs', shape=input_shape, dtype='float32')

    # Convolution layer (VGG)
    inner = Conv2D(32, (3, 3), padding='same', name='conv1-1', kernel_initializer='he_normal')(inputs)
    inner = BatchNormalization()(inner)
    inner = Activation('relu')(inner)

    # Bug fix: conv1-2 was applied to `inputs`, silently discarding conv1-1;
    # it must consume the previous layer's output (`inner`).
    inner = Conv2D(32, (3, 3), padding='same', name='conv1-2', kernel_initializer='he_normal')(inner)
    inner = BatchNormalization()(inner)
    inner = Activation('relu')(inner)
    inner = MaxPooling2D(pool_size=(2, 2), name='max1')(inner)

    inner = Conv2D(64, (3, 3), padding='same', name='conv2-1', kernel_initializer='he_normal')(inner)
    inner = BatchNormalization()(inner)
    inner = Activation('relu')(inner)
    inner = Conv2D(64, (3, 3), padding='same', name='conv2-2', kernel_initializer='he_normal')(inner)
    inner = BatchNormalization()(inner)
    inner = Activation('relu')(inner)
    inner = MaxPooling2D(pool_size=(2, 2), name='max2')(inner)

    inner = Conv2D(128, (3, 3), padding='same', name='conv3-1', kernel_initializer='he_normal')(inner)
    inner = BatchNormalization()(inner)
    inner = Activation('relu')(inner)
    inner = Conv2D(128, (3, 3), padding='same', name='conv3-2', kernel_initializer='he_normal')(inner)
    inner = BatchNormalization()(inner)
    inner = Activation('relu')(inner)
    inner = MaxPooling2D(pool_size=(4, 2), name='max3')(inner)

    inner = Conv2D(256, (3, 3), padding='same', name='conv4-1', kernel_initializer='he_normal')(inner)
    inner = BatchNormalization()(inner)
    inner = Activation('relu')(inner)
    inner = Conv2D(256, (3, 3), padding='same', name='conv4-2')(inner)
    inner = BatchNormalization()(inner)
    inner = Activation('relu')(inner)
    inner = MaxPooling2D(pool_size=(1, 2), name='max4')(inner)

    inner = Conv2D(512, (2, 2), padding='same', kernel_initializer='he_normal', name='con5-1')(inner)
    inner = BatchNormalization()(inner)
    inner = Activation('relu')(inner)
    inner = Conv2D(512, (2, 2), padding='same', kernel_initializer='he_normal', name='con5-2')(inner)
    inner = BatchNormalization()(inner)
    inner = Activation('relu')(inner)

    # CNN to RNN: collapse the spatial grid into `timesteps` feature vectors.
    inner = Reshape(target_shape=((timesteps, 2048)), name='reshape')(inner)
    inner = Dense(128, activation='relu', kernel_initializer='he_normal', name='dense1')(inner)
    inner = Dropout(0.2)(inner)

    # RNN
    lstm1 = Bidirectional(LSTM(256, return_sequences=True, kernel_initializer='he_normal',
                               name='lstm1'))(inner)
    lstm2 = Bidirectional(LSTM(512, return_sequences=True, kernel_initializer='he_normal',
                               name='lstm2'))(lstm1)
    attention = SeqSelfAttention(attention_width=15,
                                 attention_type=SeqSelfAttention.ATTENTION_TYPE_MUL,
                                 attention_activation=None,
                                 kernel_regularizer=tf.keras.regularizers.l2(1e-6),
                                 use_attention_bias=False,
                                 name='Attention')(lstm2)
    # attention = Dropout(0.2)(attention)

    # RNN output -> character activations (+1 for the CTC blank class):
    outer = Dense(num_classes + 1, kernel_initializer='he_normal', name='dense2')(attention)
    y_pred = Activation('softmax', name='softmax')(outer)

    labels = Input(name='the_labels', shape=[max_label_len], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')

    # Keras doesn't currently support loss funcs with extra parameters
    # so CTC loss is implemented in a lambda layer
    loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([y_pred, labels, input_length, label_length]) #(None, 1)
    y_func = K.function([inputs], [y_pred])

    if training:
        return Model(inputs=[inputs, labels, input_length, label_length], outputs=loss_out), y_func
    else:
        return Model(inputs=[inputs], outputs=y_pred)
model, y_func = build_model(timesteps=timesteps, max_label_len=max_label_len, training=True)
model.summary()
from tensorflow.keras.utils import plot_model
os.environ["PATH"] += os.pathsep + 'C:/Program Files/Graphviz/bin/'
plot_model(model=model, show_shapes=True)
```
# Data generator #
```
import itertools
def return_classes(string):
    """Map a string to class indices, padding with a space at each end.

    Unknown characters map to class 1 — presumably an <unk>/blank slot in
    `data`; verify against classes.txt.
    """
    text = [' '] + list(string) + [' ']
    classes = [data.index(x) if x in data else 1 for x in text]
    return np.asarray(classes)

def return_text(classes):
    """Inverse of return_classes: indices -> string, dropping class 1."""
    text = ''
    for c in classes:
        if 0 <= c < len(data) and c != 1:
            text += data[c]
    return text

def decode_batch(out, callback=False):
    """Greedy CTC decode: per-timestep argmax, collapse repeats, map to text.

    `out` is (batch, timesteps, classes); the first two timesteps are skipped.
    """
    ret = []
    for i in range(out.shape[0]):
        out_best = list(np.argmax(out[i, 2:], 1))
        out_best2 = [k for k, g in itertools.groupby(out_best)]  # collapse repeats
        outstr = return_text(out_best2)
        if callback:
            print(f'{out_best} -> {outstr}')
        ret.append(outstr)
    return ret
def gen_text_image(text, padding=16):
    """Render `text` with a random font/size into a noisy 64px-high image.

    Returns a float array scaled to [-1, 1] (inverted: text bright on dark).
    """
    font = random.choice(fonts)
    font_size = random.randrange(30, 61)
    fnt = ImageFont.truetype(font, font_size)
    width, _ = fnt.getsize(text)
    img = Image.new('L', (width + (padding + 1) * 2, 64), color=255)
    d = ImageDraw.Draw(img)
    # Per-font vertical offsets (values presumably hand-tuned so the glyphs
    # sit inside the 64px strip — verify when adding new fonts).
    if 'calibri' in font:
        d.text((padding + 2,2), text, font=fnt, fill=0)
    elif 'verdana' in font:
        d.text((padding + 2,-8), text, font=fnt, fill=0)
    elif 'constan' in font:
        d.text((padding + 2,0), text, font=fnt, fill=0)
    elif 'corbel' in font:
        d.text((padding + 2,2), text, font=fnt, fill=0)
    elif 'consola' in font:
        d.text((padding + 2,2), text, font=fnt, fill=0)
    elif 'cour' in font:
        d.text((padding + 2,-4), text, font=fnt, fill=0)
    elif 'tahoma' in font:
        d.text((padding + 2,-8), text, font=fnt, fill=0)
    else:
        d.text((padding + 2,-6), text, font=fnt, fill=0)
    image = np.array(img)
    # Augment: noise, Otsu binarisation, inversion, then scale to [-1, 1].
    image = add_salt_and_pepper(image, 0.2)
    image = otsu_thresholding(image)
    image = inverse(image)
    image = (image / 255.) * 2. - 1.
    return image
def inverse(image):
    """Invert pixel values (bitwise NOT), giving bright text on dark background."""
    return cv2.bitwise_not(image)

def add_salt_and_pepper(image, amount):
    """Add salt-and-pepper noise; `amount` is the fraction of pixels affected.

    NOTE(review): salt is a random grey in [50, 200] and pepper in [0, 100]
    rather than pure 255/0 — presumably intentional; confirm. Also, indexing
    with a list of coordinate arrays relies on deprecated NumPy behaviour
    (it is interpreted as a tuple).
    """
    output = np.copy(np.array(image))

    # add salt
    nb_salt = np.ceil(amount * output.size * 0.5)
    coords = [np.random.randint(0, i - 1, int(nb_salt)) for i in output.shape]
    output[coords] = random.randint(50,200)

    # add pepper
    nb_pepper = np.ceil(amount * output.size * 0.5)
    coords = [np.random.randint(0, i - 1, int(nb_pepper)) for i in output.shape]
    output[coords] = random.randint(0,100)
    return np.asarray(Image.fromarray(output))
class TextImageGenerator:
    """Infinite batch generator of rendered text images for CTC training."""

    def __init__(self, labels, img_w=4096, img_h=64,
                 batch_size=16, timesteps=timesteps, training=True, max_text_len=max_label_len):
        self.dim = (img_w, img_h, 1)         # per-sample input shape (W, H, C)
        self.batch_size = batch_size
        self.max_text_len = max_text_len
        self.labels = labels                 # list of label strings to render
        self.n = len(self.labels)
        self.indexes = list(range(self.n))
        self.training = training
        self.cur_index = 0

    def next_sample(self):
        """Return the next label string, reshuffling after each full pass."""
        self.cur_index += 1
        if self.cur_index >= self.n:
            self.cur_index = 0
            random.shuffle(self.indexes)
        return self.labels[self.indexes[self.cur_index]]

    def next_batch(self):
        """Yield (inputs, outputs) dicts in the shape the CTC model expects."""
        while True:
            X = np.zeros((self.batch_size, *self.dim))
            y = np.zeros((self.batch_size, self.max_text_len), dtype=int)
            # Every sample claims the full `timesteps` of model output.
            input_length = np.full((self.batch_size, 1), timesteps, dtype=np.float32)
            label_length = np.zeros((self.batch_size, 1), dtype=np.float32)
            for i in range(self.batch_size):
                label = self.next_sample()
                # Store sample: rendered image, transposed to (W, H) and
                # left-aligned into the fixed-width canvas.
                image = np.swapaxes(gen_text_image(label), 0, 1)
                image = np.expand_dims(image, -1)
                X[i, 0:image.shape[0], :] = image
                # Store class indices, zero-padded to max_text_len.
                label_classes = return_classes(label)
                y[i, :len(label_classes)] = label_classes
                label_length[i] = len(label_classes)
            inputs = {
                'the_inputs': X,               # (bs, 4096, 64, 1)
                'the_labels': y,               # (bs, max_label_len)
                'input_length': input_length,  # (bs, 1)
                'label_length': label_length   # (bs, 1)
            }
            outputs = {'ctc': np.zeros([self.batch_size])}  # dummy targets; loss comes from the Lambda layer
            yield (inputs, outputs)
```
# Callbacks #
```
import editdistance
from datetime import datetime
from tensorflow.keras.callbacks import EarlyStopping, LearningRateScheduler, ModelCheckpoint
from tensorflow.keras.callbacks import TensorBoard, ReduceLROnPlateau, Callback
class VizCallback(Callback):
    """Epoch-end callback: prints sample predictions and edit-distance stats."""

    def __init__(self, y_func, text_img_gen, text_size, num_display_words=10):
        self.y_func = y_func              # K.function mapping images -> y_pred
        self.text_img_gen = text_img_gen  # TextImageGenerator to sample from
        self.num_display_words = num_display_words
        self.text_size = text_size        # number of samples for the metric

    def show_edit_distance(self, num):
        """Compute mean (and length-normalised) edit distance over `num` samples."""
        num_left = num
        mean_norm_ed = 0.0
        mean_ed = 0.0
        while num_left > 0:
            word_batch = next(self.text_img_gen.next_batch())[0]
            num_proc = min(word_batch['the_inputs'].shape[0], num_left)
            # predict
            inputs = word_batch['the_inputs'][0:num_proc]
            pred = self.y_func([inputs])[0]
            decoded_res = decode_batch(pred)
            # label
            labels = word_batch['the_labels'][:num_proc].astype(np.int32)
            labels = [return_text(label) for label in labels]
            for j in range(num_proc):
                edit_dist = editdistance.eval(decoded_res[j], labels[j])
                mean_ed += float(edit_dist)
                mean_norm_ed += float(edit_dist) / len(labels[j])
            num_left -= num_proc
        mean_norm_ed = mean_norm_ed / num
        mean_ed = mean_ed / num
        print('\nOut of %d samples: \nMean edit distance:'
              '%.3f \nMean normalized edit distance: %0.3f \n'
              % (num, mean_ed, mean_norm_ed))

    def on_epoch_end(self, epoch, logs={}):
        """Show a few label/prediction pairs, then the edit-distance summary."""
        batch = next(self.text_img_gen.next_batch())[0]
        inputs = batch['the_inputs'][:self.num_display_words]
        labels = batch['the_labels'][:self.num_display_words].astype(np.int32)
        labels = [return_text(label) for label in labels]
        pred = self.y_func([inputs])[0]
        pred_texts = decode_batch(pred)
        for i in range(min(self.num_display_words, len(inputs))):
            print("label: {} - predict: {}".format(labels[i], pred_texts[i]))
        self.show_edit_distance(self.text_size)
# Build one generator per split; `training` is recorded on each generator.
batch_size = 16
train_generator = TextImageGenerator(train_labels, training=True, batch_size=batch_size)
val_generator = TextImageGenerator(val_labels, training=False, batch_size=batch_size)
test_generator = TextImageGenerator(test_labels, training=False, batch_size=batch_size)
# Timestamped TensorBoard log dir and checkpoint path (epoch number is filled
# in by ModelCheckpoint via the {epoch:02d} placeholder).
log_dir = "logs/fit/" + datetime.now().strftime("%Y%m%d-%H%M%S")
output_dir = './models/VGG/MultiplicativeAttention'
weight_path = f'{output_dir}/ocr_model_{datetime.now().strftime("%Y%m%d-%H%M%S")}' + '_epoch_{epoch:02d}.h5'
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
tensorboard = TensorBoard(log_dir=log_dir)
# Keep only the weights with the lowest validation loss seen so far.
checkpoint = ModelCheckpoint(weight_path, monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=True)
vis = VizCallback(y_func, test_generator, len(test_labels))
# Stop after 5 epochs without any val_loss improvement.
early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='min')
initial_learning_rate = 0.001
epochs = 100
callbacks = [early_stop, tensorboard, vis, checkpoint]
# callbacks = [early_stop, tensorboard, vis]
# callbacks = [early_stop, tensorboard, vis, LearningRateScheduler(lr_time_based_decay, verbose=1)]
```
# Training #
```
def train(callbacks, batch_size, epochs, initial_epoch=0):
    """Run model.fit over the global train/val generators and return the
    resulting History object."""
    print('Training process starting...')
    history = model.fit(
        train_generator.next_batch(),
        validation_data=val_generator.next_batch(),
        steps_per_epoch=train_len // batch_size,
        validation_steps=val_len // batch_size,
        initial_epoch=initial_epoch,
        epochs=epochs,
        callbacks=callbacks,
        verbose=1,
    )
    return history
train_len = len(train_labels)
val_len = len(val_labels)
from tensorflow.keras import optimizers
# Build Adam with the configured learning rate and actually pass the instance
# to compile() — previously the string 'adam' was passed, which silently used
# the default learning rate and ignored `initial_learning_rate`.
opt = optimizers.Adam(learning_rate=initial_learning_rate)
# CTC loss is produced inside the model's loss layer, so the compiled loss
# simply forwards y_pred.
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=opt)
train(callbacks, batch_size, epochs)
```
# Testing #
| github_jupyter |
# kNN basic practice
## sklearn.neighbors
docs: http://scikit-learn.org/stable/modules/classes.html#module-sklearn.neighbors
examples: http://scikit-learn.org/stable/modules/classes.html#module-sklearn.neighbors
```
from matplotlib.colors import ListedColormap
from sklearn import model_selection, datasets, metrics, neighbors
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
%pylab inline
```
# Toy task - only 2 features
```
# Synthetic 2-D toy problem: 100 points, 3 classes, both features informative,
# fixed seed for reproducibility. Returns an (X, y) tuple.
classification_problem = datasets.make_classification(
    n_samples=100,
    n_features=2,
    n_informative=2,
    n_classes=3,
    n_redundant=0,
    n_clusters_per_class=1,
    random_state=3,
)
def plot_dataset(dataset):
    """Scatter-plot a (features, labels) dataset tuple, colored by class.

    `dataset[0]` must be an (n, 2) feature array and `dataset[1]` the labels.
    """
    # Removed the unused `light_colors` colormap (dead local variable).
    colors = ListedColormap(['red', 'blue', 'yellow'])
    plt.figure(figsize=(16, 9))
    plt.grid()
    plt.scatter(dataset[0][:, 0], dataset[0][:, 1],
                c=dataset[1], cmap=colors, s=100)
    plt.show()
plot_dataset(classification_problem)
# 70/30 train/test split with a fixed seed for reproducibility.
train_data, test_data, train_labels, test_labels = model_selection.train_test_split(
    classification_problem[0],
    classification_problem[1],
    test_size=0.3,
    random_state=1,
)
```
# Let's build our model!
```
# Default kNN (k=5): fit, predict, and report held-out accuracy.
clf = neighbors.KNeighborsClassifier()
clf.fit(train_data, train_labels)
predictions = clf.predict(test_data)
metrics.accuracy_score(test_labels, predictions)
predictions
```
### And visualize
```
def get_meshgrid(data, step=.05, border=.5):
    """Return an (xx, yy) coordinate mesh covering the data extent plus a
    `border` margin on every side, sampled every `step` units."""
    xs = data[:, 0]
    ys = data[:, 1]
    x_axis = np.arange(xs.min() - border, xs.max() + border, step)
    y_axis = np.arange(ys.min() - border, ys.max() + border, step)
    return np.meshgrid(x_axis, y_axis)
def plot_decision_surface(estimator, train_data, train_labels, test_data, test_labels):
    """Fit `estimator` on the training data and draw its decision surface,
    with train points (left subplot) and test points (right subplot) overlaid.

    The mesh is computed from the training-data extent and reused for both
    subplots, so both show the same decision regions.
    """
    colors = ListedColormap(['red', 'blue', 'yellow'])
    light_colors = ListedColormap(['lightcoral', 'lightblue', 'lightyellow'])
    # fit model
    estimator.fit(train_data, train_labels)
    # Consistency fix: use the explicitly imported `plt` alias instead of the
    # bare `pyplot` name injected by the %pylab magic.
    plt.figure(figsize=(16, 6))
    # plot decision surface on the train data
    plt.subplot(1, 2, 1)
    xx, yy = get_meshgrid(train_data)
    mesh_predictions = np.array(estimator.predict(np.c_[xx.ravel(), yy.ravel()])).reshape(xx.shape)
    plt.pcolormesh(xx, yy, mesh_predictions, cmap=light_colors)
    plt.scatter(train_data[:, 0], train_data[:, 1], c=train_labels, s=100, cmap=colors)
    plt.title('Train data, accuracy={:.2f}'.format(metrics.accuracy_score(train_labels, estimator.predict(train_data))))
    # plot decision surface on the test data
    plt.subplot(1, 2, 2)
    plt.pcolormesh(xx, yy, mesh_predictions, cmap=light_colors)
    plt.scatter(test_data[:, 0], test_data[:, 1], c=test_labels, s=100, cmap=colors)
    plt.title('Test data, accuracy={:.2f}'.format(metrics.accuracy_score(test_labels, estimator.predict(test_data))))
# Compare decision surfaces for a range of neighborhood sizes. This replaces
# eight copy-pasted estimator/plot pairs with an equivalent loop; `estimator`
# still ends up bound to the last (k=40) classifier.
for n_neighbors in (1, 2, 3, 5, 10, 20, 30, 40):
    estimator = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
    plot_decision_surface(estimator, train_data, train_labels, test_data, test_labels)
```
## Seems good!
```
# Harder problem: 100 features (half informative, half redundant).
hard_problem = datasets.make_classification(
    n_samples=100,
    n_features=100,
    n_informative=50,
    n_classes=3,
    n_redundant=50,
    n_clusters_per_class=1,
    random_state=42,
)
train_data, test_data, train_labels, test_labels = model_selection.train_test_split(
    hard_problem[0],
    hard_problem[1],
    test_size=0.3,
    random_state=1,
)
# Same 5-NN model; accuracy degrades in high dimensions (distance-based
# methods suffer from the curse of dimensionality).
clf = neighbors.KNeighborsClassifier(n_neighbors=5)
clf.fit(train_data, train_labels)
predictions = clf.predict(test_data)
metrics.accuracy_score(test_labels, predictions)
```
## Or not so good...
```
def train_knn_classifier(dimensions, n_classes):
    """Plot 5-NN test accuracy as a function of feature dimensionality.

    For each dimensionality in `dimensions`, generates a synthetic
    `n_classes`-class problem (half informative, half redundant features),
    trains a 5-NN classifier, and plots test accuracy vs. dimension.
    """
    scores = []
    for dim in dimensions:
        # Fix: use the `n_classes` argument — it was previously hard-coded to
        # 5 inside the call, silently ignoring the parameter.
        problem = datasets.make_classification(n_samples=1000, n_features=dim, n_informative=dim // 2,
                                               n_classes=n_classes, n_redundant=dim // 2,
                                               n_clusters_per_class=1, random_state=42)
        train_data, test_data, train_labels, test_labels = model_selection.train_test_split(
            problem[0],
            problem[1],
            test_size=0.3,
            random_state=1,
        )
        clf = neighbors.KNeighborsClassifier(n_neighbors=5)
        clf.fit(train_data, train_labels)
        predictions = clf.predict(test_data)
        acc = metrics.accuracy_score(test_labels, predictions)
        scores.append(acc)
    plt.figure(figsize=(16, 9))
    plt.plot(dimensions, scores)
    plt.show()
train_knn_classifier([10, 20, 50, 100, 500, 1000], 5)
```
# Practice area
Now let's try and train a simple (or not so) kNN classifier on a more complicated dataset.
```
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data > ./wine_data.csv
import pandas as pd
# Wine dataset: column 0 is the class label, remaining columns are features.
dataset = pd.read_csv('wine_data.csv', header=None)
dataset.head()
# Fix: DataFrame.as_matrix() was deprecated and removed from pandas;
# to_numpy() is the supported equivalent.
X = dataset.drop(0, axis=1).to_numpy()
y = dataset[0].to_numpy()
from sklearn.model_selection import train_test_split
# 80/20 split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
print(X_train.shape)
print(X_test.shape)
from sklearn.metrics import accuracy_score
def measure_quality(predictions):
    """Return the accuracy of `predictions` against the global test labels."""
    score = accuracy_score(y_test, predictions)
    return score
```
# Bonus area
### Those who get accuracy of at least 0.8 get bonus [0.5 max]
```
# YOUR CODE HERE
# print(measure_quality(y_pred))
```
| github_jupyter |
# Creating your own dataset from Google Images
*by: Francisco Ingham and Jeremy Howard. Inspired by [Adrian Rosebrock](https://www.pyimagesearch.com/2017/12/04/how-to-create-a-deep-learning-dataset-using-google-images/)*
In this tutorial we will see how to easily create an image dataset through Google Images. **Note**: You will have to repeat these steps for any new category you want to Google (e.g once for dogs and once for cats).
```
from fastai.vision import *
```
## Get a list of URLs
### Search and scroll
Go to [Google Images](http://images.google.com) and search for the images you are interested in. The more specific you are in your Google Search, the better the results and the less manual pruning you will have to do.
Scroll down until you've seen all the images you want to download, or until you see a button that says 'Show more results'. All the images you scrolled past are now available to download. To get more, click on the button, and continue scrolling. The maximum number of images Google Images shows is 700.
It is a good idea to put things you want to exclude into the search query, for instance if you are searching for the Eurasian wolf, "canis lupus lupus", it might be a good idea to exclude other variants:
"canis lupus lupus" -dog -arctos -familiaris -baileyi -occidentalis
You can also limit your results to show only photos by clicking on Tools and selecting Photos from the Type dropdown.
### Download into file
Now you must run some JavaScript code in your browser which will save the URLs of all the images you want for your dataset.
Press <kbd>Ctrl</kbd><kbd>Shift</kbd><kbd>J</kbd> in Windows/Linux and <kbd>Cmd</kbd><kbd>Opt</kbd><kbd>J</kbd> in Mac, and a small window called the JavaScript 'Console' will appear. That is where you will paste the JavaScript commands.
You will need to get the urls of each of the images. Before running the following commands, you may want to disable ad blocking extensions (uBlock, AdBlockPlus etc.) in Chrome. Otherwise the window.open() command doesn't work. Then you can run the following commands:
```javascript
urls = Array.from(document.querySelectorAll('.rg_di .rg_meta')).map(el=>JSON.parse(el.textContent).ou);
window.open('data:text/csv;charset=utf-8,' + escape(urls.join('\n')));
```
### Create directory and upload urls file into your server
Choose an appropriate name for your labeled images. You can run these steps multiple times to create different labels.
```
# One (folder, file) pair per category. In the notebook these lines are meant
# to be run one pair at a time before each download; executed top-to-bottom,
# only the last pair ('grizzly') remains active.
folder = 'black'
file = 'urls_black.csv'
folder = 'teddys'
file = 'urls_teddys.csv'
folder = 'grizzly'
file = 'urls_grizzly.csv'
```
You will need to run this cell once per each category.
```
# Create data/bears/<folder> (idempotent) and list the dataset root contents.
path = Path('data/bears')
dest = path/folder
dest.mkdir(parents=True, exist_ok=True)
path.ls()
```
Finally, upload your urls file. You just need to press 'Upload' in your working directory and select your file, then click 'Upload' for each of the displayed files.

## Download images
Now you will need to download your images from their respective urls.
fast.ai has a function that allows you to do just that. You just have to specify the urls filename as well as the destination folder and this function will download and save all images that can be opened. If they have some problem in being opened, they will not be saved.
Let's download our images! Notice you can choose a maximum number of images to be downloaded. In this case we will not download all the urls.
You will need to run this line once for every category.
```
classes = ['teddys','grizzly','black']
# Download up to 200 images from the urls file into `dest`. Note both calls
# below run if this cell executes as-is; the second is a debugging variant.
download_images(path/file, dest, max_pics=200)
# If you have problems download, try with `max_workers=0` to see exceptions:
download_images(path/file, dest, max_pics=20, max_workers=0)
```
Then we can remove any images that can't be opened:
```
# Drop unreadable images and downscale anything larger than 500px.
for c in classes:
    print(c)
    verify_images(path/c, delete=True, max_size=500)
```
## View data
```
# Fixed seed so the random 20% validation split is reproducible across runs.
np.random.seed(42)
data = ImageDataBunch.from_folder(path, train=".", valid_pct=0.2,
        ds_tfms=get_transforms(), size=224, num_workers=4).normalize(imagenet_stats)
# If you already cleaned your data, run this cell instead of the one before
# np.random.seed(42)
# data = ImageDataBunch.from_csv(path, folder=".", valid_pct=0.2, csv_labels='cleaned.csv',
#        ds_tfms=get_transforms(), size=224, num_workers=4).normalize(imagenet_stats)
```
Good! Let's take a look at some of our pictures then.
```
# Inspect class names, a sample batch, and train/valid dataset sizes.
data.classes
data.show_batch(rows=3, figsize=(7,8))
data.classes, data.c, len(data.train_ds), len(data.valid_ds)
```
## Train model
```
# Transfer learning: train the ResNet-34 head for 4 epochs, then unfreeze and
# fine-tune the whole network with a discriminative learning-rate slice.
learn = cnn_learner(data, models.resnet34, metrics=error_rate)
learn.fit_one_cycle(4)
learn.save('stage-1')
learn.unfreeze()
learn.lr_find()
# If the plot is not showing try to give a start and end learning rate
# learn.lr_find(start_lr=1e-5, end_lr=1e-1)
learn.recorder.plot()
learn.fit_one_cycle(2, max_lr=slice(3e-5,3e-4))
learn.save('stage-2')
```
## Interpretation
```
# Reload the fine-tuned weights and plot the confusion matrix.
learn.load('stage-2');
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()
```
## Cleaning Up
Some of our top losses aren't due to bad performance by our model. There are images in our data set that shouldn't be.
Using the `ImageCleaner` widget from `fastai.widgets` we can prune our top losses, removing photos that don't belong.
```
from fastai.widgets import *
```
First we need to get the file paths from our top_losses. We can do this with `.from_toplosses`. We then feed the top losses indexes and corresponding dataset to `ImageCleaner`.
Notice that the widget will not delete images directly from disk but it will create a new csv file `cleaned.csv` from where you can create a new ImageDataBunch with the corrected labels to continue training your model.
In order to clean the entire set of images, we need to create a new dataset without the split. The video lecture demonstrated the use of the `ds_type` param, which no longer has any effect. See [the thread](https://forums.fast.ai/t/duplicate-widget/30975/10) for more details.
```
# Databunch over ALL images with no train/valid split, so the cleaning widget
# can inspect the entire dataset.
db = (ImageList.from_folder(path)
        .split_none()
        .label_from_folder()
        .transform(get_transforms(), size=224)
        .databunch()
     )
# If you already cleaned your data using indexes from `from_toplosses`,
# run this cell instead of the one before to proceed with removing duplicates.
# Otherwise all the results of the previous step would be overwritten by
# the new run of `ImageCleaner`.
# db = (ImageList.from_csv(path, 'cleaned.csv', folder='.')
#         .split_none()
#         .label_from_df()
#         .transform(get_transforms(), size=224)
#         .databunch()
#      )
```
Then we create a new learner to use our new databunch with all the images.
```
# New learner over the unsplit databunch, reusing stage-2 weights, then rank
# all images by loss so the worst offenders are reviewed first.
learn_cln = cnn_learner(db, models.resnet34, metrics=error_rate)
learn_cln.load('stage-2');
ds, idxs = DatasetFormatter().from_toplosses(learn_cln)
```
Make sure you're running this notebook in Jupyter Notebook, not Jupyter Lab. That is accessible via [/tree](/tree), not [/lab](/lab). Running the `ImageCleaner` widget in Jupyter Lab is [not currently supported](https://github.com/fastai/fastai/issues/1539).
```
# Interactive widget: records keep/delete decisions in cleaned.csv; it does
# not delete files from disk.
# Don't run this in google colab or any other instances running jupyter lab.
# If you do run this on Jupyter Lab, you need to restart your runtime and
# runtime state including all local variables will be lost.
ImageCleaner(ds, idxs, path)
```
If the code above does not show any GUI (containing images and buttons) rendered by widgets but only text output, that may be caused by a configuration problem with ipywidgets. Try the solution in this [link](https://github.com/fastai/fastai/issues/1539#issuecomment-505999861) to solve it.
Flag photos for deletion by clicking 'Delete'. Then click 'Next Batch' to delete flagged photos and keep the rest in that row. `ImageCleaner` will show you a new row of images until there are no more to show. In this case, the widget will show you images until there are none left from `top_losses.ImageCleaner(ds, idxs)`
You can also find duplicates in your dataset and delete them! To do this, you need to run `.from_similars` to get the potential duplicates' ids and then run `ImageCleaner` with `duplicates=True`. The API works in a similar way as with misclassified images: just choose the ones you want to delete and click 'Next Batch' until there are no more images left.
Make sure to recreate the databunch and `learn_cln` from the `cleaned.csv` file. Otherwise the file would be overwritten from scratch, losing all the results from cleaning the data from toplosses.
```
# Second pass: surface near-duplicate images for manual removal.
ds, idxs = DatasetFormatter().from_similars(learn_cln)
ImageCleaner(ds, idxs, path, duplicates=True)
```
Remember to recreate your ImageDataBunch from your `cleaned.csv` to include the changes you made in your data!
## Putting your model in production
First thing first, let's export the content of our `Learner` object for production:
```
# Serialize the model, weights and transforms to <path>/export.pkl.
learn.export()
```
This will create a file named 'export.pkl' in the directory where we were working that contains everything we need to deploy our model (the model, the weights but also some metadata like the classes or the transforms/normalization used).
You probably want to use CPU for inference, except at massive scale (and you almost certainly don't need to train in real-time). If you don't have a GPU that happens automatically. You can test your model on CPU like so:
```
# Force CPU inference and load one sample image for prediction.
defaults.device = torch.device('cpu')
img = open_image(path/'black'/'00000021.jpg')
img
```
We create our `Learner` in the production environment like this; just make sure that `path` contains the file 'export.pkl' from before.
```
# Recreate the Learner from export.pkl and classify the sample image.
learn = load_learner(path)
pred_class,pred_idx,outputs = learn.predict(img)
pred_class
```
So you might create a route something like this ([thanks](https://github.com/simonw/cougar-or-not) to Simon Willison for the structure of this code):
```python
@app.route("/classify-url", methods=["GET"])
async def classify_url(request):
    """Fetch the image at ?url=... and return per-class scores as JSON."""
    bytes = await get_bytes(request.query_params["url"])
    img = open_image(BytesIO(bytes))
    _,_,losses = learner.predict(img)
    # NOTE(review): `learner` and `cat_learner` look like they should be the
    # same object — confirm the intended name before deploying this route.
    return JSONResponse({
        "predictions": sorted(
            zip(cat_learner.data.classes, map(float, losses)),
            key=lambda p: p[1],
            reverse=True
        )
    })
```
(This example is for the [Starlette](https://www.starlette.io/) web app toolkit.)
## Things that can go wrong
- Most of the time things will train fine with the defaults
- There's not much you really need to tune (despite what you've heard!)
- Most likely are
- Learning rate
- Number of epochs
### Learning rate (LR) too high
```
# Demonstration: an absurdly large max learning rate makes training diverge.
learn = cnn_learner(data, models.resnet34, metrics=error_rate)
learn.fit_one_cycle(1, max_lr=0.5)
```
### Learning rate (LR) too low
```
# Fresh learner for the too-low learning-rate demonstration below.
learn = cnn_learner(data, models.resnet34, metrics=error_rate)
```
Previously we had this result:
```
Total time: 00:57
epoch train_loss valid_loss error_rate
1 1.030236 0.179226 0.028369 (00:14)
2 0.561508 0.055464 0.014184 (00:13)
3 0.396103 0.053801 0.014184 (00:13)
4 0.316883 0.050197 0.021277 (00:15)
```
```
# Tiny learning rate: very slow convergence; the loss plot shows a flat curve.
learn.fit_one_cycle(5, max_lr=1e-5)
learn.recorder.plot_losses()
```
As well as taking a really long time, it's getting too many looks at each image, so may overfit.
### Too few epochs
```
# From-scratch (not pretrained) network for a single epoch: underfits.
learn = cnn_learner(data, models.resnet34, metrics=error_rate, pretrained=False)
learn.fit_one_cycle(1)
```
### Too many epochs
```
# Overfitting setup: 90% of data held out for validation, all augmentation
# disabled, no dropout (ps=0) or weight decay (wd=0), 40 unfrozen epochs.
np.random.seed(42)
data = ImageDataBunch.from_folder(path, train=".", valid_pct=0.9, bs=32,
        ds_tfms=get_transforms(do_flip=False, max_rotate=0, max_zoom=1, max_lighting=0, max_warp=0
                              ),size=224, num_workers=4).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet50, metrics=error_rate, ps=0, wd=0)
learn.unfreeze()
learn.fit_one_cycle(40, slice(1e-6,1e-4))
```
| github_jupyter |
# Event Driven Stock Prediction
Deep Learning implementation of Stock Prediction inspired by [Deep Learning for Event-Driven Stock Prediction] (Ding et al.,2015)
This is a simplified implementation where I did not include Neural Tensor Network and Convolutional Neural Network.
#### Data Preparation
##### News Data
###### News dataset from Bloomberg & Reuters (Oct.20.2006 ~ Nov.26.2013)
- Extract news titles only (generators/data_generator.py)
- Extract Relation Triples using OpenIE 5.0 (generators/svo_generator.py)
- Match Relation Triples with corresponding word embeddings (generators/svo_embedding_generator.py)
- For detailed description of preprocessing steps, refer to the corresponding .py files
##### S&P 500 Data (2006 ~ 2013)
- labeled the data based on volatility level.
- Here, I decided to train a multi-classification model based on the next day's volatility. (The original paper uses binary classification.)
```
import numpy as np
import pickle
import os
import scipy.stats as stats
import pandas as pd
from collections import defaultdict
from keras import backend as K
from keras.engine.topology import Layer
from keras.layers import Input
#Load dictionaries
# Load the preprocessed dictionaries: raw news titles, extracted SVO relation
# triples, and their word-embedding representations.
with open(os.getcwd()+'/data/news_dict.pickle', 'rb') as handle:
    news_dict = pickle.load(handle)
with open(os.getcwd()+'/data/svo_dict.pickle', 'rb') as handle:
    svo_dict = pickle.load(handle)
with open(os.getcwd()+'/data/svo_dict_embed.pickle', 'rb') as handle:
    svo_dict_embed = pickle.load(handle)
df = pd.read_csv("target.csv")
# Intraday volatility in percent: (close - open) / open * 100.
df['Volatility'] = ((df['Close']-df['Open'])/df['Open']) * 100
# Strip '-' from every string cell (e.g. turns dates '2006-10-20' -> '20061020').
df.replace('-', '', regex=True, inplace=True)
df
vol_neut = []  # NOTE(review): never used below — candidate for removal
vol_pos = []
vol_neg = []
# Class thresholds: > 0.62% is a positive day, < -0.47% a negative day.
pos_mask = df['Volatility'] > 0.620074
neg_mask = df['Volatility'] < -0.471559
vol_pos = np.array(df[pos_mask]['Date'])
vol_neg = np.array(df[neg_mask]['Date'])
# Whatever rows remain after dropping pos/neg days form the neutral class.
df.drop(df[pos_mask].index, inplace= True)
df.drop(df[neg_mask].index, inplace= True)
vol_nothing = np.array(df['Date'])
print(vol_nothing)
df_2 = pd.read_csv("target.csv")
df_2['Volatility'] = ((df_2['Close']-df_2['Open'])/df_2['Open']) * 100
df_2.replace('-', '', regex=True, inplace=True)
news_date_list = list(sorted(svo_dict_embed.keys()))
X_temp_list = []
y_temp_list = []
vol = []
pos_count = 0
neg_count = 0
neut_count = 0
# Label every news day with the volatility class of the NEXT trading day,
# looking ahead up to 3 news dates to skip weekends/holidays.
for k, v in sorted(svo_dict_embed.items()): #in news article dict
    if int(k)+3 > int(news_date_list[-1]):
        # Too close to the end of the data to find a next trading day.
        print(k)
        break
    indx = (news_date_list.index(k))
    if (df_2['Date'] == news_date_list[indx+1]).any(): #if news article d+1 in S&P500 date
        pred_date = news_date_list[indx+1]
    elif (df_2['Date'] == news_date_list[indx+2]).any():
        pred_date = news_date_list[indx+2]
    else:
        pred_date = news_date_list[indx+3]
    # One-hot label in [positive, neutral, negative] order.
    if pred_date in vol_nothing:
        vol = [0,1,0]
    if pred_date in vol_pos:
        vol = [1,0,0]
    if pred_date in vol_neg:
        vol = [0,0,1]
    for val in v:
        # Some triples carry nested embeddings; unwrap each element to a
        # flat 100-dim vector before averaging.
        if len(val[0]) != 100 :
            val[0] = val[0][0]
        if len(val[1]) != 100 :
            val[1] = val[1][0]
        if len(val[2]) != 100 :
            val[2] = val[2][0]
        # Average subject/verb/object embeddings into one event vector.
        X_temp_list.append(np.mean(val,axis=0))
        y_temp_list.append(vol)
        if vol[0] == 1:
            pos_count += 1
        if vol[1] == 1:
            neut_count +=1
        if vol[2] == 1:
            neg_count +=1
# Class balance check.
print(pos_count)
print(neg_count)
print(neut_count)
news_date_list[-1]
y_full = np.array(y_temp_list,dtype='float')
X_full = np.stack(X_temp_list,axis=0)
#Data preparation complete
```
#### Modeling
- Simple settings with default parameters used.
- I focused on just learning the NN architecture, therefore did not optimize the model to the deploy level.
```
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_full, y_full)
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
# Small MLP: 100-dim averaged event embedding -> 3-way softmax over
# volatility classes, with dropout between the hidden layers.
model = Sequential()
model.add(Dense(31, activation='relu', input_dim=100))
model.add(Dropout(0.5))
model.add(Dense(31, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(3, activation='softmax'))
# SGD with Nesterov momentum; categorical cross-entropy matches the one-hot labels.
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
model.fit(X_train, y_train, epochs=20, batch_size=128)
score = model.evaluate(X_test, y_test, batch_size=128)
```
| github_jupyter |
```
# # Update sklearn to prevent version mismatches
# !pip install sklearn --upgrade
# # install joblib. This will be used to save your model.
# # Restart your kernel after installing
# !pip install joblib
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
```
## Read the CSV and Perform Basic Data Cleaning
```
# Read in csv
df = pd.read_csv("exoplanet_data.csv")
# Drop the null columns where all values are null
df = df.dropna(axis='columns', how='all')
# Drop the null rows
df = df.dropna()
# Quick sanity check of the cleaned frame.
df.head()
df.describe()
```
## Select features (columns)
```
# Set target, features and feature_names.
# Target is the KOI disposition label; every other column is a feature.
target = df["koi_disposition"]
data = df.drop("koi_disposition", axis=1)
feature_names = data.columns
data.head()
```
## Create a Train Test Split
Use `koi_disposition` for the y values
```
from sklearn.model_selection import train_test_split
# Default 75/25 split, seeded for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(data, target, random_state=42)
X_train.head()
```
## Pre-processing
Scale the data using the MinMaxScaler and perform some feature selection
```
from sklearn.preprocessing import MinMaxScaler
# Fit the scaler on the training split only, then transform both splits —
# avoids leaking test-set statistics into training.
X_minmax = MinMaxScaler().fit(X_train)
X_train_minmax = X_minmax.transform(X_train)
X_test_minmax = X_minmax.transform(X_test)
```
## Train the Model (Random Forest)
```
from sklearn.ensemble import RandomForestClassifier
# Baseline random forest with default hyperparameters.
rf = RandomForestClassifier()
rf.fit(X_train_minmax, y_train)
print(f"Training Data Score: {rf.score(X_train_minmax, y_train)}")
print(f"Testing Data Score: {rf.score(X_test_minmax, y_test)}")
# Rank features by importance, most important first.
sorted(zip(rf.feature_importances_, feature_names), reverse=True)
```
## Hyperparameter Tuning
Use `GridSearchCV` to tune the model's parameters
```
# Create the GridSearchCV model
from sklearn.model_selection import GridSearchCV
# 3x3 grid over tree count and depth.
param_grid = {'n_estimators': [250, 300, 350],
              'max_depth': [125, 150, 175]}
grid = GridSearchCV(rf, param_grid, verbose=3)
# Train the model with GridSearch
grid.fit(X_train_minmax, y_train)
print(grid.best_params_)
print(grid.best_score_)
# Training score:
grid.score(X_train_minmax, y_train)
# Testing score:
grid.score(X_test_minmax, y_test)
# Make prediction and save to variable for report.
predictions = grid.predict(X_test_minmax)
# Print Classification Report.
from sklearn.metrics import classification_report
print(classification_report(y_test, predictions))
# %matplotlib notebook
from yellowbrick.classifier import ClassificationReport
# NOTE(review): the visualizers below use a fresh default classifier / the
# untuned `rf`, not grid.best_estimator_ — confirm that is intended.
viz = ClassificationReport(RandomForestClassifier())
viz.fit(X_train_minmax, y_train)
viz.score(X_test_minmax, y_test)
viz.finalize()
viz.show(outpath="Output/random_forest_classification_report.png")
from yellowbrick.model_selection import FeatureImportances
from yellowbrick.style import set_palette
from yellowbrick.features import RadViz
set_palette('yellowbrick')
viz = FeatureImportances(rf, size=(500, 500))
viz.fit(X_train_minmax, y_train)
viz.show(outpath="Output/feature_importance.png")
```
## Save the Model
```
import joblib
filename = 'Models/exoplanet_exploration_random_Forest.sav'
# NOTE(review): this persists the untuned `rf`, not grid.best_estimator_ —
# confirm which model should be saved after hyperparameter tuning.
joblib.dump(rf, filename)
```
| github_jupyter |
```
import numpy as np
from os.path import isfile
from scipy.io import loadmat
from collections import OrderedDict
from config import DATASET
from train_classifiers import train_classifier
from utils import compute_kernel, compute_precrec
from utils import get_labels, _n_classes, _set_sizes
# EXP_NAME = 'FK'
EXP_NAME = 'imagenet-caffe-alex'
DIR_DATA = './feature_extraction/' + EXP_NAME + '/codes/'
DIR_SAVE = './feature_extraction/' + EXP_NAME + '/compdata/'
# Each split's file list is stored as chunked MATLAB cell arrays; squeeze and
# concatenate them into one flat array per split.
TrainList = loadmat(DIR_DATA + EXP_NAME + '_train_files.mat')
TrainList = TrainList['train_chunks']
TrainList = np.squeeze(TrainList)
TrainList = np.concatenate(TrainList, axis=0)
ValList = loadmat(DIR_DATA + EXP_NAME + '_val_files.mat')
ValList = ValList['val_chunks']
ValList = np.squeeze(ValList)
ValList = np.concatenate(ValList, axis=0)
TestList = loadmat(DIR_DATA + EXP_NAME + '_test_files.mat')
TestList = TestList['test_chunks']
TestList = np.squeeze(TestList)
TestList = np.concatenate(TestList, axis=0)
# Ordered train/val/test so the offsets into the kernel matrix are deterministic.
DataList = OrderedDict()
DataList['train'] = TrainList
DataList['val'] = ValList
DataList['test'] = TestList
# Cache the (expensive to compute) kernel matrix on disk.
if isfile(DIR_SAVE + 'Kernel.npy'):
    print('Loading the kernel matrix ...')
    K = np.load(DIR_SAVE + 'Kernel.npy')
    print('Kernel matrix is loaded.')
else:
    K = compute_kernel(DataList)
    np.save(DIR_SAVE + 'Kernel.npy', K)
def train_one_vs_all(K, train_set, all_epsilon, all_kappa):
    """Train one-vs-all classifiers on the splits listed in `train_set`.

    Extracts the train-vs-train sub-kernel from the full kernel matrix K
    (whose rows/cols follow the DATASET split order) and trains one
    classifier per class over the (epsilon, kappa) hyper-parameter grids.
    Returns an array of dual coefficients, one row of alphas per class.
    """
    n_classes = _n_classes()
    set_sizes = _set_sizes()
    # Total number of training examples across the selected splits.
    tr_size = 0
    for ind, data in enumerate(DATASET):
        if data in train_set:
            tr_size += set_sizes[ind]
    # Copy each selected (split1, split2) tile of K into the compact K_tr.
    K_tr = np.zeros((tr_size, tr_size))
    idx = 0
    for ind1, tr1 in enumerate(DATASET):
        if tr1 not in train_set:
            continue
        idy = 0
        for ind2, tr2 in enumerate(DATASET):
            if tr2 not in train_set:
                continue
            K_tr[idx:set_sizes[ind1]+idx,
                 idy:set_sizes[ind2]+idy] = K[
                sum(set_sizes[:ind1]):sum(set_sizes[:ind1+1]),
                sum(set_sizes[:ind2]):sum(set_sizes[:ind2+1])]
            # Fix: accumulate the column offset. The previous plain assignment
            # (`idy = set_sizes[ind2]`) only happened to be correct when at
            # most two splits were selected.
            idy += set_sizes[ind2]
        # Fix: accumulate the row offset likewise.
        idx += set_sizes[ind1]
    labels_raw = get_labels(train_set)
    alpha = np.array([train_classifier(K_tr, labels_raw, all_epsilon, all_kappa, nc)
                      for nc in range(n_classes)])
    return alpha
def compute_score(K, alpha, train_set, test_set):
    """Score test examples against trained one-vs-all classifiers.

    Extracts the train-vs-test sub-kernel from the full kernel matrix K and
    returns an (n_test, n_classes) array of decision values alpha . K.
    """
    n_classes = _n_classes()
    set_sizes = _set_sizes()
    tr_size = 0
    ts_size = 0
    for ind, data in enumerate(DATASET):
        if data in train_set:
            tr_size += set_sizes[ind]
        if data in test_set:
            ts_size += set_sizes[ind]
    # Copy each selected (train split, test split) tile of K into K_tr_ts.
    K_tr_ts = np.zeros((tr_size, ts_size))
    idx = 0
    for ind1, tr1 in enumerate(DATASET):
        if tr1 not in train_set:
            continue
        idy = 0
        for ind2, tr2 in enumerate(DATASET):
            if tr2 not in test_set:
                continue
            K_tr_ts[idx:set_sizes[ind1]+idx,
                    idy:set_sizes[ind2]+idy] = K[
                sum(set_sizes[:ind1]):sum(set_sizes[:ind1+1]),
                sum(set_sizes[:ind2]):sum(set_sizes[:ind2+1])]
            # Fix: accumulate the column offset. The previous plain assignment
            # (`idy = set_sizes[ind2]`) was only correct for <= 2 selected splits.
            idy += set_sizes[ind2]
        # Fix: accumulate the row offset likewise.
        idx += set_sizes[ind1]
    scores = np.zeros((ts_size, n_classes))
    for ci in range(n_classes):
        scores[:,ci] = alpha[ci,:].dot(K_tr_ts)
    return scores
train_set = ['train']
test_set = ['val']
all_epsilon = np.hstack([np.arange(1, 10) * 1e-4,
                         np.arange(1, 10) * 1e-3,
                         np.arange(1, 11) * 1e-2])
all_kappa = [np.inf]
# NOTE(review): this result is immediately recomputed/overwritten by the
# load-or-train logic below with the full kappa grid — this call looks redundant.
alpha_train = train_one_vs_all(K, train_set, all_epsilon, all_kappa)
train_set = ['train']
test_set = ['val']
all_epsilon = np.hstack([np.arange(1, 10) * 1e-4,
                         np.arange(1, 10) * 1e-3,
                         np.arange(1, 11) * 1e-2])
all_kappa = [0.1, 0.2, 0.3, 0.4, 0.5, np.inf]
# Load cached classifiers if available, otherwise train and cache them.
if isfile(DIR_SAVE + 'alpha_train.npy'):
    print('Loading the trained classifiers ...')
    alpha_train = np.load(DIR_SAVE + 'alpha_train.npy')
    print('Classifiers are loaded.')
else:
    alpha_train = train_one_vs_all(K, train_set, all_epsilon, all_kappa)
    np.save(DIR_SAVE + 'alpha_train.npy', alpha_train)
# Validation sweep: per-class average precision for every (kappa, epsilon) pair.
AP = np.zeros((len(all_kappa), len(all_epsilon), _n_classes()))
for ind_k in range(len(all_kappa)):
    for ind_e in range(len(all_epsilon)):
        scores = compute_score(
            K, alpha_train[:,:,ind_k,ind_e], train_set, test_set)
        labels = get_labels(test_set)
        AP[ind_k,ind_e,:] = compute_precrec(scores, labels)
mAP = np.mean(AP, axis=2)
mAP
# Best (kappa, epsilon) among the finite-kappa settings (mAP[:-1,:]) and best
# epsilon for the kappa=inf baseline (last row).
k_ind, e_ind = np.where(mAP == np.max(mAP[:-1,:]))
c_ind, = np.where(mAP[-1,:] == np.max(mAP[-1,:]))
train_set = ['train', 'val']
test_set = ['test']
# Retrain on train+val with the selected hyper-parameters (cached on disk).
if isfile(DIR_SAVE + 'alpha_rob.npy'):
    print('Loading the robust classifier ...')
    alpha_rob = np.load(DIR_SAVE + 'alpha_rob.npy')
    print('Classifier is loaded.')
else:
    c_opt = [all_epsilon[c_ind[0]]]
    alpha_rob = train_one_vs_all(K, train_set, c_opt, [np.inf]).squeeze()
    np.save(DIR_SAVE + 'alpha_rob.npy', alpha_rob)
if isfile(DIR_SAVE + 'alpha_dro.npy'):
    print('Loading the robust classifier ...')
    alpha_dro = np.load(DIR_SAVE + 'alpha_dro.npy')
    print('Classifier is loaded.')
else:
    epsilon_opt = [all_epsilon[e_ind[0]]]
    kappa_opt = [all_kappa[k_ind[0]]]
    alpha_dro = train_one_vs_all(K, train_set, epsilon_opt, kappa_opt).squeeze()
    np.save(DIR_SAVE + 'alpha_dro.npy', alpha_dro)
# Final test-set mean average precision for both variants.
scores_dro = compute_score(K, alpha_dro, train_set, test_set)
AP_dro = compute_precrec(scores_dro, get_labels(test_set))
scores_rob = compute_score(K, alpha_rob, train_set, test_set)
AP_rob = compute_precrec(scores_rob, get_labels(test_set))
AP_rob.mean()
AP_dro.mean()
```
| github_jupyter |
# WeatherPy
----
#### Note
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
```
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import time
from scipy.stats import linregress
from citipy import citipy
# Import API key
from api_keys import weather_api_key
```
## Generate Cities List
```
# Range of latitudes and longitudes
lat_range = (-90, 90)
lng_range = (-180, 180)

# Holders for the sampled coordinates and the derived city names.
lat_lngs = []
cities = []

# Random lat and lng combinations.
# BUG FIX: sample from the declared ranges instead of repeating the literal
# bounds, so changing lat_range/lng_range actually affects the sampling.
lats = np.random.uniform(low=lat_range[0], high=lat_range[1], size=1500)
lngs = np.random.uniform(low=lng_range[0], high=lng_range[1], size=1500)
lat_lngs = zip(lats, lngs)

# Nearest city for each (lat, lng) pair; keep only unique city names.
for lat_lng in lat_lngs:
    city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name
    if city not in cities:
        cities.append(city)

# Print the city count to confirm a sufficient sample.
print(len(cities))
```
### Perform API Calls
* Perform a weather check on each city using a series of successive API calls.
* Include a print log of each city as it's being processed (with the city number and city name).
```
# Starting URL for Weather Map API Call (imperial units).
url = "http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=" + weather_api_key

city_data = []

# Print to logger
print("Beginning Data Retrieval")
print("------------------------")

for city in cities:
    city_url = url + "&q=" + city
    try:
        city_weather = requests.get(city_url).json()
        city_lat = city_weather['coord']['lat']
        city_mtemp = city_weather['main']['temp_max']
        city_humidity = city_weather['main']['humidity']
        city_cloudiness = city_weather['clouds']['all']
        city_wspeed = city_weather['wind']['speed']
        city_data.append({"City": city,
                          "Latitude": city_lat,
                          "Max Temp": city_mtemp,
                          "Humidity": city_humidity,
                          "Cloudiness": city_cloudiness,
                          "Wind Speed": city_wspeed,
                          })
        print(f'Processing... {city}')
    # BUG FIX: the original bare `except:` silently swallowed every exception,
    # including KeyboardInterrupt. Catch only what this body can raise: a
    # missing key in the JSON response (city not found), an invalid JSON body,
    # or a network error from the HTTP call.
    except (KeyError, ValueError, requests.exceptions.RequestException):
        print("city not found. Skipping...")

# Indicate that Data Loading is complete
print("--------------------------\nData Retrieval Complete\n------------------------")
```
### Convert Raw Data to DataFrame
* Export the city data into a .csv.
* Display the DataFrame
```
# Build a DataFrame from the per-city records and fix the column order.
weather_df = pd.DataFrame(city_data)
weather_df = weather_df[["City","Cloudiness","Humidity","Latitude","Max Temp","Wind Speed"]]
weather_df.head()

# Export the city data to CSV.
# FIX: call to_csv as an instance method instead of the unidiomatic
# unbound form `pd.DataFrame.to_csv(weather_df, ...)` (same behavior).
weather_df.to_csv("weather_data.csv")
```
## Inspect the data and remove the cities where the humidity > 100%.
----
Skip this step if there are no cities that have humidity > 100%.
```
# Get the indices of cities that have humidity over 100%.
# Make a new DataFrame equal to the city data to drop all humidity outliers by index.
# Passing "inplace=False" will make a copy of the city_data DataFrame, which we call "clean_city_data".
# Extract relevant fields from the data frame
# Export the City_Data into a csv
```
## Plotting the Data
* Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.
* Save the plotted figures as .pngs.
## Latitude vs. Temperature Plot
```
# Scatter plot of city latitude against maximum temperature.
weather_df.plot(x="Latitude",y="Max Temp",kind ="scatter",title="Latitude vs. Maximum Temperature",grid = True)
plt.show()
```
## Latitude vs. Humidity Plot
```
# Scatter plot of city latitude against humidity.
weather_df.plot(x="Latitude",y="Humidity",kind ="scatter",title="Latitude vs. Humidity",grid = True)
plt.show()
```
## Latitude vs. Cloudiness Plot
```
# Scatter plot of city latitude against cloudiness.
weather_df.plot(x="Latitude",y="Cloudiness",kind ="scatter",title="Latitude vs. Cloudiness",grid = True)
plt.show()
```
## Latitude vs. Wind Speed Plot
```
# Scatter plot of city latitude against wind speed.
weather_df.plot(x="Latitude",y="Wind Speed",kind ="scatter",title="Latitude vs. Wind Speed",grid = True)
plt.show()
```
## Linear Regression
```
# OPTIONAL: Create a function to create Linear Regression plots
# Create Northern and Southern Hemisphere DataFrames
```
#### Northern Hemisphere - Max Temp vs. Latitude Linear Regression
#### Southern Hemisphere - Max Temp vs. Latitude Linear Regression
#### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression
#### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression
#### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
#### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
#### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
#### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
| github_jupyter |
# DC Resistivity: 1D parametric inversion
_Inverting for Resistivities and Layers_
Here we use the module *SimPEG.electromagnetics.static.resistivity* to invert
DC resistivity sounding data and recover the resistivities and layer thicknesses
for a 1D layered Earth. In this tutorial, we focus on the following:
- How to define sources and receivers from a survey file
- How to define the survey
- Defining a model that consists of resistivities and layer thicknesses
For this tutorial, we will invert sounding data collected over a layered Earth using
a Wenner array. The end product is a layered Earth model that explains the data.
## Import modules
```
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
from discretize import TensorMesh
from SimPEG import (
maps, data, data_misfit, regularization,
optimization, inverse_problem, inversion, directives
)
from SimPEG.electromagnetics.static import resistivity as dc
from SimPEG.electromagnetics.static.utils.StaticUtils import plot_layer
mpl.rcParams.update({'font.size': 14})
```
## Define Parameters for the Inversion
```
# Define the file path to the data file. Also define the AB/2, MN/2 and apparent resistivity columns.
# Recall that python counts starting at 0
data_filename = './sounding_data/Aung_San_Location_1_raw.csv'
half_AB_column = 'AB/2 (m)'                        # half current-electrode spacing column
half_MN_column = 'MN/2 (m)'                        # half potential-electrode spacing column
apparent_resistivity_column = 'App. Res. (Ohm m)'  # observed apparent resistivity column

# Define the floor and percent uncertainty you would like to apply to apparent resistivity data
uncertainty_floor = 5        # additive floor, same units as the data
uncertainty_percent = 10.    # relative part, in percent of |dobs|

# Define layer thicknesses and resistivities for the starting model. The thickness
# of the bottom layer is not used, as we assume it extends downward to infinity.
layer_thicknesses = np.r_[10, 10]    # starting thicknesses (m) of the layers above the halfspace
halfspace_resistivity = 300.         # starting resistivity (Ohm m) used for every layer
```
## Load Data, Define Survey and Plot
Here we load the observed data, define the DC survey geometry and plot the
data values.
```
# Load data
df = pd.read_csv(data_filename)

# Extract source and receiver electrode locations and the observed data
half_AB_separations = df[half_AB_column]
half_MN_separations = df[half_MN_column]
dobs = df[apparent_resistivity_column].values

# One resistivity per layer plus one for the underlying halfspace.
resistivities = halfspace_resistivity*np.ones(layer_thicknesses.size+1)

# Define survey
# Rows sharing the same AB/2 value belong to one source; np.unique returns
# the first-occurrence row index of each group in `k`.
unique_tx, k = np.unique(half_AB_separations, return_index=True)
n_sources = len(k)
k = np.sort(k)
# Append a past-the-end bound so k[ii]:k[ii+1] also slices the last group.
# NOTE(review): len(dobs) would already suffice as the end bound; the +1 is
# harmless for slicing but looks unintentional — confirm.
k = np.r_[k, len(dobs)+1]

source_list = []
for ii in range(0, n_sources):
    # MN electrode locations for receivers. Each is an (N, 3) numpy array
    M_locations = -half_MN_separations[k[ii]:k[ii+1]]
    M_locations = np.c_[M_locations, np.zeros((np.shape(M_locations)[0], 2))]
    N_locations = half_MN_separations[k[ii]:k[ii+1]]
    N_locations = np.c_[N_locations, np.zeros((np.shape(N_locations)[0], 2))]
    receiver_list = [dc.receivers.Dipole(M_locations, N_locations)]
    # AB electrode locations for source. Each is a (1, 3) numpy array
    A_location = np.r_[-half_AB_separations[k[ii]], 0., 0.]
    B_location = np.r_[half_AB_separations[k[ii]], 0., 0.]
    source_list.append(dc.sources.Dipole(receiver_list, A_location, B_location))

# Define survey
survey = dc.Survey(source_list)

# Compute the A, B, M and N electrode locations.
survey.getABMN_locations()

# Plot apparent resistivities on sounding curve as a function of Wenner separation
# parameter.
electrode_separations = np.sqrt(
    np.sum((survey.m_locations - survey.n_locations)**2, axis=1)
)
fig, ax = plt.subplots(1, 1, figsize=(11, 5))
ax.loglog(half_AB_separations, dobs, 'b', lw=2)
ax.grid(True, which='both', ls="--", c='gray')
ax.set_xlabel("AB/2 (m)")
ax.set_ylabel("Apparent Resistivity ($\Omega m$)")
```
## Assign Uncertainties
Inversion with SimPEG requires that we define uncertainties on our data. The
uncertainty represents our estimate of the standard deviation of the noise on
our data.
```
uncertainties = uncertainty_floor + 0.01*uncertainty_percent*np.abs(dobs)
```
## Define Data
Here is where we define the data that are inverted. The data are defined by
the survey, the observation values and the uncertainties.
```
data_object = data.Data(survey, dobs=dobs, standard_deviation=uncertainties)
```
## Defining the Starting Model and Mapping
```
# Define the layers as a mesh
mesh = TensorMesh([layer_thicknesses], '0')
print(mesh)

# Define model. We are inverting for the layer resistivities and layer thicknesses.
# Since the bottom layer extends to infinity, it is not a model parameter for
# which we need to invert. For a 3 layer model, there is a total of 5 parameters.
# For stability, our model is the log-resistivity and log-thickness.
starting_model = np.r_[np.log(resistivities), np.log(layer_thicknesses)]

# Since the model contains two different properties for each layer, we use
# wire maps to distinguish the properties.
# 'rho' has one value per layer plus the halfspace (nC+1); 't' has one per layer (nC).
wire_map = maps.Wires(('rho', mesh.nC+1), ('t', mesh.nC))
# Exponential maps recover positive resistivities/thicknesses from the log model.
resistivity_map = maps.ExpMap(nP=mesh.nC+1) * wire_map.rho
layer_map = maps.ExpMap(nP=mesh.nC) * wire_map.t
```
## Define the Physics
Here we define the physics of the problem using the DCSimulation_1D class.
```
# 1D layered-Earth DC simulation predicting apparent resistivity directly.
simulation = dc.simulation_1d.Simulation1DLayers(
    survey=survey, rhoMap=resistivity_map, thicknessesMap=layer_map,
    data_type="apparent_resistivity"
)
```
## Define Inverse Problem
The inverse problem is defined by 3 things:
1) Data Misfit: a measure of how well our recovered model explains the field data
2) Regularization: constraints placed on the recovered model and a priori information
3) Optimization: the numerical approach used to solve the inverse problem
```
# Define the data misfit. Here the data misfit is the L2 norm of the weighted
# residual between the observed data and the data predicted for a given model.
# The weighting is defined by the reciprocal of the uncertainties.
dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_object)

# Define the regularization on the parameters related to resistivity
# (one cell per layer plus the halfspace, hence hx.size+1).
mesh_rho = TensorMesh([mesh.hx.size+1])
reg_rho = regularization.Simple(
    mesh_rho, alpha_s=1., alpha_x=1,
    mapping=wire_map.rho
)

# Define the regularization on the parameters related to layer thickness
mesh_t = TensorMesh([mesh.hx.size])
reg_t = regularization.Simple(
    mesh_t, alpha_s=1., alpha_x=1,
    mapping=wire_map.t
)

# Combine to make regularization for the inversion problem
reg = reg_rho + reg_t

# Define how the optimization problem is solved. Here we will use an inexact
# Gauss-Newton approach that employs the conjugate gradient solver.
opt = optimization.InexactGaussNewton(
    maxIter=20, maxIterCG=30, print_type='ubc'
)

# Define the inverse problem
inv_prob = inverse_problem.BaseInvProblem(dmis, reg, opt)
```
## Define Inversion Directives
Here we define any directives that are carried out during the inversion. This
includes the cooling schedule for the trade-off parameter (beta), stopping
criteria for the inversion and saving inversion results at each iteration.
```
# Defining a starting value for the trade-off parameter (beta) between the data
# misfit and the regularization.
starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1.)

# Set the rate of reduction in trade-off parameter (beta) each time the
# inverse problem is solved, and the number of Gauss-Newton iterations
# for each trade-off parameter value.
beta_schedule = directives.BetaSchedule(coolingFactor=2., coolingRate=1.)

# Apply and update sensitivity weighting as the model updates
update_sensitivity_weights = directives.UpdateSensitivityWeights()

# Options for outputting recovered models and predicted data for each beta.
save_iteration = directives.SaveOutputEveryIteration(save_txt=False)

# Setting a stopping criteria for the inversion.
target_misfit = directives.TargetMisfit(chifact=1)

# The directives are defined in a list.
# NOTE(review): update_sensitivity_weights and save_iteration are created
# above but not included here, so they have no effect — confirm intended.
directives_list = [
    starting_beta, beta_schedule, target_misfit
]
```
## Running the Inversion
To define the inversion object, we need to define the inversion problem and
the set of directives. We can then run the inversion.
```
# Here we combine the inverse problem and the set of directives
inv = inversion.BaseInversion(inv_prob, directives_list)

# Run the inversion
recovered_model = inv.run(starting_model)

# Inversion result from Mon DRD Mawlamyine location 3, kept here for a
# comparison plot (currently commented out in the plotting cell below).
res_tmp = np.array([348.4, 722.9, 282, 100.8, 51.4, 170.8, 31.1, 184.3])
thick_tmp = np.array([1.4, 1.6, 1.4, 12.1, 11.4, 25.1, 54.2])
plotting_mesh_tmp = TensorMesh([np.r_[thick_tmp, layer_thicknesses[-1]]], '0')
```
## Examining the Results
```
# Plot true model and recovered model
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
# Mesh built from the recovered thicknesses, padded with the starting bottom layer.
plotting_mesh = TensorMesh([np.r_[layer_map*recovered_model, layer_thicknesses[-1]]], '0')
x_min = np.min(resistivity_map*recovered_model)
x_max = np.max(resistivity_map*recovered_model)
plot_layer(resistivity_map*recovered_model, plotting_mesh, ax=ax, depth_axis=False, color='k')
#plot_layer(res_tmp, plotting_mesh_tmp, ax=ax, depth_axis=False, color='r')
#ax.set_xlim(10, 5000)
#ax.set_ylim(-300, 0)
#ax.legend(("SimPEG", "Mon State DRD"))
ax.grid(True, which='both', ls="--", c='gray')

# Plot the observed and predicted apparent resistivities on a sounding curve.
fig, ax = plt.subplots(1, 1, figsize=(7, 5))
ax.loglog(half_AB_separations, dobs, 'kx', lw=2, ms=10, mew=2)
ax.loglog(half_AB_separations, inv_prob.dpred, 'k', lw=2)
ax.set_xlabel("AB/2 (m)")
ax.set_ylabel("Apparent Resistivity ($\Omega m$)")
ax.legend(['Observed data','Predicted data'])
#ax.set_ylim(50, 1000)
ax.grid(True, which='both')
```
| github_jupyter |
# **DIVE INTO CODE COURSE**
## **Graduation Assignment**
**Student Name**: Doan Anh Tien<br>
**Student ID**: 1852789<br>
**Email**: tien.doan.g0pr0@hcmut.edu.vn
## Introduction
The graduation assignment was based on one of the challenges from the Vietnamese competition **Zalo AI Challenge**. The description of the challenge is described as follows:
> During the Covid-19 outbreak, the Vietnamese government pushed the "5K" public health safety message. In the message, masking and keeping a safe distance are two key rules that have been shown to be extremely successful in preventing people from contracting or spreading the virus. Enforcing these principles on a large scale is where technology may help. In this challenge, you will create algorithm to detect whether or not a person or group of individuals in a picture adhere to the "mask" and "distance" standards.
**Basic rules**
We are given a dataset containing images of people either wearing a mask or not, standing either close to or far from each other. Our mission is to predict whether the formation of these people adheres to the 5K standard.
The 5K standard is based on two conditions: mask (0 == not wearing, 1 == wearing) and distancing (0 == too close, 1 == far enough). People who adhere to the 5K standard are unlikely to expose each other to the virus in case they have caught it before, which helps prevent the spread of the COVID-19 pandemic through person-to-person interactions.
---
```
import tensorflow as tf

# Run tf.data transformations in debug mode (aids pipeline debugging).
tf.data.experimental.enable_debug_mode()
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))

#@title
# Align the Colab TPU runtime's TF version with the notebook's (TPU runtime only).
!pip install cloud_tpu_client
from cloud_tpu_client import Client
c = Client(tpu='') # For TPU runtime
print(c.runtime_version())

#@title
c.configure_tpu_version(tf.__version__, restart_type='ifNeeded')

#@title
print(c.runtime_version())

!nvidia-smi # For GPU runtime

# For when the TPU is used: connect and build a TPU distribution strategy.
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.TPUStrategy(tpu)

!pip install wandb

# Mount Google Drive and move into the project folder.
from google.colab import drive
drive.mount('/content/drive')
%cd /content/drive/MyDrive/Colab Notebooks/DIVE INTO CODE/Graduation
!ls
```
## **1. Resources preparation**
### Libraries
```
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import wandb
from wandb.keras import WandbCallback
from tensorflow.data import AUTOTUNE
from tensorflow import keras
from tensorflow.keras import layers
from PIL import Image
# Some libraries will be imported later throughout the notebook
print('Tensorflow version:', tf.__version__)
print('Keras version:', keras.__version__)
```
### W&B login and init project
```
# SECURITY NOTE(review): a W&B API key is hard-coded below; it should be
# revoked and supplied via an environment variable or an interactive
# `wandb login` prompt instead of being committed to the notebook.
!wandb login 88c91a7dc6dd5574f423e38f852c6fe640a7fcd0
wandb.init(project="diveintocode-grad-1st-approach", entity="atien228")
```
### Hyperparamaters
```
# Which label to train a detector for: 'mask' or 'distancing'.
standard = 'mask' #@param ['mask', 'distancing']
# Seed used by the random data-augmentation layers.
SEED = 42 #@param {type:'integer'}

# Training hyperparameters, read back later via wandb.config[...].
# NOTE(review): assigning wandb.config directly replaces the run's Config
# object with a plain dict; wandb recommends wandb.config.update({...}) so
# the values sync to the dashboard — confirm intended.
wandb.config = {
    "learning_rate": 0.001,
    "epochs": 15,
    "batch_size": 16,
    "momentum": 0.85,
    "smoothing": 0.1
}
```
### Preprocessing data-set
```
data_path = '/content/drive/MyDrive/Colab Notebooks/DIVE INTO CODE/Graduation/data'
img_dir = os.path.join(data_path, 'images')
os.listdir(img_dir)[:10]

# Metadata: one row per image with 'mask' and 'distancing' labels.
meta = pd.read_csv(os.path.join(data_path, 'train_meta.csv'))
meta

# Show one sample image together with its labels.
img1 = meta.iloc[0]
print(r'Image ID: {}, Mask: {}, Distancing: {}'.format(img1['image_id'], img1['mask'], img1['distancing']))
img = Image.open(os.path.join(img_dir, img1['fname']))
img

# Keep only rows whose label for the chosen standard is present (not NaN).
dataset = []
label = []
for idx, row in meta.iterrows():
    if pd.notna(row[standard]):
        dataset.append(os.path.join(img_dir, row['fname'])) # Mask or distancing
        label.append(row[standard])

for i in range(5):
    print(f'img: {dataset[i]} label: {label[i]}')

# NOTE(review): label_val is not defined in this cell; it presumably comes
# from a train/val split cell not shown here — confirm execution order.
len(label_val)
```
Create a small portion of test set since the competition won't let me submit a new entry to check my score
```
# Carve a small test split out of the training data.
# NOTE(review): df_train, df_val, label_train and label_val are not defined
# in the visible cells; they presumably come from an earlier split of
# `dataset`/`label` — confirm the notebook's execution order.
df_test = df_train[1200:1500]
label_test = label_train[1200:1500]
df_train = df_train[:1200]
df_val = df_val[:300]
label_train = label_train[:1200]
label_val = label_val[:300]

# Quick sanity checks on the first training sample and one metadata row.
df_train[0]
label_train[0]
meta.iloc[3713]
```
Create tuple of train and validation set for further process
```
def _pair_then_split(samples, targets):
    # Zip samples with targets, then transpose back so the result is
    # (tuple_of_samples, tuple_of_targets). zip truncates to the shorter
    # input, exactly like the original two-step idiom.
    paired = tuple(zip(samples, targets))
    return tuple(zip(*paired))

# Rebuild the train/val sets as (paths, labels) tuple pairs.
df_train = _pair_then_split(df_train, label_train)
df_val = _pair_then_split(df_val, label_val)
```
### Tensorflow Hub for a variety of CNN models
EfficientNet models and ckpts (and other image classifer models too)
```
import tensorflow_hub as hub

# Report the TF-Hub version and whether TensorFlow can see a GPU.
print("Hub version:", hub.__version__)
print("GPU is", "available" if tf.config.list_physical_devices('GPU') else "NOT AVAILABLE")
def get_hub_url_and_isize(model_name):
    """Look up the TF-Hub feature-vector handle and input size for a model.

    Args:
        model_name: key naming the backbone, e.g. 'efficientnetv2-b3-21k-ft1k'.

    Returns:
        (model_type, IMAGE_SIZE, pixels): the TF-Hub handle URL, the square
        input size as a (pixels, pixels) tuple, and the side length. Both
        lookups use dict.get, so an unknown name yields None (and a model
        missing from the size map yields a (None, None) size).
    """
    # Map from model name to its TF-Hub feature-vector module handle.
    model_handle_map = {
        "efficientnetv2-s": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_s/feature_vector/2",
        "efficientnetv2-m": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_m/feature_vector/2",
        "efficientnetv2-l": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_l/feature_vector/2",
        "efficientnetv2-s-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_s/feature_vector/2",
        "efficientnetv2-m-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_m/feature_vector/2",
        "efficientnetv2-l-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_l/feature_vector/2",
        "efficientnetv2-xl-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_xl/feature_vector/2",
        "efficientnetv2-b0-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_b0/feature_vector/2",
        "efficientnetv2-b1-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_b1/feature_vector/2",
        "efficientnetv2-b2-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_b2/feature_vector/2",
        "efficientnetv2-b3-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_b3/feature_vector/2",
        "efficientnetv2-s-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_s/feature_vector/2",
        "efficientnetv2-m-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_m/feature_vector/2",
        "efficientnetv2-l-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_l/feature_vector/2",
        "efficientnetv2-xl-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_xl/feature_vector/2",
        "efficientnetv2-b0-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_b0/feature_vector/2",
        "efficientnetv2-b1-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_b1/feature_vector/2",
        "efficientnetv2-b2-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_b2/feature_vector/2",
        "efficientnetv2-b3-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_b3/feature_vector/2",
        "efficientnetv2-b0": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_b0/feature_vector/2",
        "efficientnetv2-b1": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_b1/feature_vector/2",
        "efficientnetv2-b2": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_b2/feature_vector/2",
        "efficientnetv2-b3": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_b3/feature_vector/2",
        "efficientnet_b0": "https://tfhub.dev/tensorflow/efficientnet/b0/feature-vector/1",
        "efficientnet_b1": "https://tfhub.dev/tensorflow/efficientnet/b1/feature-vector/1",
        "efficientnet_b2": "https://tfhub.dev/tensorflow/efficientnet/b2/feature-vector/1",
        "efficientnet_b3": "https://tfhub.dev/tensorflow/efficientnet/b3/feature-vector/1",
        "efficientnet_b4": "https://tfhub.dev/tensorflow/efficientnet/b4/feature-vector/1",
        "efficientnet_b5": "https://tfhub.dev/tensorflow/efficientnet/b5/feature-vector/1",
        "efficientnet_b6": "https://tfhub.dev/tensorflow/efficientnet/b6/feature-vector/1",
        "efficientnet_b7": "https://tfhub.dev/tensorflow/efficientnet/b7/feature-vector/1",
        "bit_s-r50x1": "https://tfhub.dev/google/bit/s-r50x1/1",
        "inception_v3": "https://tfhub.dev/google/imagenet/inception_v3/feature-vector/4",
        "inception_resnet_v2": "https://tfhub.dev/google/imagenet/inception_resnet_v2/feature-vector/4",
        "resnet_v1_50": "https://tfhub.dev/google/imagenet/resnet_v1_50/feature-vector/4",
        "resnet_v1_101": "https://tfhub.dev/google/imagenet/resnet_v1_101/feature-vector/4",
        "resnet_v1_152": "https://tfhub.dev/google/imagenet/resnet_v1_152/feature-vector/4",
        "resnet_v2_50": "https://tfhub.dev/google/imagenet/resnet_v2_50/feature-vector/4",
        "resnet_v2_101": "https://tfhub.dev/google/imagenet/resnet_v2_101/feature-vector/4",
        "resnet_v2_152": "https://tfhub.dev/google/imagenet/resnet_v2_152/feature-vector/4",
        "nasnet_large": "https://tfhub.dev/google/imagenet/nasnet_large/feature_vector/4",
        "nasnet_mobile": "https://tfhub.dev/google/imagenet/nasnet_mobile/feature_vector/4",
        "pnasnet_large": "https://tfhub.dev/google/imagenet/pnasnet_large/feature_vector/4",
        "mobilenet_v2_100_224": "https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/4",
        "mobilenet_v2_130_224": "https://tfhub.dev/google/imagenet/mobilenet_v2_130_224/feature_vector/4",
        "mobilenet_v2_140_224": "https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/feature_vector/4",
        "mobilenet_v3_small_100_224": "https://tfhub.dev/google/imagenet/mobilenet_v3_small_100_224/feature_vector/5",
        "mobilenet_v3_small_075_224": "https://tfhub.dev/google/imagenet/mobilenet_v3_small_075_224/feature_vector/5",
        "mobilenet_v3_large_100_224": "https://tfhub.dev/google/imagenet/mobilenet_v3_large_100_224/feature_vector/5",
        "mobilenet_v3_large_075_224": "https://tfhub.dev/google/imagenet/mobilenet_v3_large_075_224/feature_vector/5",
    }

    # Expected square input resolution for each model. Models missing from
    # this map (ResNet/MobileNet/BiT families) yield pixels == None.
    model_image_size_map = {
        "efficientnetv2-s": 384,
        "efficientnetv2-m": 480,
        "efficientnetv2-l": 480,
        "efficientnetv2-b0": 224,
        "efficientnetv2-b1": 240,
        "efficientnetv2-b2": 260,
        "efficientnetv2-b3": 300,
        "efficientnetv2-s-21k": 384,
        "efficientnetv2-m-21k": 480,
        "efficientnetv2-l-21k": 480,
        "efficientnetv2-xl-21k": 512,
        "efficientnetv2-b0-21k": 224,
        "efficientnetv2-b1-21k": 240,
        "efficientnetv2-b2-21k": 260,
        "efficientnetv2-b3-21k": 300,
        "efficientnetv2-s-21k-ft1k": 384,
        "efficientnetv2-m-21k-ft1k": 480,
        "efficientnetv2-l-21k-ft1k": 480,
        "efficientnetv2-xl-21k-ft1k": 512,
        "efficientnetv2-b0-21k-ft1k": 224,
        "efficientnetv2-b1-21k-ft1k": 240,
        "efficientnetv2-b2-21k-ft1k": 260,
        "efficientnetv2-b3-21k-ft1k": 300,
        "efficientnet_b0": 224,
        "efficientnet_b1": 240,
        "efficientnet_b2": 260,
        "efficientnet_b3": 300,
        "efficientnet_b4": 380,
        "efficientnet_b5": 456,
        "efficientnet_b6": 528,
        "efficientnet_b7": 600,
        "inception_v3": 299,
        "inception_resnet_v2": 299,
        "nasnet_large": 331,
        "pnasnet_large": 331,
    }

    model_type = model_handle_map.get(model_name)
    pixels = model_image_size_map.get(model_name)

    print(f"Selected model: {model_name} : {model_type}")

    IMAGE_SIZE = (pixels, pixels)
    print(f"Input size {IMAGE_SIZE}")

    return model_type, IMAGE_SIZE, pixels
# Selected backbone; the long list mirrors get_hub_url_and_isize's keys.
model_name = "efficientnetv2-b3-21k-ft1k" # @param ['efficientnetv2-s', 'efficientnetv2-m', 'efficientnetv2-l', 'efficientnetv2-s-21k', 'efficientnetv2-m-21k', 'efficientnetv2-l-21k', 'efficientnetv2-xl-21k', 'efficientnetv2-b0-21k', 'efficientnetv2-b1-21k', 'efficientnetv2-b2-21k', 'efficientnetv2-b3-21k', 'efficientnetv2-s-21k-ft1k', 'efficientnetv2-m-21k-ft1k', 'efficientnetv2-l-21k-ft1k', 'efficientnetv2-xl-21k-ft1k', 'efficientnetv2-b0-21k-ft1k', 'efficientnetv2-b1-21k-ft1k', 'efficientnetv2-b2-21k-ft1k', 'efficientnetv2-b3-21k-ft1k', 'efficientnetv2-b0', 'efficientnetv2-b1', 'efficientnetv2-b2', 'efficientnetv2-b3', 'efficientnet_b0', 'efficientnet_b1', 'efficientnet_b2', 'efficientnet_b3', 'efficientnet_b4', 'efficientnet_b5', 'efficientnet_b6', 'efficientnet_b7', 'bit_s-r50x1', 'inception_v3', 'inception_resnet_v2', 'resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152', 'resnet_v2_50', 'resnet_v2_101', 'resnet_v2_152', 'nasnet_large', 'nasnet_mobile', 'pnasnet_large', 'mobilenet_v2_100_224', 'mobilenet_v2_130_224', 'mobilenet_v2_140_224', 'mobilenet_v3_small_100_224', 'mobilenet_v3_small_075_224', 'mobilenet_v3_large_100_224', 'mobilenet_v3_large_075_224']
# num_epochs = 5 #@param {type: "integer"}
# Whether to fine-tune the hub backbone (True) or freeze it (False).
trainable = True #@param {type: "boolean"}

model_url, img_size, pixels = get_hub_url_and_isize(model_name)
IMG_HEIGHT = IMG_WIDTH = pixels
```
### Data-set interpretion
#### Load Image function for W&B
```
def load_img(path, label):
    """Read a JPEG from disk, resize it, and convert the label to an index.

    Args:
        path: filesystem path to a JPEG image.
        label: numeric class label (0.0 or 1.0).

    Returns:
        (image tensor resized to (IMG_HEIGHT, IMG_WIDTH, 3), integer label).
    """
    img = tf.io.read_file(path) # <= For non-TPU
    # with open(path, "rb") as local_file: # <= For TPU
    # img = local_file.read()
    img = tf.image.decode_jpeg(img, channels=3)
    img = tf.image.resize(img, (IMG_HEIGHT, IMG_WIDTH))
    # argmax over [label == 0.0, label == 1.0] maps the float label to 0 or 1.
    onehot_label = tf.argmax(label == [0.0, 1.0])
    # img = np.load(img.numpy(), allow_pickle=True)
    # onehot_label = np.load(onehot_label.numpy(), allow_pickle=True)
    return img, onehot_label # ,img.shape(), onehot_label.shape()
```
#### Tensorflow Data-set
```
# Training pipeline: shuffle paths, decode/resize in parallel, batch,
# cache the decoded tensors, and prefetch to overlap input with training.
ds_train = tf.data.Dataset.from_tensor_slices((list(df_train[0]), list(df_train[1])))

# Configure with W&B settings
ds_train = (ds_train
            .shuffle(buffer_size=1024)
            .map(load_img, num_parallel_calls=AUTOTUNE)
            .batch(wandb.config['batch_size'])
            .cache()
            .prefetch(AUTOTUNE))

# Validation pipeline, built the same way as the training pipeline.
ds_val = tf.data.Dataset.from_tensor_slices((list(df_val[0]), list(df_val[1])))

# Configure with W&B settings
ds_val = (ds_val
          .shuffle(buffer_size=1024)
          .map(load_img, num_parallel_calls=AUTOTUNE)
          .batch(wandb.config['batch_size'])
          .cache()
          .prefetch(AUTOTUNE))

ds_train
```
## **2. Modeling**
### Define model structure and metrics
```
from sklearn.metrics import f1_score

# Run tf.functions eagerly so the sklearn-based metric below can operate on
# concrete tensors during training.
tf.config.run_functions_eagerly(True)

@tf.autograph.experimental.do_not_convert
def f1(y_true, y_pred):
    """F1 score computed with scikit-learn on the argmax of the predictions."""
    return f1_score(y_true,
    tf.math.argmax(y_pred, 1))

# Data augmentation layer for image
tf.keras.backend.clear_session()

# =============== TPU ==================
# with strategy.scope():
#     data_augmentation = tf.keras.Sequential([
#         keras.layers.InputLayer(input_shape=img_size + (3,)),
#         layers.RandomFlip("horizontal_and_vertical", seed=SEED),
#         layers.RandomRotation(0.2, seed=SEED),
#         layers.RandomZoom(0.1, seed=SEED)
#     ])
#     model = tf.keras.Sequential([
#         data_augmentation,
#         hub.KerasLayer(model_url, trainable=trainable), # Trainable: Fine tuning
#         layers.Dropout(rate=0.2, seed=SEED),
#         layers.Dense(units=2, # Binary classifcation
#                      activation='softmax')
#     ])
#     model.build((None,) + img_size + (3,)) # (IMG_SIZE, IMG_SIZE, 3)
#     model.summary()
#     # Update formula rule
#     # velocity = momentum * velocity - learning_rate * g
#     # w = w + momentum * velocity - learning_rate * g
#     model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=wandb.config['learning_rate'], momentum=wandb.config['momentum'], nesterov=True),
#                   #loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True, label_smoothing=wandb.config['label_smoothing'])
#                   loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
#                   metrics=['accuracy', f1])

# =============== GPU ==================
# Random augmentations applied on-the-fly at training time.
data_augmentation = tf.keras.Sequential([
    keras.layers.InputLayer(input_shape=[IMG_HEIGHT, IMG_WIDTH, 3]),
    layers.RandomFlip("horizontal_and_vertical", seed=SEED),
    # layers.RandomRotation(0.2, seed=SEED),
    layers.RandomZoom(0.1, seed=SEED),
    layers.experimental.preprocessing.RandomWidth(0.1, seed=SEED),
])

# Augmentation -> TF-Hub backbone -> dropout -> 2-way softmax head.
model = tf.keras.Sequential([
    data_augmentation,
    hub.KerasLayer(model_url, trainable=trainable), # Trainable: Fine tuning
    layers.Dropout(rate=0.2, seed=SEED),
    layers.Dense(units=2, # Binary classification
    activation='softmax',
    kernel_regularizer=tf.keras.regularizers.l2(0.0001))
])
model.build((None,) + img_size + (3,)) # (IMG_SIZE, IMG_SIZE, 3)
model.summary()

# Update formula rule (when nesterov=True)
# velocity = momentum * velocity - learning_rate * g
# w = w + momentum * velocity - learning_rate * g
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=wandb.config['learning_rate'], momentum=wandb.config['momentum'], nesterov=False),
              #loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True, label_smoothing=wandb.config['label_smoothing'])
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['accuracy', f1])
```
### Train model with W&B monitoring
```
# Train with the W&B callback logging metrics each epoch; keep only the
# history dict for later inspection.
hist = model.fit(ds_train, validation_data=ds_val,
                 epochs=wandb.config['epochs'],
                 callbacks=[WandbCallback()],
                 verbose=1).history
```
### Save model and weights
```
# Persist the full model and, separately, just its weights next to the data.
model.save(data_path + f'/{standard}.keras')
model.save_weights(
    data_path + f'/{standard}_weight.h5', overwrite=True, save_format=None, options=None
)
```
## **3. Evaluation**
### Self-made test dataset
We will evaluate the model performance with the small proportion of the test data-set that we have created
#### Mask detection
Predict trial for one image
```
x_test = df_test[0] # Path to 655.jpg
y_test = label_test[0] # Mask label of 655.jpg

# Load and resize the image the same way the training pipeline does.
image = tf.io.read_file(x_test)
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, (IMG_HEIGHT, IMG_WIDTH))

# BUG FIX: y_test is a scalar label (0.0 or 1.0), not a one-hot vector, so
# np.argmax(y_test) is always 0 and the original always printed 'No mask'.
true_label = 'No mask' if int(y_test) == 0 else 'Mask'

plt.imshow(image/255.0)
plt.axis('off')
plt.show()

# prediction_scores is a (1, 2) softmax row, so argmax is correct here.
prediction_scores = model.predict(np.expand_dims(image, axis=0))
predicted_label = 'No mask' if (np.argmax(prediction_scores) == 0) else 'Mask'
print("True label: " + true_label)
print("Predicted label: " + predicted_label)
```
Evaluate the test dataset
```
from sklearn.metrics import accuracy_score
# Classify every self-made test image one at a time and score against the held-out labels.
# (Per-image predict() calls are slow but simple; batching would speed this up.)
prediction_list = []
for i in range(len(df_test)):
image = tf.io.read_file(df_test[i])
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, (IMG_HEIGHT, IMG_WIDTH))
prediction_scores = model.predict(np.expand_dims(image, axis=0))
prediction_list.append(np.argmax(prediction_scores))
if (i % 10 == 0):
print(f"Predicted {i} images.")
acc = accuracy_score(label_test, prediction_list)
print(f"Test accuracy: {acc}")
```
The test dataset was originally split off from the train dataset and did not take part in the model's training process, so this accuracy is quite reasonable. So far we have trained the model to detect masks on people and predict whether they adhere to the 5K standards.
From here, we can change the `standard` variable from `'mask'` to `'distancing'` to train the second model that specifically serves for the distance detection purpose. After finished all requirements, we can use the results from both models to conclude the `5k attribute` and export the final submission.
The 5k attribute can be evaluated as the pseudo code below:
```
5k = 1 if (mask == 1 and distancing == 1) else 0
```
#### Distancing detection
Predict trial for one image
```
# Single-image sanity check for the distancing model, mirroring the mask trial above.
x_test = df_test[10] # Path to 1995.jpg
y_test = label_test[10] # Distancing label of 1995.jpg
image = tf.io.read_file(x_test)
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, (IMG_HEIGHT, IMG_WIDTH))
true_label = 'Too close' if (np.argmax(y_test) == 0) else 'Good distance'
plt.imshow(image/255.0) # rescale to [0, 1] for display only
plt.axis('off')
plt.show()
prediction_scores = model.predict(np.expand_dims(image, axis=0)) # add batch dimension
predicted_label = 'Too close' if (np.argmax(prediction_scores) == 0) else 'Good distance'
print("True label: " + true_label)
print("Predicted label: " + predicted_label)
```
Because many images are missing the mask, distancing, or 5k labels (sometimes all of them), the model cannot discriminate as well, and hence the accuracy is reduced.
Evaluate the test dataset
```
from sklearn.metrics import accuracy_score
# Same per-image evaluation loop as for the mask model, now scoring distancing labels.
prediction_list = []
for i in range(len(df_test)):
image = tf.io.read_file(df_test[i])
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, (IMG_HEIGHT, IMG_WIDTH))
prediction_scores = model.predict(np.expand_dims(image, axis=0))
prediction_list.append(np.argmax(prediction_scores))
if (i % 10 == 0):
print(f"Predicted {i} images.")
acc = accuracy_score(label_test, prediction_list)
print(f"Test accuracy: {acc}")
```
Apparently, the **dataset** is missing a lot of distancing attribute compared to the **mask**. As said, the accuracy for detecting the distance is quite lower than the model of mask detection.
### Public Test set
```
# Load the public-test metadata (image file names) and resolve the image directory.
meta_test = pd.read_csv(data_path + '/test/public_test_meta.csv')
df_public_test = meta_test['fname']
test_img_path = data_path + '/test/images/'
```
#### Mask prediction
Load Model
```
# Custom objects (the f1 metric and the TF-Hub layer) must be registered so
# keras can deserialize the saved model.
dependencies = {
'f1': f1,
'KerasLayer': hub.KerasLayer(model_url, trainable=trainable)
}
model_mask = keras.models.load_model(data_path + f'/{standard}.keras', custom_objects=dependencies)
```
Predict
```
def predict_public_test(model, img_path):
    """Run *model* over every file listed in the global ``df_public_test``.

    Each image is read from ``img_path + fname``, decoded to RGB, resized to
    (IMG_HEIGHT, IMG_WIDTH), and classified one at a time; the argmax class
    index is collected per image. Progress is printed every 10 images.
    """
    predictions = []
    for idx, fname in enumerate(df_public_test):
        raw = tf.io.read_file(img_path + fname)
        img = tf.image.decode_jpeg(raw, channels=3)
        img = tf.image.resize(img, (IMG_HEIGHT, IMG_WIDTH))
        scores = model.predict(np.expand_dims(img, axis=0))  # add batch dim
        predictions.append(np.argmax(scores))
        if idx % 10 == 0:
            print(f"Predicted {idx} images.")
    return predictions
# Mask prediction: one class index (0 = no mask, 1 = mask) per public-test image.
prediction_mask_list = predict_public_test(model_mask, test_img_path)
```
#### Distancing prediction
```
# Switch standards: from here on, load/evaluate the distancing model instead of the mask model.
standard = 'distancing' #@param ['mask', 'distancing']
```
Load model
```
# The same custom-object registry is needed to deserialize the distancing model.
dependencies = {
'f1': f1,
'KerasLayer': hub.KerasLayer(model_url, trainable=trainable)
}
model_distancing = keras.models.load_model(data_path + f'/{standard}.keras', custom_objects=dependencies)
```
Predict
```
# Distancing prediction
prediction_distancing_list = predict_public_test(model_distancing, test_img_path)
meta_test_results = meta_test.copy()
# 5k compliance requires BOTH mask == 1 and distancing == 1.
# Bug fix: the column must be added to meta_test_results (the frame that is
# written out below); the original assigned it to meta_test, so the saved
# submission never contained the computed 5k label.
meta_test_results['5k'] = [1 if prediction_mask_list[i] == 1 and prediction_distancing_list[i] == 1 else 0 for i in range(len(meta_test_results))]
meta_test_results[:10]
import os
os.makedirs(data_path + '/submission', exist_ok=True)
# index=False keeps the pandas row index out of the submission file.
meta_test_results.to_csv(data_path + '/submission/5k-compliance-submission.csv', index=False)
```
## **4. Recreate the pipeline**
Since mask detection and distancing detection are separate procedures, evaluating new models or changing hyperparameters would be exhausting. In this section, I create a pipeline that can be run once to train, predict, and monitor the metrics.
But before heading to that part, we can re-examine our problem to find a better way for a better results. One problem still remains is that the dataset contain so many missing values, and it is in fact can affect our model predictions, hence getting less accuracy.
Missing values
```
#@title
# Visualize missingness: highlighted cells mark NaNs in each column.
plt.figure(figsize=(10,6))
sns.heatmap(meta.isnull(), cbar=False)
#@title
# Count rows missing each attribute and each pairwise/triple combination,
# to decide which gaps can be filled by rules vs. by model predictions.
print('Num. missing mask',\
len(meta[meta['mask'].isna()]))
print('Num. missing distancing',\
len(meta[meta['distancing'].isna()]))
print('Num. missing 5k',\
len(meta[meta['5k'].isna()]))
print('Num. missing mask and distancing:',\
len(meta[(meta['mask'].isna()) & (meta['distancing'].isna())]))
print('Num. missing mask and 5k:',\
len(meta[(meta['mask'].isna()) & (meta['5k'].isna())]))
print('Num. missing distancing and 5k:',\
len(meta[(meta['distancing'].isna()) & (meta['5k'].isna())]))
print('Num. missing all three attributes:',\
len(meta[(meta['mask'].isna()) & (meta['distancing'].isna()) & (meta['5k'].isna())]))
```
Apparently, the missing values occur as either one missing attribute out of three, or a missing pair of attributes (except for mask and distancing together). No row is missing all three attributes.
To derive the 5k value, we first need to know the mask and distancing values. Luckily, no row is missing both of these variables, so we can fill the missing values with our own logic (though not in every case).
The original rule for 5k evaluation can be described as follow:
```
5k = 1 if (mask == 1 and distancing == 1) else 0
```
Base on this, we can design a pipeline that can fill out the missing values and produce better results:
> 1. Model mask detection -> Use to predict the missing mask values -> From there continue to fill the missing distancing values
```
if (mask == 1) and (5k == 1):
distancing = 1
elif (mask == 1) and (5k == 0):
distancing = 0
elif (mask == 0) and (5k == 0):
distancing = 0
```
In case the mask is 0, we can skip it since `mask == 0 and 5k == 0` is the only case we can interpret, and for that case I have run the code:
`meta[(meta['mask'] == 0) & (meta['5k'] == 0) & (meta['distancing'].isna())]` and it return nothing. So it is safe to assume this part does not miss any values and is skippable.
> 2. Model distancing -> Use to predict the missing 5k values
```
if (distancing == 1) and (mask == 1):
    5k = 1
elif (distancing == 0) or (mask == 0):
    5k = 0
```
> 3. Model 5k -> Use to predict the final output 5k
In conclusion, the difference between the previous section and this section is that we will make three models instead of two. This is doable as we are going to fill the missing 5k values, thus we can use this attribute for our final prediction. For the new approach, please switch to `new_approach_kaggle.ipynb`
**Note 1**: After having a bad experience with Google Colab, I have switched the later approach to Kaggle with a longer session period and stronger GPU. But since Kaggle does not adapt the data storage/data retrieval well as Google Drive, I did had some trouble during using it. Thus some of the output files will need to download onto my PC in order to save the progress.
**Note 2**: The approach and procedures applied in this notebook is the initial one that I come up with first. In summary, I trained two models of mask detection and distancing detection. And after having the model trained, they will predict the `mask` and `distancing` labels. Based on the mask and distancing label, I use conditions to get the final label 5k. This approach is heavily unreliable since I skip all the missing values here.
For the new approach, I trained the mask model to predict the original train data-set again and fill all missing mask values. Then I used the updated data-set to continue to train the distance model and predict again to fill all missing distance values. After this step, I use the conditions again to fill the missing 5k values. In final step, I train the 5k model in order to predict the 5k label, which is differ from the initial approach where I did not use the 5k label for the train and evaluation process but instead generate it immediately based on the mask and distancing labels.
Comparing these two approaches, I personally think that the later one is better since it rely on all of the data. For example, having the results based on the mask and distancing only is not a good way since errors can occur in either predictions. Therefore, if we want to have 5k results for submission, we should train the model based on 5k values as well. And to make it happen, we should have investigate and interpret the missing values too.
| github_jupyter |
<a href="https://colab.research.google.com/github/chamikasudusinghe/nocml/blob/master/fft_r10_i1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Module Imports for Data Fetiching and Visualization
```
import time
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
```
Module Imports for Data Processing
```
from sklearn import preprocessing
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
import pickle
```
Importing Dataset from GitHub
Train Data
```
# Load the malicious and normal traffic captures for router r10 and merge them
# into a single time-ordered training table.
df1 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r10/2-fft-malicious-n-0-15-m-1-r10.csv?token=AKVFSOC76AIFOVTWA6B5Y4K63I6YK')
df9 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r10/2-fft-normal-n-0-15-r10.csv?token=AKVFSOBIWMVOSTJDKPI5LYS63I6ZC')
# pd.concat replaces DataFrame.append, which is deprecated and removed in pandas >= 2.0.
df = pd.concat([df1, df9], ignore_index=True, sort=False)
df = df.sort_values('timestamp')
# Round-trip through CSV so the notebook can be resumed from the cached file.
df.to_csv('fft-r1-train.csv',index=False)
df = pd.read_csv('fft-r1-train.csv')
df
df.shape
```
Test Data
```
# Held-out captures from three different attacker placements (m-11, m-12, m-7) on router r10.
df13 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r10/2-fft-malicious-n-0-15-m-11-r10.csv?token=AKVFSOF6CTQCKY7F6NUI5SS63I6YO')
df14 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r10/2-fft-malicious-n-0-15-m-12-r10.csv?token=AKVFSOFVGIPVBC454BZZIYS63JR4K')
df15 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r10/2-fft-malicious-n-0-15-m-7-r10.csv?token=AKVFSOBYE5BQE7QZ4AMG2NS63JR4O')
print(df13.shape)
print(df14.shape)
print(df15.shape)
```
Processing
```
# Basic hygiene: check for NaNs, drop identifier/topology columns that carry
# no learnable signal (or would leak the capture setup), then inspect correlations.
df.isnull().sum()
df = df.drop(columns=['timestamp','src_ni','src_router','dst_ni','dst_router'])
df.corr()
plt.figure(figsize=(25,25))
sns.heatmap(df.corr(), annot = True)
plt.show()
def find_correlation(data, threshold=0.9):
    """Return column names that can be dropped because they are highly
    correlated (|r| > threshold) with another column.

    Only the strict lower triangle of the correlation matrix is inspected,
    so each pair of columns is considered exactly once. For every group of
    mutually correlated columns, the first partner found is kept and the
    remaining members of the group (including the anchor column) are
    reported for dropping.
    """
    corr = data.corr()
    # Zero the diagonal and upper triangle: visit each pair only once.
    corr.loc[:, :] = np.tril(corr, k=-1)
    seen = set()
    groups = []
    for name in corr:
        partners = corr[name][corr[name].abs() > threshold].index.tolist()
        if partners and name not in seen:
            seen.update(partners)
            groups.append(partners + [name])
    # Keep the first member of each group; flag the rest for dropping.
    return [col for grp in groups for col in grp[1:]]
# Identify redundant (highly correlated) features, excluding the label.
columns_to_drop = find_correlation(df.drop(columns=['target']))
columns_to_drop
# NOTE(review): the hand-written list below is assumed to match columns_to_drop
# for this dataset — verify it before reuse on other routers.
df = df.drop(columns=['inport','cache_coherence_type','flit_id','flit_type','vnet','current_hop','hop_percentage','port_index','cache_coherence_vnet_index','vnet_vc_cc_index'])
plt.figure(figsize=(11,11))
sns.heatmap(df.corr(), annot = True)
plt.show()
plt.figure(figsize=(11,11))
sns.heatmap(df.corr())
plt.show()
```
Processing Dataset for Training
```
# Split features/label, then min-max scale every feature into [0, 1].
train_X = df.drop(columns=['target'])
train_Y = df['target']
#standardization
x = train_X.values
min_max_scaler = preprocessing.MinMaxScaler()
columns = train_X.columns
x_scaled = min_max_scaler.fit_transform(x)
train_X = pd.DataFrame(x_scaled)
train_X.columns = columns
train_X
train_X[train_X.duplicated()].shape
# NOTE(review): each test set below is scaled with a scaler fitted on that
# test set itself, not the scaler fitted on the training data, so train/test
# feature ranges are not directly comparable — confirm this is intended.
test_X = df13.drop(columns=['target','timestamp','src_ni','src_router','dst_ni','dst_router','inport','cache_coherence_type','flit_id','flit_type','vnet','current_hop','hop_percentage','port_index','cache_coherence_vnet_index','vnet_vc_cc_index'])
test_Y = df13['target']
x = test_X.values
min_max_scaler = preprocessing.MinMaxScaler()
columns = test_X.columns
x_scaled = min_max_scaler.fit_transform(x)
test_X = pd.DataFrame(x_scaled)
test_X.columns = columns
print(test_X[test_X.duplicated()].shape)
test_X
# Same preprocessing for the second held-out capture.
test_X1 = df14.drop(columns=['target','timestamp','src_ni','src_router','dst_ni','dst_router','inport','cache_coherence_type','flit_id','flit_type','vnet','current_hop','hop_percentage','port_index','cache_coherence_vnet_index','vnet_vc_cc_index'])
test_Y1 = df14['target']
x = test_X1.values
min_max_scaler = preprocessing.MinMaxScaler()
columns = test_X1.columns
x_scaled = min_max_scaler.fit_transform(x)
test_X1 = pd.DataFrame(x_scaled)
test_X1.columns = columns
print(test_X1[test_X1.duplicated()].shape)
# Same preprocessing for the third held-out capture.
test_X2 = df15.drop(columns=['target','timestamp','src_ni','src_router','dst_ni','dst_router','inport','cache_coherence_type','flit_id','flit_type','vnet','current_hop','hop_percentage','port_index','cache_coherence_vnet_index','vnet_vc_cc_index'])
test_Y2 = df15['target']
x = test_X2.values
min_max_scaler = preprocessing.MinMaxScaler()
columns = test_X2.columns
x_scaled = min_max_scaler.fit_transform(x)
test_X2 = pd.DataFrame(x_scaled)
test_X2.columns = columns
print(test_X2[test_X2.duplicated()].shape)
```
#### Machine Learning Models
Module Imports for Data Processing and Report Generation in Machine Learning Models
```
from sklearn.model_selection import train_test_split
import statsmodels.api as sm
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
```
Labels
1. 0 - malicious
2. 1 - good
```
# Class balance of the target (0 = malicious, 1 = good).
train_Y = df['target']
train_Y.value_counts()
```
Training and Validation Splitting of the Dataset
```
# Fix the RNG so the split is reproducible across runs.
seed = 5
np.random.seed(seed)
X_train, X_test, y_train, y_test = train_test_split(train_X, train_Y, test_size=0.33, random_state=seed, shuffle=True)
```
Feature Selection
```
#SelectKBest for feature selection
# Rank features by the chi-squared statistic (valid here because the inputs
# were min-max scaled and are therefore non-negative).
bf = SelectKBest(score_func=chi2, k='all')
fit = bf.fit(X_train,y_train)
dfscores = pd.DataFrame(fit.scores_)
# NOTE(review): 'columns' is whatever the previous cell left behind (test_X2's
# columns); it only matches X_train's columns because the drops are identical.
dfcolumns = pd.DataFrame(columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score']
print(featureScores.nlargest(10,'Score'))
featureScores.plot(kind='barh')
```
Decision Tree Classifier
```
# Decision tree classifier: train, persist, and evaluate on the validation
# split plus the three held-out captures.
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
dt = DecisionTreeClassifier(max_depth=20,max_features=10,random_state = 42) # fixed seed for reproducibility
dt.fit(X_train,y_train)
# Persist the trained model for reuse outside the notebook.
pickle.dump(dt, open("dt-r1.pickle.dat", 'wb'))
y_pred_dt= dt.predict(X_test)
dt_score_train = dt.score(X_train,y_train)
print("Train Prediction Score",dt_score_train*100)
dt_score_test = accuracy_score(y_test,y_pred_dt)
print("Test Prediction Score",dt_score_test*100)
# Accuracy on the three held-out malicious captures (df13/df14/df15).
y_pred_dt_test= dt.predict(test_X)
dt_score_test = accuracy_score(test_Y,y_pred_dt_test)
print("Test Prediction Score",dt_score_test*100)
y_pred_dt_test= dt.predict(test_X1)
dt_score_test = accuracy_score(test_Y1,y_pred_dt_test)
print("Test Prediction Score",dt_score_test*100)
y_pred_dt_test= dt.predict(test_X2)
dt_score_test = accuracy_score(test_Y2,y_pred_dt_test)
print("Test Prediction Score",dt_score_test*100)
# Which features the tree actually used.
feat_importances = pd.Series(dt.feature_importances_, index=columns)
feat_importances.plot(kind='barh')
# Confusion matrix on the validation split (0 = anomalous, 1 = normal).
cm = confusion_matrix(y_test, y_pred_dt)
class_label = ["Anomalous", "Normal"]
df_cm = pd.DataFrame(cm, index=class_label,columns=class_label)
sns.heatmap(df_cm, annot=True, fmt='d')
plt.title("Confusion Matrix")
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
plt.show()
print(classification_report(y_test,y_pred_dt))
# ROC curve built from the positive-class probabilities.
dt_roc_auc = roc_auc_score(y_test, y_pred_dt)
fpr, tpr, thresholds = roc_curve(y_test, dt.predict_proba(X_test)[:,1])
plt.figure()
plt.plot(fpr, tpr, label='DTree (area = %0.2f)' % dt_roc_auc)
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig('DT_ROC')
plt.show()
```
XGB Classifier
```
from xgboost import XGBClassifier
from xgboost import plot_importance
# Gradient-boosted trees with the same evaluation protocol as the decision tree above.
xgbc = XGBClassifier(max_depth=20,min_child_weight=1,n_estimators=500,random_state=42,learning_rate=0.2)
xgbc.fit(X_train,y_train)
# Persist the trained model for reuse outside the notebook.
pickle.dump(xgbc, open("xgbc-r10l-i1.pickle.dat", 'wb'))
y_pred_xgbc= xgbc.predict(X_test)
xgbc_score_train = xgbc.score(X_train,y_train)
print("Train Prediction Score",xgbc_score_train*100)
xgbc_score_test = accuracy_score(y_test,y_pred_xgbc)
print("Test Prediction Score",xgbc_score_test*100)
# Accuracy on the three held-out malicious captures (df13/df14/df15).
y_pred_xgbc_test= xgbc.predict(test_X)
xgbc_score_test = accuracy_score(test_Y,y_pred_xgbc_test)
print("Test Prediction Score",xgbc_score_test*100)
y_pred_xgbc_test= xgbc.predict(test_X1)
xgbc_score_test = accuracy_score(test_Y1,y_pred_xgbc_test)
print("Test Prediction Score",xgbc_score_test*100)
y_pred_xgbc_test= xgbc.predict(test_X2)
xgbc_score_test = accuracy_score(test_Y2,y_pred_xgbc_test)
print("Test Prediction Score",xgbc_score_test*100)
# XGBoost's built-in feature-importance plot.
plot_importance(xgbc)
plt.show()
# Confusion matrix on the validation split (0 = anomalous, 1 = normal).
cm = confusion_matrix(y_test, y_pred_xgbc)
class_label = ["Anomalous", "Normal"]
df_cm = pd.DataFrame(cm, index=class_label,columns=class_label)
sns.heatmap(df_cm, annot=True, fmt='d')
plt.title("Confusion Matrix")
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
plt.show()
print(classification_report(y_test,y_pred_xgbc))
# ROC curve built from the positive-class probabilities.
xgb_roc_auc = roc_auc_score(y_test, y_pred_xgbc)
fpr, tpr, thresholds = roc_curve(y_test, xgbc.predict_proba(X_test)[:,1])
plt.figure()
plt.plot(fpr, tpr, label='XGBoost (area = %0.2f)' % xgb_roc_auc)
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig('XGB_ROC')
plt.show()
```
| github_jupyter |
# Непараметрические критерии
Критерий | Одновыборочный | Двухвыборочный | Двухвыборочный (связанные выборки)
------------- | ------------- | ------------- | -------------
**Знаков** | $\times$ | | $\times$
**Ранговый** | $\times$ | $\times$ | $\times$
**Перестановочный** | $\times$ | $\times$ | $\times$
## Недвижимость в Сиэттле
Имеются данные о продажной стоимости недвижимости в Сиэтле для 50 сделок в 2001 году и 50 в 2002. Изменились ли в среднем цены?
```
import numpy as np
import pandas as pd
import itertools
from scipy import stats
from statsmodels.stats.descriptivestats import sign_test
from statsmodels.stats.weightstats import zconfint
from statsmodels.stats.weightstats import *
%pylab inline
```
### Загрузка данных
```
# Tab-separated file with Year and Price columns for the Seattle sales.
seattle_data = pd.read_csv('seattle.txt', sep = '\t', header = 0)
seattle_data.shape
seattle_data.head()
price2001 = seattle_data[seattle_data['Year'] == 2001].Price
price2002 = seattle_data[seattle_data['Year'] == 2002].Price
# Side-by-side histograms of the two annual price samples.
pylab.figure(figsize=(12,4))
pylab.subplot(1,2,1)
pylab.grid()
pylab.hist(price2001, color = 'r')
pylab.xlabel('2001')
pylab.subplot(1,2,2)
pylab.grid()
pylab.hist(price2002, color = 'b')
pylab.xlabel('2002')
pylab.show()
```
## Двухвыборочные критерии для независимых выборок
```
# z-based 95% confidence intervals for each year's mean price
# ('%%' renders as a literal '%' under %-formatting).
print('95%% confidence interval for the mean: [%f, %f]' % zconfint(price2001))
print('95%% confidence interval for the mean: [%f, %f]' % zconfint(price2002))
```
### Ранговый критерий Манна-Уитни
$H_0\colon F_{X_1}(x) = F_{X_2}(x)$
$H_1\colon F_{X_1}(x) = F_{X_2}(x + \Delta), \Delta\neq 0$
```
# Two-sided Mann-Whitney U test. Older SciPy versions default to a one-sided
# alternative here, so state it explicitly to match H1: F(x) = F(x + Delta), Delta != 0.
stats.mannwhitneyu(price2001, price2002, alternative='two-sided')
```
### Перестановочный критерий
$H_0\colon F_{X_1}(x) = F_{X_2}(x)$
$H_1\colon F_{X_1}(x) = F_{X_2}(x + \Delta), \Delta\neq 0$
```
def permutation_t_stat_ind(sample1, sample2):
    """Test statistic for the two-sample permutation test: difference of sample means."""
    mean1, mean2 = np.mean(sample1), np.mean(sample2)
    return mean1 - mean2
def get_random_combinations(n1, n2, max_combinations):
    """Sample up to ``max_combinations`` distinct orderings of the pooled
    indices 0..n1+n2-1 and split each into a (first n1, last n2) pair.

    The identity ordering is always included; duplicate orderings produced
    by the random shuffles collapse in the set, so fewer than
    ``max_combinations`` pairs may come back.
    """
    pool = list(range(n1 + n2))
    orderings = {tuple(pool)}
    for _ in range(max_combinations - 1):
        np.random.shuffle(pool)
        orderings.add(tuple(pool))
    return [(ordering[:n1], ordering[n1:]) for ordering in orderings]
def permutation_zero_dist_ind(sample1, sample2, max_combinations = None):
    """Null distribution of the mean-difference statistic under H0.

    Pools both samples and recomputes mean(group1) - mean(group2) over
    regroupings of the pooled data: a random subset of regroupings when
    ``max_combinations`` is given, otherwise every C(n, n1) exact split.
    """
    joined_sample = np.hstack((sample1, sample2))
    n1 = len(sample1)
    n = len(joined_sample)
    if max_combinations:
        indices = get_random_combinations(n1, len(sample2), max_combinations)
    else:
        # Bug fix: the original built lazy ``filter`` objects whose lambda
        # closed over the comprehension variable ``index``; by the time they
        # were consumed the variable held its final value, so every
        # complement group was computed against the LAST combination.
        # Materialize the complement eagerly instead.
        indices = [(list(index), [i for i in range(n) if i not in index])
                   for index in itertools.combinations(range(n), n1)]
    distr = [joined_sample[list(i[0])].mean() - joined_sample[list(i[1])].mean() \
             for i in indices]
    return distr
# Visualize the (sampled) permutation null distribution of the mean difference.
pylab.hist(permutation_zero_dist_ind(price2001, price2002, max_combinations = 1000))
pylab.show()
def permutation_test(sample, mean, max_permutations = None, alternative = 'two-sided'):
    """Permutation p-value for the difference of means of two independent samples.

    ``sample`` and ``mean`` are the two samples being compared (the second
    parameter keeps its historical name for backward compatibility).
    ``max_permutations`` caps the number of sampled regroupings; ``None``
    enumerates every split exactly.
    """
    if alternative not in ('two-sided', 'less', 'greater'):
        raise ValueError("alternative not recognized\n"
                         "should be 'two-sided', 'less' or 'greater'")
    observed = permutation_t_stat_ind(sample, mean)
    null_distr = permutation_zero_dist_ind(sample, mean, max_permutations)
    # p-value = fraction of null statistics at least as extreme as observed.
    if alternative == 'two-sided':
        extreme = [x for x in null_distr if abs(x) >= abs(observed)]
    elif alternative == 'less':
        extreme = [x for x in null_distr if x <= observed]
    else:
        extreme = [x for x in null_distr if x >= observed]
    return len(extreme) / len(null_distr)
print("p-value: %f" % permutation_test(price2001, price2002, max_permutations = 10000))
print("p-value: %f" % permutation_test(price2001, price2002, max_permutations = 50000))
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.