text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
```
from keras.callbacks import EarlyStopping, TensorBoard
from keras.layers import Input, Concatenate, Conv1D
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.models import Model
from sklearn.model_selection import StratifiedKFold, train_test_split
from tqdm import tqdm
import numpy as np
import pandas as pd
import glob, os, random
import itertools
```
# Settings
```
# Files Setting
limit = 8000 # Maximum amount of Star Per Class Per Survey
extraRandom = True  # oversample under-represented classes with random repeats
permutation = True # Permute Files
BALANCE_DB = True # Balance or not
# Mini Settings
MAX_NUMBER_OF_POINTS = 500  # upper bound of the light-curve length sweep
NUMBER_OF_POINTS = 500  # starting light-curve length (points per curve)
n_splits = 10  # stratified K-fold splits
validation_set = 0.15  # fraction of training data held out for validation
# Iterations
step = 250  # increment of NUMBER_OF_POINTS per sweep iteration
stepForDebug = 300  # subsampling stride over file lists (debug speed-up)
# Network Settings
verbose = True
batch_size = 256
dropout = 0.5
hidden_dims = 128
epochs = 10 # 850
# Convolutions
filters = 128  # filters of the first shared Conv1D
filters2 = 64  # filters of the second shared Conv1D
kernel_size = 50
kernel_size2 = 50
# Paths
NombreCarpeta = ''  # suffix appended to the results folder name
# NOTE(review): base_path contains a glob wildcard ('2*'); it is only ever
# used through glob.iglob below, where the wildcard is resolved.
base_path = '/Users/Carlos/Desktop/Magister/2*/'
regular_exp = base_path + 'Data/Corot/**/*.csv'
regular_exp2 = base_path + 'Data/**/OGLE-*.dat'
regular_exp3 = base_path + 'Data/VVV/**/*.csv'
```
# Open Databases
```
## Methods
# subclasses = ['cepDiez', 'cepEfe', 'RRab', 'RRc', 'nonEC', 'EC', 'Mira', 'SRV', 'Osarg']
# Variability classes recognized across the three surveys; matched as
# substrings of file paths throughout this notebook.
subclasses = ['lpv','cep','rrlyr','ecl']
def get_filename(directory, N, early, activation='relu'):
    """Build (and create if needed) the results directory for one run.

    Parameters
    ----------
    directory : str
        Base results folder.
    N : int
        Number of points per light curve (encoded in the run name).
    early : bool
        Kept for interface compatibility; not used in the path.
    activation : str
        Dense-layer activation; selects the sub-folder ('relu'/'sigmoid'/'tanh').

    Returns
    -------
    (str, str)
        The directory path (with a trailing '/') and the run name.
    """
    if activation == 'relu':
        sub = 'relu'
    elif activation == 'sigmoid':
        sub = 'sigmoid'
    else:
        sub = 'tanh'
    # FIX: the old code appended '/relu/' and later another '/', producing a
    # double slash in the returned path.
    directory = directory + '/' + sub + '/'
    if not os.path.exists(directory):
        print('[+] Creando Directorio \n\t ->', directory)
        # FIX: makedirs creates missing parent directories too; os.mkdir
        # raised FileNotFoundError when the base folder did not yet exist.
        os.makedirs(directory, exist_ok=True)
    name = '1) Red ' + str(N)
    return directory, name
def get_files(extraRandom = False, permutation=False):
    """Collect light-curve file paths from the three surveys, keeping at most
    `limit` files per class per survey.

    Returns a list of [path, 0] pairs; the second slot is later reused as a
    subsampling-jitter parameter when files are replicated.

    NOTE(review): the `extraRandom` parameter is accepted but never read here;
    oversampling happens in `replicate`, which uses the module-level flag.
    """
    files1 = np.array(list(glob.iglob(regular_exp, recursive=True)))   # Corot
    files2 = np.array(list(glob.iglob(regular_exp2, recursive=True)))  # OGLE
    files3 = np.array(list(glob.iglob(regular_exp3, recursive=True)))  # VVV
    print('[!] Files in Memory')
    # Permutations
    if permutation:
        # Shuffle each survey independently so the per-class caps below keep
        # a random sample instead of glob order.
        files1 = files1[np.random.permutation(len(files1))]
        files2 = files2[np.random.permutation(len(files2))]
        files3 = files3[np.random.permutation(len(files3))]
        print('[!] Permutation applied')
    # NOTE(review): aux_dic is populated but never read in this function.
    aux_dic = {}
    corot = {}  # per-class counters, one dict per survey
    vvv = {}
    ogle = {}
    for subclass in subclasses:
        aux_dic[subclass] = []
        corot[subclass] = 0
        vvv[subclass] = 0
        ogle[subclass] = 0
    new_files = []
    # Iterate over OGLE indices (files2 is presumably the largest list —
    # TODO confirm); the same index steps through Corot/VVV while in range.
    for idx in tqdm(range(len(files2))):
        foundCorot = False
        foundVista = False
        foundOgle = False
        for subclass in subclasses:
            # Corot
            if not foundCorot and corot[subclass] < limit and idx < len(files1) and subclass in files1[idx]:
                new_files += [[files1[idx], 0]]
                corot[subclass] += 1
                foundCorot = True
            # Ogle
            if not foundOgle and ogle[subclass] < limit and subclass in files2[idx]:
                new_files += [[files2[idx], 0]]
                ogle[subclass] += 1
                foundOgle = True
            # VVV
            if not foundVista and vvv[subclass] < limit and idx < len(files3) and subclass in files3[idx]:
                new_files += [[files3[idx], 0]]
                vvv[subclass] += 1
                foundVista = True
    del files1, files2, files3
    print('[!] Loaded Files')
    return new_files
def replicate_by_survey(files, yTrain):
    """Balance the training files separately for each survey.

    Parameters
    ----------
    files : np.ndarray
        Array of [path, num] pairs.
    yTrain : np.ndarray
        Labels of the form '<SURVEY>_<class>' (see get_name_with_survey),
        aligned with `files`; the survey prefix is what the mask matches.

    Returns
    -------
    list
        Concatenated per-survey balanced lists of [path, num] pairs.
    """
    surveys = ["OGLE", "VVV", "Corot"]
    new_files = []
    for s in surveys:
        # Boolean mask selecting this survey's rows.
        mask = [s in i for i in yTrain]
        # FIX: dropped the unused local `auxYTrain = yTrain[mask]`.
        new_files += replicate(files[mask])
    return new_files
def replicate(files):
    """Oversample each class up to `limit` entries.

    Every original file is kept; when a class has fewer than `limit` files
    and the module flag `extraRandom` is set, the files are repeated with an
    increasing `num` tag (used downstream as subsampling jitter) and topped
    up with random picks so each class reaches exactly `limit` entries.
    """
    aux_dic = {}
    for subclass in subclasses:
        aux_dic[subclass] = []
    # Bucket the files by class; the first matching substring wins.
    for file, num in files:
        for subclass in subclasses:
            if subclass in file:
                aux_dic[subclass].append([file, num])
                break
    new_files = []
    for subclass in subclasses:
        array = aux_dic[subclass]
        length = len(array)
        if length == 0:
            continue
        new_files += array
        if length < limit and extraRandom:
            count = 1
            # Whole extra passes over the class...
            q = limit // length
            for i in range(1, q):
                for file, num in array:
                    new_files += [[file, count]]
                    count += 1
            # ...plus the remainder, drawn at random.
            r = limit - q * length
            # FIX: the old `r > 1` test skipped the top-up whenever exactly
            # one replica was still missing, leaving the class one short.
            if r > 0:
                new_files += [[random.choice(array)[0], count] for i in range(r)]
    return new_files
def get_survey(path):
    """Identify which survey a file path belongs to; 'err' when unknown."""
    for survey in ('Corot', 'VVV', 'OGLE'):
        if survey in path:
            return survey
    return 'err'
def get_name(path):
    """Return the first variability class whose name occurs in `path`,
    or 'err' when none matches."""
    matches = (c for c in subclasses if c in path)
    return next(matches, 'err')
def get_name_with_survey(path):
    """Return a '<SURVEY>_<class>' label for `path`, or 'err' when no
    variability class matches."""
    for label in subclasses:
        if label not in path:
            continue
        return get_survey(path) + '_' + label
    return 'err'
def open_vista(path, num):
    """Load one VVV light curve from CSV.

    Rows with non-positive `mjd` are dropped, the curve is sorted by time,
    rows with NaN time/magnitude are removed, and the series is randomly
    thinned (stride 1-2) and left-trimmed (offset 0..num) as augmentation.

    Returns (time, magnitude, error) as float arrays.
    """
    frame = pd.read_csv(path, comment='#', sep=',')
    frame = frame[frame.mjd > 0]
    time_col = frame.columns[1]
    mag_col = frame.columns[2]
    err_col = frame.columns[3]
    frame = frame.sort_values(by=[time_col])
    # 3 Desviaciones Standard
    #df = df[np.abs(df.mjd-df.mjd.mean())<=(3*df.mjd.std())]
    time = np.array(frame[time_col].values, dtype=float)
    magnitude = np.array(frame[mag_col].values, dtype=float)
    error = np.array(frame[err_col].values, dtype=float)
    # Keep only rows where both time and magnitude are real numbers.
    valid = ~(np.isnan(time) | np.isnan(magnitude))
    time, magnitude, error = time[valid], magnitude[valid], error[valid]
    # Augmentation: random stride in {1, 2}, then random left offset in [0, num].
    stride = random.randint(1, 2)
    offset = random.randint(0, num)
    time = time[::stride][offset:]
    magnitude = magnitude[::stride][offset:]
    error = error[::stride][offset:]
    return time.astype('float'), magnitude.astype('float'), error.astype('float')
def open_corot(path, num, n, columns):
    """Load one CoRoT light curve from CSV.

    `columns` holds the (time, magnitude, error) column indices, since the
    EN2_STAR_CHR products use a different layout. Rows with non-positive
    DATEBARTT are dropped, the curve is time-sorted, NaNs are removed, then
    the series is randomly thinned (stride 1-2), left-trimmed (offset
    0..num) and truncated to at most `n` points.

    Returns (time, magnitude, error) as float arrays.
    """
    frame = pd.read_csv(path, comment='#', sep=',')
    frame = frame[frame.DATEBARTT > 0]
    t_col = frame.columns[columns[0]]
    m_col = frame.columns[columns[1]]
    e_col = frame.columns[columns[2]]
    frame = frame.sort_values(by=[t_col])
    # 3 Desviaciones Standard
    #df = df[np.abs(df.mjd-df.mjd.mean())<=(3*df.mjd.std())]
    time = np.array(frame[t_col].values, dtype=float)
    magnitude = np.array(frame[m_col].values, dtype=float)
    error = np.array(frame[e_col].values, dtype=float)
    # Keep only rows where both time and magnitude are real numbers.
    valid = ~(np.isnan(time) | np.isnan(magnitude))
    time, magnitude, error = time[valid], magnitude[valid], error[valid]
    # Augmentation: random stride in {1, 2}, then random left offset in [0, num].
    stride = random.randint(1, 2)
    offset = random.randint(0, num)
    # Slice order matches the original: thin, trim, then cap at n points.
    time = time[::stride][offset:][:n]
    magnitude = magnitude[::stride][offset:][:n]
    error = error[::stride][offset:][:n]
    return time, magnitude, error
def open_ogle(path, num, n, columns):
    """Load one OGLE light curve from a whitespace-separated .dat file.

    `columns` holds the (time, magnitude, error) column indices. Rows with
    non-positive time are dropped, the curve is time-sorted and
    de-duplicated on time, NaNs are removed, then the series is randomly
    thinned (stride 1-2), left-trimmed (offset 0..num) and truncated to at
    most `n` points.

    Returns (time, magnitude, error) as float arrays.
    """
    df = pd.read_csv(path, comment='#', sep=r'\s+', header=None)
    df.columns = ['a', 'b', 'c']
    df = df[df.a > 0]
    df = df.sort_values(by=[df.columns[columns[0]]])
    # Erase duplicate epochs.
    # FIX: drop_duplicates returns a NEW frame — the old code discarded the
    # result, so duplicates were never actually removed.
    df = df.drop_duplicates(subset='a', keep='first')
    # 3 Desviaciones Standard
    #df = df[np.abs(df.mjd-df.mjd.mean())<=(3*df.mjd.std())]
    time = np.array(df[df.columns[columns[0]]].values, dtype=float)
    magnitude = np.array(df[df.columns[columns[1]]].values, dtype=float)
    error = np.array(df[df.columns[columns[2]]].values, dtype=float)
    # Keep only rows where both time and magnitude are real numbers.
    not_nan = np.where(~np.logical_or(np.isnan(time), np.isnan(magnitude)))[0]
    time = time[not_nan]
    magnitude = magnitude[not_nan]
    error = error[not_nan]
    # Augmentation: random stride in {1, 2}, then random left offset in [0, num].
    step = random.randint(1, 2)
    count = random.randint(0, num)
    time = time[::step][count:]
    magnitude = magnitude[::step][count:]
    error = error[::step][count:]
    if len(time) > n:
        time = time[:n]
        magnitude = magnitude[:n]
        error = error[:n]
    return time, magnitude, error
# Data has the form (Points,(Delta Time, Mag, Error)) 1D
def create_matrix(data, N):
    """Turn a 1-D series into an (N, 1) column of successive differences.

    The first entry is 0, followed by the np.diff deltas; the result is
    zero-padded or truncated to exactly N entries.
    """
    deltas = np.concatenate(([0], np.diff(data).ravel()))
    if len(deltas) < N:
        deltas = np.concatenate((deltas, np.zeros(N - len(deltas))))
    return deltas[:N].astype('float').reshape(-1, 1)
def dataset(files, N):
    """Build the two network inputs from a list of [path, num] pairs.

    For every readable, classified file the (time, magnitude) series are
    converted to (N, 1) delta matrices via `create_matrix`; unreadable or
    unclassified files are reported and skipped.

    Returns (input_1, input_2, labels, surveys) as numpy arrays, where
    input_1 holds time deltas and input_2 magnitude deltas.
    """
    input_1 = []
    input_2 = []
    yClassTrain = []
    survey = []
    for file, num in tqdm(files):
        num = int(num)
        t, m, e = None, None, None
        c, s = get_name(file), get_survey(file)
        if c in subclasses:
            if 'Corot' in file:
                # EN2_STAR_CHR products use a different column layout.
                if 'EN2_STAR_CHR' in file:
                    t, m, e = open_corot(file, num, N, [0, 4, 8])
                else:
                    t, m, e = open_corot(file, num, N, [0, 1, 2])
            elif 'VVV' in file:
                t, m, e = open_vista(file, num)
            elif 'OGLE' in file:
                t, m, e = open_ogle(file, num, N, [0, 1, 2])
            # FIX: `t != None` broadcasts element-wise on a numpy array and
            # makes the `if` raise "truth value of an array is ambiguous";
            # identity comparison is the correct None check.
            if t is not None and c in subclasses:
                input_1.append(create_matrix(t, N))
                input_2.append(create_matrix(m, N))
                yClassTrain.append(c)
                survey.append(s)
            else:
                print('\t [!] E2 No paso el archivo: ', file, '\n\t\t - Clase: ', c)
        else:
            print('\t [!] E1 No paso el archivo: ', file, '\n\t\t - Clase: ', c)
    return np.array(input_1), np.array(input_2), np.array(yClassTrain), np.array(survey)
```
# Keras Model
```
def get_model(N, classes, activation='relu'):
    """Build and compile the two-tower Conv1D classifier.

    The time and magnitude towers share the same two convolution layers;
    their outputs are concatenated, flattened and fed to a dense head with
    dropout. Compiled with categorical cross-entropy and Adam.
    """
    shared_conv_a = Conv1D(filters, kernel_size, activation='relu')
    shared_conv_b = Conv1D(filters2, kernel_size2, activation='relu')
    # Time tower.
    time_input = Input((N, 1))
    time_path = shared_conv_b(shared_conv_a(time_input))
    # Magnitude tower.
    mag_input = Input((N, 1))
    mag_path = shared_conv_b(shared_conv_a(mag_input))
    # Merge and classify.
    merged = Concatenate()([time_path, mag_path])
    merged = Flatten()(merged)
    merged = Dropout(dropout)(merged)
    merged = Dense(hidden_dims, activation=activation)(merged)
    merged = Dropout(dropout)(merged)
    merged = Dense(len(classes), activation='softmax')(merged)
    model = Model([time_input, mag_input], merged)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam', metrics=['accuracy'])
    return model
def class_to_vector(Y, classes):
    """One-hot encode labels `Y` against the ordered list `classes`.

    Labels absent from `classes` become all-zero rows (no error is raised).
    Returns an array of shape (len(Y), len(classes)).
    """
    # Comprehension replaces the manual nested append loop.
    return np.array([[1 if val == y else 0 for val in classes] for y in Y])
def serialize_model(name, model):
    """Persist a Keras model: architecture to '<name>.json', weights to '<name>.h5'."""
    # Architecture as JSON.
    with open(name + '.json', "w") as json_file:
        json_file.write(model.to_json())
    # Weights in HDF5 format.
    model.save_weights(name + ".h5")
def experiment(directory, files, Y, classes, N, n_splits):
    """Run the K-fold training experiment for one light-curve length N.

    For every (early-stopping, activation) combination the files are split
    with StratifiedKFold, the training fold is balanced per survey, the
    two-tower model is trained, each fold's model is serialized, and the
    arrays (yReal, yPred, sReal) are saved to `filename_exp`.

    NOTE(review): the `break` at the end of the fold loop means only the
    FIRST fold actually runs — presumably a debugging leftover; confirm
    before relying on the cross-validated results.
    """
    # Iterating
    activations = ['tanh']
    earlyStopping = [False]
    for early in earlyStopping:
        for activation in activations:
            # try:
            print('\t\t [+] Entrenando',
                  '\n\t\t\t [!] Early Stopping', early,
                  '\n\t\t\t [!] Activation', activation)
            direc, name = get_filename(directory, N,
                                       early, activation)
            filename_exp = direc + name
            # Accumulators across folds.
            yPred = np.array([])
            yReal = np.array([])
            sReal = np.array([])
            modelNum = 0
            skf = StratifiedKFold(n_splits=n_splits)
            for train_index, test_index in skf.split(files, Y):
                dTrain, dTest = files[train_index], files[test_index]
                yTrain = Y[train_index]
                ##############
                ### Get DB ###
                ##############
                # Replicate Files
                # Balance the training fold per survey (dTrain becomes a list).
                dTrain = replicate_by_survey(dTrain, yTrain)
                # Get Database
                # `[::stepForDebug]` subsamples files for faster debug runs;
                # yTrain is rebound here to the labels of the loaded subset.
                dTrain_1, dTrain_2, yTrain, _ = dataset(dTrain[::stepForDebug], N)
                dTest_1, dTest_2, yTest, sTest = dataset(dTest[::stepForDebug], N)
                yReal = np.append(yReal, yTest)
                sReal = np.append(sReal, sTest)
                # One-hot encode for categorical cross-entropy.
                yTrain = class_to_vector(yTrain, classes)
                yTest = class_to_vector(yTest, classes)
                ################
                ## Tensorboard #
                ################
                tensorboard = TensorBoard(log_dir= direc + 'logs',
                                          write_graph=True, write_images=False)
                # tensorboard = TensorBoard(log_dir= direc + 'logs', batch_size=64, histogram_freq=5,
                #                           write_graph=True, write_images=False, write_grads=True)
                ################
                ## Model ##
                ################
                model = get_model(N, classes, activation)
                if early:
                    # NOTE(review): this rebinds `earlyStopping`, the same name
                    # as the list driven by the outer for-loop.
                    earlyStopping = EarlyStopping(monitor='val_loss', patience=3,
                                                  verbose=0, mode='auto')
                    model.fit([dTrain_1, dTrain_2], yTrain,
                              batch_size=batch_size, epochs=epochs,
                              validation_split=validation_set, verbose=1,
                              callbacks=[earlyStopping, tensorboard])
                else:
                    model.fit([dTrain_1, dTrain_2], yTrain,
                              batch_size=batch_size, epochs=epochs,
                              validation_split=validation_set, verbose=1,
                              callbacks=[tensorboard])
                # Predicted class index for every test sample of this fold.
                yPred = np.append(yPred, np.argmax(model.predict([dTest_1, dTest_2]), axis=1))
                #################
                ## Serialize ##
                #################
                modelDirectory = direc + 'model/'
                if not os.path.exists(modelDirectory):
                    print('[+] Creando Directorio \n\t ->', modelDirectory)
                    os.mkdir(modelDirectory)
                serialize_model(modelDirectory + str(modelNum), model)
                modelNum += 1
                del dTrain, dTest, yTrain, yTest, model
                break
            # Map predicted indices back to class names.
            yPred = np.array([classes[int(i)] for i in yPred])
            # Save Matrix
            print('\n \t\t\t [+] Saving Results in', filename_exp)
            np.save(filename_exp, [yReal, yPred, sReal])
            print('*'*30)
            # except Exception as e:
            #     print('\t\t\t [!] Fatal Error:\n\t\t', str(e))
print('[+] Obteniendo Filenames')
# Gather all survey files (optionally shuffled) as [path, num] pairs.
files = np.array(get_files(extraRandom, permutation))
# Labels of the form '<SURVEY>_<class>' used for stratification.
YSubClass = []
for file, num in files:
    YSubClass.append(get_name_with_survey(file))
YSubClass = np.array(YSubClass)
NUMBER_OF_POINTS = 500
# Sweep over light-curve lengths; with the current settings
# (NUMBER_OF_POINTS == MAX_NUMBER_OF_POINTS) only one iteration runs.
while NUMBER_OF_POINTS <= MAX_NUMBER_OF_POINTS:
    # Create Folder
    directory = './Resultados' + NombreCarpeta
    if not os.path.exists(directory):
        print('[+] Creando Directorio \n\t ->', directory)
        os.mkdir(directory)
    experiment(directory, files, YSubClass, subclasses, NUMBER_OF_POINTS, n_splits)
    NUMBER_OF_POINTS += step
# NOTE(review): `N` is not defined at this scope — the two lines below look
# like leftover notebook scratch and raise NameError if executed as-is.
t = np.arange(0, N, 0.01).reshape(-1, N, 1)
np.shape(t)
```
# Prueba
```
from keras.layers import Input, Concatenate, Conv1D
from keras.layers import Dense, Dropout, Flatten
from keras.models import Model
from keras import backend as K
import numpy as np
import tensorflow as tf
# TF1-style session setup; log_device_placement prints where ops run.
# NOTE(review): tf.Session/tf.ConfigProto exist only in TensorFlow 1.x
# (tf.compat.v1 in 2.x) — confirm the target TF version.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
K.set_session(sess)
# Toy hyper-parameters for this smoke test.
N = 500
filters = 64
filters2 = 32
kernel_size = 42
kernel_size2 = 42
dropout = 0.5
validation_set = 0.2
epochs = 1
batch_size = 10
hidden_dims = 128
stride = 2
# Synthetic ramps reshaped into (batch, N, 1) series.
t = np.arange(0, N, 0.01).reshape(-1, N, 1)
m = np.arange(0, N, 0.01).reshape(-1, N, 1)
aux = int(N/5)
# Shorter (batch, 50, 1) series used for the predict() calls below.
t1 = np.arange(0, aux, 0.1).reshape(-1, 50, 1)
m1 = np.arange(0, aux, 0.1).reshape(-1, 50, 1)
aux = int((len(t)/2))
# Alternating two-class one-hot labels, one row per sample.
y = [1, 0] * aux
y += [0, 1] * (len(t) - aux)
y = np.array(y).reshape(-1, 2)
# Convolution layers shared by both towers.
conv1 = Conv1D(filters, kernel_size, strides=stride, activation='relu')
conv2 = Conv1D(filters2, kernel_size2, strides=stride, activation='relu')
# For Time Tower
input1 = Input((N, 1))
out1 = conv1(input1)
out1 = conv2(out1)
# For Magnitude Tower
input2 = Input((N, 1))
out2 = conv1(input2)
out2 = conv2(out2)
out = Concatenate()([out1, out2])
out = Flatten()(out)
out = Dropout(dropout)(out)
out = Dense(hidden_dims, activation='tanh')(out)
out = Dropout(dropout)(out)
out = Dense(2, activation='softmax')(out)
model = Model([input1, input2], out)
model.compile(loss='categorical_crossentropy',
              optimizer='adam', metrics=['accuracy'])
# model.fit([t, m], y,
#           batch_size=batch_size, epochs=epochs,
#           validation_split=validation_set, verbose=1)
model.summary()
yPred = []
# NOTE(review): the model was built for length-N inputs; predicting on the
# length-50 t1/m1 arrays presumably fails a shape check — verify.
model.predict([t1, m1])
yPred = np.append(yPred, np.argmax(model.predict([t1, m1]), axis=1))
np.save('./Prueba..npy', yPred)
# NOTE(review): Conv1D requires `filters` and `kernel_size`; this bare call
# raises TypeError and looks like leftover scratch.
Conv1D(strides=1, padding='valid')
```
| github_jupyter |
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.

# Automated Machine Learning
_**Regression with Deployment using Hardware Performance Dataset**_
## Contents
1. [Introduction](#Introduction)
1. [Setup](#Setup)
1. [Data](#Data)
1. [Train](#Train)
1. [Results](#Results)
1. [Test](#Test)
1. [Acknowledgements](#Acknowledgements)
## Introduction
In this example we use the Hardware Performance Dataset to showcase how you can use AutoML for a simple regression problem. The Regression goal is to predict the performance of certain combinations of hardware parts.
If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace.
In this notebook you will learn how to:
1. Create an `Experiment` in an existing `Workspace`.
2. Configure AutoML using `AutoMLConfig`.
3. Train the model using local compute.
4. Explore the results.
5. Test the best fitted model.
## Setup
As part of the setup you have already created an Azure ML Workspace object. For AutoML you will need to create an Experiment object, which is a named object in a Workspace used to run experiments.
```
import logging
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import os
from sklearn.model_selection import train_test_split
import azureml.dataprep as dprep
import azureml.core
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.train.automl import AutoMLConfig

# Connect to the AzureML workspace described by the local config.json.
ws = Workspace.from_config()
# Choose a name for the experiment and specify the project folder.
experiment_name = 'automl-regression-hardware'
project_folder = './sample_projects/automl-remote-regression'
experiment = Experiment(ws, experiment_name)
# Summarize workspace/experiment settings in a one-row table.
output = {}
output['SDK version'] = azureml.core.VERSION
output['Subscription ID'] = ws.subscription_id
output['Workspace Name'] = ws.name
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Project Directory'] = project_folder
output['Experiment Name'] = experiment.name
# NOTE(review): -1 for max_colwidth is deprecated in newer pandas (use None).
pd.set_option('display.max_colwidth', -1)
outputDf = pd.DataFrame(data = output, index = [''])
outputDf.T
```
## Create or Attach existing AmlCompute
You will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.
#### Creation of AmlCompute takes approximately 5 minutes.
If the AmlCompute with that name is already in your workspace this code will skip the creation process.
As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this article on the default limits and how to request more quota.
```
from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget

# Choose a name for your cluster.
amlcompute_cluster_name = "automlcl"

found = False
# Check if this compute target already exists in the workspace.
cts = ws.compute_targets
if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == 'AmlCompute':
    found = True
    print('Found existing compute target.')
    compute_target = cts[amlcompute_cluster_name]

if not found:
    print('Creating a new compute target...')
    provisioning_config = AmlCompute.provisioning_configuration(vm_size = "STANDARD_D2_V2", # for GPU, use "STANDARD_NC6"
                                                                #vm_priority = 'lowpriority', # optional
                                                                max_nodes = 6)
    # Create the cluster.
    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, provisioning_config)
    # Can poll for a minimum number of nodes and for a specific timeout.
    # If no min_node_count is provided, it will use the scale settings for the cluster.
    compute_target.wait_for_completion(show_output = True, min_node_count = None, timeout_in_minutes = 20)
# For a more detailed view of current AmlCompute status, use get_status().
```
# Data
Here we load the data so it can be used on Azure compute. To do this, we first load all the necessary libraries and dependencies, set up the paths for the data, and create the conda_run_config.
```
# Local folders for the data and the generated project files.
if not os.path.isdir('data'):
    os.mkdir('data')
if not os.path.exists(project_folder):
    os.makedirs(project_folder)

from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies

# create a new RunConfig object
conda_run_config = RunConfiguration(framework="python")
# Set compute target to AmlCompute
conda_run_config.target = compute_target
# Run inside the default CPU Docker image with the AutoML SDK installed.
conda_run_config.environment.docker.enabled = True
conda_run_config.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE
cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'], conda_packages=['numpy'])
conda_run_config.environment.python.conda_dependencies = cd
```
### Load Data
Here we prepare the data to be used on Azure compute: the hardware dataset is loaded into the X (features) and y (target) variables. Next, the data is split 80/20 into training and test sets, and X_train and y_train are used for training the model.
```
# Public hardware-performance dataset; ERP is the regression target.
data = "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/machineData.csv"
dflow = dprep.auto_read_file(data)
dflow.get_profile()
X = dflow.drop_columns(columns=['ERP'])
y = dflow.keep_columns(columns=['ERP'], validate_column_exists=True)
# 80/20 split; presumably the identical seed keeps the X and y row splits
# aligned since both derive from the same dataflow — verify with dprep docs.
X_train, X_test = X.random_split(percentage=0.8, seed=223)
y_train, y_test = y.random_split(percentage=0.8, seed=223)
dflow.head()
```
## Train
Instantiate an `AutoMLConfig` object to specify the settings and data used to run the experiment.
|Property|Description|
|-|-|
|**task**|classification or regression|
|**primary_metric**|This is the metric that you want to optimize. Regression supports the following primary metrics: <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>|
|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|
|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|
|**n_cross_validations**|Number of cross validation splits.|
|**X**|(sparse) array-like, shape = [n_samples, n_features]|
|**y**|(sparse) array-like, shape = [n_samples, ], targets values.|
|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|
**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)
##### If you would like to see even better results, increase `iteration_timeout_minutes` to 10+ minutes and increase `iterations` to a minimum of 30
```
# AutoML sweep settings; see the markdown table above for each key's meaning.
automl_settings = {
    "iteration_timeout_minutes": 5,
    "iterations": 10,
    "n_cross_validations": 5,
    "primary_metric": 'spearman_correlation',
    "preprocess": True,
    "max_concurrent_iterations": 5,
    "verbosity": logging.INFO,
}

automl_config = AutoMLConfig(task = 'regression',
                             debug_log = 'automl_errors_20190417.log',
                             path = project_folder,
                             run_configuration=conda_run_config,
                             X = X_train,
                             y = y_train,
                             **automl_settings
                             )
# Submit asynchronously; progress is monitored with the widget below.
remote_run = experiment.submit(automl_config, show_output = False)
remote_run
```
## Results
#### Widget for Monitoring Runs
The widget will first report a "loading" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.
**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details.
```
from azureml.widgets import RunDetails
# Live-updating widget for the submitted run.
RunDetails(remote_run).show()
# Wait until the run finishes.
remote_run.wait_for_completion(show_output = True)
```
## Retrieve All Child Runs
You can also use SDK methods to fetch all the child runs and see individual metrics that we log.
```
# Collect per-iteration metrics from all child runs into a DataFrame
# (rows = metric names, columns = iteration numbers).
children = list(remote_run.get_children())
metricslist = {}
for run in children:
    properties = run.get_properties()
    # Keep only scalar float metrics; lists/tables are not comparable here.
    metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}
    metricslist[int(properties['iteration'])] = metrics

# FIX: the positional axis argument to sort_index was deprecated and then
# removed in pandas 2.0; pass it by keyword.
rundata = pd.DataFrame(metricslist).sort_index(axis=1)
rundata
```
## Retrieve the Best Model
Below we select the best pipeline from our iterations. The get_output method returns the best run and the fitted model. The Model includes the pipeline and any pre-processing. Overloads on get_output allow you to retrieve the best run and fitted model for any logged metric or for a particular iteration.
```
# Best run (by the primary metric) and its fitted pipeline.
best_run, fitted_model = remote_run.get_output()
print(best_run)
print(fitted_model)
```
#### Best Model Based on Any Other Metric
Show the run and the model that has the smallest `root_mean_squared_error` value (which turned out to be the same as the one with largest `spearman_correlation` value):
```
# Best model according to a different logged metric.
lookup_metric = "root_mean_squared_error"
best_run, fitted_model = remote_run.get_output(metric = lookup_metric)
print(best_run)
print(fitted_model)

# Model from one specific iteration.
iteration = 3
third_run, third_model = remote_run.get_output(iteration = iteration)
print(third_run)
print(third_model)
```
## Register the Fitted Model for Deployment
If neither metric nor iteration are specified in the register_model call, the iteration with the best primary metric is registered.
```
# Register the best model in the workspace so it can be deployed below.
description = 'AutoML Model'
tags = None
model = remote_run.register_model(description = description, tags = tags)
print(remote_run.model_id) # This will be written to the script file later in the notebook.
```
### Create Scoring Script
The scoring script is required to generate the image for deployment. It contains the code to do the predictions on input data.
```
%%writefile score.py
import pickle
import json
import numpy
import azureml.train.automl
# NOTE(review): sklearn.externals.joblib was removed in scikit-learn 0.23;
# newer environments need `import joblib` instead.
from sklearn.externals import joblib
from azureml.core.model import Model


def init():
    """Load the registered model once when the service container starts."""
    global model
    model_path = Model.get_model_path(model_name = '<<modelid>>') # this name is model.id of model that we want to deploy
    # deserialize the model file back into a sklearn model
    model = joblib.load(model_path)


def run(rawdata):
    """Score a JSON payload of the form {"data": [[...], ...]}.

    Returns {"result": [...]} on success or {"error": "<message>"} on failure.
    """
    try:
        data = json.loads(rawdata)['data']
        data = numpy.array(data)
        result = model.predict(data)
    except Exception as e:
        result = str(e)
        return json.dumps({"error": result})
    return json.dumps({"result":result.tolist()})
```
### Create a YAML File for the Environment
To ensure the fit results are consistent with the training results, the SDK dependency versions need to be the same as the environment that trains the model. Details about retrieving the versions can be found in notebook [12.auto-ml-retrieve-the-training-sdk-versions](12.auto-ml-retrieve-the-training-sdk-versions.ipynb).
```
# SDK versions used by the training run; the scoring environment must match.
dependencies = remote_run.get_run_sdk_dependencies(iteration = 1)
for p in ['azureml-train-automl', 'azureml-sdk', 'azureml-core']:
    print('{}\t{}'.format(p, dependencies[p]))

myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn'], pip_packages=['azureml-sdk[automl]'])
conda_env_file_name = 'myenv.yml'
myenv.save_to_file('.', conda_env_file_name)
# Substitute the actual version number in the environment file.
# This is not strictly needed in this notebook because the model should have been generated using the current SDK version.
# However, we include this in case this code is used on an experiment from a previous SDK version.
with open(conda_env_file_name, 'r') as cefr:
    content = cefr.read()
with open(conda_env_file_name, 'w') as cefw:
    cefw.write(content.replace(azureml.core.VERSION, dependencies['azureml-sdk']))
# Substitute the actual model id in the script file.
script_file_name = 'score.py'
with open(script_file_name, 'r') as cefr:
    content = cefr.read()
with open(script_file_name, 'w') as cefw:
    cefw.write(content.replace('<<modelid>>', remote_run.model_id))
```
### Create a Container Image
Next use Azure Container Instances for deploying models as a web service for quickly deploying and validating your model
or when testing a model that is under development.
```
from azureml.core.image import Image, ContainerImage

# Container image bundling the model, scoring script and conda environment.
image_config = ContainerImage.image_configuration(runtime= "python",
                                                  execution_script = script_file_name,
                                                  conda_file = conda_env_file_name,
                                                  tags = {'area': "digits", 'type': "automl_regression"},
                                                  description = "Image for automl regression sample")

image = Image.create(name = "automlsampleimage",
                     # this is the model object
                     models = [model],
                     image_config = image_config,
                     workspace = ws)

image.wait_for_creation(show_output = True)

# Surface the build-log location on failure for easier debugging.
if image.creation_state == 'Failed':
    print("Image build log at: " + image.image_build_log_uri)
```
### Deploy the Image as a Web Service on Azure Container Instance
Deploy an image that contains the model and other assets needed by the service.
```
from azureml.core.webservice import AciWebservice

# Small ACI deployment: 1 CPU core, 1 GB RAM.
aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1,
                                               memory_gb = 1,
                                               tags = {'area': "digits", 'type': "automl_regression"},
                                               description = 'sample service for Automl Regression')

from azureml.core.webservice import Webservice

aci_service_name = 'automl-sample-hardware'
print(aci_service_name)
# Deploy the image built above as a web service and wait until it is up.
aci_service = Webservice.deploy_from_image(deployment_config = aciconfig,
                                           image = image,
                                           name = aci_service_name,
                                           workspace = ws)
aci_service.wait_for_deployment(True)
print(aci_service.state)
```
### Delete a Web Service
Deletes the specified web service.
```
#aci_service.delete()
```
### Get Logs from a Deployed Web Service
Gets logs from a deployed web service.
```
#aci_service.get_logs()
```
## Test
Now that the model is trained, split the data in the same way the data was split for training (The difference here is the data is being split locally) and then run the test data through the trained model to get the predicted values.
```
# Materialize the dataflow splits locally; targets become 1-D numpy arrays.
X_test = X_test.to_pandas_dataframe()
y_test = y_test.to_pandas_dataframe()
y_test = np.array(y_test)
y_test = y_test[:,0]

X_train = X_train.to_pandas_dataframe()
y_train = y_train.to_pandas_dataframe()
y_train = np.array(y_train)
y_train = y_train[:,0]
```
##### Predict on training and test set, and calculate residual values.
```
# Residuals (truth - prediction) for the training and test sets.
y_pred_train = fitted_model.predict(X_train)
y_residual_train = y_train - y_pred_train

y_pred_test = fitted_model.predict(X_test)
y_residual_test = y_test - y_pred_test
```
### Calculate metrics for the prediction
Now visualize the data on a scatter plot to show what our truth (actual) values are compared to the predicted values
from the trained model that was returned.
```
%matplotlib inline
from sklearn.metrics import mean_squared_error, r2_score
# Set up a multi-plot chart.
f, (a0, a1) = plt.subplots(1, 2, gridspec_kw = {'width_ratios':[1, 1], 'wspace':0, 'hspace': 0})
f.suptitle('Regression Residual Values', fontsize = 18)
f.set_figheight(6)
f.set_figwidth(16)
# Plot residual values of training set.
a0.axis([0, 360, -200, 200])
a0.plot(y_residual_train, 'bo', alpha = 0.5)
a0.plot([-10,360],[0,0], 'r-', lw = 3)
a0.text(16,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_train, y_pred_train))), fontsize = 12)
a0.text(16,140,'R2 score = {0:.2f}'.format(r2_score(y_train, y_pred_train)),fontsize = 12)
a0.set_xlabel('Training samples', fontsize = 12)
a0.set_ylabel('Residual Values', fontsize = 12)
# Plot residual values of test set.
a1.axis([0, 90, -200, 200])
a1.plot(y_residual_test, 'bo', alpha = 0.5)
a1.plot([-10,360],[0,0], 'r-', lw = 3)
a1.text(5,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_test, y_pred_test))), fontsize = 12)
a1.text(5,140,'R2 score = {0:.2f}'.format(r2_score(y_test, y_pred_test)),fontsize = 12)
a1.set_xlabel('Test samples', fontsize = 12)
a1.set_yticklabels([])
plt.show()
%matplotlib notebook
test_pred = plt.scatter(y_test, y_pred_test, color='')
test_test = plt.scatter(y_test, y_test, color='g')
plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)
plt.show()
```
## Acknowledgements
This Predicting Hardware Performance Dataset is made available under the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication License: https://creativecommons.org/publicdomain/zero/1.0/. Any rights in individual contents of the database are licensed under the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication License: https://creativecommons.org/publicdomain/zero/1.0/ . The dataset itself can be found here: https://www.kaggle.com/faizunnabi/comp-hardware-performance and https://archive.ics.uci.edu/ml/datasets/Computer+Hardware
_**Citation Found Here**_
| github_jupyter |
# Introduction to TensorFlow
## Computation graphs
In the first semester we used the NumPy-based `mlp` Python package to illustrate the concepts involved in automatically propagating gradients through multiple-layer neural network models. We also looked at how to use these calculated derivatives to do gradient-descent based training of models in supervised learning tasks such as classification and regression.
A key theme in the first semester's work was the idea of defining models in a modular fashion. There we considered models composed of a sequence of *layer* modules, the output of each of which fed into the input of the next in the sequence and each applying a transformation to map inputs to outputs. By defining a standard interface to layer objects with each defining a `fprop` method to *forward propagate* inputs to outputs, and a `bprop` method to *back propagate* gradients with respect to the output of the layer to gradients with respect to the input of the layer, the layer modules could be composed together arbitrarily and activations and gradients forward and back propagated through the whole stack respectively.
<div style='margin: auto; text-align: center; padding-top: 1em;'>
<img style='margin-bottom: 1em;' src='res/pipeline-graph.png' width='30%' />
<i>'Pipeline' model composed of sequence of single input, single output layer modules</i>
</div>
By construction a layer was defined as an object with a single array input and single array output. This is a natural fit for the architectures of standard feedforward networks which can be thought of as a single pipeline of transformations from user provided input data to predicted outputs as illustrated in the figure above.
<div style='margin: auto; text-align: center; padding-top: 1em;'>
<img style='display: inline-block; padding-right: 2em; margin-bottom: 1em;' src='res/rnn-graph.png' width='30%' />
<img style='display: inline-block; padding-left: 2em; margin-bottom: 1em;' src='res/skip-connection-graph.png' width='30%' /> <br />
<i>Models which fit less well into pipeline structure: left, a sequence-to-sequence recurrent network; right, a feed forward network with skip connections.</i>
</div>
Towards the end of last semester however we encountered several models which do not fit so well in to this pipeline-like structure. For instance (unrolled) recurrent neural networks tend to have inputs feeding in to and outputs feeding out from multiple points along a deep feedforward model corresponding to the updates of the hidden recurrent state, as illustrated in the left panel in the figure above. It is not trivial to see how to map this structure to our layer based pipeline. Similarly models with skip connections between layers as illustrated in the right panel of the above figure also do not fit particularly well in to a pipeline structure.
Ideally we would like to be able to compose modular components in more general structures than the pipeline structure we have being using so far. In particular it turns out to be useful to be able to deal with models which have structures defined by arbitrary [*directed acyclic graphs*](https://en.wikipedia.org/wiki/Directed_acyclic_graph) (DAGs), that is graphs connected by directed edges and without any directed cycles. Both the recurrent network and skip-connections examples can be naturally expressed as DAGs as well many other model structures.
When working with these more general graphical structures, rather than considering a graph made up of layer modules, it is often more useful to consider lower level mathematical operations or *ops* that make up the computation as the fundamental building block. A DAG composed of ops is often termed a *computation graph*. This terminology was covered briefly in [lecture 6](http://www.inf.ed.ac.uk/teaching/courses/mlp/2017-18/mlp06-enc.pdf), and also in the [MLPR course](http://www.inf.ed.ac.uk/teaching/courses/mlpr/2016/notes/w5a_backprop.html). The backpropagation rules we used to propagate gradients through a stack of layer modules can be naturally generalised to apply to computation graphs, with this method of applying the chain rule to automatically propagate gradients backwards through a general computation graph also sometimes termed [*reverse-mode automatic differentiation*](https://en.wikipedia.org/wiki/Automatic_differentiation#Reverse_accumulation).
<div style='margin: auto; text-align: center; padding-top: 1em;'>
<img style='margin-bottom: 1em;' src='res/affine-transform-graph.png' width='40%' />
<i>Computation / data flow graph for an affine transformation $\boldsymbol{y} = \mathbf{W}\boldsymbol{x} + \boldsymbol{b}$</i>
</div>
The figure above shows a very simple computation graph corresponding to the mathematical expression $\boldsymbol{y} = \mathbf{W}\boldsymbol{x} + \boldsymbol{b}$, i.e. the affine transformation we encountered last semester. Here the nodes of the graph are operations and the edges the vector or matrix values passed between operations. The opposite convention with nodes as values and edges as operations is also sometimes used. Note that just like there was ambiguity about what to define as a layer (as discussed previously at the beginning of the [third lab notebook](03_Multiple_layer_models.ipynb)), there are a range of choices for the level of abstraction to use in the op nodes in a computational graph. For instance, we could also have chosen to express the above computational graph with a single `AffineTransform` op node with three inputs (one matrix, two vector) and one vector output. Equally we might choose to express the `MatMul` op in terms of the underlying individual scalar addition and multiplication operations. What to consider an operation is therefore somewhat a matter of choice and what is convenient in a particular setting.
## TensorFlow
To allow us to work with models defined by more general computation graphs and to avoid the need to write `fprop` and `bprop` methods for each new model component we want to try out, this semester we will be using the open-source computation graph framework [TensorFlow](https://www.tensorflow.org/), originally developed by the Google Brain team:
> TensorFlow™ is an open source software library for numerical computation using data flow graphs. Nodes in the graph represent mathematical operations, while the graph edges represent the multidimensional data arrays (tensors) communicated between them. The flexible architecture allows you to deploy computation to one or more CPUs or GPUs
in a desktop, server, or mobile device with a single API.
TensorFlow allows complex computation graphs (also known as data flow graphs in TensorFlow parlance) to be defined via a Python interface, with efficient C++ implementations for running the corresponding operations on different devices. TensorFlow also includes tools for automatic gradient computation and a large and growing suite of pre-defined operations useful for gradient-based training of machine learning models.
In this notebook we will introduce some of the basic elements of constructing, training and evaluating models with TensorFlow. This will use similar material to some of the [official TensorFlow tutorials](https://www.tensorflow.org/tutorials/) but with an additional emphasis of making links to the material covered in this course last semester. For those who have not used a computational graph framework such as TensorFlow or Theano before you may find the [basic usage tutorial](https://www.tensorflow.org/get_started/basic_usage) useful to go through.
### Installing TensorFlow
To install TensorFlow, open a terminal, activate your Conda `mlp` environment using
```
source activate mlp
```
and then run
```
pip install tensorflow # for CPU users
```
```
pip install tensorflow_gpu # for GPU users
```
This should locally install the stable release version of TensorFlow (currently 1.4.1) in your Conda environment. After installing TensorFlow you may need to restart the kernel in the notebook to allow it to be imported.
## Exercise 1: EMNIST softmax regression
As a first example we will train a simple softmax regression model to classify handwritten digit images from the EMNIST data set encountered last semester (for those fed up of working with EMNIST - don't worry you will soon be moving on to other datasets!). This is equivalent to the model implemented in the first exercise of the third lab notebook. We will walk through constructing an equivalent model in TensorFlow and explain new TensorFlow model concepts as we use them. You should run each cell as you progress through the exercise.
Similarly to the common convention of importing NumPy under the shortform alias `np` it is common to import the Python TensorFlow top-level module under the alias `tf`.
```
import tensorflow as tf
```
We begin by defining [*placeholder*](https://www.tensorflow.org/api_docs/python/io_ops/placeholders) objects for the data inputs and targets arrays. These are nodes in the computation graph to which we will later *feed* in external data, such as batches of training set inputs and targets. This abstraction allows us to reuse the same computation graph for different data inputs - we can think of placeholders as acting equivalently to the arguments of a function. It is actually possible to feed data into any node in a TensorFlow graph however the advantage of using a placeholder is that it *must* always have a value fed into it (an exception will be raised if a value isn't provided) and no arbitrary alternative value needs to be entered.
The `tf.placeholder` function has three arguments:
* `dtype` : The [TensorFlow datatype](https://www.tensorflow.org/api_docs/python/framework/tensor_types) for the tensor e.g. `tf.float32` for single-precision floating point values.
* `shape` (optional) : An iterable defining the shape (size of each dimension) of the tensor e.g. `shape=(5, 2)` would indicate a 2D tensor (matrix) with first dimension of size 5 and second dimension of size 2. An entry of `None` in the shape definition corresponds to the corresponding dimension size being left unspecified, so for example `shape=(None, 28, 28)` would allow any 3D inputs with final two dimensions of size 28 to be inputted.
* `name` (optional): String argument defining a name for the tensor which can be useful when visualising a computation graph and for debugging purposes.
As we will generally be working with batches of datapoints, both the `inputs` and `targets` will be 2D tensors with the first dimension corresponding to the batch size (set as `None` here to allow it to specified later) and the second dimension corresponding to the size of each input or output vector. As in the previous semester's work we will use a 1-of-K encoding for the class targets so for EMNIST each output corresponds to a vector of length 47 (number of digit/letter classes).
```
# Placeholders for a batch of flattened 28x28 images (784 values) and their
# 1-of-47 encoded class targets; the batch dimension is left unspecified
# (None) so any batch size can be fed in later.
inputs = tf.placeholder(dtype=tf.float32, shape=[None, 784], name='inputs')
targets = tf.placeholder(dtype=tf.float32, shape=[None, 47], name='targets')
```
We now define [*variable*](https://www.tensorflow.org/api_docs/python/state_ops/variables) objects for the model parameters. Variables are stateful tensors in the computation graph - they have to be explicitly initialised and their internal values can be updated as part of the operations in a graph e.g. gradient updates to model parameter during training. They can also be saved to disk and pre-saved values restored in to a graph at a later time.
The `tf.Variable` constructor takes an `initial_value` as its first argument; this should be a TensorFlow tensor which specifies the initial value to assign to the variable, often a constant tensor such as all zeros, or random samples from a distribution.
```
# Model parameters, initialised to all zeros (acceptable here since the
# model is a single softmax layer with no symmetry-breaking requirement).
weights = tf.Variable(initial_value=tf.zeros([784, 47]))
biases = tf.Variable(initial_value=tf.zeros([47]))
```
We now build the computation graph corresponding to producing the predicted outputs of the model (log unnormalised class probabilities) given the data inputs and model parameters. We use the TensorFlow [`matmul`](https://www.tensorflow.org/api_docs/python/math_ops/matrix_math_functions#matmul) op to compute the matrix-matrix product between the 2D array of input vectors and the weight matrix parameter variable. TensorFlow [overloads all of the common arithmetic operators](http://stackoverflow.com/a/35095052) for tensor objects so `x + y` where at least one of `x` or `y` is a tensor instance (both `tf.placeholder` and `tf.Variable` return (sub-classes) of `tf.Tensor`) corresponds to the TensorFlow elementwise addition op `tf.add`. Further elementwise binary arithmetic operators like addition follow NumPy style [broadcasting](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html), so in the expression below the `+ biases` sub-expression will correspond to creating an operation in the computation graph which adds the bias vector to each of the rows of the 2D tensor output of the `matmul` op.
```
outputs = tf.matmul(inputs, weights) + biases
```
While we could have defined `outputs` as the softmax of the expression above to produce normalised class probabilities as the outputs of the model, as discussed last semester when using a softmax output combined with a cross-entropy error function it usually desirable from a numerical stability and efficiency perspective to wrap the softmax computation in to the error computation (as done in the `CrossEntropySoftmaxError` class in our `mlp` framework).
In TensorFlow this can be achieved with the `softmax_cross_entropy_with_logits` op which is part of the `tf.nn` submodule which contains a number of ops specifically for neural network type models. This op takes as its first input log unnormalised class probabilities (sometimes termed logits) and as second input the class label targets which should be of the same dimension as the first input. By default the last dimension of the input tensors is assumed to correspond to the class dimension - this can be altered via an optional `dim` argument.
The output of the `softmax_cross_entropy_with_logits` op here is a 1D tensor with a cross-entropy error value for each data point in the batch. We wish to minimise the mean cross-entropy error across the full dataset and will use the mean of the error on the batch as a stochastic estimator of this value. In TensorFlow ops which *reduce* a tensor along a dimension(s), for example by taking a sum, mean, or product, are prefixed with `reduce`, with the default behaviour being to perform the reduction across all dimensions of the input tensor and return a scalar output. Therefore the second line below will take the per data point cross-entropy errors and produce a single mean value across the whole batch.
```
# Per-datapoint cross-entropy; the softmax is folded into the loss op for
# numerical stability, so `outputs` stays as raw logits.
per_datapoint_errors = tf.nn.softmax_cross_entropy_with_logits(
    labels=targets, logits=outputs)
# Mean over the batch: a stochastic estimate of the dataset-wide error.
error = tf.reduce_mean(per_datapoint_errors)
```
Although for the purposes of training we will use the cross-entropy error as this is differentiable, for evaluation we will also be interested in the classification accuracy i.e. what proportion of all of the predicted classes correspond to the true target label. We can calculate this in TensorFlow similarly to how we used NumPy to do this previously - we use the TensorFlow `tf.argmax` op to find the index along the class dimension corresponding to the maximum predicted class probability and check if this is equal to the index along the class dimension of the 1-of-$k$ encoded target labels. Analogously to the error computation above, this computes per-datapoint values which we then need to average across with a `reduce_mean` op to produce the classification accuracy for a batch.
```
# A prediction counts as correct when the argmax over predicted class
# scores matches the argmax of the 1-of-K target; accuracy is the mean of
# those boolean outcomes (cast to float) over the batch.
correct_predictions = tf.equal(tf.argmax(outputs, 1), tf.argmax(targets, 1))
accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
```
As mentioned previously TensorFlow is able to automatically calculate gradients of scalar computation graph outputs with respect to tensors in the computation graph. We can explicitly construct a new sub-graph corresponding to the gradient of a scalar with respect to one or more tensors in the graph using the [`tf.gradients`](https://www.tensorflow.org/api_docs/python/train/gradient_computation) function.
TensorFlow also however includes a number of higher-level `Optimizer` classes in the `tf.train` module that internally deal with constructing graphs corresponding to the gradients of some scalar loss with respect to one or more `Variable` tensors in the graph (usually corresponding to model parameters) and then using these gradients to update the variables (roughly equivalent to the `LearningRule` classes in the `mlp` framework). The most basic `Optimizer` instance is the `GradientDescentOptimizer` which simply adds operations corresponding to basic (stochastic) gradient descent to the graph (i.e. no momentum, adaptive learning rates etc.). The `__init__` constructor method for this class takes one argument `learning_rate` corresponding to the gradient descent learning rate / step size encountered previously.
Usually we are not interested in the `Optimizer` object other than in adding operations in the graph corresponding to the optimisation steps. This can be achieved using the `minimize` method of the object which takes as first argument the tensor object corresponding to the scalar loss / error to be minimized. A further optional keyword argument `var_list` can be used to specify a list of variables to compute the gradients of the loss with respect to and update; by default this is set to `None` which indicates to use all trainable variables in the current graph. The `minimize` method returns an operation corresponding to applying the gradient updates to the variables - we need to store a reference to this to allow us to run these operations later. Note we do not need to store a reference to the optimizer as we have no further need of this object hence commonly the steps of constructing the `Optimizer` and calling `minimize` are commonly all applied in a single line as below.
```
train_step = tf.train.GradientDescentOptimizer(learning_rate=0.5).minimize(error)
```
We have now constructed a computation graph which can compute predicted outputs, use these to calculate an error value (and accuracy) and use the gradients of the error with respect to the model parameter variables to update their values with a gradient descent step.
Although we have defined our computation graph, we have not yet initialised any tensor data in memory - all of the tensor variables defined above are just symbolic representations of parts of the computation graph. We can think of the computation graph as a whole as being similar to a function - it defines a sequence of operations but does not directly run those operations on data itself.
To run the operations in (part of) a TensorFlow graph we need to create a [`Session`](https://www.tensorflow.org/api_docs/python/client/session_management) object:
> A `Session` object encapsulates the environment in which `Operation` objects are executed, and `Tensor` objects are evaluated.
A session object can be constructed using either `tf.Session()` or `tf.InteractiveSession()`. The only difference in the latter is that it installs itself as the default session on construction. This can be useful in interactive contexts such as shells or the notebook interface in which an alternative to running a graph operation using the session `run` method (see below) is to call the `eval` method of an operation e.g. `op.eval()`; generally a session in which the op runs needs to be passed to `eval`; however if an interactive session is used, then this is set as a default to use in `eval` calls.
```
sess = tf.InteractiveSession()
```
The key property of a session object is its `run` method. This takes an operation (or list of operations) in a defined graph as an argument and runs the parts of the computation graph necessary to evaluate the output(s) (if any) of the operation(s), and additionally performs any updates to variables states defined by the graph (e.g. gradient updates of parameters). The output values if any of the operation(s) are returned by the `run` call.
A standard operation which needs to be called before any other operations on a graph which includes variable nodes is a variable *initializer* operation. This, as the name suggests, initialises the values of the variables in the session to the values defined by the `initial_value` argument when adding the variables to the graph. For instance for the graph we have defined here this will initialise the `weights` variable value in the session to a 2D array of zeros of shape `(784, 47)` and the `biases` variable to a 1D array of shape `(47,)`.
We can access initializer ops for each variable individually using the `initializer` property of the variables in question and then individually run these, however a common pattern is to use the `tf.global_variables_initializer()` function to create a single initializer op which will initialise all globally defined variables in the default graph and then run this as done below.
```
# Single op that initialises every globally defined variable (here the
# `weights` and `biases`) to its declared initial_value; must run before
# any other op that reads those variables.
init_op = tf.global_variables_initializer()
sess.run(init_op)
```
We are now almost ready to begin training our defined model, however as a final step we need to create objects for accessing batches of EMNIST input and target data. In the tutorial code provided in `tf.examples.tutorials.mnist` there is an `input_data` sub-module which provides a `read_data_sets` function for downloading the MNIST data and constructing an object for iterating over MNIST data. However in the `mlp` package we already have the MNIST and EMNIST data provider classes that we used extensively last semester, and corresponding local copies of the MNIST and EMNIST data, so we will use that here as it provides all the necessary functionality.
```
# Batched providers over the local EMNIST data; flatten=True turns each
# 28x28 image into a 784-vector to match the placeholder shape.
import data_providers
train_data = data_providers.EMNISTDataProvider('train', batch_size=50, flatten=True)
valid_data = data_providers.EMNISTDataProvider('valid', batch_size=50, flatten=True)
```
We are now all set to train our model. As when training models last semester, the training procedure will involve two nested loops - an outer loop corresponding to multiple full-passes through the dataset or *epochs* and an inner loop iterating over individual batches in the training data.
The `init_op` we ran with `sess.run` previously did not depend on the placeholders `inputs` and `target` in our graph, so we simply ran it with `sess.run(init_op)`. The `train_step` operation corresponding to the gradient based updates of the `weights` and `biases` parameter variables does however depend on the `inputs` and `targets` placeholders and so we need to specify values to *feed* into these placeholders; as we wish the gradient updates to be calculated using the gradients with respect to a batch of inputs and targets, the values that we feed in are the input and target batches. This is specified using the keyword `feed_dict` argument to the session `run` method. As the name suggests this should be a Python dictionary (`dict`) with keys corresponding to references to the tensors in the graph to feed values in to and values the corresponding array values to feed in (typically NumPy `ndarray` instances) - here we have `feed_dict = {inputs: input_batch, targets: target_batch}`.
Another difference in our use of the session `run` method below is that we call it with a list of two operations - `[train_step, error]` rather than just a single operation. This allows the output (and variable updates) of multiple operations in a graph to be evaluated together - here we both run the `train_step` operation to update the parameter values and evaluate the `error` operation to return the mean error on the batch. Although we could split this into two separate session `run` calls, as the operations calculating the batch error will need to be evaluated when running the `train_step` operation (as this is the value gradients are calculated with respect to) this would involve redoing some of the computation and so be less efficient than combining them in a single `run` call.
As we are running two different operations, the `run` method returns two values here. The `train_step` operation has no outputs and so the first return value is `None` - in the code below we assign this to `_`, this being a common convention in Python code for assigning return values we are not interested in using. The second return value is the average error across the batch which we assign to `batch_error` and use to keep a running average of the dataset error across the epochs.
```
# Outer loop over epochs, inner loop over training batches.
num_epoch = 20
for e in range(num_epoch):
    # Accumulate per-batch mean errors, then normalise by the batch count
    # to report a running average for the epoch.
    running_error = 0.
    for input_batch, target_batch in train_data:
        feed = {inputs: input_batch, targets: target_batch}
        # One run call both applies the gradient update and fetches the
        # batch error; train_step itself has no output, hence `_`.
        _, batch_error = sess.run([train_step, error], feed_dict=feed)
        running_error += batch_error
    running_error /= train_data.num_batches
    print('End of epoch {0}: running error average = {1:.2f}'.format(e + 1, running_error))
```
To check your understanding of using sessions objects to evaluate parts of a graph and feeding values in to a graph, complete the definition of the function in the cell below. This should iterate across all batches in a provided data provider and calculate the error and classification accuracy for each, accumulating the average error and accuracy values across the whole dataset and returning these as a tuple.
```
def get_error_and_accuracy(data):
    """Calculate average error and classification accuracy across a dataset.

    Args:
        data: Data provider which iterates over input-target batches in dataset.

    Returns:
        Tuple with first element scalar value corresponding to average error
        across all batches in dataset and second value corresponding to
        average classification accuracy across all batches in dataset.
    """
    err = 0
    acc = 0
    for input_batch, target_batch in data:
        # Evaluate both ops in a single run call so the shared forward
        # computation is executed once per batch, rather than twice as with
        # two separate sess.run calls.
        batch_err, batch_acc = sess.run(
            [error, accuracy],
            feed_dict={inputs: input_batch, targets: target_batch})
        err += batch_err
        acc += batch_acc
    err /= data.num_batches
    acc /= data.num_batches
    return err, acc
```
Test your implementation by running the cell below - this should print the error and accuracy of the trained model on the validation and training datasets if implemented correctly.
```
# Final evaluation of the trained model on both splits.
train_err, train_acc = get_error_and_accuracy(train_data)
print('Train data: Error={0:.2f} Accuracy={1:.2f}'.format(train_err, train_acc))
valid_err, valid_acc = get_error_and_accuracy(valid_data)
print('Valid data: Error={0:.2f} Accuracy={1:.2f}'.format(valid_err, valid_acc))
```
## Exercise 2: Explicit graphs, name scopes, summaries and TensorBoard
In the exercise above we introduced most of the basic concepts needed for constructing graphs in TensorFlow and running graph operations. In an attempt to avoid introducing too many new terms and syntax at once however we skipped over some of the non-essential elements of creating and running models in TensorFlow, in particular some of the provided functionality for organising and structuring the computation graphs created and for monitoring the progress of training runs.
Now that you are hopefully more familiar with the basics of TensorFlow we will introduce some of these features as they are likely to provide useful when you are building and training more complex models in the rest of this semester.
Although we started off by motivating TensorFlow as a framework which builds computation graphs, in the code above we never explicitly referenced a graph object. This is because TensorFlow always registers a default graph at start up and all operations are added to this graph by default. The default graph can be accessed using `tf.get_default_graph()`. For example running the code in the cell below will assign a reference to the default graph to `default_graph` and print the total number of operations in the current graph definition.
```
# Every op created so far was registered on the implicitly created default
# graph; count them to see how large even this simple model's graph is.
default_graph = tf.get_default_graph()
num_ops = len(default_graph.get_operations())
print('Number of operations in graph: {0}'.format(num_ops))
```
We can also explicitly create a new graph object using `tf.Graph()`. This may be useful if we wish to build up several independent computation graphs.
```
graph = tf.Graph()
```
To add operations to a constructed graph object, we use the `graph.as_default()` [context manager](http://book.pythontips.com/en/latest/context_managers.html). Context managers are used with the `with` statement in Python - `with context_manager:` opens a block in Python in which a special `__enter__` method of the `context_manager` object is called before the code in the block is run and a further special `__exit__` method is run after the block code has finished execution. This can be used to for example manage allocation of resources (e.g. file handles) but also to locally change some 'context' in the code - in the example here, `graph.as_default()` is a context manager which changes the default graph within the following block to be `graph` before returning to the previous default graph once the block code is finished running. Context managers are used extensively in TensorFlow so it is worth being familiar with how they work.
Another common context manager usage in TensorFlow is to define *name scopes*. As we encountered earlier, individual operations in a TensorFlow graph can be assigned names. As we will see later this is useful for making graphs interpretable when we use the tools provided in TensorFlow for visualising them. As computation graphs can become very big (even the quite simple graph we created in the first exercise has around 100 operations in it) even with interpretable names attached to the graph operations it can still be difficult to understand and debug what is happening in a graph. Therefore rather than simply allowing a single-level naming scheme to be applied to the individual operations in the graph, TensorFlow supports hierarchical naming of sub-graphs. This allows sets of related operations to be grouped together under a common name, and thus allows both higher and lower level structure in a graph to be easily identified.
This hierarchical naming is performed by using the name scope context manager `tf.name_scope('name')`. Starting a block `with tf.name_scope('name'):`, will cause all the of the operations added to a graph within that block to be grouped under the name specified in the `tf.name_scope` call. Name scope blocks can be nested to allow finer-grained sub-groupings of operations. Name scopes can be used to group operations at various levels e.g. operations corresponding to inference/prediction versus training, grouping operations which correspond to the classical definition of a neural network layer etc.
The code in the cell below uses both a `graph.as_default()` context manager and name scopes to create a second copy of the computation graph corresponding to softmax regression that we constructed in the previous exercise.
```
with graph.as_default():
    # Group related ops under name scopes so the graph visualisation shows
    # a small number of high-level, expandable nodes.
    with tf.name_scope('data'):
        # Batches of flattened images and 1-of-47 encoded targets.
        inputs = tf.placeholder(tf.float32, [None, 784], name='inputs')
        targets = tf.placeholder(tf.float32, [None, 47], name='targets')
    with tf.name_scope('parameters'):
        weights = tf.Variable(tf.zeros([784, 47]), name='weights')
        biases = tf.Variable(tf.zeros([47]), name='biases')
    with tf.name_scope('model'):
        # Affine transform producing log unnormalised class probabilities.
        outputs = tf.matmul(inputs, weights) + biases
    with tf.name_scope('error'):
        # Softmax folded into the loss op; mean over the batch.
        per_datapoint_errors = tf.nn.softmax_cross_entropy_with_logits(
            logits=outputs, labels=targets)
        error = tf.reduce_mean(per_datapoint_errors)
    with tf.name_scope('train'):
        train_step = tf.train.GradientDescentOptimizer(0.5).minimize(error)
    with tf.name_scope('accuracy'):
        # Proportion of datapoints whose predicted class matches the target.
        pred_is_correct = tf.equal(tf.argmax(outputs, 1), tf.argmax(targets, 1))
        accuracy = tf.reduce_mean(tf.cast(pred_is_correct, tf.float32))
```
As hinted earlier TensorFlow comes with tools for visualising computation graphs. In particular [TensorBoard](https://www.tensorflow.org/how_tos/summaries_and_tensorboard/) is an interactive web application for amongst other things visualising TensorFlow computation graphs (we will explore some of its other functionality in the latter part of the exercise). Typically TensorBoard is launched from a terminal and a browser used to connect to the resulting locally running TensorBoard server instance. However for the purposes of graph visualisation it is also possible to embed a remotely-served TensorBoard graph visualisation interface in a Jupyter notebook using the helper function below (a slight variant of the recipe in [this notebook](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/deepdream/deepdream.ipynb)).
<span style='color: red; font-weight: bold;'>Note: The code below seems to not work for some people when accessing the notebook in Firefox. You can either try loading the notebook in an alternative browser, or just skip this section for now and explore the graph visualisation tool when launching TensorBoard below.</span>
```
from IPython.display import display, HTML
import datetime
def show_graph(graph_def, frame_size=(900, 600)):
    """Embed a TensorBoard graph visualisation of *graph_def* in the notebook.

    Args:
        graph_def: A `tf.Graph` or a serialized `GraphDef` protobuf.
        frame_size: (width, height) of the embedded iframe in pixels.
    """
    # Accept either a Graph object or an already-serialized GraphDef.
    if hasattr(graph_def, 'as_graph_def'):
        graph_def = graph_def.as_graph_def()
    # Timestamp makes the HTML element id unique so repeated calls in one
    # notebook session don't collide.
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    code = """
        <script>
          function load() {{
            document.getElementById("{id}").pbtxt = {data};
          }}
        </script>
        <link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
        <div style="height:{height}px">
          <tf-graph-basic id="{id}"></tf-graph-basic>
        </div>
    """.format(height=frame_size[1], data=repr(str(graph_def)), id='graph' + timestamp)
    iframe = """
        <iframe seamless style="width:{width}px;height:{height}px;border:0" srcdoc="{src}"></iframe>
    """.format(width=frame_size[0], height=frame_size[1] + 20,
               # BUG FIX: the original `code.replace('"', '"')` was a no-op
               # (the `&quot;` entity was lost in the notebook export). Double
               # quotes must be HTML-escaped or they terminate the srcdoc
               # attribute and break the embedded page.
               src=code.replace('"', '&quot;'))
    display(HTML(iframe))
```
Run the cell below to display a visualisation of the graph we just defined. Notice that by default all operations within a particular defined name scope are grouped under a single node; this allows the top-level structure of the graph and how data flows between the various components to be easily visualised. We can also expand these nodes however to interrogate the operations within them - simply double-click on one of the nodes to do this (double-clicking on the expanded node will cause it to collapse again). If you expand the `model` node you should see a graph closely mirroring the affine transform example given as a motivation above.
```
show_graph(graph)
```
To highlight how using name scopes can be very helpful in making these graph visualisations more interpretable, running the cell below will create a corresponding visualisation for the graph created in the first exercise, which contains the same operations but without the name scope groupings.
```
show_graph(tf.get_default_graph())
```
A common problem when doing gradient based training of complex models is how to monitor progress during training. In the `mlp` framework we used last semester we included some basic logging functionality for recording training statistics such as training and validation set error and classification accuracy at the end of each epoch. By printing the log output this allowed basic monitoring of how training was proceeding. However due to the noisiness of the training procedures the raw values printed were often difficult to interpret. After a training run we often plotted training curves to allow better visualisation of how the run went but this could only be done after a run was completed and required a lot of boilerplate code to be written (or copied and pasted...).
TensorFlow [*summary* operations](https://www.tensorflow.org/api_docs/python/summary/) are designed to help deal with this issue. Summary operations can be added to the graph to allow summary statistics to be computed and serialized to event files. These event files can then be loaded in TensorBoard *during training* to allow continuous graphing of for example the training and validation set error during training. As well as summary operations for monitoring [scalar](https://www.tensorflow.org/api_docs/python/summary/generation_of_summaries_#scalar) values such as errors or accuracies, TensorFlow also includes summary operations for monitoring [histograms](https://www.tensorflow.org/api_docs/python/summary/generation_of_summaries_#histogram) of tensor quanties (e.g. the distribution of a set of weight parameters), displaying [images](https://www.tensorflow.org/api_docs/python/summary/generation_of_summaries_#image) (for example for checking if random augmentations being applied to image inputs are producing reasonable outputs) and even playing back [audio](https://www.tensorflow.org/api_docs/python/summary/generation_of_summaries_#audio).
The cell below adds two simple scalar summary operations to our new graph for monitoring the error and classification accuracy. While we can keep references to all of the summary ops we add to a graph and make sure to run them all individually in the session during training, as with variable initialisation, TensorFlow provides a convenience method to avoid having to write a lot of boilerplate code like this. The `tf.summary.merge_all()` function returns a merged op corresponding to all of the summary ops that have been added to the current default graph. We can then just run this one merged op in our session to generate all the summaries we have added.
```
# Attach scalar summary ops for error and accuracy, then merge them so a
# single op evaluates every summary registered on this graph.
with graph.as_default():
tf.summary.scalar('error', error)
tf.summary.scalar('accuracy', accuracy)
summary_op = tf.summary.merge_all()
```
In addition to the (merged) summary operation, we also need to define a *summary writer* object(s) to specify where the summaries should be written to on disk. The `tf.summary.FileWriter` class constructor takes a `logdir` as its first argument which should specify the path to a directory where event files should be written to. In the code below the log directory is specified as a local directory `tf-log` plus a timestamp based sub-directory within this to keep event files corresponding to different runs separated. The `FileWriter` constructor also accepts an optional `graph` argument which we here set to the graph we just populated with summaries. We construct separate writer objects for summaries on the training and validation datasets.
```
import os
import datetime
# Timestamped log directory keeps event files from different runs separate;
# one FileWriter per dataset lets TensorBoard overlay train/valid curves.
timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
train_writer = tf.summary.FileWriter(os.path.join('tf-log', timestamp, 'train'), graph=graph)
valid_writer = tf.summary.FileWriter(os.path.join('tf-log', timestamp, 'valid'), graph=graph)
```
The final step in using summaries is to run the merged summary op at the appropriate points in training and to add the outputs of the run summary operations to the writers. Here we evaluate the summary op on each training dataset batch and after every 100th batch evaluate the summary op on the whole validation dataset, writing the outputs of each to the relevant writers.
If you run the cell below, you should be able to visualise the resulting training run summaries by launching TensorBoard within a shell with
```bash
tensorboard --logdir=[path/to/tf-log]
```
where `[path/to/tf-log]` is replaced with the path to the `tf-log` directory specified above, and then opening the URL `localhost:6006` in a browser.
```
# Train for num_epoch epochs, logging summaries per training batch and
# evaluating the whole validation set every 100 batches.
with graph.as_default():
init = tf.global_variables_initializer()
sess = tf.InteractiveSession(graph=graph)
num_epoch = 5
# One-of-k encode validation targets once up front.
valid_inputs = valid_data.inputs
valid_targets = valid_data.to_one_of_k(valid_data.targets)
sess.run(init)
for e in range(num_epoch):
for b, (input_batch, target_batch) in enumerate(train_data):
# Run one SGD step and the merged summary op together.
_, summary = sess.run(
[train_step, summary_op],
feed_dict={inputs: input_batch, targets: target_batch})
# Global step index = epoch offset + batch index.
train_writer.add_summary(summary, e * train_data.num_batches + b)
if b % 100 == 0:
valid_summary = sess.run(
summary_op, feed_dict={inputs: valid_inputs, targets: valid_targets})
valid_writer.add_summary(valid_summary, e * train_data.num_batches + b)
```
That completes our basic introduction to TensorFlow. If you want more to explore more of TensorFlow before beginning your project for this semester, you may wish to go through some of the [official tutorials](https://www.tensorflow.org/tutorials/) or some of the many sites with unofficial tutorials e.g. the series of notebooks [here](https://github.com/aymericdamien/TensorFlow-Examples). If you have time you may also wish to have a go at the optional exercise below.
## Optional exercise: multiple layer EMNIST classifier using `contrib` modules
As well as the core officially supported codebase, TensorFlow is distributed with a series of contributed modules under [`tensorflow.contrib`](https://www.tensorflow.org/api_docs/python/tf/contrib). These tend to provide higher level interfaces for constructing and running common forms of computational graphs which can allow models to be constructed with much more concise code. The interfaces of the `contrib` modules tend to be less stable than the core TensorFlow Python interface and they are also more restricted in the sorts of models that can be created. Therefore it is worthwhile to also be familiar with constructing models with the operations available in the core TensorFlow codebase; you can also often mix and match use of 'native' TensorFlow and functions from `contrib` modules.
As an optional extension exercise, construct a deep EMNIST classifier model, either using TensorFlow operations directly as above or using one (or more) of the higher level interfaces defined in `contrib` modules such as [`tensorflow.contrib.learn`](https://www.tensorflow.org/tutorials/tflearn/), [`tensorflow.contrib.layers`](https://www.tensorflow.org/api_docs/python/tf/layers) or [`tensorflow.contrib.slim`](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/slim). You should choose an appropriate model architecture (number and width of layers) and choice of activation function based on your experience fitting models from last semester.
As well as exploring the use of the interfaces in `contrib` modules you may wish to explore the more advanced optimizers available in [`tensorflow.train`](https://www.tensorflow.org/api_docs/python/tf/train) - such as [`tensorflow.train.AdamOptimizer`](https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer) and [`tensorflow.train.RMSPropOptimizer`](https://www.tensorflow.org/api_docs/python/tf/train/RMSPropOptimizer) corresponding to the adaptive learning rules implemented in the second coursework last semester.
```
```
| github_jupyter |
# Rhyming score experiments
This notebook is for rhyming score experiments. HAMR 2016.
```
from __future__ import print_function, unicode_literals
import string
import nltk
import numpy
# For plotting outputs, we'll need
import matplotlib.pyplot as plt
# To display the plotted images inside the notebook:
%matplotlib inline
# Plotting the figures at a reasonable size
import matplotlib
matplotlib.rcParams['figure.figsize'] = (30.0, 20.0)
# Dirty, dirty trick
# NOTE: `reload` is a Python 2 builtin; this notebook is Python 2 code
# (see also `xrange`/`iteritems` below). The reload+star-import combo
# re-pulls the local `rhyme` module after edits.
import rhyme
reload(rhyme)
from rhyme import *
# Map each word to its list of CMU pronunciations (a word can have several).
cmudict = collections.defaultdict(list)
for word, syl in nltk.corpus.cmudict.entries():
cmudict[word].append(syl)
# Hand-picked example lyrics: one weakly rhyming text and one densely
# rhyming one, used to sanity-check the rhyme metric below.
bad_text = 'I see all I know all For i am the the oracle Give me your hand' \
' I see caked blood on concrete Dead bodies on grass' \
' Mothers crying seeing babies lowered caskets'
# bad_text = 'this thing does not rhyme even a little it is just normal text no rap'
good_text = 'Yeah, yeah It\'s the return of the Wild Style fashionist' \
' Smashin hits, make it hard to adapt to this Put pizazz and jazz in this, and cash in this' \
' Mastered this, flash this and make em clap to this DJ\'s throw on cuts and obey the crowd' \
' Just pump the volume up, and play it loud'
# good_text = 'take a step back hey really gonna hack a full stack in a day while on crack'
# tokenize comes from the rhyme module (star import above).
bad_words = tokenize(bad_text)
good_words = tokenize(good_text)
def pairwise_grid_stats(score_grid, words):
    """Print the range of valid pairwise rhyme scores in *score_grid*.

    Cells equal to -1.0 are sentinel values (no score for that pair) and
    are excluded from the minimum.
    """
    valid = score_grid[score_grid != -1.0]
    if valid.size == 0:
        # Guard: .min() on an empty array raises ValueError; the original
        # crashed when every cell was the -1.0 sentinel.
        print('Range: no valid scores')
        return
    minimum = valid.min()
    maximum = score_grid.max()
    print('Range: {0} -- {1}'.format(minimum, maximum))
def pairwise_rhyme_visualization(score_grid, words, show=True):
    """Render the pairwise rhyme score grid as a word-labelled heatmap."""
    figure, axes = plt.subplots()
    axes.pcolor(score_grid, cmap=plt.cm.Blues)
    n_words = len(words)
    axes.set_xlim((0, n_words))
    axes.set_ylim((0, n_words))
    # Centre the major ticks inside each cell of the grid.
    axes.set_xticks(numpy.arange(score_grid.shape[0]) + 0.5, minor=False)
    axes.set_yticks(numpy.arange(score_grid.shape[1]) + 0.5, minor=False)
    # Flip the y axis and move x labels to the top for a table-like layout.
    axes.invert_yaxis()
    axes.xaxis.tick_top()
    axes.set_xticklabels(words, minor=False)
    axes.set_yticklabels(words, minor=False)
    if show:
        plt.show()
def score_words(words, prondict=cmudict):
    """Return the aggregate rhyme score for a sequence of tokens."""
    return aggregate_score(rhyme_score_grid(words, prondict=prondict))
def score_and_visualize_words(words, prondict=cmudict, **kwargs):
    """Score a token sequence, print the score and grid stats, and draw
    the pairwise rhyme heatmap. Extra kwargs go to rhyme_score_grid."""
    grid = rhyme_score_grid(words, prondict=prondict, **kwargs)
    print('Score: {0:.4f}'.format(aggregate_score(grid)))
    # Prints the score range as a side effect (returns nothing useful).
    pairwise_grid_stats(grid, words)
    pairwise_rhyme_visualization(grid, words)
#score_and_visualize_words(bad_words)
#score_and_visualize_words(good_words)
```
### Assessing the metric: baseline
To get some understanding of how this metric works, we need to find a baseline. Let's use some random sequences from the Brown corpus.
Obvious limit of the metric: at some point, we will randomly get a rhyming word anyway. Tackled by a sliding window of max. 16 words.
```
# Build a baseline from random 50-token windows of the Brown corpus:
# ordinary prose should score low, giving a reference distribution.
bt_reader = nltk.corpus.brown.words()
brown_tokens = [t for i, t in enumerate(bt_reader) if i < 10000]
bad_text_max_length = 50
n_bad_texts = 100
bad_texts = []
# NOTE: xrange is Python 2 (this notebook targets Python 2).
for i in xrange(n_bad_texts):
# Choose a random start
start = numpy.random.randint(low=0, high=len(brown_tokens) - bad_text_max_length - 1)
text = brown_tokens[start:start + bad_text_max_length]
bad_texts.append(' '.join(text))
bad_text_words = [tokenize(t) for t in bad_texts]
bad_text_scores = numpy.array([score_words(w) for w in bad_text_words[:100]])
#n, bins, patches = plt.hist(bad_text_scores, 10, normed=True)
#plt.show()
import codecs
def parse_artist(filename):
    """Read a UTF-8 lyrics file and split it into texts on blank lines.

    Each run of consecutive non-empty lines becomes one text, with the
    lines joined by single spaces. Returns the list of texts.
    """
    with codecs.open(filename, 'r', 'utf-8') as handle:
        stripped = [line.strip() for line in handle]
    texts = []
    buffer = []
    for line in stripped:
        if line:
            buffer.append(line)
        else:
            # Blank line ends the current text (may yield '' for
            # consecutive blanks, matching the original behaviour).
            texts.append(' '.join(buffer))
            buffer = []
    # Flush a trailing text that is not terminated by a blank line.
    if buffer:
        texts.append(' '.join(buffer))
    return texts
# Load verse texts from four artists known for dense rhyme schemes and
# score/visualise the first good and a sample bad window side by side.
rakim_texts = parse_artist('../examples_good_rhymes/rakim')
eminem_texts = parse_artist('../examples_good_rhymes/eminem')
aesop_texts = parse_artist('../examples_good_rhymes/aesop_rock')
lilwayne_texts = parse_artist('../examples_good_rhymes/lil_wayne')
good_texts = list(itertools.chain(rakim_texts, eminem_texts, aesop_texts, lilwayne_texts))
# print(len(good_texts))
good_text_words = [tokenize(t) for t in good_texts]
#good_text_scores = [score_words(w) for w in good_words]
# nonnegative=True clips negative pair scores; 50-token windows keep the
# comparison fair between the good and baseline texts.
score_and_visualize_words(good_text_words[0][:50], nonnegative=True)
score_and_visualize_words(bad_text_words[2][:50], nonnegative=True)
# n, bins, patches = plt.hist(bad_text_scores, 10, normed=True)
# plt.show()
# _, _, _ = plt.hist(good_text_scores, bins=bins, color='r')
# plt.show()
# Re-import the (possibly edited) rhyme module, then build score grids,
# binarized heatmaps, rhyme graphs and cliques for one good and one bad text.
reload(rhyme)
from rhyme import *
good_score_grid = rhyme_score_grid(good_text_words[0], prondict=cmudict)
bad_score_grid = rhyme_score_grid(bad_text_words[0], prondict=cmudict)
gw = good_text_words[0]
bw = bad_text_words[0]
print(len(bw))
# Restrict the good text to its first 45 tokens for a readable plot.
gsg = good_score_grid[:45, :45]
gw_part = gw[:45]
gsbg = binarize_grid(gsg)
pairwise_rhyme_visualization(gsbg, gw_part)
bsbg = binarize_grid(bad_score_grid)
pairwise_rhyme_visualization(bsbg, bw)
good_cliques = get_rhyme_groups(gsg, gw_part)
# BUG FIX: the original passed good_score_grid here with the *bad* word
# list bw, mixing a grid from one text with words from another.
bad_cliques = get_rhyme_groups(bad_score_grid, bw)
import pprint
good_graph = get_rhyme_graph(gsg, gw_part)
bad_graph = get_rhyme_graph(bad_score_grid, bw)
# Connected components with >= 2 nodes are the interesting rhyme groups.
g_components = list(networkx.algorithms.connected.connected_components(good_graph))
g_nontrivial = [g for g in g_components if len(g) >= 2]
print(' '.join(gw_part))
pprint.pprint(g_nontrivial)
def nontrivial_components(G):
    """Pretty-print the connected components of G with at least two nodes."""
    components = networkx.algorithms.connected.connected_components(G)
    pprint.pprint([component for component in components if len(component) >= 2])
def triangle_analysis(G):
    """Print the nodes of G that sit in at least one triangle (with their
    triangle counts) and the graph's average clustering coefficient.

    Triangles indicate tightly-knit rhyme groups of three or more words.
    """
    t = networkx.algorithms.cluster.triangles(G)
    # FIX: use .items() — .iteritems() is Python-2-only, while .items()
    # behaves equivalently on both Python 2 and Python 3.
    print('Triangles: {0}'.format({k: v for k, v in t.items() if v >= 1}))
    print(networkx.algorithms.cluster.average_clustering(G))
# Compare triangle structure of the rhyming vs. baseline graph.
triangle_analysis(good_graph)
triangle_analysis(bad_graph)
def clique_analysis(cliques):
    """Summarize rhyme cliques grouped by size.

    Prints the fraction of cliques with more than three members, the count
    of cliques per size, and the word sets of size-3 and size-4 cliques.
    Clique members are 'word_position' tokens; only the word part is kept.

    Returns:
        defaultdict mapping clique size -> list of word sets.
    """
    if not cliques:
        # Guard: the ratio below would divide by zero on empty input.
        print('No cliques to analyze')
        return collections.defaultdict(list)
    multi_cliques = [c for c in cliques if len(c) > 3]
    multi_clique_ratio = float(len(multi_cliques)) / len(cliques)
    cliques_by_level = collections.defaultdict(list)
    for c in cliques:
        cliques_by_level[len(c)].append(set([w.split('_')[0] for w in c]))
    print(multi_clique_ratio)
    # FIX: .items() instead of Python-2-only .iteritems().
    pprint.pprint({k: len(v) for k, v in cliques_by_level.items()})
    if 3 in cliques_by_level:
        pprint.pprint(cliques_by_level[3])
    if 4 in cliques_by_level:
        pprint.pprint(cliques_by_level[4])
    if 3 not in cliques_by_level and 4 not in cliques_by_level:
        # FIX: the original attached this else only to the size-4 check, so
        # the message printed even when size-3 cliques had just been listed.
        print('No cliques above 2 members')
    return cliques_by_level
# Run the clique summaries, look for overlapping 3-cliques, then repeat the
# graph analysis on the last good text's first 45 tokens.
gcbl = clique_analysis(good_cliques)
bcbl = clique_analysis(bad_cliques)
print('-----------------')
g_overlap = find_overlapping_cliques(gcbl[3])
pprint.pprint(g_overlap)
print('---------------')
b_overlap = find_overlapping_cliques(bcbl[3])
pprint.pprint(b_overlap)
reload(rhyme)
from rhyme import *
other_gw = good_text_words[-1][:45]
other_good_score_grid = rhyme_score_grid(other_gw, prondict=cmudict)
ogsbg = binarize_grid(other_good_score_grid)
pairwise_rhyme_visualization(ogsbg, other_gw)
ograph = get_rhyme_graph(other_good_score_grid, other_gw)
triangle_analysis(ograph)
nontrivial_components(ograph)
# k-clique communities with k=2 merge cliques sharing at least one node.
k_cliques = list(networkx.k_clique_communities(ograph, 2))
print(k_cliques)
```
### Ideas on improving
* Disambiguate the indefinite article.
* Remove stopwords (count them as -1). Implemented as "weak" stopwords (if the other word is not a weak stopword, count normally) vs. "strong" (if one of the words is a strong stopword, the pair gets -1)
* Only count words within a relevant window (improves on randomness)
* Only retain nouns, verbs, adjectives and adverbs. (NOT IMPLEMENTED)
* Word count patterns:
| github_jupyter |
# Object Detection with YoloV4
This notebook is intended to be an example of how to use MIGraphX to perform object detection. The model used below is a pre-trained yolov4 from the ONNX model zoo.
### Download dependencies
```
# Fetch the YoloV4 dependencies (COCO class names, anchor boxes, a sample
# image and the ONNX model) once; skip any file that is already present.
import os.path
if not os.path.exists("./utilities/coco.names"):
!wget https://github.com/onnx/models/raw/master/vision/object_detection_segmentation/yolov4/dependencies/coco.names -P ./utilities/
if not os.path.exists("./utilities/yolov4_anchors.txt"):
!wget https://github.com/onnx/models/raw/master/vision/object_detection_segmentation/yolov4/dependencies/yolov4_anchors.txt -P ./utilities/
if not os.path.exists("./utilities/input.jpg"):
# The image used is from the COCO dataset (https://cocodataset.org/#explore)
# Other images can be tested by replacing the link below
image_link = "https://farm3.staticflickr.com/2009/2306189268_88cc86b30f_z.jpg"
!wget -O ./utilities/input.jpg $image_link
if not os.path.exists("./utilities/yolov4.onnx"):
!wget https://github.com/onnx/models/raw/master/vision/object_detection_segmentation/yolov4/model/yolov4.onnx -P ./utilities/
```
### Serialize model using MIGraphX Driver
Please refer to the [MIGraphX Driver example](../../migraphx/migraphx_driver) if you would like more information about this tool.
```
# Compile the ONNX model to MIGraphX msgpack form (half- and single-precision
# variants) for the GPU; each compile is skipped if its output already exists.
if not os.path.exists("yolov4_fp16.msgpack"):
!/opt/rocm/bin/migraphx-driver compile ./utilities/yolov4.onnx --gpu --enable-offload-copy --fp16ref --binary -o yolov4_fp16.msgpack
if not os.path.exists("yolov4.msgpack"):
!/opt/rocm/bin/migraphx-driver compile ./utilities/yolov4.onnx --gpu --enable-offload-copy --binary -o yolov4.msgpack
```
### Import libraries
Please refer to [this section](https://github.com/ROCmSoftwarePlatform/AMDMIGraphX#using-migraphx-python-module) of the main README if the migraphx module is not found.
```
import migraphx
import cv2
import time
import numpy as np
import image_processing as ip
from PIL import Image
```
### Read and pre-process image data
```
# Read the sample image, convert OpenCV's BGR to RGB, resize/pad it to the
# 416x416 network input and add a leading batch dimension.
input_size = 416
original_image = cv2.imread("./utilities/input.jpg")
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
# (height, width) of the source image, needed to scale boxes back later.
original_image_size = original_image.shape[:2]
image_data = ip.image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
```
### Load and run model
```
# Load serialized model (either single- or half-precision)
model = migraphx.load("yolov4.msgpack", format="msgpack")
#model = migraphx.load("yolov4_fp16.msgpack", format="msgpack")
# Get the name of the input parameter and convert image data to an MIGraphX argument
input_name = next(iter(model.get_parameter_shapes()))
input_argument = migraphx.argument(image_data)
# Evaluate the model and convert the outputs for post-processing
outputs = model.run({input_name: input_argument})
# Reshape each raw output buffer into a numpy array with the model's
# reported output dimensions.
detections = [np.ndarray(shape=out.get_shape().lens(), buffer=np.array(out.tolist()), dtype=float) for out in outputs]
```
### Post-process the model outputs and display image with detection bounding boxes
```
# Decode the raw detections into boxes, filter by confidence (0.25),
# run non-max suppression (IoU 0.213) and draw the results.
ANCHORS = "./utilities/yolov4_anchors.txt"
STRIDES = [8, 16, 32]
XYSCALE = [1.2, 1.1, 1.05]
ANCHORS = ip.get_anchors(ANCHORS)
STRIDES = np.array(STRIDES)
pred_bbox = ip.postprocess_bbbox(detections, ANCHORS, STRIDES, XYSCALE)
bboxes = ip.postprocess_boxes(pred_bbox, original_image_size, input_size, 0.25)
bboxes = ip.nms(bboxes, 0.213, method='nms')
image = ip.draw_bbox(original_image, bboxes)
image = Image.fromarray(image)
image.show()
```
| github_jupyter |
```
# !pip install category_encoders
# https://lambdaschool.github.io/ds/unit2/portfolio-project/ds14
# https://finance.yahoo.com/quote/GOLD/history?p=GOLD (Gold)
# GOLD (47B cap) has 15 more years of historical data than GLD, GDX, and GC=F
# Load ~35 years of GOLD (Barrick Gold) daily OHLCV data from Yahoo Finance.
import pandas_datareader as web
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
plt.style.use('fivethirtyeight')
# yahoo_gold_url = 'https://query1.finance.yahoo.com/v7/finance/download/GOLD?period1=476323200&period2=1587600000&interval=1d&events=history'
# For whatever reason, use datareader to import data below for better plot with dates.
# Another advantage of using pandas_datareader is specifying current date as above URL is fixed dates.
gold = web.DataReader('GOLD', data_source='yahoo', start='1985-02-04', end='2020-04-28')
# However, beware of issues with dates in models later.
# gold = pd.read_csv(yahoo_gold_url)
gold
# - [X] Continue to iterate on your project: data cleaning, exploratory visualization, feature engineering, modeling.
# - [X] Make at least 1 partial dependence plot to explain your model.
# - [X] Make at least 1 Shapley force plot to explain an individual prediction.
# - [X] **Share at least 1 visualization (of any type) on Slack!**
# Visualize the high price
plt.figure(figsize=(16,8))
plt.title('GOLD Daily High Price Data')
plt.plot(gold['High'])
plt.xlabel('Year', fontsize=18)
plt.ylabel('High Price USD ($)', fontsize=18)
plt.show()
# No NaNs
# gold.isna().sum()
# Calculations for train, val, test, sizes to do manual split and avoid data leakage
# Chronological 60/20/20 split over the 8876 rows (no shuffling, so no
# look-ahead leakage between the sets).
trainsize = .6*8876
valsize = .2*8876
testsize = .2*8876
(trainsize, valsize, testsize)
# Calculation for val rows
valend = 5325.599999999999 + 1775.2
valend
# NOTE(review): these ranges skip rows 5325 and 7100 entirely (iloc end is
# exclusive, next start is +1) — the later plotting split at the bottom of
# the notebook uses 0:5326 / 5326:7101 instead; confirm which is intended.
train = gold.iloc[0:5325]
val = gold.iloc[5326:7100]
test = gold.iloc[7101:8876]
(train.shape, val.shape, test.shape)
# train = gold
# Split train into train & test
# This method causes data leakage
#from sklearn.model_selection import train_test_split
#train, test = train_test_split(train, train_size=0.80, test_size=0.20, random_state=42)
#(train.shape, test.shape)
# Split train into train and val
# This method causes data leakage
#train, val = train_test_split(train, train_size=0.80, test_size=0.20, random_state=42)
#(train.shape, val.shape)
# Engineer four derived price features on each split independently
# (computed row-wise, so no cross-set leakage).
# Add New Columns with Average Prices
# This feature has the most permutation importance
train['HL Avg'] = (train['High'] + train['Low'])/2
val['HL Avg'] = (val['High'] + val['Low'])/2
test['HL Avg'] = (test['High'] + test['Low'])/2
# Testing this new feature (made score worse, but has 3rd highest permuation imporance)
train['OC Avg'] = (train['Open'] + train['Close'])/2
val['OC Avg'] = (val['Open'] + val['Close'])/2
test['OC Avg'] = (test['Open'] + test['Close'])/2
# Test another feature
train['HL Range'] = (train['High'] - train['Low'])
val['HL Range'] = (val['High'] - val['Low'])
test['HL Range'] = (test['High'] - test['Low'])
# Another one (Consider using an if statement, whichever one is higher, then subtract the smaller one, for a true range)
# NOTE(review): OC Range can be negative (signed open-minus-close), unlike
# HL Range which is always non-negative.
train['OC Range'] = (train['Open'] - train['Close'])
val['OC Range'] = (val['Open'] - val['Close'])
test['OC Range'] = (test['Open'] - test['Close'])
# Convert Date column to datetime format
#train['Date'] = pd.to_datetime(train['Date'], infer_datetime_format=True)
#val['Date'] = pd.to_datetime(val['Date'], infer_datetime_format=True)
#test['Date'] = pd.to_datetime(test['Date'], infer_datetime_format=True)
# Commented out because it caused a slightly worse MAE score.
# Extract components from Date
#train['year'] = train['Date'].dt.year
#train['month'] = train['Date'].dt.month
#train['day'] = train['Date'].dt.day
#val['year'] = val['Date'].dt.year
#val['month'] = val['Date'].dt.month
#val['day'] = val['Date'].dt.day
#test['year'] = test['Date'].dt.year
#test['month'] = test['Date'].dt.month
#test['day'] = test['Date'].dt.day
# Commented out because it caused a slightly worse MAE score.
# Drop the original Date column
# train = train.drop(columns=['Date'])
# val = val.drop(columns=['Date'])
# test = test.drop(columns=['Date'])
# Assign 'High' price to target variable
target = 'High'
# How is the target distributed?
y = train['High']
# Looks pretty normal
import seaborn as sns
sns.distplot(y);
# No outliers
y.describe()
# Arrange data into X features matrix and y target vector
X_train = train.drop(columns=target)
y_train = train[target]
X_val = val.drop(columns=target)
y_val = val[target]
# NOTE(review): this assignment keeps the target column in X_test; it is
# overwritten with test.drop(columns=target) in the test-evaluation cell
# below, so this line is dead/misleading — consider removing it.
X_test = test
# Fit an ordinal-encode + linear-regression pipeline on train, report MAE on
# the validation set, inspect coefficients, then report MAE on the test set.
# Linear Regression Val
from sklearn.linear_model import LinearRegression
import category_encoders as ce
import plotly.express as px
from sklearn.pipeline import make_pipeline
from sklearn.metrics import mean_absolute_error
pipeline = make_pipeline(
ce.OrdinalEncoder(),
LinearRegression()
)
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
mae = mean_absolute_error(y_val, y_pred)
print(f'Validation MAE: ${mae:}')
# Per-feature linear coefficients, labelled by column name.
features = X_train.columns.tolist()
coefficients = pipeline.named_steps['linearregression'].coef_
pd.Series(coefficients, features)
# Linear Regression Test
X_test = test.drop(columns=target)
y_test = test[target]
y_pred = pipeline.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test MAE: ${mae:}')
# Plot the data
# Re-slice the full series contiguously for plotting the three segments.
train = gold.iloc[0:5326]
val = gold.iloc[5326:7101]
test = gold.iloc[7101:8876]
# Visualize the data
plt.figure(figsize=(16,8))
plt.title('GOLD Predictive Model Split')
plt.xlabel('Year', fontsize=18)
plt.ylabel('High Price USD ($)', fontsize=18)
plt.plot(train['High'])
plt.plot(val['High'])
plt.plot(test['High'])
### plt.plot(gold) trying to compare
plt.legend(['Train', 'Val', 'Test'], loc = 'lower right')
plt.show()
```
| github_jupyter |
# Собственные векторы, собственные значения. Разложение Шура и QR-алгоритм
## На прошлой лекции
- Матричное умножение
- Иерархия памяти
- BLAS
- Алгоритм Штрассена
- Вычисление QR разложения
## План на сегодня
- Собственные векторы и их приложения (PageRank)
- Круги Гершгорина
- Степенной метод вычисления собственных векторов и значений
- Теорема Шура
- Нормальные матрицы
- QR алгоритм
## Что такое собственный вектор?
**Определение.** Вектор $x \ne 0$ называется **собственным** для квадратной матрицы $A$, если найдётся такое число $\lambda$ что
$$
Ax = \lambda x.
$$
Число $\lambda$ называется **собственным значением**.
Так как матрица $A - \lambda I$ должна иметь нетривиальное ядро (что такое ядро?),
собственные значения являются корнями характеристического полинома
$$ \det (A - \lambda I) = 0.$$
## Разложение по собственным векторам (eigendecomposition)
Если матрица $A$ размера $n\times n$ имеет $n$ собственных векторов $s_i$, $i=1,\dots,n$:
$$ As_i = \lambda_i s_i, $$
то это может быть записано в виде
$$ A S = S \Lambda, \quad\text{где}\quad S=(s_1,\dots,s_n), \quad \Lambda = \text{diag}(\lambda_1, \dots, \lambda_n),$$
или эквивалентно
$$ A = S\Lambda S^{-1}. $$
Такая форма представления матрицы $A$ называется разложением по собственным векторам (**eigendecomposition**). Матрицы, которые могут быть представлены в таком виде, называются **диагонализуемыми**.
#### Существование
Какие матрицы являются диагонализуемыми?
Простым примером являются матрицы, у которых все собственные значения различны.
Более общий факт звучит так:
матрица диагонализуема тогда и только тогда, когда **алгебраическая кратность** каждого собственного значения (кратность корня характеристического многочлена) совпадает с его **геометрической кратностью** (размерностью собственного подпространства).
Для наших целей наиболее важный класс диагонализуемых матриц – это класс **нормальных матриц**:
$$AA^* = A^* A.$$
Почему такие матрицы диагонализуемы станет ясно чуть позже.
#### Пример
* Легко проверить, что матрица $$A = \begin{pmatrix} 1 & 1 \\ 0 & 1 \end{pmatrix}$$ имеет одно собственное значение $1$ кратности $2$ (поскольку характеристический многочлен имеет вид $p(\lambda)=(1-\lambda)^2$), но только один собственный вектор $\begin{pmatrix} c \\ 0 \end{pmatrix}$ и следовательно такая матрица недиагонализуема.
## Почему важны собственные векторы и собственные значения?
- Собственные векторы имеют как важное теоретическое значения, так и многочисленные приложения.
- Весь микромир подчиняется законам следующим из **уравнения Шрёдингера**, которое является задачей на поиск собственного вектора и собственного значения:
$$
H \psi = E \psi,
$$
где $E$ – нижний уровень энергии, $\psi$ – волновая функция и $H$ – гамильтониан.
- Более половины вычислительных мощностей в мире тратится на решение задач такого типа для задач дизайна материалов и разработки новых лекарств
## Собственные значения – это частоты выбраций
Обычно вычисление собственных значений и собственных векторов необходимо для изучения
- вибраций в механических структурах
- снижения сложности моделей сложных систем
```
# Embed a YouTube video on structural vibrations in the notebook.
from IPython.display import YouTubeVideo
YouTubeVideo("xKGA3RNzvKg")
```
## Google PageRank
Одна из самых известных задач, сводящихся к вычислению собственного вектора, – это задача вычисления **Google PageRank**.
- Задача состоит в ранжировании веб-страниц: какие из них являются важными, а какие нет
- В интернете страницы ссылаются друг на друга
- PageRank определяется рекурсивно. Обозначим за $p_i$ **важность** $i$-ой страницы. Тогда определим эту важность как усреднённую важность всех страниц, которые ссылаются на данную страницу. Это определение приводит к следующей линейной системе
$$ p_i = \sum_{j \in N(i)} \frac{p_j}{L(j)}, $$
где $L(j)$ – число исходящих ссылок с $j$-ой страницы, $N(i)$ – число соседей $i$-ой страницы. Это может быть записано следующим образом
$$ p = G p, \quad G_{ij} = \frac{1}{L(j)} $$
или как задача на собственные значения
$$
Gp = 1 p,
$$
то есть мы уже знаем, что у матрицы $G$ есть собственное значение равное $1$.
Заметим, что $G$ – **левостохастичная** матрица, то есть сумма в каждом столбце равна $1$.
Проверьте, что любая левостохастичная матрица имеет собственное значение равное $1$.
## Демо
Мы можем вычислить PageRank с помощью библиотек на Python.
Будем использовать библиотеку ```networkx``` для работы с графами, она может быть установлена с помощью следующей команды
```conda install networkx```
Возьмём простой пример графа [Zachary karate club](https://en.wikipedia.org/wiki/Zachary%27s_karate_club).
Этот граф был собран вручную в 1977, и является классическим графом для анализа соцсетей.
```
# Load and draw the Zachary karate club graph from a local GML file.
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import networkx as nx
kn = nx.read_gml('karate.gml')
#nx.write_gml(kn, 'karate2.gml')
nx.draw_networkx(kn) #Draw the graph
```
Сейчас мы можем вычислить PageRank, используя функцию, встроенную в NetworkX. Мы также изобразим вершины графа пропорционально тому, насколько они важны в смысле величины PageRank'a.
```
# Compute PageRank with NetworkX and redraw the graph with node sizes
# proportional to each node's PageRank score.
pr = nx.algorithms.link_analysis.pagerank(kn)
pr_vector = list(pr.values())
# Scores sum to 1, so scale them up to get visible node sizes.
pr_vector = np.array(pr_vector) * 3000
nx.draw_networkx(kn, node_size=pr_vector, labels=None)
```
## Вычисление собственных значений
- Как вычислить собственные значения и собственные векторы?
Все задачи на собственные значения делятся на два класса:
- полная задача на собственные значения (нужны все собственные значения и собственные векторы)
- частичная задача на собственные значения (требуются минимальное/максимальное собственное значение, или собственные значения из заданного промежутка)
## Вычисление собственных значений с помощью характеристического многочлена
Задача на собственные значения имеет вид
$$ Ax = \lambda x, $$
или
$$ (A - \lambda I) x = 0,$$
поэтому матрица $A - \lambda I$ имеет нетривиальное ядро и должна быть вырожденной. Это значит, что её **детерминант** равен нулю:
$$ p(\lambda) = \det(A - \lambda I) = 0. $$
- Уравнение называется **характеристическим** и является полиномом степени $n$.
- Многочлен степени $n$ имеет $n$ комплексных корней!
## Вспомним, что такое детерминант
Детерминант квадратной матрицы определён как
$$\det A = \sum_{\sigma \in S_n} \mathrm{sgn}({\sigma})\prod^n_{i=1} a_{i, \sigma_i},$$
где
- $S_n$ множество всех **перестановок** чисел $1, \ldots, n$
- $\mathrm{sgn}$ обозначает **знак** перестановки ( $(-1)^p$, где $p$ – число требуемых транспозиций).
## Свойства детерминанта
Детерминант обладает многими полезными свойствами:
1. $\det(AB) = \det(A) \det(B)$
2. Разложение по минорам: мы можем вычислить детерминант с помощью процедуры разложения по выбранной строке или столбцу. В этом случае сложность вычисления будет экспоненциальна по $n$.
Можно ли вычислить детерминант за $\mathcal{O}(n^3)$?
## Собственные значения и характеристическое уравнение
Вернёмся к собственным значениям.
Характеристическое уравнение можно использовать для вычисления собственных значений, что приводит нас к наивному алгоритму:
$$p(\lambda) = \det(A - \lambda I)$$
1. Вычислить коэффициенты многочлена
2. Найти его корни
**Это хорошая идея?**
Посмотрим на небольшой пример
```
# Naive eigenvalue computation: build the characteristic polynomial and find
# its roots, then compare against LAPACK's eigensolver on the same matrix.
# NOTE: relies on plt being imported in an earlier cell.
import numpy as np
n = 40
# Hilbert-like test matrix (note i - j in the denominator: not symmetric).
a = [[1.0 / (i - j + 0.5) for i in range(n)] for j in range(n)]
a = np.array(a)
ev = np.linalg.eigvals(a)
# There is a special numpy function for the characteristic polynomial
cf = np.poly(a)
ev_roots = np.roots(cf)
#print('Coefficients of the polynomial:', cf)
#print('Polynomial roots:', ev_roots)
# Eigenvalues via polynomial roots (the ill-conditioned route).
plt.scatter(ev_roots.real, ev_roots.imag, marker='x', label='roots')
# Zero perturbation here; change the 0 factor to see how the spectrum reacts.
b = a + 0 * np.random.randn(n, n)
ev_b = np.linalg.eigvals(b)
plt.scatter(ev_b.real, ev_b.imag, marker='o', label='Lapack')
#plt.scatter(ev_roots.real, ev_roots.imag, marker='o', label='Brute force')
plt.legend(loc='best')
plt.xlabel('Real part')
plt.ylabel('Imaginary part')
```
**Мораль**:
- Не делайте так, если только у вас нет серьёзной причины
- Поиск корней полинома – очень **плохо обусловленная** задача (задача может быть обусловлена не так плохо, но с использованием другого базиса в пространстве многочленов). Заметим, что матрицей Грама для мономов
$$h_{ij} = \int_0^1 x^i x^j\, dx = \frac{1}{i+j+1},$$
является матрица Гильберта, у которой сингулярные числа экспоненциально убывают. Таким образом, мономы почти линейно зависимы.
## Круги Гершгорина
- Есть интересная теорема, которая часто помогает локализовать собственные значения.
- Она называется **теоремой Гершгорина**.
- Она утверждает, что все собственные значения $\lambda_i, i = 1, \ldots, n$ находятся внутри объединения **кругов Гершгорина** $C_i$, где $C_i$ – окружность на комплексной плоскости с центром в $a_{ii}$ и радиусом
$$r_i = \sum_{j \ne i} |a_{ij}|.$$
Более того, если круги не пересекаются, то они содержат по одному собственному значению внутри каждого круга.
## Доказательство
Сначала покажем, что если матрица $A$ обладает строгим диагональным преобладанием, то есть
$$ |a_{ii}| > \sum_{j \ne i} |a_{ij}|, $$
тогда такая матрица невырождена.
Разделим диагональную и недиагональную часть и получим
$$
A = D + S = D( I + D^{-1}S),
$$
где $\Vert D^{-1} S\Vert_1 < 1$.
Поэтому, в силу теоремы о ряде Неймана, матрица $I + D^{-1}S$ обратима и, следовательно, $A$ также обратима.
Теперь докажем утверждение теоремы от противного:
- если любое из собственных чисел лежит вне всех кругов, то матрица $(A - \lambda I)$ обладает свойством строгого диагонального преобладания
- поэтому она обратима
- это означает, что если $(A - \lambda I) x = 0$, то $x = 0$.
## Демо
```
# Gershgorin circles demo: draw each circle (center a_ii, radius = sum of the
# off-diagonal absolute values in row i) plus the actual eigenvalues of a
# randomly perturbed matrix.
import numpy as np
%matplotlib inline
n = 3
fig, ax = plt.subplots(1, 1)
a = [[5, 1, 1], [1, 0, 0.5], [2, 0, 10]]
#a = [[1.0 / (i - j + 0.5) for i in xrange(n)] for j in xrange(n)]
a = np.array(a)
#a = np.diag(np.arange(n))
# Random perturbation makes the spectrum (generally) complex.
a = a + 2 * np.random.randn(n, n)
#u = np.random.randn(n, n)
#a = np.linalg.inv(u).dot(a).dot(u)
# Circle centers are the diagonal entries (a is real, so yg is all zeros).
xg = np.diag(a).real
yg = np.diag(a).imag
rg = np.zeros(n)
ev = np.linalg.eigvals(a)
for i in range(n):
    # Gershgorin radius: off-diagonal absolute row sum.
    rg[i] = np.sum(np.abs(a[i, :])) - np.abs(a[i, i])
    crc = plt.Circle((xg[i], yg[i]), radius=rg[i], fill=False)
    ax.add_patch(crc)
plt.scatter(ev.real, ev.imag, color='r', label="Eigenvalues")
plt.axis('equal')
plt.legend()
ax.set_title('Eigenvalues and Gershgorin circles')
fig.tight_layout()
```
**Замечание**: Существуют более сложные фигуры, под названием **[овалы Cassini](https://en.wikipedia.org/wiki/Cassini_oval)**, которые содержат спектр
$$
M_{ij} = \{z\in\mathbb{C}: |a_{ii} - z|\cdot |a_{jj} - z|\leq r_i r_j\}, \quad r_i = \sum_{l\not= i} |a_{il}|.
$$
## Степенной метод
- Часто в вычислительной практике требуется найти не весь спектр, а только некоторую его часть, например самое большое или самое маленькое собственные значения.
- Также отметим, что для Эрмитовых матриц $(A = A^*)$ собственные значения всегда действительны (докажите!).
- Степенной метод – простейший метод вычисления **максимального по модулю** собственного значения. Это также первый пример **итерационного метода** и **Крыловского метода**.
- Другие примеры будут далее в курсе
## Степенной метод: детали
Задача на собственные значения
$$Ax = \lambda x, \quad \Vert x \Vert_2 = 1 \ \text{для устойчивости}.$$
может быть записана как итерации с неподвижной точкой, которые называются **степенным методом** и дают максимальное по модулю собственное значение матрицы $A$.
Степенной метод имеет вид
$$ x_{k+1} = A x_k, \quad x_{k+1} := \frac{x_{k+1}}{\Vert x_{k+1} \Vert_2}.$$
и $x_{k+1}\to v_1$, где $Av_1 = \lambda_1 v_1$ и $\lambda_1$ максимальное по модулю собственное значение, и $v_1$ – соответствующий собственный вектор.
На $(k+1)$-ой итерации приближение для $\lambda_1$ может быть найдено следующим образом
$$ \lambda^{(k+1)} = (Ax_{k+1}, x_{k+1}), $$
Заметим, что $\lambda^{(k+1)}$ не требуется для $(k+2)$-ой итерации, но может быть полезно для оценки ошибки на каждой итерации: $\|Ax_{k+1} - \lambda^{(k+1)}x_{k+1}\|$.
Метод сходится со скоростью геометрической прогрессии, с константой $q = \left|\frac{\lambda_{2}}{\lambda_{1}}\right| < 1$, где $\lambda_1>\lambda_2\geq\dots\geq \lambda_n$.
Это означает, что сходимость может быть сколь угодно медленной при близких значениях у $\lambda_1$ и $\lambda_2$.
## Анализ сходимости для $A=A^*$
- Рассмотрим степенной метод более подробно для случая эрмитовой матрицы
- Через несколько слайдов вы увидите, что любая эрмитова матрица диагонализуема, поэтому существует ортонормированный базис из собственных векторов $v_1,\dots,v_n$ такой что $Av_i = \lambda_i v_i$.
- Разложим $x_0$ в этом базисе с коэффициентами $c_i$:
$$ x_0 = c_1 v_1 + \dots + c_n v_n. $$
- Поскольку $v_i$ – собственные векторы, выполнены следующие равенства
$$
\begin{split}
x_1 &= \frac{Ax_0}{\|Ax_0\|} = \frac{c_1 \lambda_1 v_1 + \dots + c_n \lambda_n v_n}{\|c_1 \lambda_1 v_1 + \dots + c_n \lambda_n v_n \|} \\
&\vdots\\
x_k &= \frac{Ax_{k-1}}{\|Ax_{k-1}\|} = \frac{c_1 \lambda_1^k v_1 + \dots + c_n \lambda_n^k v_n}{\|c_1 \lambda_1^k v_1 + \dots + c_n \lambda_n^k v_n \|}
\end{split}
$$
- Получаем следующее выражение
$$
x_k = \frac{c_1}{|c_1|}\left(\frac{\lambda_1}{|\lambda_1|}\right)^k\frac{ v_1 + \frac{c_2}{c_1}\frac{\lambda_2^k}{\lambda_1^k}v_2 + \dots + \frac{c_n}{c_1}\frac{\lambda_n^k}{\lambda_1^k}v_n}{\left\|v_1 + \frac{c_2}{c_1}\frac{\lambda_2^k}{\lambda_1^k}v_2 + \dots + \frac{c_n}{c_1}\frac{\lambda_n^k}{\lambda_1^k}v_n\right\|},
$$
которое сходится к $v_1$ при $\left| \frac{c_1}{|c_1|}\left(\frac{\lambda_1}{|\lambda_1|}\right)^k\right| = 1$ и $\left(\frac{\lambda_2}{\lambda_1}\right)^k \to 0$ если $|\lambda_2|<|\lambda_1|$.
## Что необходимо помнить о степенном методе
- Степенной метод даёт оценку для максимального по модулю собственного числа или спектрального радиуса матрицы
- Одна итерация требует одного умножения матрицы на вектор. Если можно умножить вектор на матрицу за $\mathcal{O}(n)$ (например, она разреженная), тогда степенной метод можно использовать для больших $n$
- Сходимость может быть медленной
- Для грубой оценки максимального по модулю собственного значения и соответствующего вектора достаточно небольшого числа итераций
- Вектор решения лежит в **Крыловском подпространстве** $\{x_0, Ax_0,\dots,A^{k}x_0\}$ и имеет вид $\mu A^k x_0$, где $\mu$ нормировочная постоянная.
## А как найти весь спектр?
**Используем матричное разложение!**
## Для какой матрицы легко найти весь спектр?
Существует класс матриц, для которого собственные числа можно найти очень легко, – это **треугольные матрицы**
$$
A = \begin{pmatrix}
\lambda_1 & * & * \\
0 & \lambda_2 & * \\
0 & 0 & \lambda_3 \\
\end{pmatrix}.
$$
Собственные числа матрицы $A$ – $\lambda_1, \lambda_2, \lambda_3$. Почему?
Потому что детерминант имеет вид
$$ \det(A - \lambda I) = (\lambda - \lambda_1) (\lambda - \lambda_2) (\lambda - \lambda_3). $$
Таким образом, вычисление собственных значений для треугольной матрицы – простая задача. Теперь на помощь приходят унитарные матрицы. Пусть $U$ унитарная матрица, то есть $U^* U = I$. Тогда выполнены следующие равенства
$$ \det(A - \lambda I) = \det(U (U^* A U - \lambda I) U^*) = \det(UU^*) \det(U^* A U - \lambda I) = \det(U^* A U - \lambda I), $$
где мы используем свойства детерминанта от произведения матриц, $\det(AB) = \det(A) \det(B)$.
Это означает, что матрицы $U^* A U$ и $A$ имеют одинаковые характеристические многочлены, и, следовательно, одинаковые собственные значения.
Если мы приведём матрицу $A$ к верхнетреугольному виду $T$ с помощью унитарной матрицы $U$: $U^* A U = T$, мы решили задачу!
Умножим слева и справа на $U$ и $U^*$ соответственно, получим нужное нам разложение:
$$ A = U T U^*. $$
- Это знаменитое **разложение Шура**.
- Напомним, что использование унитарных матриц приводит к устойчивым алгоритмам, таким образом собственные значения вычисляются очень точно.
- Разложение Шура показывает, почему нам нужны матричные разложения: они представляют матрицу в виде произведения трёх матриц подходящей структуры.
## Теорема Шура
**Теорема:** Каждая матрица $A \in \mathbb{C}^{n \times n}$ может быть представлена в виде формы Шура $A = UTU^*$, где $U$ унитарная, а $T$ верхнетреугольная.
**Набросок доказательства**.
1. Каждая матрица имеет как минимум один ненулевой собственный вектор (для корня характеристического многочлена матрица $(A-\lambda I)$ вырождена и имеет нетривиальное ядро). Пусть
$$Av_1 = \lambda_1 v_1, \quad \Vert v_1 \Vert_2 = 1.$$
2. Пусть $U_1 = [v_1,v_2,\dots,v_n]$, где $v_2,\dots, v_n$ любые векторы ортогональные $v_1$. Тогда
$$
U^*_1 A U_1 = \begin{pmatrix}
\lambda_1 & * \\
0 & A_2
\end{pmatrix},
$$
где $A_2$ матрица размера $(n-1) \times (n-1)$. Это называется **блочнотреугольной формой**. Теперь мы можем проделать аналогичную процедуру для матрицы $A_2$ и так далее.
**Замечание**: Поскольку в доказательстве необходимы собственные векторы, оно не является практичным алгоритмом.
## Приложение теоремы Шура
Важное приложение теоремы Шура связано с так называемыми **нормальными матрицами**.
**Определение.** Матрица $A$ называется **нормальной матрицей**, если
$$ AA^* = A^* A. $$
**Q:** Какие примеры нормальных матриц вы можете привести?
Примеры: эрмитовы матрицы, унитарные матрицы.
## Нормальные матрицы
**Теорема**: $A$ – **нормальная матрица**, тогда и только тогда, когда $A = U \Lambda U^*$, где $U$ унитарна и $\Lambda$ диагональна.
**Набросок доказательства:** В одну сторону доказательство очевидно (если разложение существует, необходимо проверить выполнение определения нормальной матрицы).
В другую сторону доказательство более сложное. Рассмотрим форму Шура для матрицы $A$. Тогда $AA^* = A^*A$ означает, что $TT^* = T^* T$.
Сравнив поэлементно матрицы в левой и правой части, сразу становится видно, что единственная верхнетреугольная матрица, для которой такое равенство будет выполнено – это диагональная матрица!
#### Важное следствие
Любая нормальная матрица – **унитарно диагонализуема**. Это означает, что она может быть приведена к диагональному виду с помощью унитарной матрицы $U$. Другими словами, каждая нормальная матрица имеет ортогональный базис из собственных векторов.
## Как вычислить разложение Шура?
- Узнаем это через несколько слайдов :)
## Вариационный принцип для собственных значений
- Во многих задачах необходимо найти максимальный или минимальный собственный вектор и соответствующее ему значение
- Тогда, если $A$ эрмитова матрица, **отношение Релея** определяется как
$$R_A(x) = \frac{(Ax, x)}{(x, x)},$$
и максимальное собственное значение равно максимальному значению $R_A(x)$, аналогично для минимального собственного значения.
- Таким образом, мы можем использовать методы оптимизации для поиска этих экстремальных собственных значений.
А теперь приведём понятие, которое является обобщением собственных чисел
## Спектр и псевдоспектр
- Для динамических систем с матрицей $A$, спектр может много сообщить о поведении системы (например, о её устойчивости)
- Однако для **не нормальных матриц**, спектр может быть неустойчивым относительно малых возмущений матрицы
- Для измерения подобных возмущений была разработана концепция **псевдоспектра**.
## Псевдоспектр
Рассмотрим объединение всех возможных собственных значений для всевозможных возмущений матрицы $A$.
$$\Lambda_{\epsilon}(A) = \{ \lambda \in \mathbb{C}: \exists E, x \ne 0: (A + E) x = \lambda x, \quad \Vert E \Vert_2 \leq \epsilon. \}$$
Для малых $E$ и нормальных $A$ это круги вокруг собственных значений, для не нормальных матриц, структура может сильно отличаться. Подробности можно найти тут: http://www.cs.ox.ac.uk/pseudospectra/
<img src='./pseudospectrum.gif'>
## Возвращаемся к вычислению разложения Шура
- Нужно найти унитарную матрицу $U$ и верхнетреугольную матрицу $T$, такие что для данной матрицы $A$ выполнено
$$ A = U T U^*. $$
## QR алгоритм
- QR алгоритм был предложен в 1961 г. независимо В. Н. Кублановской и J. Francis'ом. Статью про историю этого алгоритма и его авторов можно прочитать [тут](http://www.dm.unibo.it/~guerrini/html/an_09_10/QR_50_years_later.pdf).
- <font color='red'> **Не путайте** QR алгоритм и QR разложение! </font>
- QR разложение – это представление матрицы в виде произведения двух матриц, а QR алгоритм использует QR разложение для вычисления разложения Шура.
## Путь к QR алгоритму
Рассмотрим выражение
$$A = Q T Q^*,$$
и перепишем его в виде
$$
Q T = A Q.
$$
Слева замечаем QR разложение матрицы $AQ$.
Используем его чтобы записать одну итерацию метода неподвижной точки для разложения Шура.
## Вывод QR алгоритма из уравнения неподвижной точки
Запишем следующий итерационный процесс
$$
Q_{k+1} R_{k+1} = A Q_k, \quad Q_{k+1}^* A = R_{k+1} Q^*_k
$$
Введём новую матрицу
$$A_k = Q^* _k A Q_k = Q^*_k Q_{k+1} R_{k+1} = \widehat{Q}_k R_{k+1}$$
тогда аппроксимация для $A_{k+1}$ имеет вид
$$A_{k+1} = Q^*_{k+1} A Q_{k+1} = ( Q_{k+1}^* A = R_{k+1} Q^*_k) = R_{k+1} \widehat{Q}_k.$$
Итак, мы получили стандартную форму записи QR алгоритма.
Финальные формулы обычно записывают в **QRRQ**-форме:
1. Инициализируем $A_0 = A$.
2. Вычислим QR разложение матрицы $A_k$: $A_k = Q_k R_k$.
3. Обновим аппроксимацию $A_{k+1} = R_k Q_k$.
Продолжаем итерации пока $A_k$ не станет достаточно треугольной (например, норма подматрицы под главной диагональю не станет достаточно мала).
## Что известно о сходимости и сложности
**Утверждение**
Матрицы $A_k$ унитарно подобны матрице $A$
$$A_k = Q^*_{k-1} A_{k-1} Q_{k-1} = (Q_{k-1} \ldots Q_1)^* A (Q_{k-1} \ldots Q_1)$$
а произведение унитарных матриц – унитарная матрица.
Сложность одной итерации $\mathcal{O}(n^3)$, если используется QR разложение для общего случая.
Мы ожидаем, что $A_k$ будет **очень близка к треугольной матрице** для достаточно большого $k$.
```
import numpy as np

# Symmetric, Hilbert-like test matrix.
n = 4
a = np.array([[1.0 / (i + j + 0.5) for i in range(n)] for j in range(n)])

# Basic (unshifted) QR algorithm: factor A_k = Q R, then multiply back in
# reverse order, A_{k+1} = R Q. Each step is a unitary similarity, so the
# spectrum is preserved while the iterates approach a triangular matrix.
niters = 200
for _ in range(niters):
    q, rmat = np.linalg.qr(a)
    a = rmat @ q

print('Leading 3x3 block of a:')
print(a[:3, :3])
```
## Сходимость и сложность QR алгоритма
- QR алгоритм сходится от первого диагонального элемента к последнему.
- По крайней мере 2-3 итерации необходимо для определения каждого диагонального элемента матрицы $T$.
- Каждый шаг состоит в вычислении QR разложения и одного произведения двух матриц, в результате имеем сложность $\mathcal{O}(n^3)$.
**Q**: означает ли это итоговую сложность $\mathcal{O}(n^4)$?
**A**: к счастью, нет!
- Мы можем ускорить QR алгоритм, используя сдвиги, поскольку матрица $A_k - \lambda I$ имеет те же векторы Шура (столбцы матрицы $U$).
- Подробности в следующей лекции
## Выводы по способам вычисления разложений
- LU и QR разложения можно вычислить с помощью **прямых** методов за конечное число операций.
- SVD и разложение Шура не могут быть вычислены с помощью прямых методов (почему?). Они могут быть получены с помощью **итерационных методов**
- Однако итерационные методы также имеют сложность $\mathcal{O}(n^3)$ операций благодаря быстрой сходимости.
## Резюме по сегодняшней лекции
- Собственные значения и векторы
- Теорема Гершгорина
- Степенной метод
- Теорема Шура
- Нормальные матрицы
- QR алгоритм
## Анонс следующей лекции
- Ускорение сходимости QR алгоритма
- Методы вычисления SVD
| github_jupyter |
# Using multimetric experiments in SigOpt to identify multiple good solutions
If you have not yet done so, please make sure you are comfortable with the content in the [intro](multimetric_intro.ipynb) notebook.
Below we create the standard SigOpt [connection](https://sigopt.com/docs/overview/python) tool. If the `SIGOPT_API_TOKEN` is present in the environment variables, it is imported; otherwise, you need to copy and paste your key from the [API tokens page](https://sigopt.com/tokens).
```
import os
import numpy
from time import sleep
from matplotlib import pyplot as plt
%matplotlib inline
# Matplotlib stuff for generating plots
# Shared plot styles: red '+' for Pareto-efficient points, faint black dots
# for dominated points.
efficient_opts = {'linewidth': 0, 'marker': '+', 'color': 'r', 'markersize': 10, 'markeredgewidth': 2}
dominated_opts = {'linewidth': 0, 'marker': '.', 'color': 'k', 'alpha': .4}
from sigopt.interface import Connection
# Prefer the API token from the environment; otherwise it must be pasted in
# manually (see the API tokens page linked above).
if 'SIGOPT_API_TOKEN' in os.environ:
    SIGOPT_API_TOKEN = os.environ['SIGOPT_API_TOKEN']
else:
    SIGOPT_API_TOKEN = None
# Fail fast: nothing below works without a token.
assert SIGOPT_API_TOKEN is not None
conn = Connection(client_token=SIGOPT_API_TOKEN)
```
One natural situation for a multimetric experiment is in finding a second answer to an optimization problem. Given a function $f$ and domain $\Omega$ on which the maximum of $f$ occurs at $x^*$, the search for other points which have high values but are at least some distance away can be phrased as
\begin{align}
\text{value}:&\quad \max_{x\in\Omega} f(x) \\
\text{distance}:&\quad \max_{x\in\Omega} \|x - x^*\| \\
\end{align}
Here the norm is presumed to be the 2-norm, but could actually be any measurement of distance; that ambiguity is especially valuable in the situation where categorical parameters are present or a custom definition of distance is preferred.
### Finding $x^*$, the solution to the standard optimization of $f$
We start by defining a multimodal function $f$ which has multiple local optima below. The associated meta is for a standard, single metric experiment.
```
def multimodal_function(x1, x2):
    """Smooth 2-D test surface: the sum of seven Gaussian bumps on [-1, 1]^2.

    Each bump is 0.5 * exp(-s * (c1*(x1 - m1)^2 + c2*(x2 - m2)^2)); the
    differing centers and scales create several comparable local maxima.
    Works elementwise on numpy arrays as well as on scalars.
    """
    bumps = [
        # (sharpness s, c1, m1, c2, m2)
        (10, 1.0, -.8, .3, -.6),
        (9, .4, -.7, .4, .4),
        (11, .2, .6, .5, -.5),
        (11, .6, 0.0, .5, -.8),
        (12, .4, .1, .7, .8),
        (13, .8, 0.0, .7, 0.0),
        (8, .3, .8, .6, .3),
    ]
    total = 0
    for s, c1, m1, c2, m2 in bumps:
        total = total + .5 * numpy.exp(-s * (c1 * (x1 - m1) ** 2 + c2 * (x2 - m2) ** 2))
    return total
# Experiment definition for the plain single-metric maximization of the
# surface above; the optimum it finds becomes x_star for the second stage.
multimodal_first_solution_meta = {
    'name': 'SigOpt Multimetric Demo - Single Metric Optimization (python)',
    'project': 'sigopt-examples',
    # One metric only: maximize the surface value.
    'metrics': [{'name': 'multimodal_function_value', 'objective': 'maximize'}],
    'parameters': [
        {'name': 'x1', 'bounds': {'min': -1.0, 'max': 1.0}, 'type': 'double'},
        {'name': 'x2', 'bounds': {'min': -1.0, 'max': 1.0}, 'type': 'double'},
    ],
    'type': 'offline',
    # Total number of observations the loop below will report.
    'observation_budget': 40,
}
```
We can run the initial optimization to find this $x^*$ value which is the maximum of the function. The `sleep(2)` command helps simulate an actual experiment, where the cost of creating an observation is significantly greater than the function we are studying here.
```
# Standard SigOpt loop: request a suggestion, evaluate the function at it,
# report the observation, and refresh the experiment state.
experiment = conn.experiments().create(**multimodal_first_solution_meta)
while experiment.progress.observation_count < experiment.observation_budget:
    suggestion = conn.experiments(experiment.id).suggestions().create()
    sleep(2)  # simulate an evaluation that costs real time
    value = multimodal_function(**suggestion.assignments)
    conn.experiments(experiment.id).observations().create(suggestion=suggestion.id, value=value)
    experiment = conn.experiments(experiment.id).fetch()
```
The solution to this problem $x^*$ must be extracted for identifying a second solution.
```
# The single best observation from the first experiment gives x_star, the
# incumbent solution the multimetric search must stay away from.
initial_optimization_best_assignments = conn.experiments(experiment.id).best_assignments().fetch()
x_star = initial_optimization_best_assignments.data[0].assignments
```
This contour plot shows that there is an amount of complexity in this function, and that choosing a diverse portfolio of solutions may be difficult. The initial solution $x^*$, as determined from the optimization above, is also plotted.
```
# Contour plot of the surface on a 40x40 grid, with the incumbent optimum
# x_star marked by a black star.
xplt = numpy.linspace(-1, 1, 40)
X1, X2 = numpy.meshgrid(xplt, xplt)
Y = multimodal_function(x1=X1, x2=X2)
plt.contour(X1, X2, Y)
plt.plot(x_star['x1'], x_star['x2'], '*k', markersize=20)
plt.xlabel('$x_1$', fontsize=15)
plt.ylabel('$x_2$', fontsize=15);
```
### Using the initial solution $x^*$ to search for a second solution
Now that we have this initial solution, we can define our multimetric experiment. Note that the distance we are using is the standard 2-norm. The multimetric function returns a list of dictionaries, one each per metric to be optimized.
```
def distance_function(assignments, x_star):
    """Euclidean (2-norm) distance between an assignment point and x_star."""
    dx = assignments['x1'] - x_star['x1']
    dy = assignments['x2'] - x_star['x2']
    return numpy.sqrt(dx ** 2 + dy ** 2)
def multimetric_value_distance_function(assignments, x_star):
    """Evaluate one suggested point against both metrics.

    Returns the list-of-dicts payload SigOpt expects for a multimetric
    observation: the surface value at the point, and the point's distance
    from the incumbent solution x_star.
    """
    return [
        {'name': 'function value', 'value': multimodal_function(**assignments)},
        {'name': 'distance from x_star', 'value': distance_function(assignments, x_star)},
    ]
# Two metrics this time: keep the surface value high AND stay far from x_star.
multimetric_second_solution_meta = {
    'name': 'SigOpt Multimetric Demo - Search for Second Solution (python)',
    'metrics': [
        {'name': 'function value', 'objective': 'maximize'},
        {'name': 'distance from x_star', 'objective': 'maximize'},
    ],
    'parameters': [
        {'name': 'x1', 'bounds': {'min': -1.0, 'max': 1.0}, 'type': 'double'},
        {'name': 'x2', 'bounds': {'min': -1.0, 'max': 1.0}, 'type': 'double'},
    ],
    'type': 'offline',
    'observation_budget': 100,
}
# Same suggest -> evaluate -> observe loop as before, but reporting a list of
# values (one entry per metric) instead of a single scalar.
experiment = conn.experiments().create(**multimetric_second_solution_meta)
while experiment.progress.observation_count < experiment.observation_budget:
    suggestion = conn.experiments(experiment.id).suggestions().create()
    sleep(2)  # simulate a non-trivial evaluation cost
    values = multimetric_value_distance_function(suggestion.assignments, x_star)
    conn.experiments(experiment.id).observations().create(suggestion=suggestion.id, values=values)
    experiment = conn.experiments(experiment.id).fetch()
```
Extracting the solution to this multimetric optimization problem yields an array of points, each of which is [Pareto efficient](https://www.sigopt.com/docs/overview/pareto_efficiency). Plotting the metric values associated with these efficient points can help provide some guidance as to what possible solutions are available, and which might be preferred in an actual application.
Recall that metric evaluations are organized in **alphabetical order** when returned from SigOpt, which may differ from the order in which they were originally defined in the experiment. The loop below recovers the values in the same order in which they were defined initially.
We also, again, produce some random data to explore the full feasible domain.
```
# For a multimetric experiment, best_assignments() returns every
# Pareto-efficient observation rather than a single winner.
pareto_efficient_results = conn.experiments(experiment.id).best_assignments().fetch()
efficient_points = numpy.empty((pareto_efficient_results.count, 2))
efficient_values = numpy.empty((pareto_efficient_results.count, 2))
for k, data in enumerate(pareto_efficient_results.data):
    efficient_points[k, :] = [data.assignments['x1'], data.assignments['x2']]
    # Values come back keyed by metric name (in alphabetical order); re-key
    # by name so the columns follow the order the metrics were defined in.
    dv = {d.name: d.value for d in data.values}
    efficient_values[k, :] = [dv[m['name']] for m in multimetric_second_solution_meta['metrics']]
# Uniform random sample of the domain, used to visualize the dominated region.
rand_pts = numpy.random.uniform(
    [p['bounds']['min'] for p in multimetric_second_solution_meta['parameters']],
    [p['bounds']['max'] for p in multimetric_second_solution_meta['parameters']],
    (1000, 2)
)
random_values = numpy.empty((len(rand_pts), 2))
for k, pt in enumerate(rand_pts):
    random_values[k, :] = [v['value'] for v in multimetric_value_distance_function({'x1': pt[0], 'x2': pt[1]}, x_star)]
```
The graph on the left again shows the SigOpt generated Pareto frontier with the red + signs.
As we can see for the contour plot associated with this multimetric experiment on the right, multiple regions of solutions emerge which have high values but are at least some distance away. This contour plot is only feasible for low dimensional problems -- for higher dimensional problems a different analysis of the efficient points will be necessary.
```
# Left: metric space -- random points (dots) vs the Pareto frontier (+).
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
ax1.plot(random_values[:, 0], random_values[:, 1], **dominated_opts)
ax1.plot(efficient_values[:, 0], efficient_values[:, 1], **efficient_opts)
ax1.set_xlabel('function value', fontsize=15)
ax1.set_ylabel('distance to $x^*$', fontsize=15)
# Right: parameter space -- surface contours, x_star (star), and where the
# Pareto-efficient suggestions actually live.
xplt = numpy.linspace(-1, 1, 40)
X1, X2 = numpy.meshgrid(xplt, xplt)
Y = multimodal_function(x1=X1, x2=X2)
ax2.contour(X1, X2, Y)
ax2.plot(x_star['x1'], x_star['x2'], '*k', markersize=20)
ax2.plot(efficient_points[:, 0], efficient_points[:, 1], **efficient_opts)
ax2.set_xlabel('$x_1$', fontsize=15)
ax2.set_ylabel('$x_2$', fontsize=15);
```
| github_jupyter |
# Recurrent Neural Network to generate (predict) text data using Keras
* LSTM (Long Short-Term Memory) Network
* Code based on this article https://towardsdatascience.com/recurrent-neural-networks-by-example-in-python-ffd204f99470
* Data: full text of Alice in Wonderland taken from https://archive.org/stream/alicesadventures19033gut/19033.txt
```
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Embedding, LSTM, Masking
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.utils import shuffle, class_weight
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

# SOURCE: https://archive.org/stream/alicesadventures19033gut/19033.txt
# Use a context manager so the file handle is closed deterministically
# (the original bare open().read() leaked the handle).
with open('Alice_in_Wonderland.txt') as f:
    text = f.read()
# Detach sentence-final periods so '.' becomes its own token.
text = text.replace('.', ' .')
#text = text.replace(',', ' ,')
# Flatten line breaks; the model sees one continuous word stream.
text = text.replace('\n', ' ')
#print(text[:5000])
text = text.split(' ')
#print(text[500:550])
```
# Use Tokenizer class to turn text into numeric data
```
# set num_words to some int value to reduce size of label array, and number of params in last layer of model
# The filters string deliberately omits '.' so periods survive as tokens.
tokenizer = Tokenizer(num_words=None, char_level=False, split=' ', filters=',!"#$%&()*+-/:;<=>?@[\\]^_`{|}~\t\n')
tokenizer.fit_on_texts(text)
sequences = tokenizer.texts_to_sequences(text)
# texts_to_sequences returns one (possibly empty) list per input word, so
# flatten the nested lists into a single integer sequence.
flat_seq = []
for sub_arr in sequences:
    for item in sub_arr:
        flat_seq.append(item)
seq = np.array(flat_seq, dtype=np.int32)
print(seq)
print(len(seq), 'total words')
print(len(tokenizer.index_word), ' words in the dictionary') # number of unique words
print([tokenizer.index_word[i] for i in range(1,10)]) # first nine words in dictionary (indexing starts at 1)
```
# Prepare training data and labels
* Features (model input): 50 consecutive words from the text
* Labels (model output): The next word in that sequence
```
training_len = 50

# Slide a (training_len + 1)-word window over the token stream: the first
# training_len words of each window are one input sample, and the final
# word is the label the model must predict.
windows = [seq[start:start + training_len + 1]
           for start in range(len(seq) - training_len)]
features = [w[:-1] for w in windows]  # all but the last word
labels = [w[-1] for w in windows]     # the word being predicted

# hold on to sorted (text-order) data/labels for text generation after training
X_sorted, y_sorted = np.array(features, dtype=np.float32), np.array(labels, dtype=np.float32)
# use shuffled data for training and testing
X, y = shuffle(X_sorted, y_sorted)
print(X.shape, y.shape)
print(X[0], y[0])
print([tokenizer.index_word[i] for i in X[0]],
      tokenizer.index_word[y[0]])
```
# One-Hot encoding of labels, Train/Test split
* One-hot scheme: 3 == [0 0 0 1 0 ... 0]
* Binary array represents class probabilities (each unique word is a class)
* NOTE: the one-hot encoding will take up significantly more memory than the integer representation of labels. To reduce the size of the encoded labels, set the `num_words` parameter in the Tokenizer object. This will also reduce the number of parameters in the final layer
```
# + 1 because word dictionary indexing starts at 1
n_words = len(tokenizer.index_word) + 1
# one-hot encoding for labels
y_onehot = to_categorical(y, n_words)
#print(list(y_onehot[0]).index(1), y[0]) # these two should be the same int value
#print(y_onehot.shape)
# 80/20 train/test split on the already-shuffled data.
X_train, X_test, y_train, y_test = train_test_split(X, y_onehot, test_size=0.2)
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
```
# Handling label imbalance by creating a dictionary of class weights
* The most common words are much more common than the vast majority of less common words. This leads to a network which is heavily biased towards those words, and only produces them
* To combat this, we can assign more weight during training to instances whose labels are less common (not when testing/predicting)
* Using keras, we can pass in a `class_weight` dictionary to the `fit()` method
```
# Inverse-frequency class weights: rare words get proportionally larger
# weight so the loss is not dominated by the handful of very common words.
# Arguments are passed by keyword because recent scikit-learn versions make
# them keyword-only (positional calls raise TypeError there).
classes = np.unique(y)
weights = class_weight.compute_class_weight(class_weight='balanced', classes=classes, y=y)
# Keras' fit(class_weight=...) expects a {class_index: weight} mapping, not
# a bare array, so convert before handing it to training.
class_weights = {int(c): w for c, w in zip(classes, weights)}
print(class_weights)
```
# Create Model: LSTM (Long Short-Term Memory) Network
* Wikipedia: https://en.wikipedia.org/wiki/Long_short-term_memory
* Mathematical explanation of LSTM: https://colah.github.io/posts/2015-08-Understanding-LSTMs/
* LSTMs by example: https://towardsdatascience.com/recurrent-neural-networks-by-example-in-python-ffd204f99470
```
# LSTM language model: embedding -> LSTM -> dense -> softmax over vocabulary.
model = Sequential()
# https://keras.io/layers/embeddings/
# can reduce model complexity by pre-computing embedding matrix, setting trainable=False
model.add(Embedding(input_dim=n_words, input_length=training_len, output_dim=100,
                    trainable=True, mask_zero=True))
# https://keras.io/layers/core/#Masking
#model.add(Masking(mask_value=0.0))
# https://keras.io/layers/recurrent/
model.add(LSTM(512, return_sequences=False, dropout=0.5, recurrent_dropout=0.1))
# fully-connected layer with dropout
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
# output layer - softmax activation for predicted class probabilities
model.add(Dense(n_words, activation='softmax'))
# Compile the model. Same loss function as with MNIST model (multi-class classification score)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
print(model.summary())
# NOTE(review): cb is defined but never passed below (callbacks=[]), so early
# stopping is effectively disabled -- pass callbacks=cb to enable it.
cb = [EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=False)]
with tf.device('/gpu:0'):
    hist = model.fit(X_train, y_train, batch_size=128, epochs=30,
                     class_weight=class_weights, validation_split=0.2,
                     callbacks=[])
scores = model.evaluate(X_test, y_test, batch_size=128, verbose=0)
# bugged output? Displays way too many '=' in progress bar when verbose=1
print(scores)
# Training curves: accuracy, then loss, train vs validation.
plt.style.use('seaborn')
plt.plot(hist.history['accuracy'])
plt.plot(hist.history['val_accuracy'])
plt.legend(['train accuracy', 'val accuracy'])
plt.show()
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.legend(['train loss', 'val loss'])
plt.show()
```
# Use trained model to predict next word
```
# Pick a random seed position and predict the next `training_len` words.
rand_ind = np.random.randint(len(X_sorted)-training_len)
#rand_ind = 0 # uncomment to specify a start index
seed_seq = X_sorted[rand_ind]
# np.argmax() to go from binary label arrays to integer class labels
actual_seq = y_sorted[rand_ind : rand_ind+training_len]
# Fixed: dtype must be integer — np.empty() defaults to float64, and float
# values would fail the tokenizer.index_word dict lookups below.
pred_seq = np.empty((training_len,), dtype=int)
for i in range(training_len):
    pred_seq[i] = np.argmax(model.predict(np.expand_dims(X_sorted[rand_ind+i, :], axis=0))[0])
print('Seed Sequence: ')
print('\"' + ' '.join([tokenizer.index_word[i] for i in seed_seq]) + '\"')
print('\nPredicted next 50 words: ')
print('\"' + ' '.join([tokenizer.index_word[i] for i in pred_seq]) + '\"')
print('\nActual next 50 words: ')
print('\"' + ' '.join([tokenizer.index_word[i] for i in actual_seq]) + '\"')
```
# Conclusion: NLP is hard :(
| github_jupyter |
```
import tensorflow as tf
import keras
import math
import numpy as np
import matplotlib.pyplot as plt
import time
import pickle as pkl
# Fix the RNG so the synthetic data is reproducible.
seed = 4
np.random.seed(seed)
# Latent "cell time" for 1000 cells, plus per-gene phase/offset/speed parameters.
cell_time = np.random.uniform(-2 * np.pi, 2 * np.pi, [1000, 1])
gene01_phase = np.random.uniform(0, 2 * np.pi, [1, 500])
gene01_time = np.random.normal(0, 0.1, [1, 500])
gene01_speed = np.random.uniform(0.5, 1.5, [1, 500])
# NOTE(review): 800 genes here vs. 500 everywhere else — confirm this is intended.
gene0_phase = np.random.uniform(0, 2 * np.pi, [1, 800])
gene1_time = np.random.normal(0, 0.1, [1, 500])
gene1_speed = np.random.uniform(0.5, 1.5, [1, 500])
# Synthetic expression matrices via broadcasting: shape (1000, n_genes) each.
gene0 = np.sin(cell_time - gene0_phase)
gene1 = np.tanh(gene1_speed * (cell_time - gene1_time))
gene01 = np.sin(cell_time - gene01_phase) + np.tanh(gene01_speed * (cell_time - gene01_time))
# Quick visual check: first 10 genes against cell time.
for i in range(10):
    plt.scatter(x=cell_time, y=gene0[:, i], s=1)
gene0.shape
# Autoencoder: genes -> two tanh dense layers -> 1-d "neck" -> three
# phase-shifted sin() taps -> linear reconstruction of all genes.
y = keras.Input(shape=(gene0.shape[1],), name='input')
x = keras.layers.Dense(units=50,
                       kernel_regularizer=keras.regularizers.l2(0.0001)
                       )(y)
x = keras.layers.Activation(activation='tanh')(x)
x = keras.layers.Dense(units=30,
                       kernel_regularizer=keras.regularizers.l2(0.0001)
                       )(x)
x = keras.layers.Activation(activation='tanh')(x)
x = keras.layers.Dense(units=1,
                       use_bias=False,
                       kernel_regularizer=keras.regularizers.l2(0.00001),
                       kernel_initializer=keras.initializers.RandomNormal(mean=0.0, stddev=4.0),
                       name='neck'
                       )(x)
# Three sin() taps 120 degrees apart encode the scalar neck as a point on a circle.
x0 = keras.layers.Lambda(lambda x: keras.backend.sin(x), name='phase0')(x)
x1 = keras.layers.Lambda(lambda x: keras.backend.sin(x + math.pi * 2 / 3), name='phase1')(x)
x2 = keras.layers.Lambda(lambda x: keras.backend.sin(x + math.pi * 4 / 3), name='phase2')(x)
x = keras.layers.Concatenate(name='stack')([x0, x1, x2])
x = keras.layers.Dense(gene0.shape[1],
                       use_bias=False,
                       kernel_regularizer=None,
                       kernel_initializer=keras.initializers.RandomNormal(mean=0.0, stddev=1.0),
                       name='reconstructed'
                       )(x)
model = keras.Model(outputs=x, inputs=y)
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))
model.compile(loss='mean_squared_error',
              optimizer=keras.optimizers.Adam(1e-3))
class MyCallback(keras.callbacks.Callback):
    """Lightweight progress logger.

    Records wall-clock time and loss after every epoch in `self.rec`
    (pickled later for cross-seed comparison) and prints a status line
    every `interval` epochs.
    """

    def __init__(self, interval = 100):
        self.cnt = 0                           # epochs completed so far
        self.interval = interval               # print frequency, in epochs
        self.start_time = 0                    # set when training begins
        self.rec = {'time': [], 'loss': []}    # per-epoch history

    def on_train_begin(self, logs=None):
        self.start_time = time.time()

    def on_epoch_end(self, batch, logs={}):
        self.cnt += 1
        elapsed = time.time() - self.start_time
        self.rec['time'].append(elapsed)
        self.rec['loss'].append(logs.get('loss'))
        # Periodic one-line status report.
        if self.cnt % self.interval == 0:
            print(f'epoch: {self.cnt}/{self.params["epochs"]}, loss: {logs.get("loss") : .4f}, total train time: {self.rec["time"][-1] : .2f}s')
my_callback = MyCallback()
# Long silent fit; the callback prints progress every 100 epochs.
history = model.fit(gene0, gene0, epochs=3000, verbose=0, callbacks=[my_callback])
#model.compile(loss='mean_squared_error',
#              optimizer=keras.optimizers.Adam(1e-4, epsilon=1e-4))
#history = model.fit(gene0, gene0, epochs=5000, verbose=2)
model.evaluate(x=gene0, y=gene0)
model.get_layer('neck')
# Fetch the 1-d neck activation and the reconstruction for every cell.
res = keras.backend.function([model.layers[0].input],
                             [model.get_layer('neck').output, model.get_layer('reconstructed').output]
                             )([gene0])
# Neck value mod 2*pi plotted against genes and against the latent cell time.
for i in range(10):
    plt.scatter(x=res[0] % (2 * np.pi), y=gene0[:, i], s=1)
plt.scatter(res[0] % (2 * np.pi), cell_time % (2 * np.pi), s = 1)
# Persist the training curves for later comparison across seeds.
with open('comp-3-seed-%d.pkl' % seed, 'wb') as file:
    pkl.dump(my_callback.rec, file)
```
| github_jupyter |
# <center> Анализ данных на Python </center>
# Семинар 5. Словари да множества
Поговорим про словари, множества, хэш-таблицы и другие разные штуки!
# 1. Что такое Хэш-таблица?
Вы - продавец в магазине. Когда покупатель что-то у вас покупает, вы проверяете стоимость товара по книге.
```
# Price book as a plain list of (product, price) pairs.
book = [('яйца', 60), ('чай', 16), ('кофе', 35), ('лён', 20),
        ('петрушка', 15), ('торт', 10), ('арбуз', 60), ('йогурт', 35),
        ('соя', 20), ('ролтон', 42), ('бобы', 10), ('глаз дракона', 2)]
```
Как найти сколько стоят бобы? Листать книгу, читать в ней каждую строчку до тех пор пока мы не найдём ответ.
```
# Linear scan: O(n) comparisons in the worst case.
x = 'бобы'
for item in book:
    if item[0] == x:
        print(item[1])
```
__Вопрос:__ Если у нас всего $n$ продуктов, сколько действий нам надо будет сделать в худшем случае?
Как-то долговато. Давайте попробуем ускориться. Одной из идей ускорения может быть сортировка. Если отсортировать все продукты по их названию, искать будет легче.
```
# Sort by product name so binary search becomes possible.
book = sorted(book, key=lambda w: w[0])
book
```
Будем открывать книгу в середине. Там буква "п". Нам нужна буква "б", она левее буквы "п". Откроем середину левой части книги, там буква "й", нам нужно еще левее, снова откроем середину. Будем так делать до тех пор, пока не найдём бобы. Такая процедура будет работать быстрее, она называется __бинарный поиск.__
```
# Try writing such a binary search yourself at leisure :)
# ┬─┬ ノ( ゜-゜ノ) Write the code myself?
# (╯° □°)╯︵ ┻━┻
```
__Вопрос:__ Если у нас всего $n$ продуктов, сколько действий нам надо будет сделать в худшем случае?
Всё ещё долго. А можно ещё быстрее? Конечно можно. Давайте наймём помощницу по имени Алиса и заставим её выучить всю книгу с продуктами и ценами наизусть. Тогда мы сможем задавать ей вопрос и моментально будем получать ответ на него. Просто чудо, а не помощница! Где бы взять такую...
Попробуем создать её из тех структур данных, которые мы уже знаем. А именно: из массивов. Для этого будем использовать __хеш-функцию.__ Хеш-функция - это такая функция, которая на вход получает строку и возвращает число. Она и поможет нам создать свою Алису.
```
x = [0]*33 # make an empty array: one slot per letter of the Russian alphabet
x[:10]
```
Пусть наша хэш-функция возвращает номер первой буквы слова в алфавите.
```
def simple_hash(x):
    """Toy hash: position of the word's first letter in the Russian alphabet."""
    return 'абвгдеёжзийклмнопрстуфхцчшщъыьэюя'.index(x[0])

simple_hash('яйца')
```
Положим в массив $x$ на $32$ позицию цену на яйца. По аналогии сделаем со всеми продуктами и их ценами.
```
# Store each price at the slot selected by the hash of its product name.
for food, price in book:
    x[simple_hash(food)] = price
x
```
Хэш-функция в нашем примере связывает каждое название с одним индексом. На месте этого индекса в векторе $x$ и лежит нужная цена. __Поздравляю, мы создали свою Алису!__
А теперь к нам приходит клиент и спрашивает: "А сколько стоит торт?" Мы легко можем ответить на его вопрос:
```
# O(1) price lookup via the hash function.
ind = simple_hash('торт')
x[ind]
```
И мы делаем это моментально, без перебора как в первых двух случаях.
__Вопросы:__
- Понятное дело, что на практике хеш-функции устроены сложнее, у той функции, которую мы тут использовали есть куча проблем: какие это проблемы?
- Как можно попытаться решить эти проблемы?
В python хэш-таблицы реализованы в виде словарей и множеств. Давайте с ними познакомимся.
# 2. Работаем со словарями
```
product = ['яйца', 'чай', 'кофе', 'банан', 'петрушка', 'сода',
           'яблочко', 'йогурт', 'соя', 'беозар', 'бобы', 'печень дракона']
price = [60, 16, 35, 20, 15, 10, 60, 35, 20, 42, 10, 2]
zip(product, price)
list( zip(product, price) )
# Build the dict (hash table) from the two parallel lists.
book_dict = dict(zip(product, price))
book_dict
book_dict['яйца'] = 70 # prices are rising!
book_dict
book_dict['дефлопе'] = 500 # hooray, a new product!
book_dict
t = book_dict.pop('чай') # the tea has run out :(
book_dict
t
# NOTE: raises KeyError — the key was just popped ("sir, I keep telling you we have no tea")
book_dict['чай']
book_dict.get('чай', 'нет такого у нас') # if the key is missing, return the second value
'чай' in book_dict # miss, do you have tea?
'кофе' in book_dict # and coffee?
'танцы' in book_dict # maybe a dance?
book_dict.keys()
book_dict.values()
book_dict.items()
for k, v in book_dict.items():
    print(f"Продукт {k} стоит {v} рублей")
```
Ещё немного магии:
```
[i for i in range(2, 20, 3)] # list comprehension
{i: i**3 for i in range(2, 20, 3)} # dict comprehension
# Generator expressions and ranges are lazy, so the REPL shows the object, not its items.
(i for i in range(2, 20, 3)) # doesn't display (((((
range(1, 10) # doesn't display either
list(range(1,10)) # displayed !!!
```
__Вопрос:__ а почему он себя так ведёт капризно?
# 3. Множества
Это то же самое, что и словари, но без значений. Только ключи.
```
fruits = {"banana", "apple", "orange", "orange"}
fruits # where did one orange go?! (sets keep only unique elements)
fruits = ["banana", "apple", "orange", "orange", "apple", "apple", "apple"]
set(fruits)
my_fruits = {"apple", "orange"}
your_fruits = {"orange", "banana", "pear"}
my_fruits | your_fruits # set union
my_fruits & your_fruits # set intersection
my_fruits - your_fruits # set difference
'orange' in my_fruits
your_fruits.remove("banana") # removal
your_fruits
your_fruits.add("banana") # insertion
your_fruits
```
# 4. Задачки
```
def test_problem_13(func, test_data):
for inputs, true_answer in test_data:
answer = func(*inputs)
assert answer == true_answer, f'Expected {true_answer}, got {answer}. Input: {inputs}'
print("OK!")
def test_problem(func, test_data):
for inputs, true_answer in test_data:
answer = func(inputs)
assert answer == true_answer, f'Expected {true_answer}, got {answer}. Input: {inputs}'
print("OK!")
```
## Задачка 1: [камешки](https://leetcode.com/problems/jewels-and-stones/)
У Дори в глубинах океана есть кучка камней. Часть камней из этой кучки драгоценные. Недавно она пересчитала все драгоценные и забыла сколько их. Чтобы больше не забывать, Дори решила написать на питоне функцию, которая будет считать камни за неё.
Напишите на python функцию, которая принимает на вход список драгоценных камней $J$ и список камней, которые есть у Дори $S$. На выход функция возвращает число драгоценных камней в запасах Дори.
__Примеры:__
> Input: J = "aA", S = "aAAbbbb" <br />
Output: 3
Тут драгоценными считаются камни a и A. У Дори есть камни aAAbbbb. Среди них три драгоценных, aAA.
>Input: J = "z", S = "ZZ" <br />
Output: 0
Драгоценными мы считаем только камень z. У Дори два камня, оба обычные.
```
def numJewelsInStones(J, S):
    """Return how many stones in S are jewels.

    J: string of jewel characters (case-sensitive).
    S: string of stones owned.
    """
    # A set gives O(1) membership tests -> O(|J| + |S|) overall.
    jewels = set(J)
    return sum(stone in jewels for stone in S)
# (inputs, expected) pairs from the problem statement above.
NUM_JEWELS_IN_STONES_TESTS_DATA = [
    (("aA", "aAAbbbb"), 3),
    (("z","ZZ"),0)
]
test_problem_13(numJewelsInStones, NUM_JEWELS_IN_STONES_TESTS_DATA)
```
__Пара слов об эффективности:__
```
from random import random
# Membership test: a list is O(n) per lookup, a set is O(1) on average.
n_obs = 10**6
mylist = [random() for _ in range(n_obs)]
myset = set(mylist)
%%timeit
0.5 in mylist # list
%%timeit
0.5 in myset # set
```
## Задачка 2: слова
Напишите функцию `stats(s)`, принимающую на вход строку `s`, содержащую слова, разделённые пробелами, и находящую самое часто встречающееся слово. Если такое слово одно, верните его, если их несколько, верните список, отсортированный в лексикографическом порядке.
Например: `stats("hello hello world")` должна вернуть строчку `"hello"`, а `stats("a a b b c")` должна вернуть список `['a','b']`.
```
def stats(s):
    """Return the most frequent word in `s` (words are space-separated).

    If a single word is the most frequent, return it as a string; if several
    words tie for the maximum count, return them as a lexicographically
    sorted list. An empty/whitespace-only string yields an empty list.
    """
    counts = {}
    for word in s.split():
        counts[word] = counts.get(word, 0) + 1
    if not counts:
        return []
    best = max(counts.values())
    winners = sorted(word for word, cnt in counts.items() if cnt == best)
    return winners[0] if len(winners) == 1 else winners
# (input string, expected result); a tie returns a sorted list.
STATS_TESTS_DATA = [
    ("hello hello world", "hello"),
    ("a a b b c", ['a','b'])
]
test_problem(stats, STATS_TESTS_DATA)
```
## Задачка 3: сумма двух
Дан массив из целых чисел `nums` и ещё одно целое число `target`. Найдите все такие пары чисел из массива `nums`, которые в сумме дают число `target`. Выведите на экран их индексы. Одно и то же число использовать при подсчёте суммы дважды нельзя. Попытайтесь решить эту задачу максимально эффективно.
```
def two_sum_fast(nums, target):
    """Return all index pairs (i, j), i < j, with nums[i] + nums[j] == target.

    Each element is used at most once per pair (an element cannot pair with
    itself, but duplicate values at different indices may pair). Runs in
    O(n) expected time using a value -> indices hash map.
    """
    seen = {}    # value -> indices already scanned
    pairs = []
    for j, num in enumerate(nums):
        complement = target - num
        for i in seen.get(complement, []):
            pairs.append((i, j))
        seen.setdefault(num, []).append(j)
    return pairs
# (inputs, expected index pairs): indices i < j with nums[i] + nums[j] == target.
TWO_SUM_TESTS_DATA = [
    (([2, 7, 11, 15], 9), [(0, 1)]),
]
test_problem_13(two_sum_fast, TWO_SUM_TESTS_DATA)
```
## Задачка 4: магазин
Вам предстоит обработать базу данных о продажах некоторого интернет-магазина. База данных представляет собой набор кортежей, в каждом кортеже три элемента: (Покупатель, товар, количество), где Покупатель — имя покупателя (строка без пробелов), товар — название товара (строка без пробелов), количество — количество приобретенных единиц товара.
Создайте словарь, ключами которого являются имена покупателей, а значениями — словари, ключами которых являются названия товаров, а значениями — количество единиц этого товара, которые купил этот покупатель.
Напишите функцию `aggregate`, принимающую набор кортежей из базы данных и возвращающую сводную информацию в виде словаря.
```
def aggregate(names):
    """Build {customer: {item: total quantity}} from (customer, item, qty) tuples.

    Quantities for repeated (customer, item) pairs are summed.
    """
    summary = {}
    for customer, item, qty in names:
        per_customer = summary.setdefault(customer, {})
        per_customer[item] = per_customer.get(item, 0) + qty
    return summary
# Two scenarios: simple per-customer totals, and repeated (customer, item)
# pairs whose quantities must be summed.
AGG_TESTS_DATA = [
    ([("Petrov","pens",5), ("Ivanov","marker",3), ("Ivanov","paper",7),
      ("Petrov","envelope",20), ("Ivanov","envelope",5)],
     {'Petrov': {'pens': 5, 'envelope': 20},
      'Ivanov': {'marker': 3, 'paper': 7, 'envelope': 5}}),
    ([("Ivanov","aaa",1), ("Petrov","aaa",2), ("Sidorov","aaa",3), ("Ivanov","aaa",6),
      ("Petrov","aaa",7), ("Sidorov","aaa",8), ("Ivanov","bbb",3), ("Petrov","bbb",7),
      ("Sidorov","aaa",345), ("Ivanov","ccc",45), ("Petrov","ddd",34),
      ("Ziborov","eee",234), ("Ivanov","aaa",45)],
     {'Ivanov': {'aaa': 52, 'bbb': 3, 'ccc': 45},
      'Petrov': {'aaa': 9, 'bbb': 7, 'ddd': 34},
      'Sidorov': {'aaa': 356},
      'Ziborov': {'eee': 234}})
]
test_problem(aggregate, AGG_TESTS_DATA)
```
<img src="https://steemitimages.com/0x0/https://media.makeameme.org/created/repeat-repeat-repeat-5984a6.jpg" height="400" width="400">
| github_jupyter |
<h1>Using pre-trained embeddings with TensorFlow Hub</h1>
This notebook illustrates:
<ol>
<li>How to instantiate a TensorFlow Hub module</li>
<li>How to find pre-trained TensorFlow Hub modules for a variety of purposes</li>
<li>How to examine the embeddings of a Hub module</li>
<li>How one Hub module composes representations of sentences from individual words</li>
<li>How to assess word embeddings using a semantic similarity test</li>
</ol>
```
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'   # Cloud Storage bucket
PROJECT = 'cloud-training-demos'     # GCP project id
REGION = 'us-central1'               # compute region
```
Install the TensorFlow Hub library
```
!pip install -q tensorflow-hub
import tensorflow as tf
import tensorflow_hub as hub
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import seaborn as sns
import scipy
import math
# Export the notebook settings for shelled-out (bash/gcloud) cells.
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.8'
# NOTE(review): duplicate import — tensorflow was already imported above.
import tensorflow as tf
print(tf.__version__)
```
<h2>TensorFlow Hub Concepts</h2>
TensorFlow Hub is a library for the publication, discovery, and consumption of reusable parts of machine learning models. A module is a self-contained piece of a TensorFlow graph, along with its weights and assets, that can be reused across different tasks in a process known as transfer learning, which we covered as part of the course on Image Models.
To download and use a module, it's as easy as:
However, because modules are self-contained parts of a TensorFlow graph, in order to actually collect values from a module, you'll need to evaluate it in the context of a session.
First, let's explore what hub modules there are. Go to [the documentation page](https://www.tensorflow.org/hub/modules) and explore a bit.
Note that TensorFlow Hub has modules for Images, Text, and Other. In this case, we're interested in a Text module, so navigate to the Text section.
Within the Text section, there are a number of modules. If you click on a link, you'll be taken to a page that describes the module and links to the original paper where the model was proposed. Click on a model in the Word2Vec section of the page.
Note the details section, which describes what the module expects as input, how it preprocesses data, what it does when it encounters a word it hasn't seen before (OOV means "out of vocabulary") and in this case, how word embeddings can be composed to form sentence embeddings.
Finally, note the URL of the page. This is the URL you can copy to instantiate your module.
<h2>Task 1: Create an embedding using the NNLM model</h2>
To complete this task:
<ol>
<li>Find the module URL for the NNLM 50 dimensional English model</li>
<li>Use it to instantiate a module as 'embed'</li>
<li>Print the embedded representation of "cat"</li>
</ol>
NOTE: downloading hub modules requires downloading a lot of data. Instantiating the module will take a few minutes.
```
# Task 1
# NNLM 50-dimensional English model, per the task description above.
embed = hub.Module("https://tfhub.dev/google/nnlm-en-dim50/1")
print(embed(["cat"]))
```
When I completed this exercise, I got a vector that looked like:
[[ 0.11233182 -0.3176392 -0.01661182...]]
<h2>Task 2: Assess the Embeddings Informally</h2>
<ol>
<li>Identify some words to test</li>
<li>Retrieve the embeddings for each word</li>
<li>Determine what method to use to compare each pair of embeddings</li>
</ol>
So, now we have some vectors but the question is, are they any good? One way of testing whether they are any good is to try them for your task. But, first, let's just take a peek.
For our test, we'll need three common words such that two of the words are much closer in meaning than the third.
```
# Task 2a: three test words — the first two are close in meaning, the third is not.
# (The original placeholders `word_1 = #` were syntactically invalid.)
word_1 = "cat"
word_2 = "dog"
word_3 = "car"
```
Now, we'll use the same process of using our Hub module to generate embeddings but instead of printing the embeddings, capture them in a variable called 'my_embeddings'.
```
# Task 2b
```
Now, we'll use Seaborn's heatmap function to see how the vectors compare to each other. I've written the shell of a function that you'll need to complete that will generate a heatmap. The one piece that's missing is how we'll compare each pair of vectors. Note that because we are computing a score for every pair of vectors, we should have len(my_embeddings)^2 scores. There are many valid ways of comparing vectors. Generally, similarity scores are symmetric. The simplest is to take their dot product. For extra credit, implement a more complicated vector comparison function.
```
def plot_similarity(labels, embeddings):
    """Plot a heatmap of pairwise similarity scores between embedding vectors.

    labels: names shown on both axes; embeddings: one vector per label.
    """
    # Dot product of every pair of vectors -> len(embeddings)^2 scores.
    # (The simplest symmetric similarity, as suggested in the text above;
    # the original `corr = #` line was a syntax error.)
    corr = np.inner(embeddings, embeddings)
    sns.set(font_scale=1.2)
    g = sns.heatmap(
        corr,
        xticklabels=labels,
        yticklabels=labels,
        vmin=0,
        vmax=1,
        cmap="YlOrRd")
    g.set_xticklabels(labels, rotation=90)
    g.set_title("Semantic Textual Similarity")

plot_similarity([word_1, word_2, word_3], my_embeddings)
```
What you should observe is that, trivially, all words are identical to themselves, and, more interestingly, that the two more similar words have more similar embeddings than the third word.
<h2>Task 3: From Words to Sentences</h2>
Up until now, we've used our module to produce representations of words. But, in fact, if we want to, we can also use it to construct representations of sentences. The methods used by the module to compose a representation of a sentence won't be as nuanced as what an RNN might do, but they are still worth examining because they are so convenient.
<ol>
<li> Examine the documentation for our hub module and determine how to ask it to construct a representation of a sentence</li>
<li> Figure out how the module takes word embeddings and uses them to construct sentence embeddings </li>
<li> Construct embeddings of "cat", "The cat sat on the mat", "dog" and "The cat sat on the dog" and plot their similarity</li>
</ol>
```
# Task 3
```
Which is "cat" more similar to, "The cat sat on the mat" or "dog"? Is this desirable?
Think back to how an RNN scans a sequence and maintains its state. Naive methods of embedding composition (mapping many to one) can't possibly compete with a network trained for this very purpose!
<h2>Task 4: Assessing the Embeddings Formally</h2>
Of course, it's great to know that our embeddings match our intuitions to an extent, but it'd be better to have a formal, data-driven measure of the quality of the representation.
Researchers have developed formal benchmarks for exactly this purpose.
The [STS Benchmark](http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark) provides an intrinsic evaluation of the degree to which similarity scores computed using sentence embeddings align with human judgements. The benchmark requires systems to return similarity scores for a diverse selection of sentence pairs. Pearson correlation is then used to evaluate the quality of the machine similarity scores against human judgements.
```
def load_sts_dataset(filename):
    """Load an STS file into a DataFrame of (sent_1, sent_2, sim) rows.

    Each tab-separated line carries the two sentences in columns 5 and 6
    and the human-rated similarity score in column 4.
    """
    rows = []
    with tf.gfile.GFile(filename, "r") as f:
        for line in f:
            fields = line.strip().split("\t")
            rows.append((fields[5], fields[6], float(fields[4])))
    return pd.DataFrame(rows, columns=["sent_1", "sent_2", "sim"])
def download_and_load_sts_data():
    """Fetch the STS benchmark archive and return (dev, test) DataFrames."""
    archive = tf.keras.utils.get_file(
        fname="Stsbenchmark.tar.gz",
        origin="http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz",
        extract=True)
    base_dir = os.path.dirname(archive)
    sts_dev = load_sts_dataset(os.path.join(base_dir, "stsbenchmark", "sts-dev.csv"))
    sts_test = load_sts_dataset(os.path.join(base_dir, "stsbenchmark", "sts-test.csv"))
    return sts_dev, sts_test
# Download once, then peek at the dev split.
sts_dev, sts_test = download_and_load_sts_data()
sts_dev.head()
```
<h3>Build the Evaluation Graph</h3>
Next, we need to build the evaluation graph.
```
# TF1-style evaluation graph: cosine similarity between two sentence batches.
sts_input1 = tf.placeholder(tf.string, shape=(None))
sts_input2 = tf.placeholder(tf.string, shape=(None))
# For evaluation we use exactly normalized rather than
# approximately normalized.
sts_encode1 = tf.nn.l2_normalize(embed(sts_input1), axis=1)
sts_encode2 = tf.nn.l2_normalize(embed(sts_input2), axis=1)
cosine_similarities = tf.reduce_sum(tf.multiply(sts_encode1, sts_encode2), axis=1)
# Clip into [-1, 1] so acos stays well-defined despite float error.
clip_cosine_similarities = tf.clip_by_value(cosine_similarities, -1.0, 1.0)
sim_scores = 1.0 - tf.acos(clip_cosine_similarities)
```
<h3>Evaluate Sentence Embeddings</h3>
Finally, we need to create a session and run our evaluation.
```
# Choose which split to score, then flatten it into plain lists.
sts_data = sts_dev #@param ["sts_dev", "sts_test"] {type:"raw"}
text_a = sts_data['sent_1'].tolist()
text_b = sts_data['sent_2'].tolist()
dev_scores = sts_data['sim'].tolist()
def run_sts_benchmark(session):
    """Run the evaluation graph on the benchmark pairs and return similarity scores."""
    # Only the scores are used; the two encodings are fetched but discarded.
    _, _, scores = session.run(
        [sts_encode1, sts_encode2, sim_scores],
        feed_dict={sts_input1: text_a, sts_input2: text_b})
    return scores
with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    session.run(tf.tables_initializer())
    scores = run_sts_benchmark(session)
# Pearson correlation between model scores and human judgements.
pearson_correlation = scipy.stats.pearsonr(scores, dev_scores)
print('Pearson correlation coefficient = {0}\np-value = {1}'.format(
    pearson_correlation[0], pearson_correlation[1]))
```
<h3>Extra Credit</h3>
For extra credit, re-run this analysis with a different Hub module. Are the results different? If so, how?
<h2>Further Reading</h2>
We published a [blog post](https://developers.googleblog.com/2018/04/text-embedding-models-contain-bias.html) on how bias can affect text embeddings. It's worth a read!
| github_jupyter |
```
%matplotlib notebook
from gamesopt.train import train, TrainConfig
from gamesopt.games import load_game, GameOptions, QuadraticGameConfig, GameType
from gamesopt.games.quadratic_games import make_random_matrix
from gamesopt.optimizer import load_optimizer, OptimizerOptions, OptimizerType
from gamesopt.optimizer.prox import ProxOptions, ProxType
import torch
from collections import defaultdict
import matplotlib.pyplot as plt
from gamesopt.db import Database
from pathlib import Path
# LaTeX text rendering for publication-quality figures.
plt.rcParams.update({
    "text.usetex": True,
    "font.family": "sans-serif",
    "font.sans-serif": ["Helvetica"]
})
# Text width of the target document, in pts (consumed by set_size).
width = 487.8225
def set_size(width, fraction=1):
    """Set figure dimensions to avoid scaling in LaTeX.

    Parameters
    ----------
    width: float
        Document textwidth or columnwidth in pts
    fraction: float, optional
        Fraction of the width which you wish the figure to occupy

    Returns
    -------
    fig_dim: tuple
        Dimensions of figure in inches
    """
    pt_to_inch = 1 / 72.27               # TeX points per inch
    golden_ratio = (5**.5 - 1) / 2       # aesthetic height/width ratio
    fig_width_in = width * fraction * pt_to_inch
    return (fig_width_in, fig_width_in * golden_ratio)
def plot_record(record_id, *args, **kwargs):
    """Look up a run by id in the global `db` and plot distance-to-optimum
    against the number of gradient oracle calls. Extra args go to plt.plot."""
    rec = db.getRecord(record_id)
    plt.plot(rec.metrics.num_grad, rec.metrics.dist2opt, *args, **kwargs)
torch.manual_seed(1234)
options = QuadraticGameConfig(num_samples = 100, dim = 20)
# Two blocks with very different conditioning (ell=1 vs ell=10000).
m_0 = make_random_matrix(options.num_samples-50, options.num_players*options.dim, ell=1)
m_1 = make_random_matrix(50, options.num_players*options.dim, ell=10000)
# Keep a module-level handle: `matrix` was referenced below (and again in
# create_config) but was never defined — that raised a NameError.
matrix = torch.cat([m_0, m_1])
options.matrix = matrix
# Inspect the spectrum of the assembled game matrix.
eigs = torch.linalg.eigvals(matrix)
print(eigs)
db = Database(Path("./results"))
```
# Quadratic (NoProx)
```
def create_config(optimizer: OptimizerOptions = OptimizerOptions(), importance_sampling: bool = False):
    """Build a TrainConfig for the quadratic game with the given optimizer.

    NOTE(review): mutates the module-level `options` and reads a module-level
    `matrix`; both must exist before this is called. The mutable default
    `OptimizerOptions()` is shared across calls — confirm that is intended.
    """
    options.importance_sampling = importance_sampling
    torch.manual_seed(1234)  # same seed per run so all configs share the game
    options.matrix = matrix
    game = GameOptions(game_type = GameType.QUADRATIC, quadratic_options=options)
    config = TrainConfig(game = game, optimizer=optimizer, num_iter = 500)
    return config
exp = db.createExp("US vs IS")

# Uniform-sampling run.
config_us = create_config(OptimizerOptions(optimizer_type = OptimizerType.PROX_SGDA, lr=1e-3),
                          importance_sampling = False)
record_us = train(config_us)

# Importance-sampling run.
config_is = create_config(OptimizerOptions(optimizer_type = OptimizerType.PROX_SGDA, lr=2e-3),
                          importance_sampling = True)
record_is = train(config_is)

plt.figure()
plt.yscale("log")
# Fixed: both labels previously read `config.optimizer.lr` after `config` had
# been reassigned, so the US curve was labelled with the IS learning rate.
plt.plot(record_us.metrics.num_grad, record_us.metrics.hamiltonian, label="US lr=%s"%str(config_us.optimizer.lr))
plt.plot(record_is.metrics.num_grad, record_is.metrics.hamiltonian, label="IS lr=%s"%str(config_is.optimizer.lr))
plt.legend()
```
# Quadratic with Prox
```
PATH = Path("./results/quadratic_prox")
options.importance_sampling = False
game_conf = GameOptions(game_type = GameType.QUADRATIC, quadratic_options=options)
# L-inf ball constraint plus L1 regularization via the prox operator.
prox = ProxOptions(prox_type=ProxType.LINF_BALL_L1_REG, ball_radius = 1e-1, l1_reg = 1e-1)
# Full-batch reference run, saved at PATH for the stochastic runs to load.
optimizer = OptimizerOptions(optimizer_type = OptimizerType.PROX_SGDA, lr=2e-3, full_batch=True)
config = TrainConfig(game = game_conf, prox=prox, optimizer=optimizer, num_iter = 100, save_file=PATH)
record = train(config)
game = load_game(config.game)
game.load(PATH)
print(game.players)
plt.figure()
plt.plot(record.metrics.prox_dist)
plt.plot(record.metrics.hamiltonian)
plt.yscale("log")
plt.figure()
plt.yscale("log")
# Stochastic runs: uniform sampling vs importance sampling, same learning rate.
exp = db.createExp("US vs IS")
optimizer = OptimizerOptions(optimizer_type = OptimizerType.PROX_SGDA, lr=2e-4)
config = TrainConfig(game = game_conf, prox=prox, optimizer=optimizer, num_iter = 1000, load_file=PATH)
config.game.quadratic_options.importance_sampling = False
record = exp.create_record()
train(config, record)
optimizer = OptimizerOptions(optimizer_type = OptimizerType.PROX_SGDA, lr=2e-4)
config = TrainConfig(game = game_conf, prox=prox, optimizer=optimizer, num_iter = 1000, load_file=PATH)
config.game.quadratic_options.importance_sampling = True
record = exp.create_record()
train(config, record)
# Plot previously-stored runs by their database ids.
# NOTE(review): hard-coded record ids — these must already exist under ./results.
plt.figure(figsize=set_size(width))
plot_record("f6b3c917-9906-4427-a3c5-645bcba959c5", label="SGDA with IS", marker="s", markevery=10)
plot_record("e38b9c26-2d20-4532-a13e-60de4b84f9e4", label="SGDA", marker="o", markevery=10)
plt.yscale("log")
plt.xlim(0, 200)
plt.xlabel("Number of oracles call")
plt.ylabel("Distance to optimality")
plt.grid()
plt.legend()
plt.savefig('figures/quadratic_prox_us_vs_is.pdf', bbox_inches='tight')
```
| github_jupyter |
# <center><font color='MAROON'>Traffic Sign Recognition Classifier</font></center>
```
import pandas as pd
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from tqdm import tqdm
import seaborn as sns
import numpy as np
import os
import math
import keras
import wandb
```
## <font color='crimson'>Importing GTSRB - German Traffic Sign Recognition Benchmark Datasets</font>
<font color='maroon'>Dataset for this project is available at:</font>
https://www.kaggle.com/meowmeowmeowmeowmeow/gtsrb-german-traffic-sign
```
current_path = os.getcwd()
print(current_path)
# GTSRB has 43 sign classes; human-readable names indexed by class id.
classes = 43
classes_name = ['Speed limit (20km/h)',
                'Speed limit (30km/h)',
                'Speed limit (50km/h)',
                'Speed limit (60km/h)',
                'Speed limit (70km/h)',
                'Speed limit (80km/h)',
                'End of speed limit (80km/h)',
                'Speed limit (100km/h)',
                'Speed limit (120km/h)',
                'No passing',
                'No passing veh over 3.5 tons',
                'Right-of-way at intersection',
                'Priority road',
                'Yield',
                'Stop',
                'No vehicles',
                'Veh > 3.5 tons prohibited',
                'No entry',
                'General caution',
                'Dangerous curve left',
                'Dangerous curve right',
                'Double curve',
                'Bumpy road',
                'Slippery road',
                'Road narrows on the right',
                'Road work',
                'Traffic signals',
                'Pedestrians',
                'Children crossing',
                'Bicycles crossing',
                'Beware of ice/snow',
                'Wild animals crossing',
                'End speed + passing limits',
                'Turn right ahead',
                'Turn left ahead',
                'Ahead only',
                'Go straight or right',
                'Go straight or left',
                'Keep right',
                'Keep left',
                'Roundabout mandatory',
                'End of no passing',
                'End no passing veh > 3.5 tons'
                ]
def ImageReader(define_path):
    """Load every image under <cwd>/gtsrb-german-traffic-sign/<define_path>.

    Each image is resized to 32x32 and returned as a numpy array in a list
    (kept in os.listdir order). Unreadable files are skipped with a message.
    """
    data = []
    path = os.path.join(current_path, 'gtsrb-german-traffic-sign', define_path)
    images = os.listdir(path)
    for traffic_sign in tqdm(images):
        try:
            # os.path.join instead of '\\' so this also works off Windows.
            image = Image.open(os.path.join(path, traffic_sign))
            image = image.resize((32, 32))
            data.append(np.array(image))
        except (OSError, ValueError):
            # Narrowed from a bare `except`, which also swallowed
            # KeyboardInterrupt and genuine programming errors.
            print("Error Loading Image!")
    return data
# Load the entire test split into one stacked array.
test_images = ImageReader('test')
X_test = np.array(test_images)
print(" X_test \n{0} ".format(X_test.shape))
```
## <font color='crimson'>Contrast Limited Adaptive Histogram Equalization (CLAHE)</font>
---
<font color='maroon'>An algorithm for local contrast enhancement, that uses histograms computed over different tile regions of the image. Local details can therefore be enhanced even in regions that are darker or lighter than most of the image.</font>
https://scikit-image.org/docs/dev/api/skimage.exposure.html#skimage.exposure.equalize_adapthist
<font color='maroon'>Clipping limit, normalized between 0 and 1 (higher values give more contrast).</font>
---
```
from skimage import data, img_as_float
from skimage import exposure
# Function that applies normalization and local contrast enhancement
def normalize(image_data):
    """Apply CLAHE (contrast-limited adaptive histogram equalization) to each
    image in *image_data* and return the results as a single numpy array.

    Local details are enhanced even in regions that are darker or lighter
    than most of the image.
    """
    equalized = [exposure.equalize_adapthist(img, clip_limit=0.1)
                 for img in tqdm(image_data)]
    return np.array(equalized)
# Run the normalization process
print('\nNormalizing Test Images...')
X_test_norm = normalize(X_test)
print('\nNormaliztion Complete.')
```
# <center><font color='MAROON'>Model Architecture</font></center>
---
## <font color='DARKMAGENTA'>1. LeNet-5</font>
https://app.wandb.ai/junth/traffic-sign-recognition-classifier/runs/sfserq93?workspace=user-junth
## <font color='DARKMAGENTA'>2. LeNet-5 + Contrast Enhancement</font>
https://app.wandb.ai/junth/traffic-sign-recognition-classifier/runs/kcebxhfe?workspace=user-junth
## <font color='DARKMAGENTA'>3. LeNet-5 + Contrast Enhancement + Augmentation(3000)</font>
https://app.wandb.ai/junth/traffic-sign-recognition-classifier/runs/qc0c461b?workspace=user-junth
## <font color='DARKMAGENTA'>4. Deep LeNet-5 + Contrast Enhancement + Augmentation(3000)</font>
https://app.wandb.ai/junth/traffic-sign-recognition-classifier/runs/zllcbd7m?workspace=user-junth
## <font color='DARKMAGENTA'>5. Deep LeNet-5 + Contrast Enhancement + Augmentation(4500) + Regularization</font>
https://app.wandb.ai/junth/traffic-sign-recognition-classifier/runs/2atxo1cf?workspace=user-junth
---
<font color='maroon'>Saved model is also available at:</font>
https://github.com/Junth19/Traffic-Signs-Recognition-System
---
## <font color='crimson'>Loading (Deep LeNet-5 + Contrast Enhancement + Augmentation(4500) + Regularization) saved model</font>
```
from keras.models import load_model
# Restore the best-performing architecture (Deep LeNet-5 trained on
# CLAHE-normalized, augmented data) from its saved HDF5 snapshot.
model = load_model('./Models/(5) Deep LeNet-5 + Contrast Enhancement + Augmentation(4500) + Regularization/DeepLeNet-5_CLAHE_AUG(v2).h5')
model.summary()
```
## <font color='crimson'>Calculating Test Accuracy</font>
```
from keras.utils import to_categorical
# Ground-truth class ids for the test split come from Test.csv.
path = os.path.join(current_path,'gtsrb-german-traffic-sign','Test.csv')
data = pd.read_csv(path)
# Evaluate on the CLAHE-normalized images with one-hot encoded labels.
X_test = X_test_norm.astype('float32')
y_test = to_categorical(data['ClassId'].values)
test_loss, test_acc = model.evaluate(X_test, y_test)
print(test_acc)
```
## <font color='crimson'>Make predictions and explore </font>
<font color='maroon'>With the model trained, we can use it to make predictions about some images.</font>
```
test_images = X_test
a = y_test
# Recover integer labels from the one-hot rows (first index where the row is 1;
# equivalent to an arg-max over each row).
test_labels = [np.where(r==1)[0][0] for r in a]
predictions = model.predict(test_images)
print(predictions.shape)  # one softmax row of 43 scores per image
```
## <font color='crimson'>Precision, Recall and F1-Score</font>
```
from sklearn.metrics import classification_report
# Predicted class for each test image = arg-max of its softmax row.
y_true = test_labels
y_pred = [np.argmax(predictions[idx]) for idx in range(len(X_test))]
print(classification_report(y_true, y_pred, target_names=classes_name))
```
## <font color='crimson'>Confusion Matrix </font>
```
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
# Single large axes shared by plot_confusion_matrix below.
fig, ax = plt.subplots(figsize=(20, 20))
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.hot):
    """Render *cm* on the module-level `ax` and save it to ConfusionMatrix.png.

    Parameters
    ----------
    cm : (n, n) array-like confusion matrix from sklearn.
    classes : sequence of n class names used as tick labels.
    normalize : if True, rows are scaled to proportions before plotting.
        (Previously this flag was accepted but silently ignored.)
    title, cmap : plot title and matplotlib colormap.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1, keepdims=True)
    ax.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=90)
    plt.yticks(tick_marks, classes)
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.savefig('ConfusionMatrix.png', dpi = 300)
# Build and plot the confusion matrix for the test predictions.
confusionMatrix = confusion_matrix(y_true, y_pred)
plot_confusion_matrix(confusionMatrix, classes=classes_name,
                      title='Confusion Matrix')
```
<font color='maroon'>Here, the model has predicted the label for each image in the testing set. Let's take a look at the first prediction:</font>
```
# Softmax confidence vector (43 scores) for the first test image.
print(predictions[0])
```
<font color='maroon'>A prediction is an array of 43 numbers. These describe the "confidence" of the model that the image corresponds to each of the 43 different traffic road signs. We can see which label has the highest confidence value:</font>
```
# Index of the most confident class for the first image.
np.argmax(predictions[0])
```
<font color='maroon'>So the model is most confident that this image is a </font>`Veh > 3.5 tons prohibited`or `classes_name[16]`
```
# Compare the true label of image 0 with the name of class 16.
test_labels[0]
classes_name[16]
```
## <font color='crimson'>Plot several images with their predictions (Softmax Probability)</font>
---
```
def plot_image(i, predictions_array, true_labels, images):
    """Draw image *i* with its predicted class, confidence and true class.

    The caption is blue when the prediction matches the true label,
    red otherwise.
    """
    scores = predictions_array[i]
    actual = true_labels[i]
    picture = images[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(picture)
    guess = np.argmax(scores)
    caption_color = 'blue' if guess == actual else 'red'
    plt.xlabel("{} {:2.0f}% ({})".format(classes_name[guess],
                                         100*np.max(scores),
                                         classes_name[actual]),
               color=caption_color)
def plot_value_array(i, predictions_array, true_label):
    """Bar-plot the 43 softmax scores for sample *i*; the predicted bar is
    colored red and the true-label bar blue (blue wins when they coincide)."""
    scores, actual = predictions_array[i], true_label[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    bars = plt.bar(range(43), scores, color="#777777")
    plt.ylim([0, 1])
    guess = np.argmax(scores)
    bars[guess].set_color('red')
    bars[actual].set_color('blue')
# Show one sample (i=212): the image next to its softmax histogram; this
# figure is also saved to disk.
i = 212
plt.figure(figsize=(16,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
plt.savefig('PredictionHistogram10.png', dpi = 300)
# And a second sample (i=1), not saved to disk.
i = 1
plt.figure(figsize=(16,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
```
---
<font color='maroon'>Let's plot several images with their predictions. Correct prediction labels are blue and incorrect prediction labels are red. The number gives the percent (out of 100) for the predicted label.</font>
---
```
# Grid of the first num_images test samples: image + score-histogram pairs.
num_rows = 15
num_cols = 1
num_images = num_rows * num_cols
plt.figure(figsize=(4 * 4 * num_cols, 3 * num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2 * num_cols, 2 * i + 1)
    plot_image(i, predictions, test_labels, test_images)
    plt.subplot(num_rows, 2 * num_cols, 2 * i + 2)
    plot_value_array(i, predictions, test_labels)
```
| github_jupyter |
# Expression of P(G_n) in terms of M_n and M_{n+2}
In this notebook we'll validate the expression for P(G_n) in terms of r,
b, and M_n and M_{n+2}
```
import numpy as np
from scipy.integrate import quad
import matplotlib.pyplot as pl
%matplotlib notebook
# Absolute / relative tolerances passed to scipy.integrate.quad below.
epsabs = 1e-12
epsrel = 1e-12
```
Here is the original expression for P(G_n) as defined in the text:
```
def P_of_Gn(r, b, n, epsabs=1e-12, epsrel=1e-12):
    """Numerically evaluate P(G_n) for radius ratio *r* and impact parameter *b*.

    The integration limits depend on the occultation geometry:
      * no overlap (1 + r <= b) or full overlap (b <= r - 1): result is 0;
      * partial overlap (|1 - r| < b <= 1 + r): integrate over
        [-kappa/2, kappa/2];
      * body fully inside the disc (b <= 1 - r): integrate over [-pi/2, pi/2].

    epsabs/epsrel are forwarded to scipy.integrate.quad; previously these
    were read from module-level globals — the defaults preserve the old
    behaviour while making the function self-contained.
    """
    if (1 + r <= b):
        return 0
    elif abs(1 - r) < b and b <= 1 + r:
        k2 = (1 - (b- r) ** 2) / (4*b*r)
        kappa_half = np.arcsin(np.sqrt(k2))
        func = lambda x: np.sqrt(k2-np.sin(x)**2)**n *(r-b+2*b*np.sin(x)**2)
        res, _ = quad(func,-kappa_half,kappa_half,epsabs=epsabs,epsrel=epsrel)
        return 2*r*np.sqrt(4*b*r)**n*res
    elif b <= 1 - r:
        k2 = (1 - (b- r) ** 2) / (4*b*r)
        func = lambda x: np.sqrt(k2-np.sin(x)**2)**n *(r-b+2*b*np.sin(x)**2)
        res, _ = quad(func,-np.pi/2,np.pi/2,epsabs=epsabs,epsrel=epsrel)
        return 2*r*np.sqrt(4*b*r)**n*res
    elif b <= r - 1:
        return 0
    else:
        raise NotImplementedError("Missing case!")
```
And here is the expression for the integral M_n:
```
def M_n(r, b, n, epsabs=1e-12, epsrel=1e-12):
    """Numerically evaluate the integral M_n for radius ratio *r* and impact
    parameter *b*, using the same geometry cases as P_of_Gn:
      * no overlap (1 + r <= b) or full overlap (b <= r - 1): result is 0;
      * partial overlap: integrate over [-kappa/2, kappa/2];
      * body fully inside the disc: integrate over [-pi/2, pi/2].

    epsabs/epsrel are forwarded to scipy.integrate.quad; previously read
    from module-level globals, now parameters with the same defaults.
    """
    if (1 + r <= b):
        return 0
    elif abs(1 - r) < b and b <= 1 + r:
        k2 = (1 - (b- r) ** 2) / (4*b*r)
        kappa_half = np.arcsin(np.sqrt(k2))
        func = lambda x: np.sqrt(k2-np.sin(x)**2)**n
        res, _ = quad(func,-kappa_half,kappa_half,epsabs=epsabs,epsrel=epsrel)
        return np.sqrt(4*b*r)**n*res
    elif b <= 1 - r:
        k2 = (1 - (b- r) ** 2) / (4*b*r)
        func = lambda x: np.sqrt(k2-np.sin(x)**2)**n
        res, _ = quad(func,-np.pi/2,np.pi/2,epsabs=epsabs,epsrel=epsrel)
        return np.sqrt(4*b*r)**n*res
    elif b <= r - 1:
        return 0
    else:
        raise NotImplementedError("Missing case!")
```
Let's show that the expressions yield the same answer (to within the error of the numerical method) over a large range of $b$ and $r$ values:
```
n = 50
barr = np.linspace(1e-4, 2, n)
rarr = np.linspace(1e-4, 2, n)
diff = np.zeros((n, n))
Nu_grid = np.linspace(3,4,2)
# Nu is the order of the integral:
# NOTE(review): `diff` is overwritten on each Nu iteration, so the plot below
# only shows residuals for the last Nu in Nu_grid — confirm this is intended.
for k, Nu in enumerate(Nu_grid):
    for i, b in enumerate(barr):
        for j, r in enumerate(rarr):
            # Residual of the identity P(G_n) = (1+r^2-b^2) M_n - M_{n+2}.
            diff[j, i] = abs(P_of_Gn(r, b, Nu) - (1+r**2-b**2)*M_n(r,b,Nu)+M_n(r,b,Nu+2))
fig = pl.figure(figsize=(10, 8))
pl.imshow(diff, origin="lower", extent=(0, 2, 0, 2))
pl.xlabel("Impact parameter", fontsize=16)
pl.ylabel("Radius ratio", fontsize=16)
cb = pl.colorbar()
cb.ax.set_ylabel("Difference", fontsize=16);
```
The difference is close to machine epsilon everywhere, so our expression is correct.
| github_jupyter |
```
# Copyright (c) Facebook, Inc. and its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Bert Pipeline : PyTorch BERT News Classification
This notebook shows a PyTorch BERT end-to-end news classification example using Kubeflow Pipelines.
An example notebook that demonstrates how to:
* Get different tasks needed for the pipeline
* Create a Kubeflow pipeline
* Include Pytorch KFP components to preprocess, train, visualize and deploy the model in the pipeline
* Submit a job for execution
* Query(prediction and explain) the final deployed model
* Interpretation of the model using the Captum Insights
```
! pip uninstall -y kfp
! pip install --no-cache-dir kfp
import kfp
import json
import os
from kfp.onprem import use_k8s_secret
from kfp import components
from kfp.components import load_component_from_file, load_component_from_url
from kfp import dsl
from kfp import compiler
kfp.__version__
```
# Enter your gateway and the cookie
[Use this extension on chrome to get token]( https://chrome.google.com/webstore/detail/editthiscookie/fngmhnnpilhplaeedifhccceomclgfbg?hl=en)

## Update values for the ingress gateway and auth session
```
# Istio ingress endpoint and the authservice session cookie used to reach
# the Kubeflow Pipelines API.
INGRESS_GATEWAY='http://istio-ingressgateway.istio-system.svc.cluster.local'
AUTH="<enter your token here>"  # paste the authservice_session token here
NAMESPACE="kubeflow-user-example-com"
COOKIE="authservice_session="+AUTH
EXPERIMENT="Default"
```
## Set Log bucket and Tensorboard Image
```
# MinIO object store used for pipeline artifacts and tensorboard logs.
MINIO_ENDPOINT="http://minio-service.kubeflow:9000"
LOG_BUCKET="mlpipeline"
TENSORBOARD_IMAGE="public.ecr.aws/pytorch-samples/tboard:latest"
# Connect to the KFP API through the ingress gateway, (re)create the
# experiment, and keep a handle on the first listed experiment.
client = kfp.Client(host=INGRESS_GATEWAY+"/pipeline", cookies=COOKIE)
client.create_experiment(EXPERIMENT)
experiments = client.list_experiments(namespace=NAMESPACE)
my_experiment = experiments.experiments[0]
my_experiment
```
## Set Inference parameters
```
DEPLOY_NAME="bertserve"  # InferenceService name used at deploy time
MODEL_NAME="bert"        # model name used in the predict/explain URLs
# Generate the component YAMLs from the template mapping, then load each
# pipeline component from its YAML definition.
! python utils/generate_templates.py bert/template_mapping.json
prepare_tensorboard_op = load_component_from_file("yaml/tensorboard_component.yaml")
prep_op = components.load_component_from_file(
    "yaml/preprocess_component.yaml"
)
train_op = components.load_component_from_file(
    "yaml/train_component.yaml"
)
deploy_op = load_component_from_file("yaml/deploy_component.yaml")
minio_op = components.load_component_from_file(
    "yaml/minio_component.yaml"
)
```
## Define pipeline
```
@dsl.pipeline(name="Training pipeline", description="Sample training job test")
def pytorch_bert( # pylint: disable=too-many-arguments
    minio_endpoint=MINIO_ENDPOINT,
    log_bucket=LOG_BUCKET,
    log_dir=f"tensorboard/logs/{dsl.RUN_ID_PLACEHOLDER}",
    mar_path=f"mar/{dsl.RUN_ID_PLACEHOLDER}/model-store",
    config_prop_path=f"mar/{dsl.RUN_ID_PLACEHOLDER}/config",
    model_uri=f"s3://mlpipeline/mar/{dsl.RUN_ID_PLACEHOLDER}",
    tf_image=TENSORBOARD_IMAGE,
    deploy=DEPLOY_NAME,
    namespace=NAMESPACE,
    confusion_matrix_log_dir=f"confusion_matrix/{dsl.RUN_ID_PLACEHOLDER}/",
    num_samples=1000,
    max_epochs=1
):
    """This method defines the pipeline tasks and operations."""
    # Tensorboard visualization task: reads events from the MinIO log bucket.
    # The MinIO credentials are injected from the mlpipeline-minio-artifact
    # secret via the pod template below.
    prepare_tb_task = prepare_tensorboard_op(
        log_dir_uri=f"s3://{log_bucket}/{log_dir}",
        image=tf_image,
        pod_template_spec=json.dumps({
            "spec": {
                "containers": [{
                    "env": [
                        {
                            "name": "AWS_ACCESS_KEY_ID",
                            "valueFrom": {
                                "secretKeyRef": {
                                    "name": "mlpipeline-minio-artifact",
                                    "key": "accesskey",
                                }
                            },
                        },
                        {
                            "name": "AWS_SECRET_ACCESS_KEY",
                            "valueFrom": {
                                "secretKeyRef": {
                                    "name": "mlpipeline-minio-artifact",
                                    "key": "secretkey",
                                }
                            },
                        },
                        {
                            "name": "AWS_REGION",
                            "value": "minio"
                        },
                        {
                            "name": "S3_ENDPOINT",
                            "value": f"{minio_endpoint}",
                        },
                        {
                            "name": "S3_USE_HTTPS",
                            "value": "0"
                        },
                        {
                            "name": "S3_VERIFY_SSL",
                            "value": "0"
                        },
                    ]
                }]
            }
        }),
    ).set_display_name("Visualization")
    # Data preprocessing runs after the tensorboard sidecar is prepared.
    prep_task = (
        prep_op().after(prepare_tb_task
                        ).set_display_name("Preprocess & Transform")
    )
    confusion_matrix_url = f"minio://{log_bucket}/{confusion_matrix_log_dir}"
    script_args = f"model_name=bert.pth," \
                  f"num_samples={num_samples}," \
                  f"confusion_matrix_url={confusion_matrix_url}"
    # For GPU , set gpus count and accelerator type
    ptl_args = f"max_epochs={max_epochs},profiler=pytorch,gpus=0,accelerator=None"
    train_task = (
        train_op(
            input_data=prep_task.outputs["output_data"],
            script_args=script_args,
            ptl_arguments=ptl_args
        ).after(prep_task).set_display_name("Training")
    )
    # For GPU uncomment below line and set GPU limit and node selector
    # ).set_gpu_limit(1).add_node_selector_constraint
    # ('cloud.google.com/gke-accelerator','nvidia-tesla-p4')

    # Push tensorboard events, the .mar archive and its config.properties
    # to MinIO so the serving and visualization stages can consume them.
    (
        minio_op(
            bucket_name="mlpipeline",
            folder_name=log_dir,
            input_path=train_task.outputs["tensorboard_root"],
            filename="",
        ).after(train_task).set_display_name("Tensorboard Events Pusher")
    )
    minio_mar_upload = (
        minio_op(
            bucket_name="mlpipeline",
            folder_name=mar_path,
            input_path=train_task.outputs["checkpoint_dir"],
            filename="bert_test.mar",
        ).after(train_task).set_display_name("Mar Pusher")
    )
    (
        minio_op(
            bucket_name="mlpipeline",
            folder_name=config_prop_path,
            input_path=train_task.outputs["checkpoint_dir"],
            filename="config.properties",
        ).after(train_task).set_display_name("Config Pusher")  # fixed typo "Conifg"
    )
    model_uri = str(model_uri)
    # pylint: disable=unused-variable
    # CPU-serving InferenceService manifest used by the Deployer task below.
    isvc_yaml = """
    apiVersion: "serving.kubeflow.org/v1beta1"
    kind: "InferenceService"
    metadata:
      name: {}
      namespace: {}
    spec:
      predictor:
        serviceAccountName: sa
        pytorch:
          storageUri: {}
          resources:
            requests:
              cpu: 4
              memory: 8Gi
            limits:
              cpu: 4
              memory: 8Gi
    """.format(deploy, namespace, model_uri)
    # For GPU inference use below yaml with gpu count and accelerator
    gpu_count = "1"
    accelerator = "nvidia-tesla-p4"
    isvc_gpu_yaml = """
    apiVersion: "serving.kubeflow.org/v1beta1"
    kind: "InferenceService"
    metadata:
      name: {}
      namespace: {}
    spec:
      predictor:
        serviceAccountName: sa
        pytorch:
          storageUri: {}
          resources:
            requests:
              cpu: 4
              memory: 8Gi
            limits:
              cpu: 4
              memory: 8Gi
              nvidia.com/gpu: {}
          nodeSelector:
            cloud.google.com/gke-accelerator: {}
    """.format(deploy, namespace, model_uri, gpu_count, accelerator)
    # Update inferenceservice_yaml for GPU inference
    deploy_task = (
        deploy_op(action="apply", inferenceservice_yaml=isvc_yaml
                  ).after(minio_mar_upload).set_display_name("Deployer")
    )
    # Every op gets the MinIO credentials injected as environment variables.
    dsl.get_pipeline_conf().add_op_transformer(
        use_k8s_secret(
            secret_name="mlpipeline-minio-artifact",
            k8s_secret_key_to_env={
                "secretkey": "MINIO_SECRET_KEY",
                "accesskey": "MINIO_ACCESS_KEY",
            },
        )
    )
# Compile the pipeline into an Argo package.
compiler.Compiler().compile(pytorch_bert, 'pytorch.tar.gz', type_check=True)
# Submit the compiled package as a run under the experiment created above.
run = client.run_pipeline(my_experiment.id, 'pytorch-bert', 'pytorch.tar.gz')
```
## Wait for inference service below to go to `READY True` state.
```
!kubectl get isvc $DEPLOY
```
# Get Inferenceservice name
```
# Extract the service hostname from the InferenceService status URL.
INFERENCE_SERVICE_LIST = ! kubectl get isvc {DEPLOY_NAME} -n {NAMESPACE} -o json | python3 -c "import sys, json; print(json.load(sys.stdin)['status']['url'])"| tr -d '"' | cut -d "/" -f 3
INFERENCE_SERVICE_NAME = INFERENCE_SERVICE_LIST[0]
INFERENCE_SERVICE_NAME
```
# Prediction Request
```
# Send a prediction request through the ingress (the Host header routes to
# the InferenceService) and save the JSON response.
!curl -v -H "Host: $INFERENCE_SERVICE_NAME" -H "Cookie: $COOKIE" "$INGRESS_GATEWAY/v1/models/$MODEL_NAME:predict" -d @./bert/sample.txt > bert_prediction_output.json
! cat bert_prediction_output.json
```
# Explanation Request
```
# Request a model explanation (Captum attributions) for the same sample.
!curl -v -H "Host: $INFERENCE_SERVICE_NAME" -H "Cookie: $COOKIE" "$INGRESS_GATEWAY/v1/models/$MODEL_NAME:explain" -d @./bert/sample.txt > bert_explaination_output.json
! cat bert_explaination_output.json
explanations_json = json.loads(open("./bert_explaination_output.json", "r").read())
explanations_json
prediction_json = json.loads(open("./bert_prediction_output.json", "r").read())
import torch
# Word-level attribution scores, tokens and the convergence delta for the
# first (only) input in the explainer response.
attributions = explanations_json["explanations"][0]['importances']
tokens = explanations_json["explanations"][0]['words']
delta = explanations_json["explanations"][0]['delta']
attributions = torch.tensor(attributions)
pred_prob = 0.75  # NOTE(review): hard-coded display probability — confirm intended
pred_class = prediction_json["predictions"][0]
true_class = "Business"
attr_class ="world"
```
# Visualization of Predictions
```
from captum.attr import visualization
# Render the attribution record as colored HTML text in the notebook.
vis_data_records =[]
vis_data_records.append(visualization.VisualizationDataRecord(
                            attributions,
                            pred_prob,
                            pred_class,
                            true_class,
                            attr_class,
                            attributions.sum(),
                            tokens,
                            delta))
vis = visualization.visualize_text(vis_data_records)
```
### Visualization appears as below

## Cleanup Script
```
# Tear down: remove all InferenceServices and completed pods in the namespace.
! kubectl delete --all isvc -n $NAMESPACE
! kubectl delete pod --field-selector=status.phase==Succeeded -n $NAMESPACE
```
| github_jupyter |
# Assignment 2
##  Before you start working on this assignment please click File -> Save a Copy in Drive.
Before you turn this problem in, make sure everything runs as expected. First, restart the kernel (in the menubar, select Kernel → Restart) and then run all cells (in the menubar, select Cell → Run All). You can speak with others regarding the assignment but all work must be your own.
### This is a 30 point assignment.
**You may find it useful to go through the notebooks from the course materials when doing these exercises.**
**If you attempt to fake passing the tests you will receive a 0 on the assignment and it will be considered an ethical violation.**
```
# Download the assignment bundle and set up the otter autograder.
files = "https://github.com/rpi-techfundamentals/introml_website_fall_2020/raw/master/files/assignment2.zip"
!pip install otter-grader && wget $files && unzip -o assignment2.zip
#Run this.  It initiates autograding.
import otter
grader = otter.Notebook()
```
## Exercise-Packages
This creates an Numpy array. Numpy is a common package that we will use to work with arrays. You can read more about Numpy [here](http://www.numpy.org/).
```
# NOTE(review): `np` is deliberately not imported here — verifying/installing
# numpy is part of exercise (1) below.
a = np.array([2,3,4])
print(a)
```
To get this to work, you will have to make sure that the numpy(np) package is installed.
(1) Verify that Numpy is installed. How did you know?
Describe how you would you install it if it wasn't installed?
```
man1="""
Enter your answer here.
"""
```
(2) Fix the cell below so that `a` is a `numpy` array.
```
#Fix this code of q2.
# NOTE(review): intentionally left as a plain list — the exercise asks the
# student to convert `a` into a numpy array.
a = [5,6,7,8]
print(a, type(a))
grader.check('q02')
```
(3) Create a numpy array `b` with the values `12, 13, 14, 15`.
```
#<insert q3 code here>
grader.check('q03')
```
## Exercise - Operations on Variables
(4) Describe what happens when you multiply an integer times a boolean?
What is the resulting type? Provide examples.
```
#You must assign your answer to q4_answer.
man4="""
Enter your answer here.
"""
```
(5) Describe happens when you try to multiply an integer value times a null?
```
man5="""
Enter your answer here.
"""
```
(6) Take 5 to the power of 4 and assign it to a variable `c`. Then transform the variable `c` to a type `float`.
```
#<insert q6 code here>
grader.check('q06')
```
## Exercise-Lists
Hint: [This link is useful.](https://docs.python.org/3/tutorial/datastructures.html#more-on-lists) as is the process of tab completion (using tab to find available methods of an object).
(7) Create a list `elist1` with the following values `1,2,3,4,5`.<br>
```
#<insert q7 code here>
grader.check('q07')
```
(8) Create a new list `elist2` by first creating a copy of `elist1` and then reversing the order.
*HINT, remember there is a specific function to copy a list.*
```
#<insert q8 code here>
grader.check('q08')
```
(9) Create a new list `elist3` by first creating a copy of `elist1` and then adding `7, 8, 9` to the end. (Hint: Search for a different function if appending doesn't work.)
```
#<insert q9 code here>
grader.check('q09')
```
(10) Create a new list `elist4` by first creating a copy of `elist3` and then insert `6` between `5` and `7`.
```
#<insert q10 code here>
grader.check('q10')
```
## Exercise-Sets/Dictionary
This [link to documentation on sets](https://docs.python.org/3/tutorial/datastructures.html#sets) may be useful.
(11) Create a set `eset1` with the following values (1,2,3,4,5).
```
#<insert q11 code here>
grader.check('q11')
```
(12) Create a new set `eset2` the following values (1,3,6).
```
#<insert q12 code here>
grader.check('q12')
```
(13) Create a new set `eset3` that is `eset1-eset2`.
```
#<insert q13 code here>
grader.check('q13')
```
(14) Create a new set `eset4` that is the union of `eset1+eset2`.
```
#<insert q14 code here>
grader.check('q14')
```
(15) Create a new set `eset5` that includes values that are in both `eset1` and `eset2` (intersection).
```
#<insert q15 code here>
grader.check('q15')
```
(16) Create a new dict `edict1` with the following keys and associated values: st1=45; st2=32; st3=40; st4=31.
*Hint: There is a good section on dictionaries [here](https://docs.python.org/3/tutorial/datastructures.html#dictionaries).
```
#<insert q16 code here>
grader.check('q16')
```
(17) Create a new variable `key1` where the value is equal to the value of dictionary edict1 with key `st3`.
```
#<insert q17 code here>
grader.check('q17')
```
## Exercise-Numpy Array
(18) Create a new numpy array `nparray1` that is 3x3 and all the number 3 (should be integer type).
```
#<insert q18 code here>
grader.check('q18')
```
(19) Create a new variable `nparray1sum` that sums all of column 0.
```
#<insert q19 code here>
grader.check('q19')
```
(20) Create a new variable `nparray1mean` that takes the average of column 0.
```
#<insert q20 code here>
grader.check('q20')
```
(21) Create a new numpy array `nparray2` that selects only column 1 of `nparray1` (all rows).
```
#<insert q21 code here>
grader.check('q21')
```
(22) Create a new numpy array `nparray3` that is equal to `nparray1` times `2` (you should not alter `nparray1`).
```
#<insert q22 code here>
grader.check('q22')
```
(23) Create a new numpy array nparray4 that is a verticle stack of `nparray1` and `nparray3`.
```
#<insert q23 code here>
grader.check('q23')
```
## Exercise-Pandas
For these you will need to import the iris dataset. You should find the file `iris.csv` in the main directory.
While we showed 2 ways of importing a csv, you should use the `read_csv` method of Pandas to load the csv into a dataframe called `df`.
You can find the IRIS dataset here:
[https://raw.githubusercontent.com/rpi-techfundamentals/introml_website_fall_2020/master/files/iris.csv](https://raw.githubusercontent.com/rpi-techfundamentals/introml_website_fall_2020/master/files/iris.csv)
```
#Load iris.csv into a Pandas dataframe df here.
#Check out the first few rows with the head command.
```
(24) Create a variable `df_rows` that includes the number of rows in the `df` dataframe.
```
#<insert q24 code here>
grader.check('q24')
```
(25) Create a new dataframe `df_train` that includes the first half of the `df` dataframe. Create a new dataframe `df_test` that includes the second half.
```
#<insert q25 code here>
grader.check('q25')
```
(26) Create a new Pandas Series `sepal_length` from the `sepal_length` column of the df dataframe.
```
#<insert q26 code here>
grader.check('q26')
```
(27) Using, the Iris dataset, find the mean of the `sepal_length` series in our sample and assign it to the `sepal_length_mean` variable. You should round the result to 3 digits after the decimal.
```
#Round example
a=99.9999999999999
#For example, the following will round a to 2 digits.
b = round(a,2)
```
```
#<insert q27 code here>
grader.check('q27')
```
## MAKE SURE THAT THIS ENTIRE NOTEBOOK RUNS WITHOUT ERRORS. TO TEST THIS DO RUNTIME --> RESTART AND RUN ALL
It should run without errors.
### Click File -> Download .ipynb to download the assignment. Then Upload it to the LMS.
| github_jupyter |
```
import tensorflow as tf
import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, Lambda, Conv2DTranspose, SeparableConv2D
from dataset import create_artifact_dataset
import matplotlib.pyplot as plt
%matplotlib inline
"""from tensorflow.keras.backend import set_session
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU
config.log_device_placement = True # to log device placement (on which device the operation ran)
sess = tf.Session(config=config)
set_session(sess)"""
def get_ARCNN(input_shape=(32,32,1)):
    """Build the original 4-layer ARCNN (feature extraction -> feature
    enhancement -> mapping -> reconstruction) for artifact reduction."""
    image_in = Input(shape=input_shape)
    features = Conv2D(64,9,activation='relu', padding='same', use_bias=True,name="Feature_extract")(image_in)
    enhanced = Conv2D(32,7,activation='relu', padding='same', use_bias=True,name="Feature_Enhance")(features)
    mapped = Conv2D(64,1,activation='relu', padding='valid', use_bias=True,name="Mapping")(enhanced)
    restored = Conv2DTranspose(1,7,padding='same')(mapped)
    return Model(inputs=image_in,outputs=restored,name="ARCNN")
def get_Fast_ARCNN(input_shape=(32,32,1)):
    """Build the faster ARCNN variant: a 1x1 "shrinking" layer is inserted
    before feature enhancement to reduce the computation cost."""
    image_in = Input(shape=input_shape)
    features = Conv2D(64,9,activation='relu', padding='same', use_bias=True,name="Feature_extract")(image_in)
    shrunk = Conv2D(32,1,activation='relu', padding='valid', use_bias=True,name="Feature_Enhance_speed")(features)
    enhanced = Conv2D(32,7,activation='relu', padding='same', use_bias=True,name="Feature_Enhance")(shrunk)
    mapped = Conv2D(64,1,activation='relu', padding='valid', use_bias=True,name="Mapping")(enhanced)
    restored = Conv2DTranspose(1,7,padding='same')(mapped)
    return Model(inputs=image_in,outputs=restored,name="Faster_ARCNN")
def get_ARCNN_lite(input_shape=(32,32,1)):
    """Build a lightweight ARCNN that swaps large kernels for dilated 5x5
    convolutions, keeping the receptive field with fewer parameters."""
    image_in = Input(shape=input_shape)
    features = Conv2D(32,5,dilation_rate=4,activation='relu', padding='same', use_bias=True,name="Feature_extract")(image_in)
    shrunk = Conv2D(32,1,activation='relu', padding='valid', use_bias=True,name="Feature_Enhance_speed")(features)
    enhanced = Conv2D(32,5,dilation_rate=2,activation='relu', padding='same', use_bias=True,name="Feature_Enhance")(shrunk)
    mapped = Conv2D(32,1,activation='relu', padding='valid', use_bias=True,name="Mapping")(enhanced)
    restored = Conv2DTranspose(1,3,dilation_rate=4,name="Upscale",padding='same')(mapped)
    return Model(inputs=image_in,outputs=restored)
# Instantiate each variant and print its layer summary for comparison.
ARCNN = get_ARCNN()
ARCNN.summary()
Faster_ARCNN = get_Fast_ARCNN()
Faster_ARCNN.summary()
ARCNN_lite = get_ARCNN_lite()
ARCNN_lite.summary()
# Build the artifact dataset and train the base ARCNN model.
data = create_artifact_dataset()
data = data.prefetch(tf.data.experimental.AUTOTUNE)
optimizer = tf.keras.optimizers.Adam(learning_rate=5e-4)
# Fixed: the original compiled/fit an undefined `ARCNN_v1` (NameError) and
# called the misspelled `tf.keras.callbacks.EarlyStoppinag` (AttributeError).
# NOTE(review): `ssim` and `psnr` metric functions are not defined in this
# notebook — confirm they are provided elsewhere (e.g. the dataset module).
ARCNN.compile(optimizer=optimizer,loss=tf.keras.losses.MeanSquaredError(),metrics=[ssim,psnr])
tboard = tf.keras.callbacks.TensorBoard(log_dir="./logs/no_batch",write_images=True)
es = tf.keras.callbacks.EarlyStopping()
ARCNN.fit(data,
          epochs=2,
          callbacks=[tboard,es])
```
| github_jupyter |
```
#default_exp radar.config_v1
```
# radar.config_v1
The TI 1443/1843 radar firmware accepts some commands over the serial port to configure the radar waveform.
This module parses these commands so that we can interpret and process the raw ADC readings correctly.
Further details on these commands can be found in the [mmWave SDK user guide](https://www.ti.com/tool/MMWAVE-SDK).
*Note:* We do not process all commands, but only commands that affect the ADC data. The 1843 supports additional commands for onboard processing. All these are ignored.
```
#export
import logging
logger = logging.getLogger()
#export
def read_radar_params(filename):
    """Reads a text file containing serial commands and returns parsed config as a dictionary"""
    with open(filename) as cfg_file:
        commands = [line.strip() for line in cfg_file.readlines()]
    parsed = parse_commands(commands)
    logger.debug(parsed)
    return parsed
def parse_commands(commands):
    """Calls the corresponding parser for each command in commands list"""
    cfg = None
    for line in commands:
        try:
            tokens = line.split()
            cmd = tokens[0]
            args = tokens[1:]
            cfg = command_handlers[cmd](args, cfg)
        except KeyError:
            # Unknown command (or a KeyError escaping a handler) — skip it.
            logger.debug(f'{cmd} is not handled')
        except IndexError:
            # Blank/whitespace-only line: split() produced no tokens.
            logger.debug(f'line is empty "{line}"')
    return cfg
def dict_to_list(cfg):
    """Generates commands from config dictionary.

    Inverse of parse_commands: serializes the config dict back into mmWave
    CLI command strings (channelCfg, adcCfg, lowPower, profileCfg, chirpCfg,
    frameCfg), validating chirp/profile cross-references and the frame loop
    count along the way.
    """
    cfg_list = ['flushCfg','dfeDataOutputMode 1']

    # rx antennas/lanes for channel config
    rx_bool = [cfg['rx4'], cfg['rx3'], cfg['rx2'], cfg['rx1']]
    rx_mask = sum(2 ** i for i, v in enumerate(reversed(rx_bool)) if v)
    # number of tx antennas for channel config
    tx_bool = [cfg['tx3'], cfg['tx2'], cfg['tx1']]
    tx_mask = sum(2 ** i for i, v in enumerate(reversed(tx_bool)) if v)
    #print('[NOTE] Azimuth angle can be determined from channel config.') if cfg['tx2'] is True and (cfg['tx1'] or cfg['tx3']) is False else 0
    #print('[NOTE] Azimuth angle can be determined from channel config.') if cfg['tx2'] is False and (cfg['tx1'] or cfg['tx3']) is True else 0
    #print('[NOTE] Elevation and Azimuth angle can be determined from channel config.') if cfg['tx2'] is True and (cfg['tx1'] or cfg['tx3']) else 0
    cfg_list.append('channelCfg %s %s 0' % (rx_mask, tx_mask))  # rx and tx mask

    # adc config: only complex sampling modes are supported
    if cfg['isComplex'] and cfg['image_band']:
        outputFmt = 2
        #print('[NOTE] Complex 2x mode, both Imaginary and Real IF spectrum is filtered and sent to ADC, so\n'
        #      ' if Sampling rate is X, ADC data would include frequency spectrum from -X/2 to X/2.')
    elif cfg['isComplex'] and not cfg['image_band'] == True:
        outputFmt = 1
        #print('[NOTE] Complex 1x mode, Only Real IF Spectrum is filtered and sent to ADC, so if Sampling rate\n'
        #      ' is X, ADC data would include frequency spectrum from 0 to X.')
    else: raise ValueError("Real Data Type Not Supported")
    cfg_list.append('adcCfg 2 %s' % outputFmt)  # 16 bits (mandatory), complex 1x or 2x

    # adc power mode
    if cfg['adcPower'] =='low':
        power_mode = 1
        #print('[NOTE] The Low power ADC mode limits the sampling rate to half the max value.')
    elif cfg['adcPower'] =='regular': power_mode = 0
    else: raise ValueError("ADC power level Not Supported")
    cfg_list.append('lowPower 0 %s' % power_mode)  # power mode

    # profile configs: convert SI units back to the CLI's GHz/us/MHz-per-us/ksps
    for profile_ii in cfg['profiles']:
        cfg_list.append('profileCfg %s %s %s %s %s %s %s %s %s %s %s %s %s %s'
                        % (profile_ii['id'],
                           float(profile_ii['start_frequency']/1e9),
                           float(profile_ii['idle']/1e-6),
                           float(profile_ii['adcStartTime']/1e-6),
                           float(profile_ii['rampEndTime']/1e-6),
                           int(profile_ii['txPower']),
                           int(profile_ii['txPhaseShift']),
                           float(profile_ii['freqSlopeConst']/1e12),
                           float(profile_ii['txStartTime']/1e-6),
                           int(profile_ii['adcSamples']),
                           int(profile_ii['adcSampleRate']/1e3),
                           int(profile_ii['hpfCornerFreq1']),
                           int(profile_ii['hpfCornerFreq2']),
                           int(profile_ii['rxGain'])))

    # chirp configs
    for chirp_ii in cfg['chirps']:

        # Check if chirp is referring to valid profile config
        profile_valid = False
        for profile_ii in cfg['profiles']:
            if chirp_ii['profileID'] == profile_ii['id']: profile_valid = True

        if profile_valid is False: raise ValueError("The following profile id used in chirp "
                                                    "is invalid: %i" % chirp_ii['profileID'])
    ###############################################################################################################
        '''
        # check if tx values are valid
        if hamming([chirp_ii['chirptx3'],chirp_ii['chirptx2'],chirp_ii['chirptx1']],
               [cfg['tx3'], cfg['tx2'], cfg['tx1']])*3 > 1:

            raise ValueError("Chirp should have at most one different Tx than channel cfg")
        '''
    ###############################################################################################################
        if chirp_ii['chirpStartIndex'] > chirp_ii['chirpStopIndex']: raise ValueError("Particular chirp start index after chirp stop index")
        tx_bool = [chirp_ii['chirptx3'],chirp_ii['chirptx2'],chirp_ii['chirptx1']]
        tx_mask = sum(2 ** i for i, v in enumerate(reversed(tx_bool)) if v)
        cfg_list.append('chirpCfg %s %s %s %s %s %s %s %s'
                        % (chirp_ii['chirpStartIndex'],
                           chirp_ii['chirpStopIndex'],
                           chirp_ii['profileID'],
                           chirp_ii['startFreqVariation'],
                           chirp_ii['slopeVariation'],
                           chirp_ii['idleVariation'],
                           chirp_ii['adcStartVariation'],
                           tx_mask))

    # frame config: derive the chirp index range and loop count from the chirps
    chirpStop = 0
    chirpStart = 511  # max value for chirp start index
    for chirp_ii in cfg['chirps']:
        chirpStop = max(chirpStop, chirp_ii['chirpStopIndex'])
        chirpStart = min(chirpStart,chirp_ii['chirpStartIndex'])
    chirps_len = chirpStop + 1

    numLoops = cfg['numChirps']/chirps_len
    if chirpStart > chirpStop: raise ValueError("Chirp(s) start index is after chirp stop index")
    if numLoops % 1 != 0: raise ValueError("Number of loops is not integer")
    if numLoops > 255 or numLoops < 1: raise ValueError("Number of loops must be int in [1,255]")

    numFrames = cfg['numFrames'] if 'numFrames' in cfg.keys() else 0  # if zero => inf
    cfg_list.append('frameCfg %s %s %s %s %s 1 0'
                    % (chirpStart, chirpStop, int(numLoops), numFrames, 1000/cfg['fps']))

    cfg_list.append('testFmkCfg 0 0 0 1')
    cfg_list.append('setProfileCfg disable ADC disable')
    return cfg_list
def channelStr_to_dict(args, curr_cfg=None):
    """Handler for `channelCfg`.

    args[0] is the receiver (RX) bitmask, args[1] the transmitter (TX)
    bitmask. Each mask is decoded into per-antenna booleans plus a count,
    and the result is merged into `curr_cfg` when given (otherwise a fresh
    dict is returned).
    """
    cfg = curr_cfg if curr_cfg else {}
    # RX mask: 4 bits, MSB = rx4. The number of enabled receivers is the
    # number of lanes referred to in the source code.
    rx_bin = bin(int(args[0]))[2:].zfill(4)
    cfg['numLanes'] = rx_bin.count('1')
    (cfg['rx4'], cfg['rx3'], cfg['rx2'], cfg['rx1']) = [bool(int(bit)) for bit in rx_bin]
    # TX mask: 3 bits, MSB = tx3. Which TX antennas are active determines
    # whether azimuth/elevation angles can later be derived.
    tx_bin = bin(int(args[1]))[2:].zfill(3)
    cfg['numTx'] = tx_bin.count('1')
    (cfg['tx3'], cfg['tx2'], cfg['tx1']) = [bool(int(bit)) for bit in tx_bin]
    return cfg
def profileStr_to_dict(args, curr_cfg=None):
    """Handler for `profileCfg`: parse one profile line and append it to
    the config's `profiles` list."""
    # Per-field scale factor (None = pass the raw token through) and the
    # target Python type, aligned with `field_names` below.
    scales = [None, 1e9, 1e-6, 1e-6, 1e-6, None, None, 1e12, 1e-6, None, 1e3, None, None, None]
    types = [int, float, float, float, float, float, float, float, float, int, float, int, int, float]
    field_names = ['id', 'start_frequency', 'idle', 'adcStartTime', 'rampEndTime',
                   'txPower', 'txPhaseShift', 'freqSlopeConst', 'txStartTime',
                   'adcSamples', 'adcSampleRate', 'hpfCornerFreq1', 'hpfCornerFreq2', 'rxGain']
    # Reuse the caller's dict when provided, making sure `profiles` exists.
    cfg = curr_cfg if curr_cfg else {'profiles': []}
    cfg.setdefault('profiles', [])
    profile = {
        name: (cast(float(raw) * scale) if scale else cast(raw))
        for name, raw, scale, cast in zip(field_names, args, scales, types)
    }
    cfg['profiles'].append(profile)
    return cfg
def chirp_to_dict(args, curr_cfg=None):
    """Handler for `chirpCfg`: parse one chirp line and append it to the
    config's `chirps` list."""
    cfg = curr_cfg if curr_cfg else {'chirps': []}
    cfg.setdefault('chirps', [])
    chirp = {
        'chirpStartIndex': int(args[0]),
        'chirpStopIndex': int(args[1]),
        'profileID': int(args[2]),
        'startFreqVariation': float(args[3]),
        'slopeVariation': float(args[4]),
        'idleVariation': float(args[5]),
        'adcStartVariation': float(args[6]),
    }
    # args[7] is a 3-bit TX bitmask, MSB = tx3.
    tx_bits = bin(int(args[7]))[2:].zfill(3)
    chirp['chirptx3'], chirp['chirptx2'], chirp['chirptx1'] = (bit == '1' for bit in tx_bits)
    cfg['chirps'].append(chirp)
    return cfg
def power_to_dict(args, curr_cfg=None):
    """Handler for `lowPower`: record the ADC power mode in the config."""
    cfg = curr_cfg if curr_cfg else {}
    mode = int(args[1])
    if mode == 1:
        # Low-power ADC mode limits the sampling rate to half the max value.
        cfg['adcPower'] = 'low'
    elif mode == 0:
        cfg['adcPower'] = 'regular'
    else:
        raise ValueError("Invalid Power Level")
    return cfg
def frameStr_to_dict(args, cfg):
    """Handler for `frameCfg`: derive total chirp count, frame count and
    frame rate from an already-populated chirp list."""
    if 'chirps' not in cfg.keys():
        raise ValueError("Need to define chirps before frame")
    # Chirps per frame = loops (args[2]) * chirp-sequence length, where the
    # sequence length is the largest stop index + 1.
    highest_stop = max((chirp['chirpStopIndex'] for chirp in cfg['chirps']), default=0)
    cfg['numChirps'] = int(args[2]) * (highest_stop + 1)
    if int(args[3]) != 0:
        cfg['numFrames'] = int(args[3])  # 0 means stream forever, so leave unset
    # args[4] is the frame period in milliseconds.
    cfg['fps'] = 1000 / float(args[4])
    return cfg
def adcStr_to_dict(args, curr_cfg=None):
    """Handler for `adcCfg`: record the ADC output format (complex 1x/2x)."""
    cfg = curr_cfg if curr_cfg else {}
    adc_fmt = int(args[1])
    if adc_fmt == 1:
        # Complex 1x: only the real IF spectrum is filtered and sent to the
        # ADC, so data covers frequencies 0 .. Fs.
        cfg['isComplex'], cfg['image_band'] = True, False
    elif adc_fmt == 2:
        # Complex 2x: both the imaginary and real IF spectrum are kept, so
        # data covers frequencies -Fs/2 .. Fs/2.
        cfg['isComplex'], cfg['image_band'] = True, True
    else:
        raise ValueError("Real Data Type Not Supported")
    return cfg
# Mapping of serial command keyword -> handler that folds that command's
# arguments into the accumulated radar config dict (see the *_to_dict
# functions above). Each handler accepts (args, curr_cfg) and returns cfg.
command_handlers = {
    'channelCfg': channelStr_to_dict,
    'profileCfg': profileStr_to_dict,
    'chirpCfg': chirp_to_dict,
    'frameCfg': frameStr_to_dict,
    'adcCfg': adcStr_to_dict,
    'lowPower': power_to_dict,
}
```
# Example Usage
For a text file with the following commands:
```
flushCfg
dfeDataOutputMode 1
channelCfg 15 5 0
adcCfg 2 1
lowPower 0 0
profileCfg 0 77.0 58.0 7.0 40.0 0 0 100.0 1.0 304 9499 0 0 30
chirpCfg 0 0 0 0.0 0.0 0.0 0.0 1
chirpCfg 1 1 0 0.0 0.0 0.0 0.0 4
frameCfg 0 1 32 0 33.333 1 0
testFmkCfg 0 0 0 1
setProfileCfg disable ADC disable
sensorStart
```
```
#hide
radar_config_filename = '../samples/indoor_human_rcs.cfg'
read_radar_params(radar_config_filename)
```
| github_jupyter |

Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
# Configuration
_**Setting up your Azure Machine Learning services workspace and configuring your notebook library**_
---
---
## Table of Contents
1. [Introduction](#Introduction)
1. What is an Azure Machine Learning workspace
1. [Setup](#Setup)
1. Azure subscription
1. Azure ML SDK and other library installation
1. Azure Container Instance registration
1. [Configure your Azure ML Workspace](#Configure%20your%20Azure%20ML%20workspace)
1. Workspace parameters
1. Access your workspace
1. Create a new workspace
1. [Next steps](#Next%20steps)
---
## Introduction
This notebook configures your library of notebooks to connect to an Azure Machine Learning (ML) workspace. In this case, a library contains all of the notebooks in the current folder and any nested folders. You can configure this notebook library to use an existing workspace or create a new workspace.
Typically you will need to run this notebook only once per notebook library as all other notebooks will use connection information that is written here. If you want to redirect your notebook library to work with a different workspace, then you should re-run this notebook.
In this notebook you will
* Learn about getting an Azure subscription
* Specify your workspace parameters
* Access or create your workspace
* Add a default compute cluster for your workspace
### What is an Azure Machine Learning workspace
An Azure ML Workspace is an Azure resource that organizes and coordinates the actions of many other Azure resources to assist in executing and sharing machine learning workflows. In particular, an Azure ML Workspace coordinates storage, databases, and compute resources providing added functionality for machine learning experimentation, deployment, inference, and the monitoring of deployed models.
## Setup
This section describes activities required before you can access any Azure ML services functionality.
### 1. Azure Subscription
In order to create an Azure ML Workspace, first you need access to an Azure subscription. An Azure subscription allows you to manage storage, compute, and other assets in the Azure cloud. You can [create a new subscription](https://azure.microsoft.com/en-us/free/) or access existing subscription information from the [Azure portal](https://portal.azure.com). Later in this notebook you will need information such as your subscription ID in order to create and access AML workspaces.
### 2. Azure ML SDK and other library installation
If you are running in your own environment, follow [SDK installation instructions](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-environment). If you are running in Azure Notebooks or another Microsoft managed environment, the SDK is already installed.
Also install following libraries to your environment. Many of the example notebooks depend on them
```
(myenv) $ conda install -y matplotlib tqdm scikit-learn
```
Once installation is complete, the following cell checks the Azure ML SDK version:
```
import azureml.core
print("This notebook was created using version 1.7.0 of the Azure ML SDK")
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
```
If you are using an older version of the SDK than the one this notebook was created with, you should upgrade your SDK.
### 3. Azure Container Instance registration
Azure Machine Learning uses [Azure Container Instance (ACI)](https://azure.microsoft.com/services/container-instances) to deploy dev/test web services. An Azure subscription needs to be registered to use ACI. If you or the subscription owner have not yet registered ACI on your subscription, you will need to use the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) and execute the following commands. Note that if you ran through the AML [quickstart](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-get-started) you have already registered ACI.
```shell
# check to see if ACI is already registered
(myenv) $ az provider show -n Microsoft.ContainerInstance -o table
# if ACI is not registered, run this command.
# note you need to be the subscription owner in order to execute this command successfully.
(myenv) $ az provider register -n Microsoft.ContainerInstance
```
---
## Configure your Azure ML workspace
### Workspace parameters
To use an AML Workspace, you will need to import the Azure ML SDK and supply the following information:
* Your subscription id
* A resource group name
* (optional) The region that will host your workspace
* A name for your workspace
You can get your subscription ID from the [Azure portal](https://portal.azure.com).
You will also need access to a [_resource group_](https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-overview#resource-groups), which organizes Azure resources and provides a default region for the resources in a group. You can see what resource groups to which you have access, or create a new one in the [Azure portal](https://portal.azure.com). If you don't have a resource group, the create workspace command will create one for you using the name you provide.
The region to host your workspace will be used if you are creating a new workspace. You do not need to specify this if you are using an existing workspace. You can find the list of supported regions [here](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=machine-learning-service). You should pick a region that is close to your location or that contains your data.
The name for your workspace is unique within the subscription and should be descriptive enough to discern among other AML Workspaces. The subscription may be used only by you, or it may be used by your department or your entire enterprise, so choose a name that makes sense for your situation.
The following cell allows you to specify your workspace parameters. This cell uses the python method `os.getenv` to read values from environment variables which is useful for automation. If no environment variable exists, the parameters will be set to the specified default values.
If you ran the Azure Machine Learning [quickstart](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-get-started) in Azure Notebooks, you already have a configured workspace! You can go to your Azure Machine Learning Getting Started library, view *config.json* file, and copy-paste the values for subscription ID, resource group and workspace name below.
Replace the default values in the cell below with your workspace parameters
```
import os
subscription_id = os.getenv("SUBSCRIPTION_ID", default="<my-subscription-id>")
resource_group = os.getenv("RESOURCE_GROUP", default="<my-resource-group>")
workspace_name = os.getenv("WORKSPACE_NAME", default="<my-workspace-name>")
workspace_region = os.getenv("WORKSPACE_REGION", default="eastus2")
```
### Access your workspace
The following cell uses the Azure ML SDK to attempt to load the workspace specified by your parameters. If this cell succeeds, your notebook library will be configured to access the workspace from all notebooks using the `Workspace.from_config()` method. The cell can fail if the specified workspace doesn't exist or you don't have permissions to access it.
```
from azureml.core import Workspace

# Try to load the workspace named by the parameters above; on success persist
# the connection details so other notebooks can use Workspace.from_config().
try:
    ws = Workspace(subscription_id = subscription_id, resource_group = resource_group, workspace_name = workspace_name)
    # write the details of the workspace to a configuration file to the notebook library
    ws.write_config()
    print("Workspace configuration succeeded. Skip the workspace creation steps below")
except Exception:
    # Was a bare `except:`, which would also swallow KeyboardInterrupt and
    # SystemExit; `except Exception` lets user interrupts propagate.
    print("Workspace not accessible. Change your parameters or create a new workspace below")
```
### Create a new workspace
If you don't have an existing workspace and are the owner of the subscription or resource group, you can create a new workspace. If you don't have a resource group, the create workspace command will create one for you using the name you provide.
**Note**: As with other Azure services, there are limits on certain resources (for example AmlCompute quota) associated with the Azure ML service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.
This cell will create an Azure ML workspace for you in a subscription provided you have the correct permissions.
This will fail if:
* You do not have permission to create a workspace in the resource group
* You do not have permission to create a resource group if it's non-existing.
* You are not a subscription owner or contributor and no Azure ML workspaces have ever been created in this subscription
If workspace creation fails, please work with your IT admin to provide you with the appropriate permissions or to provision the required resources.
**Note**: A Basic workspace is created by default. If you would like to create an Enterprise workspace, please specify sku = 'enterprise'.
Please visit our [pricing page](https://azure.microsoft.com/en-us/pricing/details/machine-learning/) for more details on our Enterprise edition.
```
from azureml.core import Workspace

# Create the workspace using the specified parameters.
# exist_ok=True makes this idempotent: an existing workspace with the same
# name is reused; create_resource_group=True provisions the resource group
# if it does not exist yet.
ws = Workspace.create(name = workspace_name,
                      subscription_id = subscription_id,
                      resource_group = resource_group,
                      location = workspace_region,
                      create_resource_group = True,
                      sku = 'basic',
                      exist_ok = True)
ws.get_details()
# write the details of the workspace to a configuration file to the notebook library
ws.write_config()
```
---
## Next steps
In this notebook you configured this notebook library to connect easily to an Azure ML workspace. You can copy this notebook to your own libraries to connect them to your workspace, or use it to bootstrap new workspaces completely.
If you came here from another notebook, you can return there and complete that exercise, or you can try out the [Tutorials](./tutorials) or jump into "how-to" notebooks and start creating and deploying models. A good place to start is the [train within notebook](./how-to-use-azureml/training/train-within-notebook) example that walks through a simplified but complete end to end machine learning process.
| github_jupyter |
____
__Universidad Tecnológica Nacional, Buenos Aires__<br/>
__Ingeniería Industrial__<br/>
__Cátedra de Ciencia de Datos - Curso I5521 - Turno sabado mañana__<br/>
__Elaborado por: Nicolas Aguirre__
____
```
from google.colab import drive
drive.mount('/gdrive')
DRIVE_FOLDER = 'ClusterAI2020/'
CLASS_FOLDER = 'clase_02/'
DATA_PATH = "../data/clase_02/"
%cd {'/gdrive/My Drive/'+DRIVE_FOLDER+CLASS_FOLDER}
```
# clase_02: EDA : Analisis Exploratorio de los Datos
## Librerias
```
#Importar paquetes de herramientas:
#Datos
import pandas as pd
import numpy as np
#Graficos
import matplotlib.pyplot as plt
import seaborn as sns
#Otros
import warnings
warnings.filterwarnings('ignore')
```
# Dataset
El dataset que usaremos se encuentra en:
https://www.kaggle.com/dgomonov/new-york-city-airbnb-open-data
* Descargar e importarlo
* Verificar que se haya cargado bien el dataset
* Dimension del dataset
```
# Load the NY Airbnb dataset, using the listing id as the index, then
# sanity-check that it loaded by printing its shape and first rows.
df = pd.read_csv(DATA_PATH+'clusterai_2020_clase02_dataset_ny_airbnb.csv',index_col="id")
print(f'Shape: {np.shape(df)}')
df.head(3)
```
# Limpieza
* Duplicados
* Cantidad y % de NaN por columna
* Luego de quitarlos, devolver shape del df
```
# Drop exact duplicate rows, then report missing values per column.
df.drop_duplicates(inplace=True,keep='first')
cant_NaN = df.isnull().sum()
print(cant_NaN,'\r\n')
# Absolute and relative NaN counts, sorted worst-first, side by side.
total = df.isnull().sum().sort_values(ascending=False)
percent = (df.isnull().sum()/df.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing_data.head(6)
# Drop every row that still contains at least one NaN.
df.dropna(how ='any', inplace = True)
# Report the shape of the cleaned data set.
print(f'Nos quedamos con un df de {df.shape[0]} filas x{df.shape[1]} columnas')
```
# Practica!
* Crear columna 'log_precios'. Grafico de la distribucion de precio y de log(precio)
* En base al que mejor se ajuste, comparar los precios segun los distintos "neighbourhood_group" (boxplot)
* Mediana, Limite inferior y superior del 'neighbourhood_group' con mayor y menor precio.
* Crear una nueva columa llamada "Banda", con 7 categorias de precios.
Bandas = [0, 50, 75, 100, 150, 200, 250]
* Boxplot de "Bandas" vs log(Reviews)
* Scatter plot de Longitud y Latitud donde se distinga "neighbourhood_group"
* Scatter plot de Longitud y Latitud donde se distingan las "Bandas"
* Cantidad de barrios
* Comparacion de la cantidad de hospedajes para los 10 barrios mas populares (barras)
* Cantidad de hospedajes para los 10 barrios mas populares, segregados por bandas (pivot_table)
```
# Add a log-price column (log1p keeps zero prices finite).
df['log_price'] = np.log(1+df['price'])
# Plot the raw price distribution (heavily right-skewed).
sns.kdeplot(df.price, color="b", shade = True)
plt.xlabel("Price")
plt.title('Distribucion de Precio',size = 20)
plt.show()
# Plot the log-price distribution (much closer to normal).
sns.distplot(np.log(1+df['price']),color='b')
plt.xlabel("Price")
plt.show()
# Using the better-behaved log price, compare prices across each
# "neighbourhood_group" with a boxplot.
g = sns.catplot(x='neighbourhood_group',y='log_price',data=df, kind="box", height = 5 ,aspect=3,palette = "muted")
g.despine(left=True)
g.set_xticklabels(rotation=90)
plt.xlabel("Barrio")
plt.ylabel("Log_Precio")
plt.title('Plot de log_price vs Barrio',size = 12)
plt.show()
# Median plus lower/upper whisker limits for the most expensive group.
grup1 = 'Manhattan'
df_pivot= df[df['neighbourhood_group']==grup1]
plt.figure()
A = plt.boxplot(df_pivot['log_price'])
medians = [median.get_ydata() for median in A["medians"]]
# Whisker endpoints of the boxplot.
whiskers = [whiskers.get_ydata() for whiskers in A["whiskers"]]
#print(f'Valores outliers:\r\n{outliers[0]}\r\n')
print(f'Mediana:\r\n {medians[0][0]}\r\n')
print(f'Limites Inferiores :\r\n{whiskers[0][0]}\r\n')
print(f'Limites Superiores:\r\n{whiskers[1][1]}\r\n')
# Same statistics for the least expensive group.
grup2 = 'Bronx'
df_pivot= df[df['neighbourhood_group']==grup2]
plt.figure()
A = plt.boxplot(df_pivot['log_price'])
medians = [median.get_ydata() for median in A["medians"]]
# Whisker endpoints of the boxplot.
whiskers = [whiskers.get_ydata() for whiskers in A["whiskers"]]
#print(f'Valores outliers:\r\n{outliers[0]}\r\n')
print(f'Mediana:\r\n {medians[0][0]}\r\n')
print(f'Limites Inferiores :\r\n{whiskers[0][0]}\r\n')
print(f'Limites Superiores:\r\n{whiskers[1][1]}\r\n')
# Define and assign the 7 price bands (Bandas = [0, 50, 75, 100, 150, 200, 250]).
df.loc[(df['price'] > 0) & (df['price'] <= 50), 'Banda'] = 1
df.loc[(df['price'] > 50) & (df['price'] <= 75), 'Banda'] = 2
df.loc[(df['price'] > 75) & (df['price'] <= 100), 'Banda'] = 3
df.loc[(df['price'] > 100) & (df['price'] <= 150), 'Banda'] = 4
df.loc[(df['price'] > 150) & (df['price'] <= 200), 'Banda'] = 5
df.loc[(df['price'] > 200) & (df['price'] <= 250), 'Banda'] = 6
df.loc[(df['price'] > 250), 'Banda'] = 7
# Boxplot of price band vs log(reviews).
df['log_review'] = np.log(1+df['number_of_reviews'])
g = sns.catplot(x="Banda",y='log_review',data=df, kind="box", height = 5,aspect=3 ,palette = "muted")
g.despine(left=True)
g.set_xticklabels(rotation=90)
g = g.set_ylabels("log_Reviews")
plt.title('Boxen plot Bandas VS Reviews',size = 20)
plt.show()
# Longitude/latitude scatter coloured by price band.
df.plot(kind='scatter', x='longitude', y='latitude', c='Banda',
    cmap=plt.get_cmap(), colorbar=True, alpha=0.5, figsize=(10,8))
plt.show()
# Longitude/latitude scatter coloured by neighbourhood group.
plt.figure(figsize=(10,8))
sns.scatterplot(df.longitude,df.latitude,hue=df.neighbourhood_group,palette='muted')
plt.show()
# Number of distinct neighbourhoods.
q_barrios = len(df['neighbourhood'].unique())
print(f'En total hay {q_barrios} barrios\r\n')
# Listing counts for the 10 most popular neighbourhoods (bar chart).
top_neighbourhood = df['neighbourhood'].value_counts().index[0:10]
plt.figure(figsize=(15,3))
g = sns.countplot(x='neighbourhood',data=df, palette = "muted",order=top_neighbourhood)
g.set_xticklabels(g.get_xticklabels(), rotation=90, ha="right")
plt.title('Cuenta por Barrio',size = 20)
plt.xlabel("Barrio")
plt.ylabel("Count")
plt.show()
# Listings per price band for the top-10 neighbourhoods (count pivot table).
table = pd.pivot_table(df[df['neighbourhood'].isin(top_neighbourhood)],index='neighbourhood',columns = 'Banda', values='log_price' ,fill_value=0, aggfunc = 'count')
table
```
| github_jupyter |
This script generates:
1) URIs for resources, i.e. entities found in Compact Memory by Tagme _that are also available in Judaicalink_.
2) URIs for references (mentions), each identifying a "spot" or a mention in Compact Memory.
See the documentation for more info.
```
import os, json, pickle
import urllib.parse
def generate_res_ref_data(cm_mentions, ep_inverted_index, link_prob=0.6, rho=0.4, save=False, out_path=""):
    """Build resource/reference (mention) records for Compact Memory entities
    that also exist in Judaicalink.

    Parameters
    ----------
    cm_mentions : list of tuples
        Tagme output. Indices used: 0=spot text, 1=start offset, 2=end offset,
        3=link probability, 4=rho, 6=resource title, 7="<journal>_<page>" id.
    ep_inverted_index : dict
        Maps a resource URI (any known wikipedia/dbpedia form) to its
        Judaicalink entity page.
    link_prob, rho : float
        Thresholds a mention must strictly exceed to be kept.
    save : bool
        If True, pickle the result to `out_path`.

    Returns
    -------
    list of dict
        One record per kept mention, each with a unique reference URI.

    Bug fixes vs. the previous revision:
    * `entity_page` is now reset for every mention and assigned in the
      direct-hit branch (it used to be left unset on that branch, raising a
      NameError on the first hit or silently reusing a stale value from a
      previous iteration).
    * The `ep_inverted_index` parameter is actually used; the function
      previously ignored it and read a same-named global (`resource2ep`).
    """
    print("Generating resource and reference data with link probability={} and rho={}...".format(link_prob, rho))
    base_resource = 'http://data.judaicalink.org/data/dbpedia/'
    base_reference = 'http://data.judaicalink.org/data/cm-tagme/'
    ref_index = 1000000  # reference URIs are numbered from here
    base_dbp = 'http://dbpedia.org/resource/'
    base_dbp_de = 'http://de.dbpedia.org/resource/'
    base_wiki_en = 'https://en.wikipedia.org/wiki/'
    base_wiki_de = 'https://de.wikipedia.org/wiki/'
    cm_jl_mentions = []
    for mention in cm_mentions:
        # Keep only confidently linked mentions.
        if mention[3] <= link_prob or mention[4] <= rho:
            continue
        resource_name = mention[6].replace(' ', '_')
        resource_uri = base_resource + resource_name
        # Look up the Judaicalink entity page under any known URI format:
        # jl/dbpedia form first, then wikipedia/dbpedia variants.
        entity_page = ""
        if resource_uri in ep_inverted_index:
            entity_page = ep_inverted_index[resource_uri]
        else:
            alt_uris = [base_wiki_en + urllib.parse.quote(resource_name),
                        base_wiki_de + urllib.parse.quote(resource_name),
                        base_dbp_de + resource_name,
                        base_dbp + resource_name]
            for alt_uri in alt_uris:
                if alt_uri in ep_inverted_index:
                    entity_page = ep_inverted_index[alt_uri]
                    break
        if entity_page != "":  # resource exists in Judaicalink -> emit record
            cm_jl_mentions.append({
                'resource': resource_uri,
                'ref': base_reference + str(ref_index),
                'spot': mention[0],
                'start': mention[1],
                'end': mention[2],
                'link_prob': mention[3],
                'rho': mention[4],
                'journal_id': mention[7].split('_')[0],
                'page_id': mention[7].replace('_', '-')
            })
            ref_index += 1
    print("Generated data for {} entity mentions from CM-tagme.".format(len(cm_jl_mentions)))
    if save is True:
        print("Saving data to {}...".format(out_path))
        with open(out_path, 'wb') as outfile:
            pickle.dump(cm_jl_mentions, outfile)
    print("Done!")
    return cm_jl_mentions
# Load the Tagme linker output for Compact Memory (list of mention tuples).
cm_mentions = pickle.load(open("/data/cm/output/linker/cm_entities_tagme.pickle", 'rb'))
# Load the entity-pages inverted index (resource URI -> Judaicalink entity page).
resource2ep = pickle.load(open('ep_inv_index.pickle', 'rb'))
# Generate the resource/reference data with the default thresholds.
res_ref_data = generate_res_ref_data(cm_mentions, resource2ep)
# Persist the result for the downstream RDF-generation step.
with open('cm_tagme_resource_reference_data.pickle', 'wb') as outfile:
    pickle.dump(res_ref_data, outfile)
```
| github_jupyter |
# Convolutional Variational Auto Encoder sample using tensorflow
tensorflow を利用して MNIST で CVAE を実行するサンプルです。
- [Convolutional Variational Autoencoder][tutorial]
[tutorial]: https://www.tensorflow.org/tutorials/generative/cvae
## 環境の確認
```
!cat /etc/issue
!free -h
!cat /proc/cpuinfo
!nvidia-smi
!python --version
from logging import Logger
def get_logger() -> Logger:
    """Configure basic INFO-level logging and return this module's logger."""
    import logging

    log_format = "%(asctime)s %(levelname)s %(name)s :%(message)s"
    logging.basicConfig(level=logging.INFO, format=log_format)
    return logging.getLogger(__name__)
logger = get_logger()
!pip install -q tensorflow-gpu==2.0.0
def check_tf_version() -> None:
    """Log the installed TensorFlow version (the notebook expects 2.0.0)."""
    import tensorflow as tf
    logger.info(tf.__version__)
check_tf_version()
```
## ソースコードの取得
```
# 対象のコードを取得
!git clone -n https://github.com/iimuz/til.git
%cd til
!git checkout 184b032
%cd python/cvae_tensorflow
```
## 実行
### データセットの確認
```
%run -i dataset.py
import dataset
raw_train, raw_test = dataset.get_batch_dataset()
```
### ネットワークの確認
```
%run -i network.py
```
### 学習の実行
```
import time
from IPython import display
import dataset
import train
import utils
def training() -> None:
    """Train the CVAE on MNIST, checkpointing and recording ELBO each epoch.

    Resumes from the latest checkpoint in ``_data/ckpts`` (if present) and
    from the persisted ELBO history, so re-running the function continues
    training from where it stopped.
    """
    logger.info(f"eager execution: {tf.executing_eagerly()}")
    # Hyper-parameters.
    epochs = 100
    latent_dim = 50
    num_example_to_generate = 16
    # Fixed latent vectors so the generated sample grid is comparable
    # across epochs.
    random_vector_for_generation = tf.random.normal(
        shape=[num_example_to_generate, latent_dim]
    )
    history_filepath = "_data/history.pkl"
    history_image_filepath = "_data/history.png"
    train_dataset, test_dataset = dataset.get_batch_dataset(
        train_buff=60000, batch_size=128
    )
    model = CVAE(latent_dim)
    optimizer = tf.keras.optimizers.Adam(1e-4)
    checkpoint, checkpoint_manager = utils.get_checkpoint_and_manager(
        save_dir="_data/ckpts", max_to_keep=3, model=model, optimizer=optimizer
    )
    # Restore the latest checkpoint and the saved ELBO history, if any;
    # save_counter tells us which epoch to resume from.
    utils.restore_latest(checkpoint, checkpoint_manager)
    elbo_history = train.restore_history(history_filepath)
    start_epoch = checkpoint.save_counter.numpy()
    for epoch in range(start_epoch, epochs):
        start_time = time.time()
        for train_x in train_dataset:
            train.compute_apply_gradients(model, train_x, optimizer)
        end_time = time.time()
        checkpoint_manager.save()
        # Mean test-set loss for this epoch; ELBO is its negation.
        loss = tf.keras.metrics.Mean()
        for test_x in test_dataset:
            loss(train.compute_loss(model, test_x))
        elbo_history.append(-loss.result())
        if display is not None:
            # Running under IPython: replace the previous epoch's output.
            display.clear_output(wait=False)
        logger.info(
            f"Epoch: {epoch}"
            f", Test set ELBO: {elbo_history[-1]}"
            f", time elapse for current epoch {end_time - start_time}"
        )
        # Save a sample grid, the ELBO curve image, and the raw history each
        # epoch so an interrupted run loses at most one epoch of progress.
        predictions = model.sample(random_vector_for_generation)
        train.show_and_save_images(predictions, f"_data/image_at_epoch_{epoch:04d}.png")
        train.show_and_save_history(elbo_history, history_image_filepath)
        train.save_history(elbo_history, history_filepath)
training()
```
### 結果
```
import utils
import IPython
from IPython import display
def show_generated_images():
    """Assemble the per-epoch sample images into a GIF and, when running on
    Google Colab, trigger a browser download of it."""
    filepath = "_data/dcgan.gif"
    utils.save_gif("_data/", "image_at_epoch_*", filepath)
    try:
        from google.colab import files
    except ImportError:
        # Not running on Colab; the GIF is simply left on disk.
        pass
    else:
        files.download(filepath)
show_generated_images()
```
| github_jupyter |
# July 2021 CVE Data
This notebook will pull all [JSON Data](https://nvd.nist.gov/vuln/data-feeds#JSON_FEED) from the NVD and perform some basic data analysis of CVE data.
## Getting Started
### Collecting Data
This cell pulls all JSON files from the NVD that we will be working with.
```
%%capture
!mkdir -p jsondata
%cd jsondata
!rm *.json
!rm *.zip
!wget https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-{2020..2021}.json.zip
!unzip -o "*.zip"
```
### Import Python Libraries
```
import calplot
import glob
import logging
import json
import matplotlib.pyplot as plt
import missingno as msno
import numpy as np
import os
import pandas as pd
import re
import uuid
import warnings
from datetime import datetime
logging.getLogger('matplotlib.font_manager').disabled = True
warnings.filterwarnings("ignore")
```
# 2021 Q1 CVE Data
### Build Base DataFrame
This code builds a Panda dataframe from the JSON files we downloaded, removing all CVE's marked rejected.
```
# --- July 2021 CVE extraction ------------------------------------------------
# Build a DataFrame from the NVD 1.1 JSON feed, keeping only CVEs published in
# July 2021 and dropping entries rejected by the CNA ("** REJECT **").
# The 15 near-identical try/except blocks of the previous revision are folded
# into a data-driven field table walked by a small helper.

def _cve_field(entry, path, default):
    """Walk `path` (a tuple of dict keys / list indices) into `entry`;
    return `default` if any step is missing (absent key or empty list)."""
    node = entry
    try:
        for step in path:
            node = node[step]
    except (KeyError, IndexError):
        return default
    return node

_CVSS3 = ('impact', 'baseMetricV3', 'cvssV3')
# (column name, JSON path, default when the field is absent)
_FIELDS = [
    ('Published', ('publishedDate',), 'Missing_Data_JG'),
    ('AttackVector', _CVSS3 + ('attackVector',), 'Missing_Data_JG'),
    ('AttackComplexity', _CVSS3 + ('attackComplexity',), 'Missing_Data_JG'),
    ('PrivilegesRequired', _CVSS3 + ('privilegesRequired',), 'Missing_Data_JG'),
    ('UserInteraction', _CVSS3 + ('userInteraction',), 'Missing_Data_JG'),
    ('Scope', _CVSS3 + ('scope',), 'Missing_Data_JG'),
    ('ConfidentialityImpact', _CVSS3 + ('confidentialityImpact',), 'Missing_Data_JG'),
    ('IntegrityImpact', _CVSS3 + ('integrityImpact',), 'Missing_Data_JG'),
    ('AvailabilityImpact', _CVSS3 + ('availabilityImpact',), 'Missing_Data_JG'),
    ('BaseScore', _CVSS3 + ('baseScore',), '0.0'),  # '0.0' so pd.to_numeric works later
    ('BaseSeverity', _CVSS3 + ('baseSeverity',), 'Missing_Data_JG'),
    ('ExploitabilityScore', ('impact', 'baseMetricV3', 'exploitabilityScore'), 'Missing_Data_JG'),
    ('ImpactScore', ('impact', 'baseMetricV3', 'impactScore'), 'Missing_Data_JG'),
    ('CWE', ('cve', 'problemtype', 'problemtype_data', 0, 'description', 0, 'value'), 'Missing_Data_JG'),
    ('Description', ('cve', 'description', 'description_data', 0, 'value'), ''),
]

row_accumulator = []
for filename in glob.glob('nvdcve-1.1-2021.json'):
    with open(filename, 'r', encoding='utf-8') as f:
        nvd_data = json.load(f)
    for entry in nvd_data['CVE_Items']:
        new_row = {'CVE': entry['cve']['CVE_data_meta']['ID']}
        for column, path, default in _FIELDS:
            new_row[column] = _cve_field(entry, path, default)
        # disputed, rejected and other non-issues start with '**'
        if not new_row['Description'].startswith('** REJECT **'):
            row_accumulator.append(new_row)

nvd_2021 = pd.DataFrame(row_accumulator)
# Keep only July 2021 and reduce the timestamp to a plain date.
nvd_2021 = nvd_2021[(nvd_2021['Published'] > '2021-07-01') & (nvd_2021['Published'] < '2021-08-01')]
nvd_2021['Published'] = pd.to_datetime(nvd_2021['Published']).apply(lambda x: x.date())
print('CVEs from NVD:', nvd_2021['CVE'].count())
nvdcount = nvd_2021['Published'].count()
per_day = round(nvdcount / 31, 0)  # July has 31 days
print('CVEs Published Per Day:', per_day)
```
### CVEs Per Day Graph
```
# Plot the number of CVEs published per day during July 2021 and save it.
nvd_data_2021 = nvd_2021['Published'].value_counts()
cg = nvd_data_2021.plot(colormap='jet', marker='.', figsize=(16, 8), markersize=2, title='CVEs Per Day')
plt.grid()
cg.set_ylabel("New CVEs")
cg.set_xlabel("Date")
plt.savefig('July2021.jpg', dpi=300, bbox_inches='tight')
```
### Most CVEs Per Day
```
nvd_2021['Published'].value_counts().head(10)
```
# CVSS 3 Breakdown
```
nvd_2021['BaseScore'] = pd.to_numeric(nvd_2021['BaseScore']);
nvd_2021['BaseScore'] = nvd_2021['BaseScore'].replace(0, np.NaN);
nvd_2021['BaseScore'].plot(kind="hist", title='CVSS Breakdown');
plt.savefig('July2021CVSS.jpg', dpi=300, bbox_inches='tight')
```
Average CVSS Score:
```
nvd_2021['BaseScore'].mean()
```
# July 2020 CVE Data
### Build Base DataFrame
This code builds a Panda dataframe from the JSON files we downloaded, removing all CVE's marked rejected.
```
# --- July 2020 CVE extraction ------------------------------------------------
# Same pipeline as the 2021 cell: build a DataFrame from the NVD 1.1 JSON
# feed, keep only CVEs published in July 2020 and drop rejected entries.
# The repeated try/except blocks are folded into a field table + helper.

def _cve_field(entry, path, default):
    """Walk `path` (a tuple of dict keys / list indices) into `entry`;
    return `default` if any step is missing (absent key or empty list)."""
    node = entry
    try:
        for step in path:
            node = node[step]
    except (KeyError, IndexError):
        return default
    return node

_CVSS3 = ('impact', 'baseMetricV3', 'cvssV3')
# (column name, JSON path, default when the field is absent)
_FIELDS = [
    ('Published', ('publishedDate',), 'Missing_Data_JG'),
    ('AttackVector', _CVSS3 + ('attackVector',), 'Missing_Data_JG'),
    ('AttackComplexity', _CVSS3 + ('attackComplexity',), 'Missing_Data_JG'),
    ('PrivilegesRequired', _CVSS3 + ('privilegesRequired',), 'Missing_Data_JG'),
    ('UserInteraction', _CVSS3 + ('userInteraction',), 'Missing_Data_JG'),
    ('Scope', _CVSS3 + ('scope',), 'Missing_Data_JG'),
    ('ConfidentialityImpact', _CVSS3 + ('confidentialityImpact',), 'Missing_Data_JG'),
    ('IntegrityImpact', _CVSS3 + ('integrityImpact',), 'Missing_Data_JG'),
    ('AvailabilityImpact', _CVSS3 + ('availabilityImpact',), 'Missing_Data_JG'),
    ('BaseScore', _CVSS3 + ('baseScore',), '0.0'),  # '0.0' so pd.to_numeric works later
    ('BaseSeverity', _CVSS3 + ('baseSeverity',), 'Missing_Data_JG'),
    ('ExploitabilityScore', ('impact', 'baseMetricV3', 'exploitabilityScore'), 'Missing_Data_JG'),
    ('ImpactScore', ('impact', 'baseMetricV3', 'impactScore'), 'Missing_Data_JG'),
    ('CWE', ('cve', 'problemtype', 'problemtype_data', 0, 'description', 0, 'value'), 'Missing_Data_JG'),
    ('Description', ('cve', 'description', 'description_data', 0, 'value'), ''),
]

row_accumulator = []
for filename in glob.glob('nvdcve-1.1-2020.json'):
    with open(filename, 'r', encoding='utf-8') as f:
        nvd_data = json.load(f)
    for entry in nvd_data['CVE_Items']:
        new_row = {'CVE': entry['cve']['CVE_data_meta']['ID']}
        for column, path, default in _FIELDS:
            new_row[column] = _cve_field(entry, path, default)
        # disputed, rejected and other non-issues start with '**'
        if not new_row['Description'].startswith('** REJECT **'):
            row_accumulator.append(new_row)

nvd_2020 = pd.DataFrame(row_accumulator)
# Keep only July 2020 and reduce the timestamp to a plain date.
nvd_2020 = nvd_2020[(nvd_2020['Published'] > '2020-07-01') & (nvd_2020['Published'] < '2020-08-01')]
nvd_2020['Published'] = pd.to_datetime(nvd_2020['Published']).apply(lambda x: x.date())
print('CVEs from NVD:', nvd_2020['CVE'].count())
nvdcount = nvd_2020['Published'].count()
per_day = round(nvdcount / 31, 0)  # July has 31 days
print('CVEs Published Per Day:', per_day)
```
### CVEs Per Day Graph
```
# Plot the number of CVEs published per day during July 2020 and save it.
nvd_data_2020 = nvd_2020['Published'].value_counts()
cg = nvd_data_2020.plot(colormap='jet', marker='.', figsize=(16, 8), markersize=2, title='CVEs Per Day')
plt.grid()
cg.set_ylabel("New CVEs");
cg.set_xlabel("Date");
plt.savefig('July2020.jpg', dpi=300, bbox_inches='tight')
```
### Most CVEs Per Day
```
nvd_2020['Published'].value_counts().head(10)
```
### CVE Heat Map
# CVSS 3 Breakdown
```
nvd_2020['BaseScore'] = pd.to_numeric(nvd_2020['BaseScore']);
nvd_2020['BaseScore'] = nvd_2020['BaseScore'].replace(0, np.NaN);
nvd_2020['BaseScore'].plot(kind="hist", title='CVSS Breakdown');
plt.savefig('July2020CVSS.jpg', dpi=300, bbox_inches='tight')
```
Average CVSS Score:
```
nvd_2020['BaseScore'].mean()
```
| github_jupyter |
# Missing Values Imputer
## Import Packages
```
import pandas as pd
from autoc.explorer import cserie,DataExploration
from autoc.utils.helpers import *
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
matplotlib.style.use('ggplot')
import seaborn as sns
plt.rcParams['figure.figsize'] = (12.0, 8)
np.random.seed(0)
```
## Give me some Credit data
```
# Load the Kaggle "Give Me Some Credit" training set from disk.
path = '/Users/ericfourrier/Documents/Data/Give_Me_Some_Credit/cs-training.csv'
df_train = pd.read_csv(path)
# If you prefer to work with a database instead of a csv:
# from sqlalchemy import create_engine
# engine = create_engine('sqlite://')
# df_train.to_sql('cstraining',engine)
# engine.table_names()
# test = pd.read_sql("select * from cstraining",engine)
df_train.head(10)
df_train.info()
# Mean of every numeric column, grouped by number of dependents
df_train.groupby('NumberOfDependents').mean()
# autoc exploration: structure, near-zero-variance and correlated columns
exploration = DataExploration(df_train)
exploration.structure()
exploration.nearzerovar()
exploration.findcorr()
```
### Bin true numeric values
```
# Work on a copy so df_train keeps its raw numeric columns.
df_train_categoric = df_train.copy()
# Bin the true numeric columns into deciles so every column is categorical.
cols_to_bin = ['RevolvingUtilizationOfUnsecuredLines', 'DebtRatio', 'MonthlyIncome']
nb_quantiles = 10
for col in cols_to_bin:
    df_train_categoric.loc[:, col] = pd.qcut(df_train_categoric.loc[:,col],nb_quantiles).astype('str')
# Transform every variable to the pandas Categorical type
# (left disabled: it triggered an issue with the category dtype)
# df_train_categoric = df_train_categoric.apply(lambda x: x.astype('str'),axis = 0)
df_train_categoric.dtypes
df_train_categoric.describe()
# df_simu will receive simulated missing values; ec explores it.
df_simu = df_train_categoric.copy()
ec = DataExploration(df_simu)
def simulate_na_col(df, colname, n=None, pct=None, weights=None,
                    safety=True, *args, **kwargs):
    """Inject missing values into categorical column(s) of *df*, in place.

    Parameters
    ----------
    df : pd.DataFrame
        Frame to damage (modified in place).
    colname : str, list or pd.Index
        A column name, or a collection of names handled recursively.
    n : int, optional
        Number of cells to blank out per column.
    pct : float, optional
        Fraction of rows to blank out; used only when ``n`` is None.
    weights : sequence, optional
        Probability mass over the column's categories. When falsy, a random
        pmf is drawn via ``random_pmf`` (autoc.utils.helpers).
    safety : bool, default True
        Protect the rows returned by ``keep_category`` from being blanked.
    *args, **kwargs
        Forwarded to ``keep_category`` when ``safety`` is True.
    """
    if (n is None) and (pct is not None):
        # be careful here, especially if the column already has missing values
        n = int(pct * df.shape[0])
    # pd.Index replaces the long-removed pd.core.index.Index alias
    if isinstance(colname, (pd.Index, list)):
        for c in colname:
            # forward `safety` too (it used to be silently reset to True)
            simulate_na_col(df, colname=c, n=n, pct=pct, weights=weights,
                            safety=safety)
    else:
        col = df.loc[:, colname]
        if safety:
            # we must not sample from the protected rows
            tokeep = keep_category(df, colname, *args, **kwargs)
            col = col.drop(tokeep)
        # (previously `tokeep` was referenced even when safety=False,
        # which raised NameError)
        col = col.dropna()
        print(colname)
        col_distribution = col.value_counts(normalize=True, sort=False)
        labels = col_distribution.index  # the category values
        # pmf over the categories: user-supplied or random
        pmf_na = weights if weights else random_pmf(len(labels))
        na_distribution = pd.Series(data=pmf_na, index=labels)
        # draw rows with probability proportional to their category's pmf
        weights_na = col.apply(lambda x: na_distribution[x])
        weights_na /= weights_na.sum()
        index_to_replace = col.sample(
            n=n, weights=weights_na, replace=False).index
        df.loc[index_to_replace, colname] = np.nan
# fix problem with category variable
# Blank out 80k values per column, then compare the damaged frame (df_simu)
# against the original (df_train_categoric).
simulate_na_col(df_simu,list(df_train_categoric.columns),n=80000)
df_simu.isnull().sum(axis = 0)
df_simu.hist()
df_train_categoric.hist()
df_train.dtypes
# Kullback-Leibler divergence between damaged and original target distributions
kl_series(df_simu.SeriousDlqin2yrs,df_train_categoric.SeriousDlqin2yrs)
df_simu.SeriousDlqin2yrs.dropna().value_counts(normalize=True).values
df_train_categoric.SeriousDlqin2yrs.value_counts(normalize=True).values
kl(df_train_categoric.SeriousDlqin2yrs.value_counts(normalize=True).values,df_simu.SeriousDlqin2yrs.dropna().value_counts(normalize=True).values)
# Same comparison for every column; kl_series can fail on some columns,
# hence the best-effort loop that prints the error instead of aborting.
for col in df_simu.columns:
    try :
        print("Kullback-Leibler divergence between both distribution: {}".format(
            kl_series(df_simu.loc[:,col],df_train_categoric.loc[:,col])))
    except Exception as e:
        print('error:{}'.format(e))
```
### NaImputer class test
```
from autoc import NaImputer, missing_map
missing_map(df_simu,nmax=1000) # no pattern visible
na = NaImputer(df_simu)
na.corrplot_na() # totally missing at random
na.infos_na()
```
### Prediction using scikit-learn
#### Structure of the data
```
ec.structure()
```
#### Cleaning
```
# Dirty cleaning: cap extreme count columns to tame the long tail.
df_simu.loc[df_simu.NumberOfOpenCreditLinesAndLoans >=10,'NumberOfOpenCreditLinesAndLoans'] =10
df_simu.loc[df_simu.NumberRealEstateLoansOrLines >=5,'NumberRealEstateLoansOrLines'] = 5
df_simu.loc[df_simu.NumberOfTimes90DaysLate >=5,'NumberOfTimes90DaysLate'] = 5
# NOTE(review): the original repeated the NumberOfTimes90DaysLate line twice
# (a no-op); it likely intended another past-due column such as
# 'NumberOfTime60-89DaysPastDueNotWorse' -- confirm and cap it if so.
```
#### Transformation to discrete variables
```
df_simu.age = pd.qcut(df_simu.age,10)
df_simu.age.value_counts()
DataExploration(df_simu).structure()
```
#### Scikit-learn age imputation
```
from sklearn.naive_bayes import MultinomialNB

clf = MultinomialNB()
# NOTE(review): fillna('mean') fills with the literal string 'mean', which
# becomes its own dummy column -- confirm this is the intended "missing"
# category and not a leftover from a numeric mean-imputation attempt.
# .as_matrix() was removed in pandas 1.0; .to_numpy() is the replacement.
test = pd.get_dummies(df_simu[df_simu.age.isnull()].drop('age',axis =1).fillna('mean')).to_numpy()
X = pd.get_dummies(df_simu[df_simu.age.notnull()].drop('age',axis =1).fillna('mean')).to_numpy()
y = df_simu.age[df_simu.age.notnull()].values
clf.fit(X, y)
X.shape
# train prediction
clf.predict(X)
clf.predict_proba(X)
clf.score(X, y, sample_weight=None)
# side-by-side (prediction, truth) pairs for eyeballing
res= np.array([clf.predict(X),y]).T
res
# test prediction
test.shape
#clf.predict(test)
y
df_simu.drop('age',axis =1)
df_simu.age.isnull()
df_simu.age
```
| github_jupyter |
This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# Challenge Notebook
## Problem: Determine whether there is a path between two nodes in a graph.
* [Constraints](#Constraints)
* [Test Cases](#Test-Cases)
* [Algorithm](#Algorithm)
* [Code](#Code)
* [Unit Test](#Unit-Test)
* [Solution Notebook](#Solution-Notebook)
## Constraints
* Is the graph directed?
* Yes
* Can we assume we already have Graph and Node classes?
* Yes
* Can we assume this is a connected graph?
* Yes
* Can we assume the inputs are valid?
* Yes
* Can we assume this fits memory?
* Yes
## Test Cases
Input:
* `add_edge(source, destination, weight)`
```
graph.add_edge(0, 1, 5)
graph.add_edge(0, 4, 3)
graph.add_edge(0, 5, 2)
graph.add_edge(1, 3, 5)
graph.add_edge(1, 4, 4)
graph.add_edge(2, 1, 6)
graph.add_edge(3, 2, 7)
graph.add_edge(3, 4, 8)
```
Result:
* search_path(start=0, end=2) -> True
* search_path(start=0, end=0) -> True
* search_path(start=4, end=5) -> False
## Algorithm
Refer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/graphs_trees/graph_path_exists/path_exists_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.
## Code
```
%run ../graph/graph.py
%load ../graph/graph.py
class GraphPathExists(Graph):
    """Challenge stub extending the Graph class loaded above via %run.

    The implementation is intentionally left to the reader -- this notebook
    is an interactive coding challenge; the linked solution notebook has
    the answer.
    """

    def path_exists(self, start, end):
        """Return True if a directed path exists from `start` to `end`."""
        # TODO: Implement me
        pass
```
## Unit Test
**The following unit test is expected to fail until you solve the challenge.**
```
# %load test_path_exists.py
from nose.tools import assert_equal


class TestPathExists(object):
    """Exercises path_exists on the fixed 6-node graph from the test cases above."""

    def test_path_exists(self):
        nodes = []
        graph = GraphPathExists()
        for id in range(0, 6):
            nodes.append(graph.add_node(id))
        # Directed, weighted edges: (source, destination, weight)
        graph.add_edge(0, 1, 5)
        graph.add_edge(0, 4, 3)
        graph.add_edge(0, 5, 2)
        graph.add_edge(1, 3, 5)
        graph.add_edge(1, 4, 4)
        graph.add_edge(2, 1, 6)
        graph.add_edge(3, 2, 7)
        graph.add_edge(3, 4, 8)
        assert_equal(graph.path_exists(nodes[0], nodes[2]), True)
        assert_equal(graph.path_exists(nodes[0], nodes[0]), True)  # trivial self path
        assert_equal(graph.path_exists(nodes[4], nodes[5]), False)
        print('Success: test_path_exists')


def main():
    """Run the unit test when executed as a script."""
    test = TestPathExists()
    test.test_path_exists()


if __name__ == '__main__':
    main()
```
## Solution Notebook
Review the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/graphs_trees/graph_path_exists/path_exists_solution.ipynb) for a discussion on algorithms and code solutions.
| github_jupyter |
```
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
#from PIL import Image
from scipy import ndimage
import tensorflow as tf
from tensorflow.python.framework import ops
from cnn_utils import *
from sklearn.decomposition import PCA
from scipy.stats.mstats import zscore # This is to standardized the parameters
%matplotlib inline
np.random.seed(1)
```
## 1. Load Data
```
import pickle
import os

# Build the paths portably ('\D' and '\L' in the old hard-coded strings were
# invalid escape sequences that only worked by accident).
fpath = os.path.join("Datasets", "Data_M_2.8_R_0.5_S_4_Sec_256_2D_129_45.pkl")
# NOTE: pickle.load executes arbitrary code -- only load trusted files.
with open(fpath, 'rb') as pkl_file:  # close the handle (it was leaked before)
    data = pickle.load(pkl_file)
label_path = os.path.join("Datasets", "Label_M_2.8_R_0.5_S_4_Sec_256.npy")
label = np.load(label_path)
print(data.shape)
print(label.shape)
def split_reshape_dataset(X, Y, ratio):
    """Randomly split paired arrays X (4-D, samples first) and Y (2-D).

    Returns (X_train, X_test, Y_train, Y_test), where the first
    int(ratio * m) samples of a random permutation form the training
    split. Uses the global numpy RNG -- seed it for reproducibility.
    """
    n_samples = X.shape[0]
    order = np.arange(n_samples)
    np.random.shuffle(order)
    n_train = int(ratio * n_samples)
    train_idx, test_idx = order[:n_train], order[n_train:]
    X_train, X_test = X[train_idx, :, :, :], X[test_idx, :, :, :]
    Y_train, Y_test = Y[train_idx, :], Y[test_idx, :]
    return X_train, X_test, Y_train, Y_test
# Earlier experiments, kept for reference:
#data = data[300:700,:]
#data = (data - np.mean(data, axis = 0, keepdims= True)) / np.std(data, axis = 0, keepdims = True)
# Standardize the features (scipy.stats.mstats.zscore, imported above)
data=zscore(data)
RatioTraining=0.8; # fraction of samples used for training (0.8 before)
X_train, X_test, Y_train, Y_test = split_reshape_dataset(data, label, RatioTraining)
# convert_to_one_hot comes from cnn_utils; 2 classes, transposed to (m, 2)
Y_train =convert_to_one_hot(Y_train,2).T
Y_test = convert_to_one_hot(Y_test,2).T
print(X_train.shape)
print(Y_train.shape)
print(data.shape)
print(label.shape)
i = 104  # NOTE(review): unused leftover from an earlier cell -- safe to delete
```
## 2. TensorFlow Functions
```
def create_placeholders(n_H0, n_W0, n_C0, n_y):
    """Create the TF1 feed placeholders for inputs and labels.

    Arguments:
    n_H0 -- scalar, height of an input image
    n_W0 -- scalar, width of an input image
    n_C0 -- scalar, number of channels of the input
    n_y -- scalar, number of classes

    Returns:
    X -- float32 placeholder of shape [None, n_H0, n_W0, n_C0]
    Y -- float32 placeholder of shape [None, n_y]
    """
    # Leading None lets the batch dimension vary between mini-batches.
    X = tf.placeholder(dtype=tf.float32, shape=(None, n_H0, n_W0, n_C0))
    Y = tf.placeholder(dtype=tf.float32, shape=(None, n_y))
    return X, Y
def initialize_parameters():
    """
    Initializes the convolution kernels with Xavier initialization.

    Actual shapes (height, width, in_channels, out_channels):
                        W1 : [16, 4, 1, 8]
                        W2 : [8, 2, 8, 16]
    (The old docstring listed [4, 4, 3, 8] / [2, 2, 8, 16], which did not
    match the code.)

    Returns:
    parameters -- a dictionary of tensors containing W1, W2
    """
    tf.set_random_seed(1)           # so that the "random" numbers are reproducible
    NumFilters1 = 8                 # output channels of the first conv layer
    NumFilters2 = 16                # output channels of the second conv layer
    W1 = tf.get_variable("W1", [16, 4, 1, NumFilters1], initializer = tf.contrib.layers.xavier_initializer(seed = 0))
    W2 = tf.get_variable("W2", [8, 2, NumFilters1, NumFilters2], initializer = tf.contrib.layers.xavier_initializer(seed = 0))
    parameters = {"W1": W1,
                  "W2": W2}
    return parameters
def forward_propagation(X, parameters):
    """
    Implements the forward propagation for the model:
    CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED

    Arguments:
    X -- input batch placeholder, shape (batch, n_H0, n_W0, n_C0)
    parameters -- python dictionary containing the kernels "W1", "W2"
                  (shapes are given in initialize_parameters)

    Returns:
    Z3 -- the output of the last LINEAR unit (logits, shape (batch, 2))
    W1, W2 -- the convolution kernels, returned so the caller can build an
              L2 regularizer over them (see compute_cost)
    """
    # Retrieve the parameters from the dictionary "parameters"
    W1 = parameters['W1']
    W2 = parameters['W2']
    # CONV2D: stride of 1, padding 'SAME'
    Z1 = tf.nn.conv2d(X, W1, strides = [1,1,1,1], padding = 'SAME')
    # RELU
    A1 = tf.nn.relu(Z1)
    # MAXPOOL: 16x4 window, stride 1, padding 'SAME'
    # (the previous comment said "window 8x8, stride 8", which the code contradicts)
    P1 = tf.nn.max_pool(A1, ksize = [1,16,4,1], strides = [1,1,1,1], padding = 'SAME')
    # CONV2D: filters W2, stride 1, padding 'SAME'
    Z2 = tf.nn.conv2d(P1, W2, strides = [1,1,1,1], padding = 'SAME')
    # RELU
    A2 = tf.nn.relu(Z2)
    # MAXPOOL: 8x2 window, stride 1, padding 'SAME'
    P2 = tf.nn.max_pool(A2, ksize = [1,8,2,1], strides = [1,1,1,1], padding = 'SAME')
    # FLATTEN to (batch, features) for the dense layer
    P2 = tf.contrib.layers.flatten(P2)
    # FULLY-CONNECTED without non-linear activation (softmax is applied in the loss).
    # 2 output neurons: binary classification.
    Z3 = tf.contrib.layers.fully_connected(P2, num_outputs=2, activation_fn=None)
    return Z3, W1, W2
def compute_cost(Z3, Y, W1, W2, beta):
    """
    Computes the softmax cross-entropy cost with L2 weight regularization.

    Arguments:
    Z3 -- output of forward propagation (logits), shape (batch, 2)
    Y -- "true" one-hot labels placeholder, same shape as Z3
    W1, W2 -- convolution kernels included in the L2 penalty
    beta -- L2 regularization strength

    Returns:
    cost - scalar Tensor of the regularized mean cost
    """
    # NOTE(review): this is the number of CLASSES (2), not the number of
    # examples -- confirm whether the regularizer was meant to be scaled by
    # the batch size instead.
    m = int(Y.get_shape()[1])
    cost = tf.nn.softmax_cross_entropy_with_logits_v2(logits = Z3, labels = Y)
    regularizer = tf.nn.l2_loss(W1) + tf.nn.l2_loss(W2)
    # mean cross-entropy plus the scaled L2 penalty
    cost = tf.reduce_mean(cost + 1/m* beta * regularizer)
    return cost
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001,
          num_epochs = 10, minibatch_size = 50, print_cost = True, beta = 0.1):
    """
    Implements a three-layer ConvNet in Tensorflow:
    CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED

    Arguments:
    X_train -- training set, of shape (None, n_H0, n_W0, n_C0)
    Y_train -- training labels, of shape (None, n_y)
    X_test -- test set, same trailing shape as X_train
    Y_test -- test labels, of shape (None, n_y)
    learning_rate -- learning rate of the Adam optimization
    num_epochs -- number of epochs of the optimization loop
    minibatch_size -- size of a minibatch
    print_cost -- True to print the cost every 5 epochs
    beta -- L2 regularization strength (forwarded to compute_cost)

    Returns:
    train_accuracy -- real number, accuracy on the train set (X_train)
    test_accuracy -- real number, testing accuracy on the test set (X_test)
    parameters -- parameters learnt by the model. They can then be used to predict.
    """
    ops.reset_default_graph()       # rerun the model without overwriting tf variables
    tf.set_random_seed(1)           # keep results consistent (tensorflow seed)
    seed = 3                        # keep results consistent (numpy seed)
    (m, n_H0, n_W0, n_C0) = X_train.shape
    n_y = Y_train.shape[1]
    costs = []                      # to keep track of the cost
    # Create Placeholders of the correct shape
    X, Y = create_placeholders(n_H0, n_W0, n_C0, n_y)
    # Initialize parameters
    parameters = initialize_parameters()
    # Forward propagation: also returns W1/W2 so compute_cost can regularize them
    Z3, W1, W2 = forward_propagation(X, parameters)
    # Cost function
    cost = compute_cost(Z3, Y, W1, W2, beta)
    # Backpropagation: AdamOptimizer minimizing the cost
    optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
    # Initialize all the variables globally
    init = tf.global_variables_initializer()
    # Start the session to compute the tensorflow graph
    with tf.Session() as sess:
        # Run the initialization
        sess.run(init)
        # Do the training loop
        for epoch in range(num_epochs):
            minibatch_cost = 0.
            num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
            seed = seed + 1                           # reshuffle differently every epoch
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)
            for minibatch in minibatches:
                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch
                # Run the graph on the minibatch: execute optimizer and cost together
                _ , temp_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
                minibatch_cost += temp_cost / num_minibatches
            # Print the cost every 5 epochs, record it every epoch
            if print_cost == True and epoch % 5 == 0:
                print ("Cost after epoch %i: %f" % (epoch, minibatch_cost))
            if print_cost == True and epoch % 1 == 0:
                costs.append(minibatch_cost)
        # plot the cost curve
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()
        # Calculate the correct predictions (argmax over the 2 logits)
        predict_op = tf.argmax(Z3, 1)
        correct_prediction = tf.equal(predict_op, tf.argmax(Y, 1))
        # Calculate accuracy on the train and test sets
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print(accuracy)
        train_accuracy = accuracy.eval({X: X_train, Y: Y_train})
        test_accuracy = accuracy.eval({X: X_test, Y: Y_test})
        print("Train Accuracy:", train_accuracy)
        print("Test Accuracy:", test_accuracy)
        return train_accuracy, test_accuracy, parameters
# Train the ConvNet for 30 epochs and report train/test accuracy.
_, _, parameters = model(X_train, Y_train, X_test, Y_test, num_epochs =30, learning_rate = 0.001, beta = .1)
# Class balance: fraction of positive labels in the whole dataset.
sum(label)/label.shape[0]
```
| github_jupyter |
# OpenStreetMap: Historical Analysis
In this notebook, we analyze the evolution of OpenStreetMap availability through time from 2011 to 2018. More specifically, we are interested in the mapping trends related to the road network, the buildings footprints, and land use polygons.
## Imports & Parameters
```
import os
import sys
from functools import partial
from itertools import product
import pandas as pd
import geopandas as gpd
import numpy as np
import matplotlib.pyplot as plt
import requests
from tqdm import tqdm_notebook as tqdm
from shapely.geometry import shape, mapping
import json
import subprocess
# Add local module to the path
src = os.path.abspath('../src')
if src not in sys.path:
sys.path.append(src)
from metadata import City, CITIES
from generate_aoi import reproject_geom, as_geojson
%matplotlib inline
# Plotting style
plt.style.use(['seaborn-paper', 'seaborn-whitegrid'])
plt.rc('font', family='serif', serif='cmr10', size=12)
plt.rc('axes', titlesize=12)
plt.rc('axes', labelsize=12)
plt.rc('xtick', labelsize=12)
plt.rc('ytick', labelsize=12)
plt.rc('legend', fontsize=12)
plt.rc('figure', titlesize=12)
plt.rc('text', usetex=True)
plt.rc('savefig', dpi=300)
KEYS = ['highway', 'building', 'leisure', 'natural', 'landuse']
YEARS = [2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018]
def prettify(city_name):
    """Return a display-friendly city name: underscores become hyphens
    and every word is title-cased (e.g. 'dar_es_salaam' -> 'Dar-Es-Salaam')."""
    return city_name.replace('_', '-').title()
```
## Data Acquisition
OpenStreetMap data are downloaded from the [Geofabrik](http://download.geofabrik.de) website. Two kinds of data files are available: (1) full dumps that contains all the historical informations (e.g. when each feature has been added, corrected or removed), identified by the `osh.pbf` extension, and (2) default dumps that contains the latest snapshot of the OpenStreetMap data, identified by the `osm.pbf`. In this case, we are interested in the first one. NB: the full historical dump for Africa has a size of `3.3GB` as of April 2018.
```
africa_history_f = os.path.abspath(os.path.join('../data/input/osm/africa.osh.pbf'))
if not os.path.isfile(africa_history_f):
africa_history_url = 'http://download.geofabrik.de/africa.osh.pbf'
os.makedirs(os.path.dirname(africa_history_f), exist_ok=True)
r = requests.get(africa_history_url, stream=True)
size = int(r.headers['Content-Length'])
#progress = tqdm(total=size, unit='B', unit_scale=True)
with open(africa_history_f, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
#progress.update(1024)
#progress.close()
```
## Data Pre-Processing
Data pre-processing is mainly performed with the [`osmium`](http://osmcode.org/osmium-tool/) command-line tool. The documentation is available [here](http://osmcode.org/osmium-tool/manual.html#the-osmium-command).
### Geographical extracts
We create geographical extracts of the full history dump (based on the area of interest of each case study) using the `osmium extract --with-history` command. The area of interest is provided through a bounding box string such as `16.87,-22.74,17.26,-22.37`.
```
#progress = tqdm(total=len(CITIES))
for city_name in CITIES:
city = City(city_name)
# In our case, the AOI is stored in an UTM CRS
# Osmium requires a GeoJSON file in lat/lon coordinates
aoi = shape(city.aoi)
aoi = reproject_geom(aoi, city.epsg, 4326)
bbox = '{},{},{},{}'.format(*aoi.bounds)
history_dump = os.path.join(city.intermediary_dir, 'osm', 'history', 'dump.osh.pbf')
os.makedirs(os.path.dirname(history_dump), exist_ok=True)
if not os.path.isfile(history_dump):
subprocess.run([
'osmium', 'extract', '-b', bbox,
'--with-history', '--overwrite',
'-o', history_dump, africa_history_f
])
#progress.update(1)
#progress.close()
```
### Filtering by year and by tags
We are looking for informations related to various OSM objects: roads, buildings footprints, and potentially non-built polygons (i.e. leisure, landuse, and natural). The objective is to produce one data file for each object of interest at multiple years and for each case study. The processing makes use of three different `osmium` commands. Firstly, yearly temporal extracts are produced for each case study using the `osmium time-filter` command. Then, individual data files are extracted for each object of interest using the `osmium tags-filter` command. Finally, the data files are converted to the GEOJSON format using the `osmium export` command.
```
# For every (city, year, key): snapshot the history dump at Jan 1st,
# keep only ways carrying the tag of interest, export to GeoJSON, then
# clip/reproject with geopandas.
time = '{year}-01-01T00:00:00Z'
progress = tqdm(total=len(CITIES) * len(YEARS) * len(KEYS))
for city_name in CITIES:
    city = City(city_name)
    full_dump = os.path.join(city.intermediary_dir, 'osm', 'history', 'dump.osh.pbf')
    for year in YEARS:
        output_dir = os.path.join(city.intermediary_dir, 'osm', 'history', str(year))
        os.makedirs(output_dir, exist_ok=True)
        # 1) Temporal extract: state of the map at Jan 1st of `year`
        yearly_dump = os.path.join(output_dir, 'dump.osm.pbf')
        subprocess.run([
            'osmium', 'time-filter', '-O', '-o', yearly_dump,
            full_dump, time.format(year=year)
        ])
        for key in KEYS:
            # 2) Keep only ways ('w/') tagged with the key of interest
            filtered_dump = os.path.join(output_dir, '{}.osm.pbf'.format(key))
            filt_expression = 'w/{}'.format(key)
            subprocess.run([
                'osmium', 'tags-filter', '-O', '-o', filtered_dump,
                yearly_dump, filt_expression
            ])
            # 3) Convert the filtered dump to GeoJSON
            geojson = os.path.join(output_dir, 'dump_{}.geojson'.format(key))
            subprocess.run([
                'osmium', 'export', '-O', '-o', geojson, filtered_dump
            ])
            # Keep only objects of a given geometry type
            # and fix geometries that are not contained by the AOI.
            # Also reproject to an UTM CRS
            if key == 'highway':
                geom_type = 'LineString'
            else:
                geom_type = 'MultiPolygon'
            # Best effort: some (city, year, key) combinations have no data.
            # Log the failure instead of silently swallowing every exception
            # (the old bare `except: pass` also ate KeyboardInterrupt).
            try:
                features = gpd.read_file(geojson)
                features = features[features.geom_type == geom_type]
                features = features.to_crs(city.crs)
                features = features[features.intersects(shape(city.aoi))]
                features.to_file(os.path.join(output_dir, key + '.geojson'), driver='GeoJSON')
            except Exception as e:
                print('Skipping {} {} {}: {}'.format(city_name, year, key, e))
            progress.update(1)
        # Remove intermediate files, keep only the per-key GeoJSONs
        for file in os.listdir(output_dir):
            if 'osm.pbf' in file or 'dump' in file:
                os.remove(os.path.join(output_dir, file))
progress.close()
```
As a result, for each case study and each year between 2011 and 2018, we have the following data files:
- `highway.geojson`
- `building.geojson`
- `landuse.geojson`
- `leisure.geojson`
- `natural.geojson`
## Local analysis
```
def count_and_measure(geojson_file, area_of_interest):
    """Count the objects in a GeoJSON file and measure their total extent.

    Geometries are clipped to `area_of_interest` before measuring. Returns
    a tuple (count, size): size is the summed length for LineString layers
    or the summed area otherwise, rounded to the nearest unit.
    """
    layer = gpd.read_file(geojson_file)
    # Clip every geometry to the area of interest.
    layer = layer.assign(geometry=layer.intersection(area_of_interest))
    count = len(layer)
    # Layers are homogeneous, so the first geometry's type decides whether
    # we sum lengths or areas.
    if layer.type[0] == 'LineString':
        total = layer.length.sum()
    else:
        total = layer.area.sum()
    return count, round(total)
index = pd.MultiIndex.from_product((CITIES, KEYS))
count = pd.DataFrame(index=index, columns=YEARS)
sizes = pd.DataFrame(index=index, columns=YEARS)
#progress = tqdm(total=len(CITIES) * len(YEARS) * len(KEYS))
for city_name in CITIES:
city = City(city_name)
aoi = shape(city.aoi)
for year in YEARS:
data_dir = os.path.join(city.intermediary_dir, 'osm', 'history', str(year))
for key in KEYS:
geojson_path = os.path.join(data_dir, key + '.geojson')
if os.path.isfile(geojson_path):
n, s = count_and_measure(geojson_path, aoi)
count.at[((city_name, key), year)] = n
sizes.at[((city_name, key), year)] = s
#progress.update(1)
#progress.close()
# Replace NaN by 0
count = count.replace(np.nan, 0)
sizes = sizes.replace(np.nan, 0)
# Convert sizes to km and ha
def convert(row):
    """Convert one sizes row to human units: metres -> km for highways,
    square metres -> hectares otherwise, rounded to one decimal place.

    `row` is an item from DataFrame.iterrows(): ((city_name, key), data).
    """
    (city_name, key), data = row
    scale = 1e-3 if key == 'highway' else 1e-4
    data = data * scale
    return data.map(lambda v: round(v, ndigits=1))
for i, row in enumerate(sizes.iterrows()):
sizes.iloc[i] = convert(row)
f, axes = plt.subplots(2, 2, figsize=(8, 8), sharex=True)
for key, ax in zip(('highway', 'building', 'landuse'), axes.flat):
evolution = count.xs(key, level=1)
evolution.index = evolution.index.map(prettify)
evolution.T.plot(ax=ax)
ax.plot(evolution.mean(), label='Mean', linestyle=':', color='C3')
ax.legend([])
ax.set_xlim(2011, 2018)
ax.set_title('Number of $\\texttt{{{key}}}$ objects'.format(key=key))
for key in KEYS:
evolution = count.xs(key, level=1).sum()
evolution = evolution / evolution.max()
axes[1][1].plot(evolution, label=key.title())
axes[1][1].set_xlim(2011, 2018)
axes[1][1].set_ylim(0, 1)
axes[1][1].legend(ncol=1, frameon=True)
axes[1][1].set_title('Normalized count for all case studies')
axes.flat[1].legend(ncol=1, frameon=True)
f.tight_layout()
f.savefig('../paper/figures/osm_history_local.pdf')
f.savefig('../paper/figures/osm_history_local.png');
```
## Global analysis
Geographical extracts are available through the [Geofabrik](https://www.geofabrik.de/data/download.html) website at various years (mainly from January 2014 to January 2018). Here, we collect the file size in bytes for various continents or countries from 2014 to 2018 by requesting HTTP headers to the website.
```
CONTINENTS = ['africa', 'antarctica', 'asia', 'australia-oceania', 'central-america', 'europe', 'north-america', 'south-america']
COUNTRIES = ['germany', 'france', 'belgium', 'spain', 'italy', 'mozambique', 'uganda', 'nigeria', 'madagascar', 'namibia']
YEARS = ['2014', '2015', '2016', '2017', '2018']
def get_size(continent, year, country=None):
    """Return the size in bytes of a Geofabrik OSM extract for a given year.

    Only the HTTP headers are requested (HEAD), so nothing is downloaded.
    `year` may be an int or str; its last two digits select the Jan 1st
    snapshot. Returns None when the file does not exist (non-200 status).
    """
    yy = str(year)[-2:]
    if country:
        url = 'http://download.geofabrik.de/{}/{}-{}0101.osm.pbf'.format(continent, country, yy)
    else:
        url = 'http://download.geofabrik.de/{}-{}0101.osm.pbf'.format(continent, yy)
    response = requests.head(url)
    if response.status_code != 200:
        return None
    return int(response.headers['Content-Length'])
size_per_continent = pd.DataFrame(index=CONTINENTS, columns=YEARS)
size_per_country = pd.Series(index=COUNTRIES)
for continent, year in product(CONTINENTS, YEARS):
size = get_size(continent, year)
size_per_continent.at[(continent, year)] = size
for country in COUNTRIES:
size = get_size('africa', 2018, country)
if not size:
size = get_size('europe', 2018, country)
size_per_country.loc[country] = size
```
Convert Bytes to Gigabytes:
```
size_per_continent *= 1e-9
size_per_country *= 1e-9
func = partial(round, ndigits=2)
size_per_continent = size_per_continent.applymap(func)
size_per_country = size_per_country.map(func)
size_per_continent
size_per_country
for c in ('antarctica', 'central-america'):
if c in size_per_continent.index:
size_per_continent.drop(c, axis=0, inplace=True)
f, ax = plt.subplots(figsize=(5,5))
for continent in size_per_continent.index:
evolution = size_per_continent.loc[continent]
ax.plot(evolution, label=continent.title())
ax.set_yscale('log')
ax.set_xlim(2014, 2018)
ax.set_ylabel('Gigabytes')
for line in ax.lines:
y = line.get_ydata()[-1]
label = line.get_label()
annot = ax.annotate(label, xy=(1, y), xytext=(5, 0), xycoords=ax.get_yaxis_transform(), textcoords='offset points', va='center')
f.tight_layout();
f.savefig('../paper/figures/osm_per_continent.pdf', bbox_inches='tight')
f.savefig('../paper/figures/osm_per_continent.png', bbox_inches='tight');
```
Save dataframes to disk:
```
output_dir = os.path.abspath('../data/output')
os.makedirs(output_dir, exist_ok=True)
output_f = os.path.join(output_dir, 'osm_evolution_continent.csv')
size_per_continent.to_csv(output_f)
output_f = os.path.join(output_dir, 'osm_evolution_city.csv')
sizes.to_csv(output_f)
```
| github_jupyter |
## Set up the dependencies
```
# for reading and validating data
import emeval.input.spec_details as eisd
import emeval.input.phone_view as eipv
import emeval.input.eval_view as eiev
# Visualization helpers
import emeval.viz.phone_view as ezpv
import emeval.viz.eval_view as ezev
# Analytics results
import emeval.metrics.segmentation as ems
# For plots
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
%matplotlib inline
import IPython.display as ipyd
# For maps
import folium
import branca.element as bre
# For easier debugging while working on modules
import importlib
import pandas as pd
pd.options.display.float_format = '{:.6f}'.format
import arrow
import numpy as np
THIRTY_MINUTES = 30 * 60
TEN_MINUTES = 10 * 60
```
## The spec
The spec defines what experiments were done, and over which time ranges. Once the experiment is complete, most of the structure is read back from the data, but we use the spec to validate that it all worked correctly. The spec also contains the ground truth for the legs. Here, we read the spec for the trip to UC Berkeley.
```
DATASTORE_URL = "http://cardshark.cs.berkeley.edu"
AUTHOR_EMAIL = "shankari@eecs.berkeley.edu"
sd_la = eisd.SpecDetails(DATASTORE_URL, AUTHOR_EMAIL, "unimodal_trip_car_bike_mtv_la")
sd_sj = eisd.SpecDetails(DATASTORE_URL, AUTHOR_EMAIL, "car_scooter_brex_san_jose")
sd_ucb = eisd.SpecDetails(DATASTORE_URL, AUTHOR_EMAIL, "train_bus_ebike_mtv_ucb")
```
## The views
There are two main views for the data - the phone view and the evaluation view.
### Phone view
In the phone view, the phone is primary, and then there is a tree that you can traverse to get the data that you want. Traversing that tree typically involves nested for loops; here's an example of loading the phone view and traversing it. You can replace the print statements with real code. When you are ready to check this in, please move the function to one of the python modules so that we can invoke it more generally
```
# Reload the phone-view module (handy while editing it) and build one
# PhoneView per timeline spec.
importlib.reload(eipv)
pv_la = eipv.PhoneView(sd_la)
pv_sj = eipv.PhoneView(sd_sj)
pv_ucb = eipv.PhoneView(sd_ucb)
# Android motion-activity type codes -> mode names
ANDROID_MODE_MAP = {0: "AUTOMOTIVE", 1: "CYCLING", 2: "WALKING", 3: "STATIONARY"}
# "zzbhB" is an obfuscated field name from the Google Play Services activity
# object -- presumably the activity-type code; verify against the raw
# entries if the library version changes.
ANDROID_MAP_FN = lambda t: ANDROID_MODE_MAP[t["zzbhB"]]
def IOS_MAP_FN(t):
    """Map an iOS activity dict (activity name -> bool) to a mode string.

    Returns the upper-cased name of the single flag that is True, or
    "INVALID" when zero or several flags are set.
    """
    flags = pd.Series(t)
    true_keys = flags[flags == True].index.tolist()
    if len(true_keys) != 1:
        # zero or multiple activities reported: ambiguous sample
        return "INVALID"
    return true_keys[0].upper()
# Per-OS dispatch tables for mode mapping and transition detection.
MAP_FNS = {"android": ANDROID_MAP_FN, "ios": IOS_MAP_FN}
TRANSITION_FNS = {"android": ems.get_transition_mask_android, "ios": ems.get_transition_mask_ios}
importlib.reload(ems)
# Populate the sensed section ranges on every phone view.
ems.fill_sensed_section_ranges(pv_la)
ems.fill_sensed_section_ranges(pv_sj)
ems.fill_sensed_section_ranges(pv_ucb)
importlib.reload(ems)
# Ground-truth mode -> coarse sensed base mode, for sensed-vs-actual comparison.
BASE_MODE = {"WALKING": "WALKING", "BICYCLING": "CYCLING", "ESCOOTER": "CYCLING", "BUS": "AUTOMOTIVE", "TRAIN": "AUTOMOTIVE", "LIGHT_RAIL": "AUTOMOTIVE", "SUBWAY": "AUTOMOTIVE", "CAR": "AUTOMOTIVE"}
def get_tradeoff_entries(pv):
    """Flatten a PhoneView into per-section tradeoff records.

    Walks os -> phone -> evaluation range -> trip -> section, skipping
    control phones and WAITING sections, and emits one dict per section
    combining context (phone, timeline, role, battery drain, counts) with
    the mode-check results from ems.get_mode_check_results.
    """
    tradeoff_entry_list = []
    for phone_os, phone_map in pv.map().items():
        print(15 * "=*")
        print(phone_os, phone_map.keys())
        for phone_label, phone_detail_map in phone_map.items():
            print(4 * ' ', 15 * "-*")
            print(4 * ' ', phone_label, phone_detail_map.keys())
            # Control phones run at a fixed config; they are not part of the tradeoff.
            if "control" in phone_detail_map["role"]:
                print("Ignoring %s phone %s since they are always on" % (phone_detail_map["role"], phone_label))
                continue
            # this spec does not have any calibration ranges, but evaluation ranges are actually cooler
            for r in phone_detail_map["evaluation_ranges"]:
                print(8 * ' ', 30 * "=")
                print(8 * ' ',r.keys())
                print(8 * ' ',r["trip_id"], r["eval_common_trip_id"], r["eval_role"], len(r["evaluation_trip_ranges"]))
                # Battery drain over the whole range: percent at start minus end.
                bcs = r["battery_df"]["battery_level_pct"]
                delta_battery = bcs.iloc[0] - bcs.iloc[-1]
                print("Battery starts at %d, ends at %d, drain = %d" % (bcs.iloc[0], bcs.iloc[-1], delta_battery))
                for tr in r["evaluation_trip_ranges"]:
                    # Match sensed sections against the ground-truth sections of this trip.
                    matching_section_map = ems.find_matching_segments(tr["evaluation_section_ranges"],
                        "trip_id", tr["sensed_section_ranges"])
                    print("For trip %s, found matching ranges %s" % (tr["trip_id"], matching_section_map))
                    for section in tr["evaluation_section_ranges"]:
                        section_gt_leg = pv.spec_details.get_ground_truth_for_leg(tr["trip_id_base"],
                            section["trip_id_base"])
                        # WAITING legs have no travel mode to check against.
                        if section_gt_leg["type"] == "WAITING":
                            print("Skipping WAITING section %s %s with potential partway transitions" %
                                (tr["trip_id"], section["trip_id"]))
                            continue
                        result = ems.get_mode_check_results(section, section_gt_leg, matching_section_map)
                        tradeoff_entry = {"phone_os": phone_os, "phone_label": phone_label,
                            "timeline": pv.spec_details.curr_spec["id"],
                            "range_id": r["trip_id"],
                            "run": r["trip_run"], "duration": r["duration"],
                            "role": r["eval_role_base"], "battery_drain": delta_battery,
                            "section_count": len(tr["sensed_section_ranges"]),
                            "trip_id": tr["trip_id"],
                            "section_id": section["trip_id"]}
                        tradeoff_entry.update(result)
                        tradeoff_entry_list.append(tradeoff_entry)
    return tradeoff_entry_list
importlib.reload(ems)
# We are not going to look at battery life at the evaluation trip level; we will end with evaluation range
# since we want to capture the overall drain for the timeline
# One flat list of per-section records across all three timelines.
tradeoff_entries_list = []
tradeoff_entries_list.extend(get_tradeoff_entries(pv_la))
tradeoff_entries_list.extend(get_tradeoff_entries(pv_sj))
tradeoff_entries_list.extend(get_tradeoff_entries(pv_ucb))
tradeoff_df = pd.DataFrame(tradeoff_entries_list)
```
## Add in other entries to the dataframe to allow us to plot better
```
# Map sensing-quality roles to ordinals (and back) so plots sort consistently.
r2q_map = {"power_control": 0, "HAMFDC": 1, "MAHFDC": 2, "HAHFDC": 3, "accuracy_control": 4}
q2r_map = {0: "power", 1: "HAMFDC", 2: "MAHFDC", 3: "HAHFDC", 4: "accuracy"}
# Make a number so that can get the plots to come out in order
tradeoff_df["quality"] = tradeoff_df.role.apply(lambda r: r2q_map[r])
# gt_duration (seconds) is presumably one of the mode-check result columns -- confirm.
tradeoff_df["gt_duration_mins"] = tradeoff_df.gt_duration // 60
```
## Timeline + section count variations
We should ideally have only one transition in every TRAVEL section
```
# Spot-check section ids for one timeline/run/role combination.
tradeoff_df.query("timeline=='unimodal_trip_car_bike_mtv_la' & run == 1 & role == 'HAMFDC'").section_id
tradeoff_df.head()
# tradeoff_df_filtered = tradeoff_df.query("gt_duration > (20*60) & ((section_id != 'commuter_rail_with_tunnels_0' & section_id != 'inner_suburb_downtown_walk_0') | phone_os != 'android')")
# Drop the two android sections where the phone ran out of battery mid-section
# (see the "0 matching_pct" notes further down).
tradeoff_df_filtered = tradeoff_df.query("((section_id != 'commuter_rail_with_tunnels_0' & section_id != 'inner_suburb_downtown_walk_0') | phone_os != 'android')")
tradeoff_df_filtered.section_id.unique()
# Leftover interactive sanity check; evaluates to a bare bool, no effect.
'tt' not in 'tt_city_escooter_city_bus_rapid_transit_0'
# 2x3 grid of boxplots: rows = android/ios, columns = the three timelines.
ifig, ax_array = plt.subplots(nrows=2,ncols=3,figsize=(9,6), sharex=False, sharey=False)
timeline_list = ["train_bus_ebike_mtv_ucb", "car_scooter_brex_san_jose", "unimodal_trip_car_bike_mtv_la"]
for i, tl in enumerate(timeline_list):
    print(len(tradeoff_df_filtered.query("timeline == @tl & phone_os == 'android'")))
    tradeoff_df_filtered.query("timeline == @tl & phone_os == 'android'").boxplot(ax = ax_array[0][i], column=["matching_pct"], by=["quality"])
    ax_array[0][i].set_title(tl)
    print(len(tradeoff_df_filtered.query("timeline == @tl & phone_os == 'ios'")))
    tradeoff_df_filtered.query("timeline == @tl & phone_os == 'ios'").boxplot(ax = ax_array[1][i], column=["matching_pct"], by=["quality"])
    ax_array[1][i].set_title("")
# tradeoff_df.query("timeline == @tl & phone_os == 'ios'").boxplot(ax = ax_array[2][i], column=["visit_reports"], by=["quality"])
# ax_array[2][i].set_title("")
# print(android_ax_returned.shape, ios_ax_returned.shape)
# Replace the numeric quality tick labels with readable role names.
for i, ax in enumerate(ax_array[0]):
    ax.set_xticklabels([q2r_map[int(t.get_text())] for t in ax.get_xticklabels()])
    ax.set_xlabel("")
for i, ax in enumerate(ax_array[1]):
    ax.set_xticklabels([q2r_map[int(t.get_text())] for t in ax.get_xticklabels()])
    ax.set_xlabel("")
# for ax in ax_array[1]:
#     ax.set_xticklabels(q2r_ios_list[1:])
#     ax.set_xlabel("")
# for ax in ax_array[2]:
#     ax.set_xticklabels(q2r_ios_list[1:])
#     ax.set_xlabel("")
ax_array[0][0].set_ylabel("Difference in trip counts (android)")
ax_array[1][0].set_ylabel("Difference in trip counts (ios)")
# ax_array[2][0].set_ylabel("Difference in visit reports (ios)")
ifig.suptitle("Section count differences v/s configured quality over multiple timelines")
# ifig.tight_layout()
# Does matching percentage depend on ground-truth section duration?
tradeoff_df_filtered.plot(x="gt_duration", y="matching_pct", kind='scatter')
tradeoff_df_filtered.query("matching_pct > 1").plot(x="gt_duration", y="matching_pct", kind='scatter')
tradeoff_df_filtered.query("matching_pct <= 1").plot(x="gt_duration", y="matching_pct", kind='scatter')
# Summarize matching_pct (mean/min/max) per duration bucket.
matching_pct_range_list = []
for k, df in tradeoff_df_filtered.groupby("gt_duration_mins"):
    print (k, df.matching_pct.mean(), df.matching_pct.min(), df.matching_pct.max())
    matching_pct_range_list.append({"gt_duration_mins": k, "mean": df.matching_pct.mean(), "min": df.matching_pct.min(), "max": df.matching_pct.max()})
matching_pct_range_df = pd.DataFrame(matching_pct_range_list)
# Mean with asymmetric min/max error bars, per duration bucket.
ifig, ax = plt.subplots(1,1, figsize=(4,4), squeeze=True)
ax.errorbar(matching_pct_range_df.gt_duration_mins, y=matching_pct_range_df["mean"], yerr = [matching_pct_range_df["mean"] - matching_pct_range_df["min"],
    matching_pct_range_df["max"] - matching_pct_range_df["mean"]])
# Same plot restricted to sections longer than 10 minutes.
matching_pct_range_df_filtered = matching_pct_range_df.query('gt_duration_mins > 10')
ifig, ax = plt.subplots(1,1, figsize=(4,4), squeeze=True)
ax.errorbar(matching_pct_range_df_filtered.gt_duration_mins, y=matching_pct_range_df_filtered["mean"], yerr = [matching_pct_range_df_filtered["mean"] - matching_pct_range_df_filtered["min"],
    matching_pct_range_df_filtered["max"] - matching_pct_range_df_filtered["mean"]])
# Leftover shape sanity check; no effect.
np.array(list(zip(np.repeat([1], 37), np.repeat([10], 37)))).shape
tradeoff_df_filtered.sort_values(by="matching_pct", ascending=False)
tradeoff_df.matching_pct.min(), tradeoff_df.matching_pct.max()
# These android HAHFDC phones died mid-section (see notes below); blank out
# their end_diff_mins so the bogus values don't skew later plots.
out_of_battery_phones = tradeoff_df.query("timeline=='train_bus_ebike_mtv_ucb' & role=='HAHFDC' & trip_id=='berkeley_to_mtv_SF_express_bus_0' & phone_os == 'android'")
for i in out_of_battery_phones.index:
    tradeoff_df.loc[i,"end_diff_mins"] = float('nan')
```
### Anomaly checks
We can clearly see that there are several outliers with the start/end timestamps for the sections. Let us explore these in greater detail and see if we can find any patterns.
```
# Pretty-print a unix timestamp in the experiment's local timezone.
fmt = lambda ts: arrow.get(ts).to("America/Los_Angeles")
def check_outlier(eval_range, trip_idx, section_id, base_mode):
    """Debug helper: dump the timeline around one evaluation section.

    Recomputes the section's matching_pct against the sensed sections and
    displays the motion activity just before/after the section boundary, so
    we can see why a match was unusually high or low.

    eval_range: one entry of phone_detail_map["evaluation_ranges"]
    trip_idx: index into eval_range["evaluation_trip_ranges"]
    section_id: trip_id of the evaluation section to inspect
    base_mode: coarse sensed mode (e.g. "WALKING") expected for the section
    """
    eval_trip = eval_range["evaluation_trip_ranges"][trip_idx]
    # Add human-readable times for eyeballing the dataframes.
    eval_range["motion_activity_df"]["fmt_time"] = eval_range["motion_activity_df"].ts.apply(lambda ts: fmt(ts))
    eval_trip["motion_activity_df"]["fmt_time"] = eval_trip["motion_activity_df"].ts.apply(lambda ts: fmt(ts))
    eval_section = [s for s in eval_trip["evaluation_section_ranges"] if s["trip_id"] == section_id][0]
    print(fmt(eval_section["start_ts"]), "->", fmt(eval_section["end_ts"]))
    print([(fmt(ssr["start_ts"]), fmt(ssr["end_ts"]), ssr["mode"]) for ssr in eval_trip["sensed_section_ranges"]])
    # Re-run the same matching that produced the tradeoff entries.
    matching_section_map = ems.find_matching_segments(eval_trip["evaluation_section_ranges"], "trip_id", eval_trip["sensed_section_ranges"])
    sensed_section_range = matching_section_map[section_id]["match"]
    print([(fmt(cm["start_ts"]), fmt(cm["end_ts"]), cm["mode"]) for cm in sensed_section_range])
    matching_sections = [s for s in sensed_section_range if s["mode"] == base_mode]
    print("For %s (%s -> %s) %s, matching_sections = %s" %
        (eval_section["trip_id"], eval_section["start_ts"], eval_section["end_ts"], base_mode,
        matching_sections))
    # matching_pct = time sensed as base_mode / ground-truth section duration.
    matching_ts = sum([(s["end_ts"] - s["start_ts"]) for s in matching_sections])
    print("matching_ts = %s, ground_truth ts = %s" % (matching_ts, (eval_section["end_ts"] - eval_section["start_ts"])))
    matching_pct = matching_ts / (eval_section["end_ts"] - eval_section["start_ts"])
    print(matching_pct)
    print("section activity head")
    ipyd.display(eval_section["motion_activity_df"].head(n=3))
    print("section activity tail")
    ipyd.display(eval_section["motion_activity_df"].tail(n=3))
    # Look at the half hour after the section for late-arriving transitions.
    section_end_ts = eval_section["end_ts"]
    print("post-section end activity head")
    ipyd.display(eval_range["motion_activity_df"].query("@section_end_ts <= ts <= @section_end_ts + 30 * 60").head())
def check_outlier_expanded(eval_range, trip_idx, section_id, base_mode):
    """Like check_outlier, but re-detects section transitions over an activity
    window extended 30 min past the trip end.

    Some transitions are only reported after the trip officially ends, and
    trips that end in WALKING may never emit a non-still end transition; this
    helper accounts for both before recomputing matching_pct.

    eval_range: one entry of phone_detail_map["evaluation_ranges"]
    trip_idx: index into eval_range["evaluation_trip_ranges"]
    section_id: trip_id of the evaluation section to inspect
    base_mode: coarse sensed mode (e.g. "AUTOMOTIVE") expected for the section
    """
    eval_trip = eval_range["evaluation_trip_ranges"][trip_idx]
    # Add human-readable times for eyeballing the dataframes.
    eval_range["motion_activity_df"]["fmt_time"] = eval_range["motion_activity_df"].ts.apply(lambda ts: fmt(ts))
    eval_trip["motion_activity_df"]["fmt_time"] = eval_trip["motion_activity_df"].ts.apply(lambda ts: fmt(ts))
    eval_section = [s for s in eval_trip["evaluation_section_ranges"] if s["trip_id"] == section_id][0]
    print(fmt(eval_section["start_ts"]), "->", fmt(eval_section["end_ts"]))
    print([(fmt(ssr["start_ts"]), fmt(ssr["end_ts"]), ssr["mode"]) for ssr in eval_trip["sensed_section_ranges"]])
    trip_ma_df = eval_trip["motion_activity_df"]
    # we may get some transitions after the trip ends
    # let's expand the activity range to account for that
    trip_end_ts = eval_trip["end_ts"]
    extended_ma_df = eval_range["motion_activity_df"].query("@trip_end_ts <= ts <= @trip_end_ts + 30 * 60")
    ma_df = pd.concat([trip_ma_df, extended_ma_df],
        axis="index")
    curr_trip_section_transitions = ems.find_section_transitions(
        ma_df.query(ems.VALID_QUERIES_NO_STILL["android"]), ems.TRANSITION_FNS["android"])
    ipyd.display(curr_trip_section_transitions)
    last_section = eval_trip["evaluation_section_ranges"][-1]
    last_section_gt = pv_ucb.spec_details.get_ground_truth_for_leg(eval_trip["trip_id_base"], last_section["trip_id_base"])
    if last_section_gt["mode"] == "WALKING":
        # For trip that end in walking, we need to include still transitions as valid
        # otherwise, there is no end transition from walking to a valid mode
        if len(curr_trip_section_transitions) > 0:
            curr_last_transition_ts = curr_trip_section_transitions.iloc[-1].ts
        else:
            curr_last_transition_ts = 0
        print("Trip ending in walking found, checking for any final still transitions > %s" % curr_last_transition_ts)
        still_section_transitions = extended_ma_df.query("ts > @curr_last_transition_ts").query(ems.STILL_ENTRIES["android"])
        if len(still_section_transitions) > 0:
            # DataFrame.append was removed in pandas 2.0; concatenate a one-row
            # slice instead (iloc[[0]] keeps it a DataFrame, so the row is
            # appended just as Series-append used to do).
            curr_trip_section_transitions = pd.concat(
                [curr_trip_section_transitions, still_section_transitions.iloc[[0]]])
    ipyd.display(curr_trip_section_transitions)
    # Recompute the section match exactly as the tradeoff loop does.
    matching_section_map = ems.find_matching_segments(eval_trip["evaluation_section_ranges"], "trip_id", eval_trip["sensed_section_ranges"])
    sensed_section_range = matching_section_map[section_id]["match"]
    print([(fmt(cm["start_ts"]), fmt(cm["end_ts"]), cm["mode"]) for cm in sensed_section_range])
    matching_sections = [s for s in sensed_section_range if s["mode"] == base_mode]
    print("For %s (%s -> %s) %s, matching_sections = %s" %
        (eval_section["trip_id"], eval_section["start_ts"], eval_section["end_ts"], base_mode,
        matching_sections))
    # matching_pct = time sensed as base_mode / ground-truth section duration.
    matching_ts = sum([(s["end_ts"] - s["start_ts"]) for s in matching_sections])
    print("matching_ts = %s, ground_truth ts = %s" % (matching_ts, (eval_section["end_ts"] - eval_section["start_ts"])))
    matching_pct = matching_ts / (eval_section["end_ts"] - eval_section["start_ts"])
    print(matching_pct)
    print("section activity head")
    ipyd.display(eval_section["motion_activity_df"].head(n=3))
    print("section activity tail")
    ipyd.display(eval_section["motion_activity_df"].tail(n=3))
    # Look at the half hour after the section for late-arriving transitions.
    section_end_ts = eval_section["end_ts"]
    print("post-section end activity head")
    ipyd.display(eval_range["motion_activity_df"].query("@section_end_ts <= ts <= @section_end_ts + 30 * 60").head())
```
#### sections which have matching pct > 1
This is mainly caused by
- most of the highest values are from `walk_start` and `walk_end`. This is because we end up matching them with sections that correspond to the entire trip and not just the transitions. For e.g. `walk_end` is from `19:20:31 -> 19:20:57` but it matches the section from `19:01:53 -> 19:27:21` because it is all WALKING.
- looking at longer sections, the first "real" section is `walk to the bikeshare location_0`. Again, it was from `16:37:07 -> 2019-07-24T16:41:54` but we matched the entire `WALKING` range of `16:38:36 -> 17:21:13`
```
# NOTE(review): the heading says "> 1" but this keeps everything > 0 -- confirm intent.
tradeoff_df.query("matching_pct > 0").sort_values(by="matching_pct")
# Two known over-matched walk sections (matched against the whole-trip WALKING range).
check_outlier(pv_la.map()['ios']['ucb-sdb-ios-3']["evaluation_ranges"][0], 1, "walk_end_0", "WALKING")
check_outlier(pv_ucb.map()['ios']['ucb-sdb-ios-3']["evaluation_ranges"][0], 2, "walk to the bikeshare location_0", "WALKING")
```
#### sections which have 0 matching_pct
- suburb_city_driving_weekend_0: matches a walking trip, no motion activity until tracking actually stops. GT trip end for the `walk_start` section is `17:40:03`, first point in the motion activity df for the **range** is at `17:46:39`. The AUTOMOTIVE range GT end is `17:52:26`; the sensed range is from `18:33:45 -> 19:41:13`.
- similarly for `city_escooter`
```
Ground truth = 16:18:07 -> 16:38:14
(<Arrow [2019-07-22T16:11:09.955601-07:00]>, <Arrow [2019-07-22T16:59:30.826229-07:00]>, 'WALKING'
(<Arrow [2019-07-22T16:59:30.826229-07:00]>, <Arrow [2019-07-22T17:01:30.321116-07:00]>, 'AUTOMOTIVE'
(<Arrow [2019-07-22T17:01:30.321116-07:00]>, <Arrow [2019-07-22T17:02:54.217346-07:00]>, 'WALKING'
(<Arrow [2019-07-22T17:02:54.217346-07:00]>, <Arrow [2019-07-22T17:34:33.386226-07:00]>, 'AUTOMOTIVE'
(<Arrow [2019-07-22T17:34:33.386226-07:00]>, <Arrow [2019-07-22T17:46:59.568747-07:00]>, 'WALKING')
```
- for `commuter_rail_with_tunnels_0`
Phone ran out during this section. Need to exclude
- similarly for `inner_suburb_downtown_walk_0`
- for `suburb_city_driving_weekend_0`, classified as `CYCLING`
```
2019-07-27T17:40:03.318182-07:00 -> 2019-07-27T17:52:26.823849-07:00
[(<Arrow [2019-07-27T17:43:45.507000-07:00]>, <Arrow [2019-07-27T17:51:10.151000-07:00]>, 'CYCLING'
(<Arrow [2019-07-27T17:51:10.151000-07:00]>, <Arrow [2019-07-27T17:53:44.761000-07:00]>, 'AUTOMOTIVE')]
```
```
tradeoff_df.query("matching_pct == 0").head()
check_outlier(pv_la.map()['android']['ucb-sdb-android-3']["evaluation_ranges"][0], 0, "walk_start_0", "WALKING")
# Zero-match sections other than the walk stubs and the known dead-battery ones.
tradeoff_df.query("matching_pct == 0 & section_id != 'walk_start_0' and section_id != 'walk_end_0' & ((section_id != 'commuter_rail_with_tunnels_0' & section_id != 'inner_suburb_downtown_walk_0') | phone_os != 'android')")
check_outlier(pv_sj.map()['ios']['ucb-sdb-ios-3']["evaluation_ranges"][0], 1, "city_escooter_0", "CYCLING")
# Expanded variant for the section where the phone died mid-section.
check_outlier_expanded(pv_ucb.map()['android']['ucb-sdb-android-2']["evaluation_ranges"][0], 2, "commuter_rail_with_tunnels_0", "AUTOMOTIVE")
check_outlier(pv_la.map()['android']['ucb-sdb-android-2']["evaluation_ranges"][0], 0, "suburb_city_driving_weekend_0", "AUTOMOTIVE")
```
| github_jupyter |
<a href="https://colab.research.google.com/github/VHEX-LAB/VHEX-Tech/blob/main/client-iris.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Save/Load models
```
import joblib
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
# Train a quick classifier on the iris dataset.
iris = datasets.load_iris()
X, y = iris.data, iris.target
model = LogisticRegression(random_state=0, max_iter=1000)
model.fit(X, y)
model.predict(X)
X[0]
model.predict([X[0]])
# Persist the fitted model and load it back -- the serving cells below use `model`.
joblib.dump(model, "iris.model")
model = joblib.load("iris.model")
```
# Serve models
```
!pip install flask-ngrok -q
from flask import Flask
from flask_ngrok import run_with_ngrok # To use flask server in colab
app = Flask(__name__)
run_with_ngrok(app)  # exposes the local server through an ngrok tunnel
@app.route("/")
def hello():
    # Minimal liveness endpoint.
    return "hello"
app.run() # Use gunicorn in production environment
from flask import Flask, request
from flask_ngrok import run_with_ngrok # To use flask server in colab
app = Flask(__name__)
run_with_ngrok(app)
@app.route("/", methods=["POST"])
def hello():
    """Predict the iris species for the measurements in the JSON request body.

    Expects keys sepal_length, sepal_width, petal_length, petal_width;
    returns the predicted class label as a string.
    """
    data = request.get_json(True)  # force=True: parse JSON regardless of content type
    sepal_length = data["sepal_length"]
    sepal_width = data["sepal_width"]
    petal_length = data["petal_length"]
    petal_width = data["petal_width"]
    # Renamed from `input` to avoid shadowing the builtin.
    features = [sepal_length, sepal_width, petal_length, petal_width]
    species = model.predict([features])  # `model` is the LogisticRegression fit earlier
    return str(species[0])
app.run() # Use gunicorn in production environment
```
# Cache(Single machine level)
```
from flask import Flask, request
from flask_ngrok import run_with_ngrok # To use flask server in colab
# Process-local prediction cache: input tuple -> predicted label string.
cache = {}
app = Flask(__name__)
run_with_ngrok(app)
@app.route("/", methods=["POST"])
def hello():
    """Predict the iris species, memoizing the result per input tuple."""
    data = request.get_json(True)
    sepal_length = data["sepal_length"]
    sepal_width = data["sepal_width"]
    petal_length = data["petal_length"]
    petal_width = data["petal_width"]
    key = (sepal_length, sepal_width, petal_length, petal_width)
    if key in cache:
        print("cached")
        return cache[key]
    # Renamed from `input` to avoid shadowing the builtin.
    features = list(key)
    species = model.predict([features])  # `model` is the LogisticRegression fit earlier
    cache[key] = str(species[0])
    return str(species[0])
app.run()
```
# High availability(Single machine Level)
```
%%writefile app.py
import joblib
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from flask import Flask, request
from flask_ngrok import run_with_ngrok # To use flask server in colab
# Load the model saved earlier; each gunicorn worker gets its own copy.
model = joblib.load("iris.model")
cache = {}
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def hello():
    # Predict the iris species, memoizing per input tuple (per worker).
    data = request.get_json(True)
    sepal_length = data["sepal_length"]
    sepal_width = data["sepal_width"]
    petal_length = data["petal_length"]
    petal_width = data["petal_width"]
    key = (sepal_length, sepal_width, petal_length, petal_width)
    if key in cache:
        print("cached")
        return cache[key]
    input = list(key)
    species = model.predict([input])
    cache[key] = str(species[0])
    return str(species[0])
!pip install gunicorn -q
# Run 4 worker processes; the nohup variant keeps serving in the background.
!gunicorn app:app -w 4
!nohup gunicorn app:app -w 4 &
!ps --forest
# NOTE(review): 'Context-Type' looks like a typo for 'Content-Type'; harmless
# here since get_json(True) forces JSON parsing -- confirm before copying.
!curl 127.0.0.1:8000 \
-H 'Context-Type: application/json' \
--data-raw '{"sepal_length": 1.0,"sepal_width": 2.0,"petal_length": 2.0,"petal_width": 1.0}'
# NOTE(review): hard-coded PID from a previous run; use the actual gunicorn PID.
!kill 336
```
# BentoML
```
!pip install bentoml -q
%%writefile bento_service.py
import pandas as pd
from bentoml import env, artifacts, api, BentoService
from bentoml.adapters import DataframeInput
from bentoml.frameworks.sklearn import SklearnModelArtifact
@env(infer_pip_packages=True)
@artifacts([SklearnModelArtifact('model')])
class IrisClassifier(BentoService):
    # batch=True: the adapter hands us a whole DataFrame of rows at once.
    @api(input=DataframeInput(), batch=True)
    def predict(self, df: pd.DataFrame):
        return self.artifacts.model.predict(df)
from bento_service import IrisClassifier
# Bundle the fitted sklearn `model` from the earlier cell into the service.
iris_classifier_service = IrisClassifier()
iris_classifier_service.pack('model', model)
iris_classifier_service.predict([[1.1, 2.3, 2.3, 3.4]])
iris_classifier_service.start_dev_server()
# NOTE(review): 'Context-Type' looks like a typo for 'Content-Type' -- confirm.
!curl 127.0.0.1:5000/predict \
-H 'Context-Type: application/json' \
--data-raw '[{"sepal_length": 1.0,"sepal_width": 2.0,"petal_length": 2.0,"petal_width": 1.0}]'
iris_classifier_service.stop_dev_server()
# Persist the service bundle, then serve it standalone.
iris_classifier_service.save()
!bentoml serve IrisClassifier:latest --run-with-ngrok
!bentoml serve-gunicorn IrisClassifier:latest --workers 4
```
| github_jupyter |
```
import tensorflow as tf
import malaya_speech.augmentation.waveform as augmentation
import malaya_speech
from glob import glob
import random
import numpy as np
import IPython.display as ipd
# Turn numpy floating-point warnings into exceptions so bad samples fail loudly.
np.seterr(all='raise')
# Clean speech recordings.
files = glob('../youtube/clean-wav/*.wav')
random.shuffle(files)
len(files)
# Noise pool: ambient noise plus isolated bass/drums/other music stems.
noises = glob('../noise-44k/noise/*.wav') + glob('../noise-44k/clean-wav/*.wav')
basses = glob('HHDS/Sources/**/*bass.wav', recursive = True)
drums = glob('HHDS/Sources/**/*drums.wav', recursive = True)
others = glob('HHDS/Sources/**/*other.wav', recursive = True)
noises = noises + basses + drums + others
random.shuffle(noises)
def read_wav(f):
    # Load a wav at 44.1 kHz; presumably returns (samples, sample_rate) -- confirm.
    return malaya_speech.load(f, sr = 44100)
def random_sampling(s, length):
    # Crop a random window of `length` (presumably milliseconds) from signal `s`.
    return augmentation.random_sampling(s, sr = 44100, length = length)
def combine_speakers(files, n = 5):
    """Mix `n` random samples from `files` into one overlapped track.

    Returns (mixture, tracks) where `tracks` holds each padded component.
    NOTE(review): tracks[0] is the *unscaled* first sample while the mixture
    uses a 0.5-1.0 scaled copy of it -- confirm that asymmetry is intended.
    """
    w_samples = random.sample(files, n)
    # Shorter clips as n grows, so the total mixture length stays bounded.
    w_samples = [
        random_sampling(
            read_wav(f)[0],
            length = min(
                random.randint(20000 // n, 240_000 // n), 100_000 // n
            ),
        )
        for f in w_samples
    ]
    y = [w_samples[0]]
    left = w_samples[0].copy() * random.uniform(0.5, 1.0)
    for i in range(1, n):
        right = w_samples[i].copy() * random.uniform(0.5, 1.0)
        # Start the next sample partway through (or past) the current mix.
        overlap = random.uniform(0.01, 1.25)
        left_len = int(overlap * len(left))
        padded_right = np.pad(right, (left_len, 0))
        # Zero-pad so both tracks have equal length before summing.
        if len(left) > len(padded_right):
            padded_right = np.pad(
                padded_right, (0, len(left) - len(padded_right))
            )
        else:
            left = np.pad(left, (0, len(padded_right) - len(left)))
        y.append(padded_right)
        left = left + padded_right
    return left, y
def calc(signal, seed, add_uniform = False):
    """Apply one randomly-chosen sox-based augmentation to `signal`.

    The branch is derived deterministically from `seed`, so a speech sample
    and its paired noise sample can share the same augmentation. Branches
    5-12 leave the signal untouched; optionally, uniform noise is added on
    top with probability 0.3 when `add_uniform` is set.
    """
    random.seed(seed)
    branch = random.randint(0, 12)
    if branch == 0:
        augmented = augmentation.sox_augment_high(
            signal,
            min_bass_gain = random.randint(25, 50),
            reverberance = random.randint(0, 80),
            hf_damping = 10,
            room_scale = random.randint(0, 50),
            negate = 1,
        )
    elif branch == 1:
        augmented = augmentation.sox_augment_high(
            signal,
            min_bass_gain = random.randint(25, 70),
            reverberance = random.randint(0, 80),
            hf_damping = 10,
            room_scale = random.randint(0, 50),
            negate = 0,
        )
    elif branch == 2:
        augmented = augmentation.sox_augment_low(
            signal,
            min_bass_gain = random.randint(5, 30),
            reverberance = random.randint(0, 80),
            hf_damping = 10,
            room_scale = random.randint(0, 50),
            negate = random.randint(0, 1),
        )
    elif branch == 3:
        augmented = augmentation.sox_augment_combine(
            signal,
            min_bass_gain_high = random.randint(25, 70),
            min_bass_gain_low = random.randint(5, 30),
            reverberance = random.randint(0, 80),
            hf_damping = 10,
            room_scale = random.randint(0, 90),
        )
    elif branch == 4:
        augmented = augmentation.sox_reverb(
            signal,
            reverberance = random.randint(10, 80),
            hf_damping = 10,
            room_scale = random.randint(10, 90),
        )
    else:
        # Majority of draws (5-12): pass the signal through unchanged.
        augmented = signal
    # random.random() is drawn unconditionally to keep the RNG stream
    # identical whether or not add_uniform is set.
    if random.random() > 0.7 and add_uniform:
        augmented = augmentation.add_uniform_noise(
            augmented, power = random.uniform(0.005, 0.015)
        )
    return augmented
def parallel(f):
    # Build one (noisy, clean, noise) training triple from speech file `f`.
    y = random_sampling(
        read_wav(f)[0], length = random.randint(30000, 100_000)
    )
    # Mix 1-20 random noise sources into a single noise track.
    n = combine_speakers(noises, random.randint(1, 20))[0]
    # Same seed => speech and noise draw the same augmentation branch.
    seed = random.randint(0, 100_000_000)
    y = calc(y, seed)
    n = calc(n, seed, True)
    combined, noise = augmentation.add_noise(
        y, n, factor = random.uniform(0.1, 0.75), return_noise = True
    )
    return combined, y, noise
# Smoke-test the pipeline on one file before building the evaluation set.
r = parallel(files[0])
# sr = 44100
# ipd.Audio(r[0][:10 * sr], rate = sr)
# ipd.Audio(r[1][:10 * sr], rate = sr)
# ipd.Audio(r[2][:10 * sr], rate = sr)
from tqdm import tqdm
results = []
for i in tqdm(range(100)):
    try:
        results.append(parallel(files[i]))
    except Exception as e:
        # Best-effort: skip files that fail augmentation, but say why.
        # (The old bare `except:` also swallowed KeyboardInterrupt.)
        print('skipping %s: %s' % (files[i], e))
import pickle
# Persist the (noisy, clean, noise) triples for later evaluation.
with open('test-set-noise-reduction.pkl', 'wb') as fopen:
    pickle.dump(results, fopen)
```
| github_jupyter |
```
"""This area sets up the Jupyter environment.
Please do not modify anything in this cell.
"""
import os
import sys
import time
# Add project to PYTHONPATH for future use
sys.path.insert(1, os.path.join(sys.path[0], '..'))
# Import miscellaneous modules
from IPython.core.display import display, HTML
# Set CSS styling
with open('../admin/custom.css', 'r') as f:
style = """<style>\n{}\n</style>""".format(f.read())
display(HTML(style))
```
# Multivariate Regression with Keras
<div class="alert alert-warning">
In this notebook we will get more familiar with the high-level artificial neural network package [Keras](https://keras.io/) by walking through a multivariate linear regression example.
</div>
## Dataset: Bike-Sharing System
### Background
Public bike-sharing systems are a new generation of traditional bike rentals where the process from membership, rental, and return back of bicycles has become automatic. Through these systems, a user is able to easily rent a bicycle from a particular position and return it back to another position. Currently, there are about 500 bike-sharing systems around the world which are composed of over 500 thousand bicycles. Today, there exists great interest in these systems due to their important role in traffic, environmental, and health issues.
Apart from interesting real-world applications of these kinds of bike-sharing systems, the data being generated by these systems make them desirable for research as well. As opposed to other transport services such as bus or subway, the duration of travel, departure, and arrival position is explicitly recorded. This feature turns bike-sharing into a virtual sensor network that can be used for sensing mobility in a city. Hence, it is expected that significant events in a city could be detected by monitoring these data.
The bike-sharing rental process is highly correlated to environmental and seasonal settings. For instance, weather conditions,
precipitation, day of the week, season, hour of the day, and more can affect rental behaviours. The core dataset is related to a two-year historical log between 2011 and 2012 from the Capital Bikeshare system (Washington D.C., USA) which is publicly available at http://capitalbikeshare.com/system-data. The data was aggregated hourly as well as daily and then combined with weather and seasonal information. Weather information was extracted from http://www.freemeteo.com.
We have already standardised some of the features, i.e. zero mean and unit variance.
### Task: Regression
Predict the hourly bicycle rental count based on the environmental and seasonal settings.
### Dataset Characteristics
`day.csv` - Bike-sharing counts aggregated on a daily basis (731 days)
**Features**:
- instance: record index
- dteday : date
- season : season (1: spring, 2: summer, 3: fall, 4: winter)
- yr : year (0: 2011, 1:2012)
- mnth : month ( 1 to 12)
- holiday : whether day is holiday or not (extracted from http://dchr.dc.gov/page/holiday-schedule)
- weekday : day of the week
- workingday : if day is neither weekend nor holiday is 1, otherwise is 0.
+ weathersit :
- 1: Clear, Few clouds, Partly cloudy, Partly cloudy
- 2: Mist + Cloudy, Mist + Broken clouds, Mist + Few clouds, Mist
- 3: Light Snow, Light Rain + Thunderstorm + Scattered clouds, Light Rain + Scattered clouds
- 4: Heavy Rain + Ice Pallets + Thunderstorm + Mist, Snow + Fog
- temp : Normalized temperature in Celsius. The values are divided to 41 (max)
- atemp: Normalized feeling temperature in Celsius. The values are divided to 50 (max)
- hum: Normalized humidity. The values are divided to 100 (max)
- windspeed: Normalized wind speed. The values are divided to 67 (max)
- casual: count of casual users
- registered: count of registered users
- cnt: count of total rental bikes including both casual and registered
### License
This dataset was created and preprocessed in:
[1] Fanaee-T, Hadi, and Gama, Joao, "Event labeling combining ensemble detectors and background knowledge", Progress in Artificial Intelligence (2013): pp. 1-15, Springer Berlin Heidelberg, doi:10.1007/s13748-013-0040-3.
## Loading the Data
<div class="alert alert-info">
<strong>In the following code snippets we will:</strong>
<ul>
<li>Load the dataset from a slew of CSV files.</li>
</ul>
</div>
```
# Plots will be show inside the notebook
%matplotlib notebook
import matplotlib.pyplot as plt
# High-level package for creating and training artificial neural networks
import keras
# NumPy is a package for manipulating N-dimensional array objects
import numpy as np
# Pandas is a data analysis package
import pandas as pd
import admin.tools as tools
import problem_unittests as tests
```
Load *features* for training:
```
# Daily feature rows, indexed by the parsed `dteday` date column.
train_features = tools.load_csv_with_dates('resources/bike_training_features.csv', 'dteday')
```
Load *targets* for training:
```
# Training targets (daily rental counts), indexed by date.
train_targets = tools.load_csv_with_dates('resources/bike_training_targets.csv', 'dteday')
```
Load *features* for testing:
```
# Held-out feature rows, indexed by date.
test_features = tools.load_csv_with_dates('resources/bike_test_features.csv', 'dteday')
```
Load *targets* for testing:
```
test_targets = tools.load_csv_with_dates('resources/bike_test_targets.csv', 'dteday')
# Short date labels (e.g. "Jan 01") for plot tick marks.
test_dates = test_targets.index.strftime('%b %d')
print('\n', test_targets.head(n=5))
```
Unpack the Pandas DataFrames to NumPy arrays:
```
# Unpack features
X_train = train_features.values
X_test = test_features.values
# Unpack targets -- `cnt` is the total rental count we regress on.
y_train = train_targets['cnt'].values
y_test = test_targets['cnt'].values
# Record number of inputs and outputs
nb_features = X_train.shape[1]
nb_outputs = 1
```
# Task I: Build the Model
Now, using Keras we will build a multivariate regression model. Remember, these kinds of models can be represented as artificial neural networks, which is why we can implement them using Keras.
<img src="resources/linear-regression-net.png" alt="Linear regression as an artificial neural network" width="300" />
The model, an artificial neural network, will consist of a $d$ dimensional input that is fully- or densely-connected to a single output neuron.
The model will be made using the [Keras functional guide](https://keras.io/getting-started/functional-api-guide/), which allows us to take advantage of a functional API to create complex models with an arbitrary number of input and output neurons. Below is some example code for how to set up a simple model using this API with 32 inputs and 4 outputs:
```python
from keras.models import Model
from keras.layers import Input, Dense
a = Input(shape=(32,))
b = Dense(4)(a)
model = Model(inputs=a, outputs=b)
```
Notice how this is the same setup we used for the previous notebook on linear regression. Make sure to revisit that notebook if you have trouble understanding the basic usage of this API.
<div class="alert alert-success">
**Task**: Build a model using the Keras functional guide for the bike-sharing dataset. Use the following functions to put together your model:
<ul>
<li><a href="https://keras.io/models/model/">Input()</a></li>
<li><a href="https://keras.io/models/model/">Dense()</a></li>
<li><a href="https://keras.io/models/model/">Model()</a></li>
</ul>
It may be helpful to browse other parts of the Keras documentation.
</div>
```
# Import what we need
from keras.layers import (Input, Dense)
from keras.models import Model
def simple_model(nb_inputs, nb_outputs):
    """Return a Keras Model implementing multivariate linear regression.

    A `nb_inputs`-dimensional input densely connected to `nb_outputs`
    output neuron(s); Dense's default activation is linear, which is
    exactly the multivariate linear regression of the figure above.
    """
    inputs = Input(shape=(nb_inputs,))
    outputs = Dense(nb_outputs)(inputs)
    model = Model(inputs=inputs, outputs=outputs)
    return model
### Do *not* modify the following line ###
# Test and see that the model has been created correctly
tests.test_simple_model(simple_model)
```
# Selecting Hyperparameters
As opposed to standard model parameters, such as the weights in a linear model, hyperparamters are user-specified parameters not learned by the training process, i.e. they are specified *a priori*. In the following section we will look at how we can define and evaluate a few different hyperparameters relevant to our previously defined model. The hyperparameters we will take a look at are:
* Learning rate
* Number of epochs
* Batch size
## Digression: Different Sets of Data
One of the ultimate goals of machine learning is for our models to *generalise well*. That is, we would like the performance of our model on the data we have trained on, i.e. the **in-sample** error, to be representative of the performance of our model on the data we are attempting to model, i.e. the **out-of-sample** error. Unfortunately, for most problems we are unable to test our model on all possible data that we have not trained on. This might be due to difficulties gathering new data or simply because the amount of possible data is very large.
For this reason, we have to settle for a different solution when we want to evaluate our trained models. The go-to solution is to gather a second set of data, in addition to the training set, called a test set. For the test set to be useful it is important that it is representative of the data we have not trained on. In other words, the error we get on the test set should be close to the out-of-sample error.
Selecting appropriate hyperparameters can be seen as a sort of meta-optimisation task on top of the learning task. Now, we could train a model several times, alter some hyperparameters each time, and record the final performance on the test set, however, this will likely yield errors that are overly optimistic. This is because looking at the test set when making learning choices, i.e. selecting hyperparamters, introduces bias and causes the **estimated out-of-sample** error to diverge from the **true out-of-sample** error. Remember, this is the reason why we have a test set in the first place.
The solution to this problem is to create a third set: the *validation set*. This is typically a partition of the training set, however there exist several *cross validation* methodologies for how to create and use validation sets efficiently. By having this third set we can: (i) use the *training set* to train the trainable model parameters, (ii) use the *validation set* to select hyperparameters, and (iii) use the *test set* to estimate the out-of-sample error. This split ensures that the test set remains unbiased.
## Learning Rate
As we saw in the previous notebook, learning rate is an important parameter that decides how big of a jump we will make during gradient descent-based optimisation when moving in the negative gradient direction.
In order to select a *good* learning rate it is paramount that we track the state of the current error / loss / cost during training after each application of the gradient descent update rule. Below is a cartoon diagram illustrating the *loss* over the course of training. The shape of the error as training progresses can give a good indication as to what constitutes a *good* learning rate.
<img src="resources/learningrates.jpeg" alt="Choice of learning rate" width="400" />
[source](http://cs231n.github.io/neural-networks-3/)
<div class="alert alert-danger">
<strong>Ideally we would want:</strong>
<ul>
<li>Small training error</li>
<li>Little to no overfitting, i.e. *validation* performance measure matches the training performance measure (see figure below)</li>
</ul>
</div>
Validation error refers to the error taken over a validation *set* on the current model.
<img src="resources/validationset.jpeg" alt="Validation set overfitting" width="400" />
[source](http://cs231n.github.io/neural-networks-3/)
## Epochs
In artificial neural network terminology one *epoch* typically means that every example in the training set has been seen once by the learning algorithm. It is generally preferable to track the number of epochs as opposed to the number of iterations, i.e. applications of an update rule, because the latter depends on the batch size.
In literature, iteration is *sometimes* used synonymously with epoch.
<div class="alert alert-danger">
<strong>Ideally we would want:</strong>
<ul>
<li>To avoid stopping the training too early</li>
<li>To avoid training for too long</li>
</ul>
</div>
## Batch Size
As we saw in the previous notebook, we typically sum over multiple examples for a single application of an update rule. The number of examples we include is the batch size.
The batch size allows us to control how much memory we need during training because we only need to sample examples for a single batch. This is important for when the entire dataset cannot fit in memory. The important thing to keep in mind when it comes to batch size is that the smaller the batch size the less accurate the estimate of the gradient over the training set will be. In other words, moves done by the update rule in the space over all trainable parameters become [more noisy](https://stats.stackexchange.com/questions/153531/what-is-batch-size-in-neural-network) the smaller the batch size is.
<div class="alert alert-danger">
<strong>Ideally we would want:</strong>
<ul>
<li>To fit a number of examples in memory</li>
<li>Avoid unnecessary amounts of noise when updating trainable model parameters</li>
</ul>
</div>
## Plotting Error vs. Epoch with Keras
<div class="alert alert-info">
<strong>In the following code snippet we will:</strong>
<ul>
<li>Create a model using the `simple_model()` function we made earlier</li>
<li>Define all of the hyperparameters we will need</li>
<li>Train the network using gradient descent</li>
<li>Plot how the error evolves throughout training</li>
</ul>
</div>
Make sure you understand most of the code below before you continue.
```
"""Do not modify the following code. It is to be used as a refence for future tasks.
"""
# Create a simple model
model = simple_model(nb_features, nb_outputs)
#
# Define hyperparameters
#
lr = 0.2
nb_epochs = 10
batch_size = 10
# Fraction of the training data held as a validation set
validation_split = 0.1
# Define optimiser
optimizer = keras.optimizers.sgd(lr=lr)
# Compile model, use mean squared error
model.compile(loss='mean_squared_error', optimizer=optimizer)
# Print model
model.summary()
# Train and record history
logs = model.fit(X_train, y_train,
batch_size=batch_size,
epochs=nb_epochs,
validation_split=validation_split,
verbose=2)
# Plot the error
fig, ax = plt.subplots(1,1)
pd.DataFrame(logs.history).plot(ax=ax)
ax.grid(linestyle='dotted')
ax.legend()
plt.show()
# Estimation on unseen data can be done using the `predict()` function, e.g.:
_y = model.predict(X_test)
```
### Analysis
* Neither of the errors seem very good
* The training performance (`loss`) does *not* seem to generalise well to the validation set (`val_loss`)
* The training performance (`loss`) does not improve
# Task II: Tuning Hyperparameters
In this task you will get the opportunity to play with the hyperparameters we discussed in the previous section.
<div class="alert alert-success">
**Task**: Tune the following hyperparameters until the `loss` (training error) and `val_loss` (validation error) both converge to low numbers:
<ul>
<li>Learning rate</li>
<li>Number of epochs</li>
<li>Batch size</li>
</ul>
Notice that there is no code for creating the optimiser nor for creating the model in the code below. Take a look in the previous code snippet for how to do this. Remember, it is better to write the missing components down manually rather than copy-pasting them.
</div>
```
# Create a simple model
# TODO(student): replace None with a model built via simple_model(...).
model = None
#
# Define hyperparameters
#
lr = 0.2          # tune me
nb_epochs = 10    # tune me
batch_size = 10   # tune me
# Fraction of the training data held as a validation set
validation_split = 0.1
# Define optimiser
# TODO(student): create the optimiser here (see the reference cell above).
# Compile model, use mean squared error
# TODO(student): compile the model here.
### Do *not* modify the following lines ###
# Print model
model.summary()
# Train our network and do live plots of loss
tools.assess_multivariate_model(model, X_train, y_train, X_test, y_test,
                                test_dates, nb_epochs, batch_size,
                                validation_split
                                )
```
# Task III: Adding Regularization
Regularisation is any modification made to a learning algorithm intended to reduce the generalisation error, i.e. the expected value of the error on an unseen example, but not the training error. Typically, this is interpreted as adjusting the complexity of the model by adding a regularisation term, or regulariser to the error function that we minimise:
$$
\begin{equation*}
\min_{h}\sum_{i=1}^{N}E(h(\mathbf{x}_i), y_i) + \lambda R(h)
\end{equation*}
$$
where $h$ is a hypothesis, $E$ is an error function, $R$ is the regularizer, and $\lambda$ is a parameter for controlling the aforementioned regularizer. There are other ways to control the model complexity as well, such as noise injection, data augmentation, and early stopping, but in this notebook we will focus on the type above.
In case you want to review regularization material you can refer to the following material:
* [What is regularization in plain english?](https://stats.stackexchange.com/questions/4961/what-is-regularization-in-plain-english)
* [Recommended video lecture 1](https://www.youtube.com/watch?v=PKXpaLUigA8)
* [Recommended video lecture 2](https://youtu.be/hrIad1RVFV0?t=2237)
## Adding $L^2$ Regularization to Our Model
$L^2$ regularization, otherwise known as weight decay, ridge regression, or Tikhonov regularization, is a popular form of regularization that penalises the norm of the model parameters. This is done by letting $R(h) = \frac{1}{2}\lVert\mathbf{w}\rVert_{2}^{2}$, which drives the weights towards the origin. Any point can be selected, but the origin is a good choice if we do not know the correct value. By multiplying with a factor of $\frac{1}{2}$ we will simplify the gradient of $R(h)$.
<div class="alert alert-success">
**Task**: Build a model using the Keras functional guide for the bike-sharing dataset, however, this time you will have to add $L^2$ regularization. Use the following functions to put together your model:
<ul>
<li><a href="https://keras.io/models/model/">Input()</a></li>
<li><a href="https://keras.io/models/model/">Dense()</a> - Take a look at <a href="https://keras.io/regularizers/">kernel_regularizer</a> for how to regularize the weights of a layer</li>
<li><a href="https://keras.io/models/model/">Model()</a></li>
</ul>
As before, it may be helpful to browse other parts of the Keras documentation.
</div>
```
# Import what we need
from keras import regularizers
def simple_model_l2(nb_inputs, nb_outputs, reg_factor):
    """Return a L2 regularized Keras Model.

    Student task placeholder: same architecture as simple_model(), but the
    Dense layer's weights carry an L2 penalty built with
    regularizers.l2(reg_factor).  Returns None until implemented.
    """
    model = None
    return model
### Do *not* modify the following line ###
# Test and see that the model has been created correctly
tests.test_simple_model_regularized(simple_model_l2)
```
Now, with this model, let's try to optimize the regularization factor $\lambda$. This adjusts the strength of the regularizer.
<div class="alert alert-success">
**Task**: Alter the regularization factor and assess the performance over 100 epochs using a batch size of 128. At a minimum, test out the following regularization strengths:
<ul>
<li> $\lambda = 0.01$</li>
<li> $\lambda = 0.005$</li>
<li> $\lambda = 0.0005$</li>
<li> $\lambda = 0.00005$</li>
</ul>
Similarly to the task where you had to tune hyperparameters, you will have to write down Keras code for creating an optimiser as well as the model. Remember, it is better to write the missing components down manually rather than copy-pasting them.
</div>
```
# Create a simple L2-regularized model
# TODO(student): replace None with a model built via simple_model_l2(...).
model = None
#
# Define hyperparameters
#
lr = 0.0005
nb_epochs = 100
batch_size = 128
# Regularization factor (lambda).
# Fix: this cell previously assigned reg_factor twice (0.005 at the top,
# then 0.0005 here); the first assignment was dead and misleading, so only
# the effective one is kept.
reg_factor = 0.0005
# Fraction of the training data held as a validation set
validation_split = 0.1
# Define optimiser
# TODO(student): create the optimiser here.
# Compile model, use mean squared error
# TODO(student): compile the model here.
### Do *not* modify the following lines ###
# Print model
model.summary()
# Train our network and do live plots of loss
tools.assess_multivariate_model(model, X_train, y_train, X_test, y_test,
                                test_dates, nb_epochs, batch_size,
                                validation_split
                                )
```
### Topics to Think About
* Which of the models above performs better?
* How can we improve the performance even further?
| github_jupyter |
```
###### Set Up #####
# Lines starting with '!' are IPython shell escapes -- this cell only runs
# inside a Jupyter/IPython kernel.
# verify our folder with the data and module assets is installed
# if it is installed make sure it is the latest
!test -e ds-assets && cd ds-assets && git pull && cd ..
# if it is not installed clone it
!test ! -e ds-assets && git clone https://github.com/lutzhamel/ds-assets.git
# point to the folder with the assets
home = "ds-assets/assets/"
import sys
sys.path.append(home) # add home folder to module search path
```
# Image Compression: Color Quantization using k-Means
In computer graphics, color quantization or color image quantization is a process that reduces the number of distinct colors used in an image, usually with the intention that the new image should be as visually similar as possible to the original image.
An example image in 24-bit RGB color, <br>
<img src="https://upload.wikimedia.org/wikipedia/commons/e/e3/Dithering_example_undithered.png">
The same image reduced to a palette of 16 colors specifically chosen to best represent the image; the selected palette is shown by the squares on the bottom of the image,<br>
<img src="https://upload.wikimedia.org/wikipedia/en/4/48/Dithering_example_undithered_16color_palette.png">
The palette is chosen using the k-means algorithm in RGB color space,<br>
<img src="https://upload.wikimedia.org/wikipedia/commons/3/3d/Rosa_Gold_Glow_2_small_noblue_color_space.png" height="300" width="300">
(source: [Wikipedia](https://en.wikipedia.org/wiki/Color_quantization))
# An Example
Perform a pixel-wise Vector Quantization (VQ) of an image of Yellowstone, reducing the number of colors required to show the image from 96,615
unique colors to 16, while preserving the overall appearance quality as much as possible.
Based on the [quantization example from SKlearn](https://scikit-learn.org/stable/auto_examples/cluster/plot_color_quantization.html#sphx-glr-auto-examples-cluster-plot-color-quantization-py)
```
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import to_hex          # RGB triple -> '#rrggbb' string
from sklearn.cluster import KMeans
from sklearn.utils import shuffle
# number of colors to use for compression
# (the k in k-means: size of the reduced color palette)
n_colors = 8
```
## Image Data
We load a JPEG image from our folder. The technique we present here will work with any JPEG image.
```
# load the image as an (h, w, 3) uint8 array
img = plt.imread(home+"yellowstone.jpg")
img.shape
type(img)
# take a look at the top-left, four pixels
img[:4,:4,]
# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow behaves well on float data (needs to
# be in the range [0-1])
img = np.array(img, dtype=np.float64) / 255
img[:4,:4,]
# show the image
plt.clf()
plt.axis('off')
plt.title('Yellowstone')
plt.imshow(img)
plt.show()
```
## Training Data
Turn the image into training data for our k-means algorithm. We need to transform the 3D image structure into a 2D training dataset.
```
# Flatten the 3-D (rows, cols, channels) image into a 2-D table with one
# RGB row per pixel -- the training-data layout k-means expects.
w, h, d = img.shape
assert d == 3  # exactly three color channels (RGB)
# Every pixel becomes one point in RGB space.
pixel_array = img.reshape(-1, 3)
pixel_array[:16,]
len(pixel_array)
```
Our image has more than a quarter million pixels. This is way too much data to train our k-means algorithm. We sample the pixels to create our training data.
```
# Train on a 1% random sub-sample of the pixels to keep k-means fast.
assert len(pixel_array) > 100
sample_size = len(pixel_array) // 100
pixel_array_sample = shuffle(pixel_array, random_state=0)[:sample_size]
# One hex color string per sampled pixel, used to color the scatter points.
pixel_array_hex = np.array([to_hex(rgb) for rgb in pixel_array_sample])
# Visualize the sampled training pixels as points in RGB space.
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.scatter3D(pixel_array_sample[:, 0],
             pixel_array_sample[:, 1],
             pixel_array_sample[:, 2],
             c=pixel_array_hex)
plt.show()
```
### Model Building
```
# train the model with n_colors clusters; the fit is on the 1% sample only
model = KMeans(n_clusters=n_colors, random_state=0)
model.fit(pixel_array_sample)
# the cluster centers now represent the new colors (the reduced palette)
compressed_colors = model.cluster_centers_
compressed_colors
# hex strings for plotting each palette color
compressed_colors_hex = np.array([to_hex(compressed_colors[i]) for i in range(len(compressed_colors))])
# plot the colors of the k-means model in RGB space
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.scatter3D(compressed_colors[:,0], compressed_colors[:,1], compressed_colors[:,2],c=compressed_colors_hex);
plt.show()
```
### Build Compressed Image
```
# map each pixel in the image into a color cluster (index of its nearest
# palette color)
new_color_ix = model.predict(pixel_array)
# show a sample of the label/centroid ids
shuffle(new_color_ix)[:32]
# create a new pixel array based on the new colors.
# Fix: NumPy fancy indexing replaces the original Python-level loop over all
# w*h pixels (hundreds of thousands of individual assignments) with a single
# vectorized gather; the result is identical.
new_pixel_array = compressed_colors[new_color_ix]
# reshape the new pixel array into a new image with the same dimensions as the original image.
new_img = np.reshape(new_pixel_array, (w, h, 3))
```
### Image Comparison
Not only do we show the original and the compressed image but we also save them to files so we can look at the effect that color quantization has on image file size.
```
# original image: render, save to disk for size comparison, then display
plt.clf()
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(img)
plt.savefig("original.png")
plt.show()
# quantized image: same steps with the n_colors palette version
plt.clf()
plt.axis('off')
plt.title('Quantized image ({} colors, K-Means)'.format(n_colors))
plt.imshow(new_img)
plt.savefig("compressed.png")
plt.show()
```
The sizes of the files are shown in KBytes.
```
# IPython shell escapes: report each file's allocated size in 1K blocks
!ls -s -k original.png
!ls -s -k compressed.png
```
| github_jupyter |
# Mall Customers Clustering Analysis
> Learn about K-means clustering analysis
- toc: true
- badges: true
- comments: true
- categories: [clustering]
- image: images/mall-customer.jpg
**Installing the Libraries**
```
# for basic mathematics operation
import numpy as np
import pandas as pd
from pandas import plotting
# for visualizations
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('fivethirtyeight')
# for interactive visualizations (plotly renders inline in the notebook)
import plotly.offline as py
from plotly.offline import init_notebook_mode, iplot
import plotly.graph_objs as go
from plotly import tools
init_notebook_mode(connected = True)  # enable offline plotly in Jupyter
import plotly.figure_factory as ff
```
**Reading the Dataset**
```
# importing the dataset
data = pd.read_csv('data/Mall_Customers.csv')
# display the full frame (last expression in the cell is rendered by Jupyter)
data
```
Alright, we have the data to work with; now let's inspect the dataset columns to see what data types they are.
```
# column dtypes, non-null counts, and memory usage
data.info()
```
Looks like we do not have any missing values in the rows, but we can still check for it using isnull() method from pandas dataframe
```
# checking if there is any NULL data
# (first any() reduces per column, second reduces to a single bool)
data.isnull().any().any()
```
## Data Visualization
Let's start some EDA (Explanatory Data Analysis) by graphing the data and looking for patterns. First, I want to take a look at the distributions of age and annual income to see if we can find any interesting pattern.
```
import warnings
warnings.filterwarnings('ignore')
# side-by-side histograms+KDE of Annual Income and Age
# NOTE(review): sns.distplot is deprecated in newer seaborn (use histplot/
# displot) -- confirm against the pinned seaborn version.
plt.rcParams['figure.figsize'] = (18, 8)
plt.subplot(1, 2, 1)
sns.set(style = 'whitegrid')
sns.distplot(data['Annual Income (k$)'])
plt.title('Distribution of Annual Income', fontsize = 20)
plt.xlabel('Range of Annual Income ($ k)')
plt.ylabel('Count')
plt.subplot(1, 2, 2)
sns.set(style = 'whitegrid')
sns.distplot(data['Age'], color = 'red')
plt.title('Distribution of Age', fontsize = 20)
plt.xlabel('Range of Age')
plt.ylabel('Count')
plt.show()
```
> Taking inference about annual income
>> There are few people who earn more than 100 k US Dollars.
>> Most of the people have an earning of around 50-75 k US Dollars.
>> Also, we can say that the least annual income is around 20 k US Dollars.
> Taking inferences about the Customers.
>> The most regular customers for the Mall has age around 30-35 years of age.
>> Whereas the the senior citizens age group is the least frequent visitor in the Mall.
>> Youngsters are lesser in number as compared to the Middle aged people.
We could also take a look at the gender distrubution.
```
# Pie chart of the gender split.
size = data['Gender'].value_counts()
# Fix: labels are now taken from the value_counts() index itself, so the
# label order always matches the wedge order.  The original hard-coded
# ['Female', 'Male'], which silently mislabels the chart if the majority
# class ever differs from that assumption.
labels = size.index
colors = ['lightgreen', 'orange']
explode = [0, 0.1]  # pop the second wedge out slightly
plt.rcParams['figure.figsize'] = (9, 9)
plt.pie(size, colors = colors, explode = explode, labels = labels, shadow = True, autopct = '%.2f%%')
plt.title('Gender', fontsize = 20)
plt.axis('off')
plt.legend()
plt.show()
```
> By looking at the above pie chart which explains about the distribution of Gender in the Mall
>> Interestingly, The Females are in the lead with a share of 56% whereas the Males have a share of 44%, that's a huge gap specially when the population of Males is comparatively higher than Females.
Let's do more distribution on the Age, Annual Income and Spending Score.
```
# one bar per distinct age value
plt.rcParams['figure.figsize'] = (15, 8)
sns.countplot(data['Age'], palette = 'hsv')
plt.title('Distribution of Age', fontsize = 20)
plt.show()
```
> This Graph shows a more Interactive Chart about the distribution of each Age Group in the Mall, for more clarity about the Visitor's Age Group in the Mall.
>> By looking at the above graph, it can be seen that ages 27 to 39 are very frequent, but there is no clear pattern; we can only find some group-wise patterns, such as the older age groups being less frequent in comparison.
>> Interesting fact: there are equal numbers of visitors in the Mall for the ages 18 and 67.
>> People of Age 55, 56, 69, 64 are very less frequent in the Malls.
>> People at Age 32 are the Most Frequent Visitors in the Mall.
```
# one bar per distinct annual-income value
plt.rcParams['figure.figsize'] = (20, 8)
sns.countplot(data['Annual Income (k$)'], palette = 'rainbow')
plt.title('Distribution of Annual Income', fontsize = 20)
plt.show()
```
> Again, this is a chart to better explain the distribution of each income level. Interestingly, there are customers in the mall with very comparable frequency across annual incomes ranging from 15k US Dollars to 137k US Dollars.
> There are more customers in the Mall who have their annual income as 54k US Dollars or 78k US Dollars.
```
# one bar per distinct spending-score value
plt.rcParams['figure.figsize'] = (20, 8)
sns.countplot(data['Spending Score (1-100)'], palette = 'copper')
plt.title('Distribution of Spending Score', fontsize = 20)
plt.show()
```
> This is the Most Important Chart in the perspective of Mall, as It is very Important to have some intuition and idea about the Spending Score of the Customers Visiting the Mall.
>> On a general level, we may conclude that most of the Customers have their Spending Score in the range of 35-60.
>> Interestingly, there are customers with a Spending Score as low as 1 and as high as 99, which shows that the mall caters to a variety of customers with varying needs and requirements.
We can take a look at which variables are correlated using pairplot and heatmaps
```
# pairwise scatter matrix of all columns
sns.pairplot(data)
plt.title('Pairplot for the Data', fontsize = 20)
plt.show()
# correlation heatmap
# NOTE(review): data.corr() on a frame with a string 'Gender' column raises
# in newer pandas unless numeric_only=True -- verify the pinned version.
plt.rcParams['figure.figsize'] = (15, 8)
sns.heatmap(data.corr(), cmap = 'Wistia', annot = True)
plt.title('Heatmap for the Data', fontsize = 20)
plt.show()
```
> The Above Graph for Showing the correlation between the different attributes of the Mall Customer Segementation Dataset, This Heat map reflects the most correlated features with Orange Color and least correlated features with yellow color.
>> We can clearly see that these attributes do not have good correlation among them, that's why we will proceed with all of the features.
Let's take a look at each of the variables that are correlated
```
# Gender vs Spendscore: letter-value (boxen) plot of score per gender
plt.rcParams['figure.figsize'] = (18, 7)
sns.boxenplot(data['Gender'], data['Spending Score (1-100)'], palette = 'Blues')
plt.title('Gender vs Spending Score', fontsize = 20)
plt.show()
```
> Bi-variate Analysis between Gender and Spending Score,
>> It is clearly visible that most of the males have a Spending Score of around 25 to 70, whereas the females have a Spending Score of around 35 to 75 (the score is a 1-100 index, not a dollar amount), which again points to the fact that women are the shopping leaders.
```
#Gender vs Annual Income: violin plot of income per gender
plt.rcParams['figure.figsize'] = (18, 7)
sns.violinplot(data['Gender'], data['Annual Income (k$)'], palette = 'rainbow')
# Fix: the title previously read 'Gender vs Spending Score', copied from the
# preceding cell; this plot actually shows Annual Income.
plt.title('Gender vs Annual Income', fontsize = 20)
plt.show()
```
> Again a Bivariate Analysis between the Gender and the Annual Income, to better visualize the Income of the different Genders.
>> There are more number of males who get paid more than females. But, The number of males and females are equal in number when it comes to low annual income.
```
#Annual Income vs Age and Spending Score
# NOTE: x/y/z are reused as feature variables in later cells; this cell
# shadows them with Series.
x = data['Annual Income (k$)']
y = data['Age']
z = data['Spending Score (1-100)']
sns.lineplot(x, y, color = 'blue')   # income vs age
sns.lineplot(x, z, color = 'pink')   # income vs spending score
plt.title('Annual Income vs Age and Spending Score', fontsize = 20)
plt.show()
```
> The above Plot Between Annual Income and Age represented by a blue color line, and a plot between Annual Income and the Spending Score represented by a pink color. shows how Age and Spending Varies with Annual Income.
## Clustering Analysis
```
# select columns 3 and 4 (Annual Income, Spending Score) as the feature matrix
x = data.iloc[:, [3, 4]].values
# let's check the shape of x
print(x.shape)
```
## Kmeans Algorithm
**The Elbow Method to find the No. of Optimal Clusters**
```
from sklearn.cluster import KMeans
# within-cluster sum of squares for k = 1..10 (elbow method)
wcss = []
for i in range(1, 11):
    km = KMeans(n_clusters = i, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0)
    km.fit(x)
    wcss.append(km.inertia_)  # inertia_ == WCSS for the fitted model
plt.plot(range(1, 11), wcss)
plt.title('The Elbow Method', fontsize = 20)
plt.xlabel('No. of Clusters')
plt.ylabel('wcss')
plt.show()
```
**Visualizaing the Clusters**
```
# Fit k-means with the k=5 chosen from the elbow plot and scatter each
# cluster with a descriptive label.
km = KMeans(n_clusters = 5, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0)
y_means = km.fit_predict(x)
plt.scatter(x[y_means == 0, 0], x[y_means == 0, 1], s = 100, c = 'pink', label = 'miser')
plt.scatter(x[y_means == 1, 0], x[y_means == 1, 1], s = 100, c = 'yellow', label = 'general')
plt.scatter(x[y_means == 2, 0], x[y_means == 2, 1], s = 100, c = 'cyan', label = 'target')
plt.scatter(x[y_means == 3, 0], x[y_means == 3, 1], s = 100, c = 'magenta', label = 'spendthrift')
plt.scatter(x[y_means == 4, 0], x[y_means == 4, 1], s = 100, c = 'orange', label = 'careful')
# Fix: legend label typo 'centeroid' -> 'centroid'
plt.scatter(km.cluster_centers_[:,0], km.cluster_centers_[:, 1], s = 50, c = 'blue' , label = 'centroid')
plt.style.use('fivethirtyeight')
plt.title('K Means Clustering', fontsize = 20)
plt.xlabel('Annual Income')
plt.ylabel('Spending Score')
plt.legend()
plt.grid()
plt.show()
```
> This Clustering Analysis gives us a very clear insight about the different segments of the customers in the Mall. There are clearly Five segments of Customers namely Miser, General, Target, Spendthrift, Careful based on their Annual Income and Spending Score which are reportedly the best factors/attributes to determine the segments of a customer in a Mall.
## Hierarchial Clustering
>Hierarchical clustering, also known as hierarchical cluster analysis, is an algorithm that groups similar objects into groups called clusters. The endpoint is a set of clusters, where each cluster is distinct from each other cluster, and the objects within each cluster are broadly similar to each other
**Using Dendrograms to find the no. of Optimal Clusters**
```
import scipy.cluster.hierarchy as sch
# Ward-linkage dendrogram; the largest vertical gaps suggest the cluster count.
dendrogram = sch.dendrogram(sch.linkage(x, method = 'ward'))
# Fix: chart-text typos 'Dendrogam' -> 'Dendrogram', 'Ecuclidean' -> 'Euclidean'
plt.title('Dendrogram', fontsize = 20)
plt.xlabel('Customers')
plt.ylabel('Euclidean Distance')
plt.show()
```
**Visualizing the Clusters of Hierarchial Clustering**
```
from sklearn.cluster import AgglomerativeClustering
# Agglomerative (Ward) clustering with the same k=5 as the k-means run.
hc = AgglomerativeClustering(n_clusters = 5, affinity = 'euclidean', linkage = 'ward')
y_hc = hc.fit_predict(x)
plt.scatter(x[y_hc == 0, 0], x[y_hc == 0, 1], s = 100, c = 'pink', label = 'miser')
plt.scatter(x[y_hc == 1, 0], x[y_hc == 1, 1], s = 100, c = 'yellow', label = 'general')
plt.scatter(x[y_hc == 2, 0], x[y_hc == 2, 1], s = 100, c = 'cyan', label = 'target')
plt.scatter(x[y_hc == 3, 0], x[y_hc == 3, 1], s = 100, c = 'magenta', label = 'spendthrift')
plt.scatter(x[y_hc == 4, 0], x[y_hc == 4, 1], s = 100, c = 'orange', label = 'careful')
# Fix: the original overlaid km.cluster_centers_ -- centroids of the *k-means*
# model from an earlier cell -- on this hierarchical-clustering plot.
# Agglomerative clustering has no centroids, so that overlay was misleading
# and has been removed.
plt.style.use('fivethirtyeight')
plt.title('Hierarchial Clustering', fontsize = 20)
plt.xlabel('Annual Income')
plt.ylabel('Spending Score')
plt.legend()
plt.grid()
plt.show()
```
**Clusters of Customers Based on their Ages**
```
# re-select features: columns 2 and 4 (Age, Spending Score)
x = data.iloc[:, [2, 4]].values
x.shape
```
**K-means Algorithm**
```
from sklearn.cluster import KMeans
# Elbow method on the (Age, Spending Score) features.
wcss = []
for i in range(1, 11):
    kmeans = KMeans(n_clusters = i, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0)
    kmeans.fit(x)
    wcss.append(kmeans.inertia_)
plt.rcParams['figure.figsize'] = (15, 5)
plt.plot(range(1, 11), wcss)
plt.title('K-Means Clustering(The Elbow Method)', fontsize = 20)
# Fix: the axes were previously labelled 'Age'/'Count', but this elbow plot
# shows the number of clusters against the within-cluster sum of squares.
plt.xlabel('No. of Clusters')
plt.ylabel('WCSS')
plt.grid()
plt.show()
# Fit with k=4 and plot each cluster with a descriptive label.
kmeans = KMeans(n_clusters = 4, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0)
ymeans = kmeans.fit_predict(x)
plt.rcParams['figure.figsize'] = (10, 10)
plt.title('Cluster of Ages', fontsize = 30)
plt.scatter(x[ymeans == 0, 0], x[ymeans == 0, 1], s = 100, c = 'pink', label = 'Usual Customers' )
plt.scatter(x[ymeans == 1, 0], x[ymeans == 1, 1], s = 100, c = 'orange', label = 'Priority Customers')
plt.scatter(x[ymeans == 2, 0], x[ymeans == 2, 1], s = 100, c = 'lightgreen', label = 'Target Customers(Young)')
plt.scatter(x[ymeans == 3, 0], x[ymeans == 3, 1], s = 100, c = 'red', label = 'Target Customers(Old)')
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s = 50, c = 'black')
plt.style.use('fivethirtyeight')
plt.xlabel('Age')
plt.ylabel('Spending Score (1-100)')
plt.legend()
plt.grid()
plt.show()
```
>According to my own intuition by looking at the above clustering plot between the age of the customers and their corresponding spending scores, I have aggregated them into 4 different categories namely Usual Customers, Priority Customers, Senior Citizen Target Customers, Young Target Customers. Then after getting the results we can accordingly make different marketing strategies and policies to optimize the spending scores of the customer in the Mall.
This below code is just to display 3D graph of the final findings
```
# 3-D view of the final 5-cluster k-means over Age / Spending Score / Income.
x = data[['Age', 'Spending Score (1-100)', 'Annual Income (k$)']].values
km = KMeans(n_clusters = 5, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0)
km.fit(x)
labels = km.labels_
centroids = km.cluster_centers_
# NOTE(review): this mutates the shared DataFrame by adding a 'labels' column.
data['labels'] = labels
trace1 = go.Scatter3d(
    x= data['Age'],
    y= data['Spending Score (1-100)'],
    z= data['Annual Income (k$)'],
    mode='markers',
    marker=dict(
        color = data['labels'],  # color points by cluster id
        size= 10,
        line=dict(
            color= data['labels'],
            width= 12
        ),
        opacity=0.8
    )
)
df = [trace1]
layout = go.Layout(
    # Fix: the title previously read 'Character vs Gender vs Alive or not',
    # a copy-paste leftover from an unrelated dataset.
    title = 'Age vs Spending Score vs Annual Income',
    margin=dict(
        l=0,
        r=0,
        b=0,
        t=0
    ),
    scene = dict(
        xaxis = dict(title = 'Age'),
        yaxis = dict(title = 'Spending Score'),
        zaxis = dict(title = 'Annual Income')
    )
)
fig = go.Figure(data = df, layout = layout)
py.iplot(fig)
```
| github_jupyter |
```
from __future__ import absolute_import, division, print_function
import pandas as pd
import os
import sys
import datetime
# import data analysis modules
import openbadge_analysis as ob
import openbadge_analysis.core
# Bokeh
from bokeh.io import output_notebook
from bokeh.charts import show
import openbadge_analysis.visualization.contribution as contribution
import openbadge_analysis.visualization.participation as participation
import openbadge_analysis.visualization.participation_interactive as participation_interactive
output_notebook()  # render Bokeh plots inline in the notebook
# Matplotlib for additional customization
from matplotlib import pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set(style="whitegrid")
# Path to the raw badge log for one meeting
log_file_path = '../data/meeting_data/sample_meeting.txt'
# Generate names for badge (or you can enter manually)
meeting_name = "Sample"
# Read data
df_meeting = ob.core.sample2data(log_file_path)
# Create speaking intervals
df_stitched = ob.core.make_df_stitched(df_meeting) # Data for speaking intervals
print("Meeting data:")
print(df_meeting.head())
print("\n\n")
print("Stitched data:")
print(df_stitched.head())
# Generate names for badge (or you can enter manually)
# Anonymized name per badge key: member_0, member_1, ...
member_keys = list(df_stitched.columns.values)
member_names = {x[1]:'member_'+str(x[0]) for x in enumerate(member_keys)}
'''
member_names = {
    'B7H6NPMKQT': 'Brittany',
    '8JB8GZYFVW': 'Shirley',
    'TYCN86EZ5F': 'Don',
    'TZPQ1AKVD2': 'Aaron',
    '9LLE6DZUPU': 'Alexander',
    'QNT3BBQ0X1': 'Otto'
}
'''
member_names
```
# Seaborn Examples
Simple examples using Seaborn/Matplotlib
```
# Calculate each member's percentage of total speaking time.
# df_stitched columns are members; summing counts each member's speaking intervals.
speaking_time = df_stitched.sum()
speaking_time = speaking_time.to_frame('intervals').reset_index()#.pivot(columns='member',values='intervals')
speaking_intervals_total = speaking_time.intervals.sum()
# 'p' = each member's share of all speaking intervals (shares sum to 1.0)
speaking_time['p'] = speaking_time['intervals']/speaking_intervals_total
# add human-readable member name by joining on the badge key ('member' column)
member_names_df = pd.DataFrame(index=member_names.keys(), data=member_names.values(), columns=['member_name'])
speaking_time = speaking_time.join(member_names_df, on='member')
speaking_time
# Bar chart of speaking share; rotate x labels so long names stay readable.
ax = sns.barplot(x="member_name", y="p", data=speaking_time)
ax.set_xticklabels(labels = ax.get_xticklabels(), rotation=60, ha='right')
```
# Bokeh Examples
More complex examples that uses the Bokeh library. Note that the Bokeh version we use for these examples are quite old, and that newer versions might not support these types of figures.
```
# Generate contribution graph (who spoke, and when) for this meeting
plot = contribution.contribution_plot(df_stitched, meeting_name, member_names=member_names)
show(plot)
# Participation plot; df_stitched_all expects a nested list of meetings,
# hence the [[df_stitched]] wrapping for a single meeting.
plot = participation.participation_plot(df_stitched_all=[[df_stitched]], labels=[meeting_name],
member_names=member_names)
show(plot)
```
| github_jupyter |
**Important: This notebook will only work with fastai-0.7.x. Do not try to run any fastai-1.x code from this path in the repository because it will load fastai-0.7.x**
# Intro to Random Forests
## About this course
### Teaching approach
This course is being taught by Jeremy Howard, and was developed by Jeremy along with Rachel Thomas. Rachel has been dealing with a life-threatening illness so will not be teaching as originally planned this year.
Jeremy has worked in a number of different areas - feel free to ask about anything that he might be able to help you with at any time, even if not directly related to the current topic:
- Management consultant (McKinsey; AT Kearney)
- Self-funded startup entrepreneur (Fastmail: first consumer synchronized email; Optimal Decisions: first optimized insurance pricing)
- VC-funded startup entrepreneur: (Kaggle; Enlitic: first deep-learning medical company)
I'll be using a *top-down* teaching method, which is different from how most math courses operate. Typically, in a *bottom-up* approach, you first learn all the separate components you will be using, and then you gradually build them up into more complex structures. The problems with this are that students often lose motivation, don't have a sense of the "big picture", and don't know what they'll need.
If you took the fast.ai deep learning course, that is what we used. You can hear more about my teaching philosophy [in this blog post](http://www.fast.ai/2016/10/08/teaching-philosophy/) or [in this talk](https://vimeo.com/214233053).
Harvard Professor David Perkins has a book, [Making Learning Whole](https://www.amazon.com/Making-Learning-Whole-Principles-Transform/dp/0470633719) in which he uses baseball as an analogy. We don't require kids to memorize all the rules of baseball and understand all the technical details before we let them play the game. Rather, they start playing with a just general sense of it, and then gradually learn more rules/details as time goes on.
All that to say, don't worry if you don't understand everything at first! You're not supposed to. We will start using some "black boxes" such as random forests that haven't yet been explained in detail, and then we'll dig into the lower level details later.
To start, focus on what things DO, not what they ARE.
### Your practice
People learn by:
1. **doing** (coding and building)
2. **explaining** what they've learned (by writing or helping others)
Therefore, we suggest that you practice these skills on Kaggle by:
1. Entering competitions (*doing*)
2. Creating Kaggle kernels (*explaining*)
It's OK if you don't get good competition ranks or any kernel votes at first - that's totally normal! Just try to keep improving every day, and you'll see the results over time.
To get better at technical writing, study the top ranked Kaggle kernels from past competitions, and read posts from well-regarded technical bloggers. Some good role models include:
- [Peter Norvig](http://nbviewer.jupyter.org/url/norvig.com/ipython/ProbabilityParadox.ipynb) (more [here](http://norvig.com/ipython/))
- [Stephen Merity](https://smerity.com/articles/2017/deepcoder_and_ai_hype.html)
- [Julia Evans](https://codewords.recurse.com/issues/five/why-do-neural-networks-think-a-panda-is-a-vulture) (more [here](https://jvns.ca/blog/2014/08/12/what-happens-if-you-write-a-tcp-stack-in-python/))
- [Julia Ferraioli](http://blog.juliaferraioli.com/2016/02/exploring-world-using-vision-twilio.html)
- [Edwin Chen](http://blog.echen.me/2014/10/07/moving-beyond-ctr-better-recommendations-through-human-evaluation/)
- [Slav Ivanov](https://blog.slavv.com/picking-an-optimizer-for-style-transfer-86e7b8cba84b) (fast.ai student)
- [Brad Kenstler](https://hackernoon.com/non-artistic-style-transfer-or-how-to-draw-kanye-using-captain-picards-face-c4a50256b814) (fast.ai and USF MSAN student)
### Books
The more familiarity you have with numeric programming in Python, the better. If you're looking to improve in this area, we strongly suggest Wes McKinney's [Python for Data Analysis, 2nd ed](https://www.amazon.com/Python-Data-Analysis-Wrangling-IPython/dp/1491957662/ref=asap_bc?ie=UTF8).
For machine learning with Python, we recommend:
- [Introduction to Machine Learning with Python](https://www.amazon.com/Introduction-Machine-Learning-Andreas-Mueller/dp/1449369413): From one of the scikit-learn authors, which is the main library we'll be using
- [Python Machine Learning: Machine Learning and Deep Learning with Python, scikit-learn, and TensorFlow, 2nd Edition](https://www.amazon.com/Python-Machine-Learning-scikit-learn-TensorFlow/dp/1787125939/ref=dp_ob_title_bk): New version of a very successful book. A lot of the new material however covers deep learning in Tensorflow, which isn't relevant to this course
- [Hands-On Machine Learning with Scikit-Learn and TensorFlow](https://www.amazon.com/Hands-Machine-Learning-Scikit-Learn-TensorFlow/dp/1491962291/ref=pd_lpo_sbs_14_t_0?_encoding=UTF8&psc=1&refRID=MBV2QMFH3EZ6B3YBY40K)
### Syllabus in brief
Depending on time and class interests, we'll cover something like (not necessarily in this order):
- Train vs test
- Effective validation set construction
- Trees and ensembles
- Creating random forests
- Interpreting random forests
- What is ML? Why do we use it?
- What makes a good ML project?
- Structured vs unstructured data
- Examples of failures/mistakes
- Feature engineering
- Domain specific - dates, URLs, text
- Embeddings / latent factors
- Regularized models trained with SGD
- GLMs, Elasticnet, etc (NB: see what James covered)
- Basic neural nets
- PyTorch
- Broadcasting, Matrix Multiplication
- Training loop, backpropagation
- KNN
- CV / bootstrap (Diabetes data set?)
- Ethical considerations
Skip:
- Dimensionality reduction
- Interactions
- Monitoring training
- Collaborative filtering
- Momentum and LR annealing
## Imports
```
# Auto-reload edited modules so fastai library changes are picked up live.
%load_ext autoreload
%autoreload 2
%matplotlib inline
from fastai.imports import *
from fastai.structured import *
from pandas_summary import DataFrameSummary
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from IPython.display import display
from sklearn import metrics
# Directory containing the Kaggle "Blue Book for Bulldozers" CSVs.
PATH = "data/bulldozers/"
!ls {PATH}
```
# Introduction to *Blue Book for Bulldozers*
## About...
### ...our teaching
At fast.ai we have a distinctive [teaching philosophy](http://www.fast.ai/2016/10/08/teaching-philosophy/) of ["the whole game"](https://www.amazon.com/Making-Learning-Whole-Principles-Transform/dp/0470633719/ref=sr_1_1?ie=UTF8&qid=1505094653). This is different from how most traditional math & technical courses are taught, where you have to learn all the individual elements before you can combine them (Harvard professor David Perkins call this *elementitis*), but it is similar to how topics like *driving* and *baseball* are taught. That is, you can start driving without [knowing how an internal combustion engine works](https://medium.com/towards-data-science/thoughts-after-taking-the-deeplearning-ai-courses-8568f132153), and children begin playing baseball before they learn all the formal rules.
### ...our approach to machine learning
Most machine learning courses will throw at you dozens of different algorithms, with a brief technical description of the math behind them, and maybe a toy example. You're left confused by the enormous range of techniques shown and have little practical understanding of how to apply them.
The good news is that modern machine learning can be distilled down to a couple of key techniques that are of very wide applicability. Recent studies have shown that the vast majority of datasets can be best modeled with just two methods:
- *Ensembles of decision trees* (i.e. Random Forests and Gradient Boosting Machines), mainly for structured data (such as you might find in a database table at most companies)
- *Multi-layered neural networks learnt with SGD* (i.e. shallow and/or deep learning), mainly for unstructured data (such as audio, vision, and natural language)
In this course we'll be doing a deep dive into random forests, and simple models learnt with SGD. You'll be learning about gradient boosting and deep learning in part 2.
### ...this dataset
We will be looking at the Blue Book for Bulldozers Kaggle Competition: "The goal of the contest is to predict the sale price of a particular piece of heavy equipment at auction based on its usage, equipment type, and configuration. The data is sourced from auction result postings and includes information on usage and equipment configurations."
This is a very common type of dataset and prediction problem, and similar to what you may see in your project or workplace.
### ...Kaggle Competitions
Kaggle is an awesome resource for aspiring data scientists or anyone looking to improve their machine learning skills. There is nothing like being able to get hands-on practice and receiving real-time feedback to help you improve your skills.
Kaggle provides:
1. Interesting data sets
2. Feedback on how you're doing
3. A leader board to see what's good, what's possible, and what's state-of-art.
4. Blog posts by winning contestants share useful tips and techniques.
## The data
### Look at the data
Kaggle provides info about some of the fields of our dataset; on the [Kaggle Data info](https://www.kaggle.com/c/bluebook-for-bulldozers/data) page they say the following:
For this competition, you are predicting the sale price of bulldozers sold at auctions. The data for this competition is split into three parts:
- **Train.csv** is the training set, which contains data through the end of 2011.
- **Valid.csv** is the validation set, which contains data from January 1, 2012 - April 30, 2012. You make predictions on this set throughout the majority of the competition. Your score on this set is used to create the public leaderboard.
- **Test.csv** is the test set, which won't be released until the last week of the competition. It contains data from May 1, 2012 - November 2012. Your score on the test set determines your final rank for the competition.
The key fields are in train.csv are:
- SalesID: the unique identifier of the sale
- MachineID: the unique identifier of a machine. A machine can be sold multiple times
- saleprice: what the machine sold for at auction (only provided in train.csv)
- saledate: the date of the sale
*Question*
What stands out to you from the above description? What needs to be true of our training and validation sets?
```
# low_memory=False reads the whole file at once for consistent dtype inference;
# parse saledate up front so date features can be extracted later.
df_raw = pd.read_csv(f'{PATH}Train.csv', low_memory=False,
parse_dates=["saledate"])
```
In any sort of data science work, it's **important to look at your data**, to make sure you understand the format, how it's stored, what type of values it holds, etc. Even if you've read descriptions about your data, the actual data may not be what you expect.
```
def display_all(df):
    """Show *df* in full: temporarily lift pandas' row/column display limits."""
    limit = 1000
    with pd.option_context("display.max_rows", limit, "display.max_columns", limit):
        display(df)
# Transpose so these wide frames are easier to scan in the notebook.
display_all(df_raw.tail().T)
display_all(df_raw.describe(include='all').T)
```
It's important to note what metric is being used for a project. Generally, selecting the metric(s) is an important part of the project setup. However, in this case Kaggle tells us what metric to use: RMSLE (root mean squared log error) between the actual and predicted auction prices. Therefore we take the log of the prices, so that RMSE will give us what we need.
```
df_raw.SalePrice = np.log(df_raw.SalePrice)
```
### Initial processing
```
m = RandomForestRegressor(n_jobs=-1)
# The following code is supposed to fail due to string values in the input data
m.fit(df_raw.drop('SalePrice', axis=1), df_raw.SalePrice)
```
This dataset contains a mix of **continuous** and **categorical** variables.
The following method extracts particular date fields from a complete datetime for the purpose of constructing categoricals. You should always consider this feature extraction step when working with date-time. Without expanding your date-time into these additional fields, you can't capture any trend/cyclical behavior as a function of time at any of these granularities.
```
add_datepart(df_raw, 'saledate')
df_raw.saleYear.head()
```
The categorical variables are currently stored as strings, which is inefficient, and doesn't provide the numeric coding required for a random forest. Therefore we call `train_cats` to convert strings to pandas categories.
```
train_cats(df_raw)
```
We can specify the order to use for categorical variables if we wish:
```
df_raw.UsageBand.cat.categories
df_raw.UsageBand.cat.set_categories(['High', 'Medium', 'Low'], ordered=True, inplace=True)
```
Normally, pandas will continue displaying the text categories, while treating them as numerical data internally. Optionally, we can replace the text categories with numbers, which will make this variable non-categorical, like so:
```
df_raw.UsageBand = df_raw.UsageBand.cat.codes
```
We're still not quite done - for instance we have lots of missing values, which we can't pass directly to a random forest.
```
display_all(df_raw.isnull().sum().sort_index()/len(df_raw))
```
But let's save this file for now, since it's already in a format that can be stored and accessed efficiently.
```
os.makedirs('tmp', exist_ok=True)
df_raw.to_feather('tmp/bulldozers-raw')
```
### Pre-processing
In the future we can simply read it from this fast format.
```
df_raw = pd.read_feather('tmp/bulldozers-raw')
```
We'll replace categories with their numeric codes, handle missing continuous values, and split the dependent variable into a separate variable.
```
df, y, nas = proc_df(df_raw, 'SalePrice')
```
We now have something we can pass to a random forest!
```
m = RandomForestRegressor(n_jobs=-1)
m.fit(df, y)
m.score(df,y)
```
In statistics, the coefficient of determination, denoted R2 or r2 and pronounced "R squared", is the proportion of the variance in the dependent variable that is predictable from the independent variable(s). https://en.wikipedia.org/wiki/Coefficient_of_determination
Wow, an r^2 of 0.98 - that's great, right? Well, perhaps not...
Possibly **the most important idea** in machine learning is that of having separate training & validation data sets. As motivation, suppose you don't divide up your data, but instead use all of it. And suppose you have lots of parameters:
<img src="images/overfitting2.png" alt="" style="width: 70%"/>
<center>
[Underfitting and Overfitting](https://datascience.stackexchange.com/questions/361/when-is-a-model-underfitted)
</center>
The error for the pictured data points is lowest for the model on the far right (the blue curve passes through the red points almost perfectly), yet it's not the best choice. Why is that? If you were to gather some new data points, they most likely would not be on that curve in the graph on the right, but would be closer to the curve in the middle graph.
This illustrates how using all our data can lead to **overfitting**. A validation set helps diagnose this problem.
```
def split_vals(a,n): return a[:n].copy(), a[n:].copy()
n_valid = 12000 # same as Kaggle's test set size
n_trn = len(df)-n_valid
# Split by position (the data is date-ordered), so the validation set is the
# most recent n_valid rows -- mimicking Kaggle's time-based split.
raw_train, raw_valid = split_vals(df_raw, n_trn)
X_train, X_valid = split_vals(df, n_trn)
y_train, y_valid = split_vals(y, n_trn)
X_train.shape, y_train.shape, X_valid.shape
```
# Random Forests
## Base model
Let's try our model again, this time with separate training and validation sets.
```
def rmse(x,y): return math.sqrt(((x-y)**2).mean())
def print_score(m):
    """Print [train RMSE, valid RMSE, train R^2, valid R^2, (OOB R^2 if set)].

    Relies on the notebook globals X_train/y_train/X_valid/y_valid.
    """
    scores = [
        rmse(m.predict(X_train), y_train),
        rmse(m.predict(X_valid), y_valid),
        m.score(X_train, y_train),
        m.score(X_valid, y_valid),
    ]
    # oob_score_ only exists when the model was fit with oob_score=True.
    if hasattr(m, 'oob_score_'):
        scores.append(m.oob_score_)
    print(scores)
m = RandomForestRegressor(n_jobs=-1)
%time m.fit(X_train, y_train)
print_score(m)
```
An r^2 in the high-80's isn't bad at all (and the RMSLE puts us around rank 100 of 470 on the Kaggle leaderboard), but we can see from the validation set score that we're over-fitting badly. To understand this issue, let's simplify things down to a single small tree.
## Speeding things up
```
df_trn, y_trn, nas = proc_df(df_raw, 'SalePrice', subset=30000, na_dict=nas)
X_train, _ = split_vals(df_trn, 20000)
y_train, _ = split_vals(y_trn, 20000)
m = RandomForestRegressor(n_jobs=-1)
%time m.fit(X_train, y_train)
print_score(m)
```
## Single tree
```
m = RandomForestRegressor(n_estimators=1, max_depth=3, bootstrap=False, n_jobs=-1)
m.fit(X_train, y_train)
print_score(m)
draw_tree(m.estimators_[0], df_trn, precision=3)
```
Let's see what happens if we create a bigger tree.
```
m = RandomForestRegressor(n_estimators=1, bootstrap=False, n_jobs=-1)
m.fit(X_train, y_train)
print_score(m)
```
The training set result looks great! But the validation set is worse than our original model. This is why we need to use *bagging* of multiple trees to get more generalizable results.
## Bagging
### Intro to bagging
To learn about bagging in random forests, let's start with our basic model again.
```
m = RandomForestRegressor(n_jobs=-1)
m.fit(X_train, y_train)
print_score(m)
```
We'll grab the predictions for each individual tree, and look at one example.
```
preds = np.stack([t.predict(X_valid) for t in m.estimators_])
preds[:,0], np.mean(preds[:,0]), y_valid[0]
preds.shape
plt.plot([metrics.r2_score(y_valid, np.mean(preds[:i+1], axis=0)) for i in range(10)]);
```
The shape of this curve suggests that adding more trees isn't going to help us much. Let's check. (Compare this to our original model on a sample)
```
m = RandomForestRegressor(n_estimators=20, n_jobs=-1)
m.fit(X_train, y_train)
print_score(m)
m = RandomForestRegressor(n_estimators=40, n_jobs=-1)
m.fit(X_train, y_train)
print_score(m)
m = RandomForestRegressor(n_estimators=80, n_jobs=-1)
m.fit(X_train, y_train)
print_score(m)
```
### Out-of-bag (OOB) score
Is our validation set worse than our training set because we're over-fitting, or because the validation set is for a different time period, or a bit of both? With the existing information we've shown, we can't tell. However, random forests have a very clever trick called *out-of-bag (OOB) error* which can handle this (and more!)
The idea is to calculate error on the training set, but only include the trees in the calculation of a row's error where that row was *not* included in training that tree. This allows us to see whether the model is over-fitting, without needing a separate validation set.
This also has the benefit of allowing us to see whether our model generalizes, even if we only have a small amount of data so want to avoid separating some out to create a validation set.
This is as simple as adding one more parameter to our model constructor. We print the OOB error last in our `print_score` function below.
```
m = RandomForestRegressor(n_estimators=40, n_jobs=-1, oob_score=True)
m.fit(X_train, y_train)
print_score(m)
```
This shows that our validation set time difference is making an impact, as is model over-fitting.
## Reducing over-fitting
### Subsampling
It turns out that one of the easiest ways to avoid over-fitting is also one of the best ways to speed up analysis: *subsampling*. Let's return to using our full dataset, so that we can demonstrate the impact of this technique.
```
df_trn, y_trn, nas = proc_df(df_raw, 'SalePrice')
X_train, X_valid = split_vals(df_trn, n_trn)
y_train, y_valid = split_vals(y_trn, n_trn)
```
The basic idea is this: rather than limit the total amount of data that our model can access, let's instead limit it to a *different* random subset per tree. That way, given enough trees, the model can still see *all* the data, but for each individual tree it'll be just as fast as if we had cut down our dataset as before.
```
set_rf_samples(20000)
m = RandomForestRegressor(n_jobs=-1, oob_score=True)
%time m.fit(X_train, y_train)
print_score(m)
```
Since each additional tree allows the model to see more data, this approach can make additional trees more useful.
```
m = RandomForestRegressor(n_estimators=40, n_jobs=-1, oob_score=True)
m.fit(X_train, y_train)
print_score(m)
```
### Tree building parameters
We revert to using a full bootstrap sample in order to show the impact of other over-fitting avoidance methods.
```
reset_rf_samples()
```
Let's get a baseline for this full set to compare to.
```
def dectree_max_depth(tree):
    """Return the depth of a fitted sklearn decision tree, counted in nodes
    (a single leaf has depth 1)."""
    left = tree.children_left
    right = tree.children_right

    def depth_of(node_id):
        # sklearn marks leaves with identical sentinel child ids (-1, -1).
        if left[node_id] == right[node_id]:
            return 1
        return 1 + max(depth_of(left[node_id]), depth_of(right[node_id]))

    return depth_of(0)
m = RandomForestRegressor(n_estimators=40, n_jobs=-1, oob_score=True)
m.fit(X_train, y_train)
print_score(m)
t=m.estimators_[0].tree_
dectree_max_depth(t)
m = RandomForestRegressor(n_estimators=40, min_samples_leaf=5, n_jobs=-1, oob_score=True)
m.fit(X_train, y_train)
print_score(m)
t=m.estimators_[0].tree_
dectree_max_depth(t)
```
Another way to reduce over-fitting is to grow our trees less deeply. We do this by specifying (with `min_samples_leaf`) that we require some minimum number of rows in every leaf node. This has two benefits:
- There are fewer decision rules for each leaf node; simpler models should generalize better
- The predictions are made by averaging more rows in the leaf node, resulting in less volatility
```
m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, n_jobs=-1, oob_score=True)
m.fit(X_train, y_train)
print_score(m)
```
We can also increase the amount of variation amongst the trees by not only using a sample of rows for each tree, but also using a sample of *columns* for each *split*. We do this by specifying `max_features`, which is the proportion of features to randomly select from at each split.
- None
- 0.5
- 'sqrt'
- 1, 3, 5, 10, 25, 100
```
m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True)
m.fit(X_train, y_train)
print_score(m)
```
We can't compare our results directly with the Kaggle competition, since it used a different validation set (and we can no longer submit to this competition) - but we can at least see that we're getting similar results to the winners based on the dataset we have.
The sklearn docs [show an example](http://scikit-learn.org/stable/auto_examples/ensemble/plot_ensemble_oob.html) of different `max_features` methods with increasing numbers of trees - as you see, using a subset of features on each split requires using more trees, but results in better models:

| github_jupyter |
<a href="https://colab.research.google.com/github/b-whitman/DS-Unit-2-Linear-Models/blob/master/module3-ridge-regression/Ben_Whitman_213_assignment_regression_classification_3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Lambda School Data Science
*Unit 2, Sprint 1, Module 3*
---
# Ridge Regression
## Assignment
We're going back to our other **New York City** real estate dataset. Instead of predicting apartment rents, you'll predict property sales prices.
But not just for condos in Tribeca...
- [ ] Use a subset of the data where `BUILDING_CLASS_CATEGORY` == `'01 ONE FAMILY DWELLINGS'` and the sale price was more than 100 thousand and less than 2 million.
- [ ] Do train/test split. Use data from January — March 2019 to train. Use data from April 2019 to test.
- [ ] Do one-hot encoding of categorical features.
- [ ] Do feature selection with `SelectKBest`.
- [ ] Do [feature scaling](https://scikit-learn.org/stable/modules/preprocessing.html). Use the scaler's `fit_transform` method with the train set. Use the scaler's `transform` method with the test set.
- [ ] Fit a ridge regression model with multiple features.
- [ ] Get mean absolute error for the test set.
- [ ] As always, commit your notebook to your fork of the GitHub repo.
The [NYC Department of Finance](https://www1.nyc.gov/site/finance/taxes/property-rolling-sales-data.page) has a glossary of property sales terms and NYC Building Class Code Descriptions. The data comes from the [NYC OpenData](https://data.cityofnewyork.us/browse?q=NYC%20calendar%20sales) portal.
## Stretch Goals
Don't worry, you aren't expected to do all these stretch goals! These are just ideas to consider and choose from.
- [ ] Add your own stretch goal(s) !
- [ ] Instead of `Ridge`, try `LinearRegression`. Depending on how many features you select, your errors will probably blow up! 💥
- [ ] Instead of `Ridge`, try [`RidgeCV`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeCV.html).
- [ ] Learn more about feature selection:
- ["Permutation importance"](https://www.kaggle.com/dansbecker/permutation-importance)
- [scikit-learn's User Guide for Feature Selection](https://scikit-learn.org/stable/modules/feature_selection.html)
- [mlxtend](http://rasbt.github.io/mlxtend/) library
- scikit-learn-contrib libraries: [boruta_py](https://github.com/scikit-learn-contrib/boruta_py) & [stability-selection](https://github.com/scikit-learn-contrib/stability-selection)
- [_Feature Engineering and Selection_](http://www.feat.engineering/) by Kuhn & Johnson.
- [ ] Try [statsmodels](https://www.statsmodels.org/stable/index.html) if you’re interested in more inferential statistical approach to linear regression and feature selection, looking at p values and 95% confidence intervals for the coefficients.
- [ ] Read [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapters 1-3, for more math & theory, but in an accessible, readable way.
- [ ] Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html).
```
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
import pandas as pd
import pandas_profiling
# Read New York City property sales data
df = pd.read_csv(DATA_PATH+'condos/NYC_Citywide_Rolling_Calendar_Sales.csv')
# Change column names: replace spaces with underscores
df.columns = [col.replace(' ', '_') for col in df]
# SALE_PRICE was read as strings.
# Remove symbols, convert to integer
df['SALE_PRICE'] = (
df['SALE_PRICE']
.str.replace('$','')
.str.replace('-','')
.str.replace(',','')
.astype(int)
)
# BOROUGH is a numeric column, but arguably should be a categorical feature,
# so convert it from a number to a string
df['BOROUGH'] = df['BOROUGH'].astype(str)
# Reduce cardinality for NEIGHBORHOOD feature
# Get a list of the top 10 neighborhoods
top10 = df['NEIGHBORHOOD'].value_counts()[:10].index
# At locations where the neighborhood is NOT in the top 10,
# replace the neighborhood with 'OTHER'
df.loc[~df['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER'
# slice dataframe
df = df[(df['BUILDING_CLASS_CATEGORY'] == '01 ONE FAMILY DWELLINGS') & (df['SALE_PRICE'] > 100000) & (df['SALE_PRICE'] < 2000000)]
df.head()
df.dtypes
# Make 'LAND_SQUARE_FEET' numeric
df['LAND_SQUARE_FEET'] = pd.to_numeric(df['LAND_SQUARE_FEET'].str.replace(',' , ''))
# Train/test split
import datetime
df['SALE_DATETIME'] = pd.to_datetime(df['SALE_DATE'])
df.head()
train = df[(df['SALE_DATETIME'].dt.month == 1) | (df['SALE_DATETIME'].dt.month == 2) | (df['SALE_DATETIME'].dt.month == 3)]
train.describe(exclude='number')
test = df[df['SALE_DATETIME'].dt.month == 4]
test.describe(exclude='number')
# One-hot encoding of categorical features
target = 'SALE_PRICE'
exclude = ['BUILDING_CLASS_CATEGORY', 'ADDRESS', 'APARTMENT_NUMBER',
'SALE_DATE', 'SALE_DATETIME', 'EASE-MENT']
features = train.columns.drop([target] + exclude)
X_train = train[features]
y_train = train[target]
X_test = test[features]
y_test = test[target]
X_train.head()
y_train.head()
import category_encoders as ce
encoder = ce.OneHotEncoder(use_cat_names=True)
X_train = encoder.fit_transform(X_train)
X_test = encoder.transform(X_test)
print(X_train.shape)
X_train.head()
```
Ultimately, here I chose to exclude the 'SALE_DATE' column from one-hot encoding. I don't know if this was the right choice, but I felt that there were just too many resultant categories.
```
# Feature selection using SelectKBest
from sklearn.feature_selection import f_regression, SelectKBest
selector = SelectKBest(score_func=f_regression, k=15)
X_train_selected = selector.fit_transform(X_train, y_train)
# Q: Why does the fit_transform selector function need X and y but transform needs only X?
X_test_selected = selector.transform(X_test)
X_train_selected.shape
all_names = X_train.columns
selected_mask = selector.get_support()
selected_names = all_names[selected_mask]
unselected_names = all_names[~selected_mask]
print('Features Selected:')
for name in selected_names:
print(name)
print('')
print('Features Not Selected:')
for name in unselected_names:
print(name)
# Feature scaling
from sklearn.preprocessing import StandardScaler
# Bug fix: Ridge and mean_absolute_error were only imported further down this
# cell (in the "code from lecture" section), so running the cell top-to-bottom
# raised NameError here. Import them before first use.
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_absolute_error

scaler = StandardScaler()
# fit_transform on train only; reuse the fitted scaler on test to avoid leakage.
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Fit a ridge regression model on a hand-picked subset of scaled features.
X_train_scaled = pd.DataFrame(X_train_scaled, columns=X_train.columns)
X_test_scaled = pd.DataFrame(X_test_scaled, columns=X_test.columns)
features = ['BLOCK', 'GROSS_SQUARE_FEET', 'BUILDING_CLASS_AT_TIME_OF_SALE_A3']
model = Ridge(alpha=100)
model.fit(X_train_scaled[features], y_train)
y_pred = model.predict(X_test_scaled[features])

# Get MAE for the test set
mae = mean_absolute_error(y_test, y_pred)
print(f'Mean Absolute Error: {mae}')
# Code from lecture
%matplotlib inline
from IPython.display import display, HTML
from ipywidgets import interact
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_absolute_error
# Code from lecture
for alpha in [10**1, 10**2, 10**3, 10**4, 10**5, 10**6]:
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
display(HTML(f'Ridge Regression, with alpha={alpha}'))
model = Ridge(alpha=alpha)
model.fit(X_train_scaled, y_train)
y_pred = model.predict(X_test_scaled)
mae = mean_absolute_error(y_test, y_pred)
display(HTML(f'Test Mean Absolute Error: ${mae:,.0f}'))
coefficients = pd.Series(model.coef_, X_train.columns)
plt.figure(figsize=(16,8))
coefficients.sort_values().plot.barh(color='grey')
# plt.xlim(-400,700)
plt.show()
```
| github_jupyter |
```
!nvidia-smi -L
import sys
sys.path.append('/content/drive/MyDrive/sign_language/lcrnet-v2-improved-ppi')
#from lcr_net_ppi_improved import LCRNet_PPI_improved
sys.path.append('/content/drive/MyDrive/sign_language/DOPE')
import gzip
import pickle
import sys, os
import argparse
import os.path as osp
from PIL import Image
import cv2
import numpy as np
import time
import torch
import torch.nn as nn
from torchvision.transforms import ToTensor
from model import dope_resnet50, num_joints
import postprocess
import matplotlib.pyplot as plt
```
# Load features files PHOENIX
```
def load_dataset_file(filename):
    """Load and return the pickled object stored in a gzip-compressed file."""
    with gzip.open(filename, "rb") as compressed:
        return pickle.load(compressed)
def extract(tmp):
    """Index raw samples by sequence name.

    Records sharing a name must agree on their metadata; their 'sign'
    tensors are concatenated along axis 1.
    """
    samples = {}
    for record in tmp:
        key = record["name"]
        existing = samples.get(key)
        if existing is None:
            samples[key] = {
                "name": record["name"],
                "signer": record["signer"],
                "gloss": record["gloss"],
                "text": record["text"],
                "sign": record["sign"],
            }
        else:
            for field in ("name", "signer", "gloss", "text"):
                assert existing[field] == record[field]
            existing["sign"] = torch.cat(
                [existing["sign"], record["sign"]], axis=1
            )
    return samples
```
## TRAIN
```
tmp = load_dataset_file('/content/drive/MyDrive/sign_language/data/phoenix14t.pami0.train')
samples_train = extract(tmp)
```
## DEV
```
tmp = load_dataset_file('/content/drive/MyDrive/sign_language/data/phoenix14t.pami0.dev')
samples_dev = extract(tmp)
```
## TEST
```
tmp = load_dataset_file('/content/drive/MyDrive/sign_language/data/phoenix14t.pami0.test')
samples_test = extract(tmp)
```
# Paragraph Dataset
```
max_length = 440
```
## Train
```
# Group sequence numbers by their name prefix (text before the first '-'),
# preserving the insertion order of samples_train.
names_per_prefix = {}
for key in samples_train.keys():
    parts = key.split('-')
    names_per_prefix.setdefault(parts[0], []).append(int(parts[1]))
# Greedily merge consecutive sequences into paragraphs; start a new paragraph
# whenever the numbering has a gap or the combined sign length would
# exceed max_length frames.
paragraphs_all_train = []
for prefix, nums in names_per_prefix.items():
    grouped = []
    current = nums[0]
    name = prefix + '-' + str(current)
    length = samples_train[name]['sign'].shape[0]
    current_paragraph = [name]
    for num in nums[1:]:
        name = prefix + '-' + str(num)
        length += samples_train[name]['sign'].shape[0]
        if num - current > 1 or length > max_length:
            grouped.append(current_paragraph)
            length = samples_train[name]['sign'].shape[0]
            current_paragraph = [name]
        else:
            current_paragraph.append(name)
        current = num
    grouped.append(current_paragraph)
    paragraphs_all_train += grouped
len(paragraphs_all_train)
```
## Test
```
# Group sequence numbers by their name prefix (text before the first '-'),
# preserving the insertion order of samples_test.
names_per_prefix = {}
for key in samples_test.keys():
    parts = key.split('-')
    names_per_prefix.setdefault(parts[0], []).append(int(parts[1]))
# Greedily merge consecutive sequences into paragraphs; start a new paragraph
# whenever the numbering has a gap or the combined sign length would
# exceed max_length frames.
paragraphs_all_test = []
for prefix, nums in names_per_prefix.items():
    grouped = []
    current = nums[0]
    name = prefix + '-' + str(current)
    length = samples_test[name]['sign'].shape[0]
    current_paragraph = [name]
    for num in nums[1:]:
        name = prefix + '-' + str(num)
        length += samples_test[name]['sign'].shape[0]
        if num - current > 1 or length > max_length:
            grouped.append(current_paragraph)
            length = samples_test[name]['sign'].shape[0]
            current_paragraph = [name]
        else:
            current_paragraph.append(name)
        current = num
    grouped.append(current_paragraph)
    paragraphs_all_test += grouped
len(paragraphs_all_test)
```
## Dev
```
# Group sequence numbers by their name prefix (text before the first '-'),
# preserving the insertion order of samples_dev.
names_per_prefix = {}
for key in samples_dev.keys():
    parts = key.split('-')
    names_per_prefix.setdefault(parts[0], []).append(int(parts[1]))
# Greedily merge consecutive sequences into paragraphs; start a new paragraph
# whenever the numbering has a gap or the combined sign length would
# exceed max_length frames.
paragraphs_all_dev = []
for prefix, nums in names_per_prefix.items():
    grouped = []
    current = nums[0]
    name = prefix + '-' + str(current)
    length = samples_dev[name]['sign'].shape[0]
    current_paragraph = [name]
    for num in nums[1:]:
        name = prefix + '-' + str(num)
        length += samples_dev[name]['sign'].shape[0]
        if num - current > 1 or length > max_length:
            grouped.append(current_paragraph)
            length = samples_dev[name]['sign'].shape[0]
            current_paragraph = [name]
        else:
            current_paragraph.append(name)
        current = num
    grouped.append(current_paragraph)
    paragraphs_all_dev += grouped
len(paragraphs_all_dev)
# Sanity checks: inspect one raw sample and one built paragraph.
samples_train['train/01April_2010_Thursday_heute-6694']
paragraphs_all_train[1]
```
# Generate data paragraphs
```
def save(object, filename, protocol = 0):
    """Save a gzip-compressed pickle of *object* to *filename*.

    Parameters
    ----------
    object : any picklable value.
    filename : destination path of the gzip file.
    protocol : pickle protocol number (default 0, the most portable).
    """
    # NOTE(review): the parameter name shadows the builtin `object`; kept
    # unchanged for backward compatibility with keyword callers.
    # The context manager guarantees the file is closed even if pickling fails
    # (the original leaked the handle when pickle.dumps raised).
    with gzip.GzipFile(filename, 'wb') as file:
        file.write(pickle.dumps(object, protocol))
```
## Train
```
# Merge each paragraph's sequences into one sample: glosses/texts are joined
# with spaces, sign tensors concatenated along time; keyed by the first name.
paragraphs_train = {}
for names in paragraphs_all_train:
    head = samples_train[names[0]]
    gloss = head['gloss']
    text = head['text']
    sign = head['sign']
    for name in names[1:]:
        part = samples_train[name]
        gloss = gloss + " " + part['gloss']
        text = text + " " + part['text']
        sign = torch.cat([sign, part['sign']])
    paragraphs_train[names[0]] = {'gloss': gloss, 'name': names[0], 'sign': sign,
                                  'signer': head['signer'], 'text': text}
len(paragraphs_train)
save(paragraphs_train, '/content/drive/MyDrive/sign_language/data/phoenix14t.pami0.trainparagraph')
```
## Test
```
# Merge each paragraph's sequences into one sample: glosses/texts are joined
# with spaces, sign tensors concatenated along time; keyed by the first name.
paragraphs_test = {}
for names in paragraphs_all_test:
    head = samples_test[names[0]]
    gloss = head['gloss']
    text = head['text']
    sign = head['sign']
    for name in names[1:]:
        part = samples_test[name]
        gloss = gloss + " " + part['gloss']
        text = text + " " + part['text']
        sign = torch.cat([sign, part['sign']])
    paragraphs_test[names[0]] = {'gloss': gloss, 'name': names[0], 'sign': sign,
                                 'signer': head['signer'], 'text': text}
len(paragraphs_all_test)
len(paragraphs_test)
save(paragraphs_test, '/content/drive/MyDrive/sign_language/data/phoenix14t.pami0.testparagraph')
```
## Dev
```
# Merge each paragraph's sequences into one sample: glosses/texts are joined
# with spaces, sign tensors concatenated along time; keyed by the first name.
paragraphs_dev = {}
for names in paragraphs_all_dev:
    head = samples_dev[names[0]]
    gloss = head['gloss']
    text = head['text']
    sign = head['sign']
    for name in names[1:]:
        part = samples_dev[name]
        gloss = gloss + " " + part['gloss']
        text = text + " " + part['text']
        sign = torch.cat([sign, part['sign']])
    paragraphs_dev[names[0]] = {'gloss': gloss, 'name': names[0], 'sign': sign,
                                'signer': head['signer'], 'text': text}
len(paragraphs_dev)
len(paragraphs_all_dev)
save(paragraphs_dev, '/content/drive/MyDrive/sign_language/data/phoenix14t.pami0.devparagraph')
```
| github_jupyter |
# First steps with SYGMA
Prepared by Christian Ritter
A simple stellar population is a population of stars born out of the same gas cloud.
This notebook explains how the basic chemical evolution parameters lead to the ejecta of stellar matter.
We will use (artificial) yields out of pure h1 yields.
You can find the documentation <a href="http://nugrid.github.io/NuPyCEE/SPHINX/build/html/sygma.html">here</a>.
```
%matplotlib nbagg
import sygma as s
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import quad
import os
import sys
```
## 1. The initial mass function
### a) Basics
The IMF allows to calculate the number of stars $N_{12}$ in the mass interval [m1,m2] with
(I) $N_{12}$ = k_N $\int _{m1}^{m2} m^{-2.35} dm$
Where k_N is the normalization constant. It can be derived from the total amount of mass of the system $M_{tot}$
since the total mass $M_{12}$ in the mass interval above can be estimated with
(II) $M_{12}$ = k_N $\int _{m1}^{m2} m^{-1.35} dm$
With a total mass interval of [1,30] and $M_{tot}=1e11$ the $k_N$ can be derived:
$1e11 = k_N/0.35 * (1^{-0.35} - 30^{-0.35})$
The total number of stars $N_{tot}$ is then:
```
# Normalization constant from the total-mass constraint (eq. II in the text),
# for a Salpeter IMF on [1, 30] Msun with M_tot = 1e11 Msun.
k_N = 1e11 * 0.35 / (1 ** -0.35 - 30 ** -0.35)
# Total number of stars in [1, 30] Msun (eq. I in the text).
N_tot = k_N / 1.35 * (1 ** -1.35 - 30 ** -1.35)
print(N_tot)
```
With a yield ejected of $0.1 Msun$, the total amount ejected is:
```
# Total ejected mass: 0.1 Msun per star times the star count from above,
# shown as a fraction of the 1e11 Msun total population mass.
Yield_tot = 0.1 * N_tot
print(Yield_tot / 1e11)
```
compared to the simulation:
```
s1=s.sygma(iolevel=0,mgal=1e11,dt=1e7,imf_type='salpeter',imf_bdys=[1,30],iniZ=0.02,hardsetZ=0.02,
table='yield_tables/agb_and_massive_stars_h1.txt',sn1a_on=False, sn1a_table='yield_tables/sn1a_h1.txt',
iniabu_table='yield_tables/iniabu/iniab_h1.ppn',pop3_table='yield_tables/popIII_h1.txt')
Yield_tot_sim=s1.history.ism_iso_yield[-1][0]
```
Note: In this setup iniZ=0.02 refers to one of the metallicities available in the yield table file agb_and_massive_stars_h1.txt. Table headers in this table file such as 'Table: (M=1.0,Z=0.02)' indicate a metallicity of Z=0.02 and are selected as input. Since the metallicity of our peculiar yields is actually Z=0 (H only!) we use hardsetZ=0.02 to make the code work. We introduce hardsetZ only for this demonstration notebook and it should not be used.
Compare both results:
```
print (Yield_tot_sim)
print (Yield_tot)
print ('ratio should be 1 : ',Yield_tot_sim/Yield_tot)
```
### b) Selection of different initial mass intervals
##### Select imf_bdys=[5,20]
```
# Analytic check of the total ejecta for the narrower IMF interval [5, 20] Msun:
# re-derive the normalization and star count on the new bounds, then compare
# against the SYGMA run with imf_bdys=[5,20].
k_N_5_20=1e11*0.35/ (5**-0.35 - 20**-0.35)
N_tot=k_N_5_20/1.35 * (5**-1.35 - 20**-1.35)
Yield_tot=0.1*N_tot
s1_5_20=s.sygma(iolevel=0,mgal=1e11,dt=1e9,tend=1.3e10,imf_type='salpeter',
    imf_bdys=[5,20],iniZ=0.02,hardsetZ=0.02,table='yield_tables/agb_and_massive_stars_h1.txt',sn1a_on=False,
    sn1a_table='yield_tables/sn1a_h1.txt', iniabu_table='yield_tables/iniabu/iniab_h1.ppn')
Yield_tot_sim=s1_5_20.history.ism_iso_yield[-1][0]
# Fixed typo in the output message: "Sould" -> "Should".
print ('Should be 1:' ,Yield_tot_sim/Yield_tot)
```
Now you could try to calculate your own total mass ejected for a mass range between 1Msun and 5Msun.
### c) Distinguishing between massive and AGB sources:
Boundaries between AGB and massive for Z=0 (1e-4) at 8 (transitionmass parameter)
```
Yield_agb= ( k_N/1.35 * (1**-1.35 - 8.**-1.35) ) * 0.1
print (Yield_agb)
Yield_massive= ( k_N/1.35 * (8.**-1.35 - 30**-1.35) ) * 0.1
print ('Should be 1:',Yield_agb/s1.history.ism_iso_yield_agb[-1][0])
print ('Should be 1:',Yield_massive/s1.history.ism_iso_yield_massive[-1][0])
```
### d) Time evolution
```
s1.plot_totmasses(fig=1,source='agb')
s1.plot_totmasses(fig=1,source='massive')
s1.plot_totmasses(fig=1,source='all')
```
For plotting, take the lifetimes/masses from the yield grid:
$
Initial Mass [Msun] & Age [yr]
1 & 5.67e9
1.65 & 1.211e9
2 & 6.972e8
3 & 2.471e8
4 & 1.347e8
5 & 8.123e7
6 & 5.642e7
7 & 4.217e7
12 & 1.892e7
15 & 1.381e7
20 & 9.895e6
25 & 7.902e6
$
```
# SYGMA run with fine (1e7 yr) timesteps to follow the time evolution of the ejecta.
s1_evol=s.sygma(iolevel=0,mgal=1e11,dt=1e7,tend=1.3e10,imf_type='salpeter',alphaimf=2.35,
    imf_bdys=[1,30],iniZ=0,hardsetZ=0.0001,table='yield_tables/agb_and_massive_stars_h1.txt',sn1a_on=False,
    sn1a_table='yield_tables/sn1a_h1.txt', iniabu_table='yield_tables/iniabu/iniab_h1.ppn')
k_N_evol=1e11*0.35/ (1**-0.35 - 30**-0.35) #(I)
s1_evol.plot_mass(fig=2,specie='H',label='H, sim',color='k',shape='-',marker='o',markevery=800)
# Grid masses and their stellar lifetimes from the yield-table header above.
m=[1,1.65,2,3,4,5,6,7,12,15,20,25]
ages=[5.67e9,1.211e9,6.972e8,2.471e8,1.347e8,8.123e7,5.642e7,4.217e7,1.892e7,1.381e7,9.895e6,7.902e6]
def yields(m,k_N_evol):
    """Cumulative H yield (0.1 Msun/star) from stars in [m, 30] Msun —
    i.e. all stars that have already died by the lifetime of mass m."""
    return ( k_N_evol/1.35 * (m**-1.35 - 30.**-1.35) ) * 0.1
yields1=[]
for m1 in m:
    yields1.append(yields(m1,k_N_evol))
# Overlay the semi-analytic points on the SYGMA curve for comparison.
plt.plot(ages,yields1,marker='+',linestyle='',markersize=15,label='H, semi')
plt.legend(loc=4)
```
Visible is the agreement of the semi-analytical calculations with the result from SYGMA.
### d) Other IMF types: Chabrier IMF
#### Chabrier:
M<1: $IMF(m) = \frac{0.158}{m} * \exp{ \frac{-(log(m) - log(0.08))^2}{2*0.69^2}}$
else: $IMF(m) = m^{-2.3}$
```
def imf_times_m(mass):
    """Return mass * IMF(mass) for the Chabrier-style IMF used here (unnormalized).

    For mass <= 1 the leading factor of mass cancels the 1/m in the lognormal
    IMF, leaving only the exponential term; above 1 Msun a power law of slope
    -2.3 (times mass) is used.  NOTE(review): the text above quotes log(0.08)
    while the code uses 0.079 — confirm which constant is intended.
    """
    if mass<=1:
        return 0.158 * np.exp( -np.log10(mass/0.079)**2 / (2.*0.69**2))
    else:
        return mass*0.0443*mass**(-2.3)
# Normalize by numerically integrating m*IMF(m) over the full interval [0.01, 30].
k_N_ch= 1e11/ (quad(imf_times_m,0.01,30)[0] )
# Star count and expected yield come from the power-law part only ([1, 30]).
N_tot=k_N_ch/1.3 * 0.0443* (1**-1.3 - 30**-1.3)
Yield_tot=N_tot * 0.1
s1_chabrier=s.sygma(iolevel=0,mgal=1e11,dt=1e9,tend=1.3e10,imf_type='chabrier',imf_bdys=[0.01,30],
    hardsetZ=0.0001,table='yield_tables/agb_and_massive_stars_h1.txt',sn1a_on=False,
    sn1a_table='yield_tables/sn1a_h1.txt',
    iniabu_table='yield_tables/iniabu/iniab_h1.ppn')
Yield_tot_sim=s1_chabrier.history.ism_iso_yield[-1][0]
print (Yield_tot)
print (Yield_tot_sim)
print ('Should be 1 :',Yield_tot/Yield_tot_sim)
s1_chabrier.plot_mass(fig=3,specie='H',label='H',color='k',shape='-',marker='o',markevery=800)
# Grid masses and lifetimes (same table as the Salpeter comparison above).
m=[1,1.65,2,3,4,5,6,7,12,15,20,25]
ages=[5.67e9,1.211e9,6.972e8,2.471e8,1.347e8,8.123e7,5.642e7,4.217e7,1.892e7,1.381e7,9.895e6,7.902e6]
def yields(m,k_N_ch):
    """Cumulative yield from stars in [m, 30] Msun under the Chabrier power-law part."""
    return ( k_N_ch/1.3 * 0.0443*(m**-1.3 - 30.**-1.3) ) * 0.1
yields1=[]
for m1 in m:
    yields1.append(yields(m1,k_N_ch))
plt.plot(ages,yields1,marker='+',linestyle='',markersize=20,label='semi')
plt.legend(loc=4)
```
Simulation should agree with semi-analytical calculations for Chabrier IMF.
## 2. Supernova
The element production due to core-collapse supernova are included in the yields of massive stars.
Supernova Ia have their origin in white dwarfs and their nucleosynthesis products are treated separately (via an extra yield table loaded with the sn1a_table variable).
$N_{Ia} = A_{Ia} * \int_t^{t+\Delta t} f_{WD}(t^{'})\Psi_{Ia}(t^{'})dt^{'} $
```
# SSP with SNIa enabled (Maoz delay-time distribution); SNIa ejecta come from
# the separate sn1a_table and are tracked apart from AGB/massive contributions.
s1_snIa=s.sygma(iolevel=0,mgal=1e11,dt=1e7,tend=1.3e10,sn1a_on=True,sn1a_rate='maoz',
    imf_type='salpeter',imf_bdys=[1,30],hardsetZ=0.0001,table='yield_tables/agb_and_massive_stars_h1.txt',
    sn1a_table='yield_tables/sn1a_h1.txt', iniabu_table='yield_tables/iniabu/iniab_h1.ppn')
# Compare the SNIa and AGB contributions to H-1 on the same figure.
s1_snIa.plot_mass(fig=4,specie='H-1',source='sn1a')
s1_snIa.plot_mass(fig=4,specie='H-1',source='agb')
```
Comparison of total (final) SNIa ejecta of H, H-1 with the total amount of H ejected:
```
print (s1_snIa.history.ism_elem_yield_1a[-1],s1_snIa.history.ism_iso_yield_1a[-1],s1_snIa.history.ism_elem_yield[-1])
```
## 3. Basic parameter
### a) Total mass
Change of the total mass via mgal variable:
```
s1_1e7=s.sygma(iolevel=0,mgal=1e7,dt=1e7,tend=1.3e10,hardsetZ=0.0001,
table='yield_tables/agb_and_massive_stars_h1.txt',sn1a_on=False,
sn1a_table='yield_tables/sn1a_h1.txt', iniabu_table='yield_tables/iniabu/iniab_h1.ppn')
s1_1e9=s.sygma(iolevel=0,mgal=1e9,dt=1e9,tend=1.3e10,hardsetZ=0.0001,
table='yield_tables/agb_and_massive_stars_h1.txt',sn1a_on=False,
sn1a_table='yield_tables/sn1a_h1.txt', iniabu_table='yield_tables/iniabu/iniab_h1.ppn')
```
Final mass fraction of the total ejecta (H only) should not depend on the total mass:
```
print (sum(s1_1e7.history.ism_elem_yield[-1])/1e7,sum(s1_1e9.history.ism_elem_yield[-1])/1e9)
```
### b) Transition mass
The transition from AGB stars to massive stars, including the super-AGB stage, is still not fully understood.
Here we test the difference between the choice of 8Msun and 10Msun.
```
# Two identical SSPs except for the AGB/massive-star boundary (8 vs 10 Msun).
s1_8=s.sygma(iolevel=0,imf_bdys=[1,30],imf_type='salpeter',transitionmass=8,mgal=1e11,dt=1e7,
    tend=1.3e10,iniZ=0.02,hardsetZ=0.0001,
    table='yield_tables/agb_and_massive_stars_h1.txt',sn1a_on=False, sn1a_table='yield_tables/sn1a_h1.txt',
    iniabu_table='yield_tables/iniabu/iniab_h1.ppn')
s1_10=s.sygma(iolevel=0,imf_bdys=[1,30],imf_type='salpeter',transitionmass=10,mgal=1e11,dt=1e7,
    tend=1.3e10,iniZ=0.02,hardsetZ=0.0001,
    table='yield_tables/agb_and_massive_stars_h1.txt',sn1a_on=False, sn1a_table='yield_tables/sn1a_h1.txt',
    iniabu_table='yield_tables/iniabu/iniab_h1.ppn')
Yield_agb_sim_8=s1_8.history.ism_iso_yield_agb[-1][0]
Yield_agb_sim_10=s1_10.history.ism_iso_yield_agb[-1][0]
s1_8.plot_totmasses(fig=5,source='agb',label='AGB, M=8')
s1_8.plot_totmasses(fig=5,source='massive',label='Massive, M=8')
s1_10.plot_totmasses(fig=5,source='agb',label='AGB, M=10')
s1_10.plot_totmasses(fig=5,source='massive',label='Massive, M=10')
# Semi-analytic AGB yields for both transition masses (Salpeter slope 2.35):
# integrate the IMF from 1 Msun up to the chosen transition mass.
alphaimf=2.35
k_N_t=1e11*(alphaimf-2.)/ (1.**-(alphaimf-2.) - 30.**-(alphaimf-2.))
N_agb_8=k_N_t/(alphaimf-1.) * (1.**-(alphaimf-1.) - 8.**-(alphaimf-1.))
Yield_agb_8=0.1*N_agb_8
N_agb_10=k_N_t/(alphaimf-1) * (1.**-(alphaimf-1.) - 10.**-(alphaimf-1.))
Yield_agb_10=0.1*N_agb_10
print ('Should be 1:',Yield_agb_sim_8/Yield_agb_8)
print ('Should be 1:',Yield_agb_sim_10/Yield_agb_10)
```
### c) Time resolution
#### First constant timestep size of 1e7; then with special_timesteps 200 log steps.
```
# Same SSP with constant 1e7-yr timesteps (special_timesteps=-1) versus 200
# logarithmically spaced steps; the final totals should agree regardless of gridding.
s1_dt_const=s.sygma(iolevel=0,mgal=1e11,dt=1e7,tend=1.3e10,special_timesteps=-1,imf_type='salpeter',
    imf_bdys=[1,30],hardsetZ=0.0001,table='yield_tables/agb_and_massive_stars_h1.txt',sn1a_on=False,
    sn1a_table='yield_tables/sn1a_h1.txt', iniabu_table='yield_tables/iniabu/iniab_h1.ppn',
    stellar_param_on=False)
s1_dt_log=s.sygma(iolevel=0,mgal=1e11,dt=1e7,tend=1.3e10,special_timesteps=200,imf_type='salpeter',
    imf_bdys=[1,30],hardsetZ=0.0001,table='yield_tables/agb_and_massive_stars_h1.txt',sn1a_on=False,
    sn1a_table='yield_tables/sn1a_h1.txt', iniabu_table='yield_tables/iniabu/iniab_h1.ppn')
print ('should be 1 ',s1_dt_const.history.ism_iso_yield[-1][0]/s1_dt_log.history.ism_iso_yield[-1][0])
# Visualize the step sizes of both gridding schemes on log-log axes.
plt.figure(6)
plt.plot(s1_dt_const.history.age[1:],s1_dt_const.history.timesteps,label='linear (constant) scaled',marker='+')
plt.plot(s1_dt_log.history.age[1:],s1_dt_log.history.timesteps,label='log scaled',marker='+')
plt.yscale('log')
plt.xscale('log')
plt.xlabel('age/years')
plt.ylabel('timesteps/years')
plt.legend(loc=4)
```
Test the total isotopic and elemental ISM matter at first and last timestep.
## 4. Neutron stars
Neutron stars are born from collapsing massive stars. Neutron stars (NS) in binary systems can produce heavy elements due to their highly neutron-rich matter. The amount of neutron stars is set by different factors, while the ejecta per neutron star is set via an input table (variable nsmerger_table).
To calculate the amount of neutron stars $N_{NS}$ in the interval $[t,t+\Delta t]$ one needs
$N_{NS} = A_{NS} \int_t^{t+\Delta t} \Psi_{NS}(t^{'},Z)dt^{'}$
To constrain the formula above the binary fraction of all massive stars, the fraction of merger of massive-star binary systems and the initial mass interval for potential merger need to be known.
```
s1_ns_merger=s.sygma(mgal=1e11,dt=1e7,iniZ=0.02,hardsetZ=0.02,special_timesteps=-1,
ns_merger_on=True, nsmerger_table = 'yield_tables/r_process_rosswog_2014.txt')
s1_default=s.sygma(mgal=1e11,dt=1e7,iniZ=0.02,hardsetZ=0.02,special_timesteps=-1,ns_merger_on=False,bhns_merger_on=False)
```
The number of NS mergers in time bins of 1e7 yrs decreases over time.
```
plt.figure(7)
plt.plot(s1_ns_merger.history.age[1:],s1_ns_merger.nsm_numbers,marker='x')
plt.xscale('log');plt.yscale('log');
plt.ylabel('Number of NS merger'); plt.xlabel('Age [yr]')
plt.xlim(1e7,13e9)
```
The origin of gold (Au) is not well understood. The most promising candidates are NS mergers. By plotting the total ejecta of our SSP with NS and without NS we see the strong contribution of NS to Au.
```
s1_ns_merger.plot_mass(fig=8,specie='Au',label='Au with NS')
s1_default.plot_mass(fig=8,specie='Au',label='Au without NS',marker='x',color='r',shape='--')
plt.xlim(5e6,13e9)
```
| github_jupyter |
```
import os
import sys
import gym
import eplus_env
import argparse
from numpy import genfromtxt
import numpy as np
import pickle
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
sys.path.append('../')
sys.path.append('../')
from diff_mpc import mpc
from diff_mpc.mpc import QuadCost, LinDx
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
DEVICE
seed = 42
lr = 5e-4
T = 12
step = 900
eta_int = 5
batch_size = 256
save_name = 'rl'
torch.manual_seed(seed)
# Modify here: Outputs from EnergyPlus; Match the variables.cfg file.
obs_name = ["Outdoor Temp.", "Outdoor RH", "Wind Speed", "Wind Direction", "Diff. Solar Rad.", "Direct Solar Rad.", "Htg SP", "Clg SP", "Indoor Temp.", "Indoor Temp. Setpoint", "PPD", "Occupancy Flag", "Coil Power", "HVAC Power", "Sys In Temp.", "Sys In Mdot", "OA Temp.", "OA Mdot", "MA Temp.", "MA Mdot", "Sys Out Temp.", "Sys Out Mdot"]
# Modify here: Change based on the specific control problem
state_name = ["Indoor Temp."]
dist_name = ["Outdoor Temp.", "Outdoor RH", "Wind Speed", "Wind Direction", "Diff. Solar Rad.", "Direct Solar Rad.", "Occupancy Flag"]
# Caveat: The RL agent controls the difference between Supply Air Temp. and Mixed Air Temp., i.e. the amount of heating from the heating coil. But, the E+ expects Supply Air Temp. Setpoint.
ctrl_name = ["Delta T"]
target_name = ["Indoor Temp. Setpoint"]
n_state = len(state_name)
n_ctrl = len(ctrl_name)
n_dist = len(dist_name)
eta = [0.1, eta_int] # eta: Weight for comfort during unoccupied and occupied mode
step = step # step: Timestep; Unit in seconds
T = T # T: Number of timesteps in the planning horizon
tol_eps = 90 # tol_eps: Total number of episodes; Each episode is a natural day
# Read Historical Data
dataset = pd.read_pickle("simulation_results/Sim-TMY3.pkl")
target = dataset[target_name]
disturbance = dataset[dist_name]
# Min-Max Normalization
disturbance = (disturbance-disturbance.min())/(disturbance.max()-disturbance.min())
dataset["Delta T"] = dataset["Sys Out Temp."]-dataset["MA Temp."]
# Train-Test Split
n_samples = len(dataset)
n_train = int(0.7*n_samples)
n_test = n_samples - n_train
train_set = dataset[:n_train]
test_set = dataset[n_train:]
class Learner():
    """Differentiable-MPC imitation learner.

    Fits a linear dynamics model x_{t+1} = F_hat @ [x_t; u_t] + Bd_hat @ d_t
    (d_t = disturbances) so that the MPC policy built on it imitates the
    expert's states and actions from the historical data.  Relies on the
    module-level constants lr, T, step, eta, n_state, n_ctrl.
    """
    def __init__(self, n_state, n_ctrl, n_dist, disturbance, target, u_upper, u_lower):
        self.n_state = n_state
        self.n_ctrl = n_ctrl
        self.n_dist = n_dist
        self.disturbance = disturbance  # DataFrame of normalized disturbances
        self.target = target            # DataFrame of setpoint targets
        # My Initial Guess
        self.F_hat = torch.ones((self.n_state, self.n_state+self.n_ctrl))
        self.F_hat[0, 0] = 0.9
        self.F_hat[0, 1] = 0.3
        self.F_hat = self.F_hat.double().requires_grad_()
        self.Bd_hat = np.random.rand(self.n_state, self.n_dist)
        self.Bd_hat = torch.tensor(self.Bd_hat).requires_grad_()
        # Both model matrices are optimized jointly.
        self.optimizer = optim.Adam([self.F_hat, self.Bd_hat], lr=lr)
        # Box constraints on the control over the whole planning horizon T.
        self.u_lower = u_lower * torch.ones(T, 1, n_ctrl).double()
        self.u_upper = u_upper * torch.ones(T, 1, n_ctrl).double()
    def Cost_function(self, cur_time):
        """Build the quadratic (C) and linear (c) MPC cost terms for the horizon
        starting at cur_time; the comfort weight follows the occupancy flag."""
        diag = torch.zeros(T, self.n_state + self.n_ctrl)
        occupied = self.disturbance["Occupancy Flag"][cur_time:cur_time + pd.Timedelta(seconds = (T-1) * step)]
        occupied = np.array(occupied)
        if len(occupied)<T:
            # Horizon runs past the end of the data: repeat the last value.
            occupied = np.pad(occupied, ((0, T-len(occupied)), ), 'edge')
        eta_w_flag = torch.tensor([eta[int(flag)] for flag in occupied]).unsqueeze(1).double() # Tx1
        diag[:, :n_state] = eta_w_flag
        diag[:, n_state:] = 0.001
        C = []
        for i in range(T):
            C.append(torch.diag(diag[i]))
        C = torch.stack(C).unsqueeze(1) # T x 1 x (m+n) x (m+n)
        x_target = self.target[cur_time : cur_time + pd.Timedelta(seconds = (T-1) * step)] # in pd.Series
        x_target = np.array(x_target)
        if len(x_target)<T:
            x_target = np.pad(x_target, ((0, T-len(x_target)), (0, 0)), 'edge')
        x_target = torch.tensor(x_target)
        c = torch.zeros(T, self.n_state+self.n_ctrl) # T x (m+n)
        c[:, :n_state] = -eta_w_flag*x_target
        c[:, n_state:] = 1 # L1-norm now! Check
        c = c.unsqueeze(1) # T x 1 x (m+n)
        return C, c
    def forward(self, x_init, C, c, cur_time):
        """Plan with differentiable MPC from x_init; return the predicted next
        state and the first planned control (both keep autograd history)."""
        dt = np.array(self.disturbance[cur_time : cur_time + pd.Timedelta(seconds = (T-2) * step)]) # T-1 x n_dist
        if len(dt)<T-1:
            dt = np.pad(dt, ((0, T-1-len(dt)), (0, 0)), 'edge')
        dt = torch.tensor(dt).transpose(0, 1) # n_dist x T-1
        ft = torch.mm(self.Bd_hat, dt).transpose(0, 1) # T-1 x n_state
        ft = ft.unsqueeze(1) # T-1 x 1 x n_state
        x_pred, u_pred, _ = mpc.MPC(n_state=self.n_state,
                                    n_ctrl=self.n_ctrl,
                                    T=T,
                                    u_lower = self.u_lower,
                                    u_upper = self.u_upper,
                                    lqr_iter=20,
                                    verbose=0,
                                    exit_unconverged=False,
                                    )(x_init, QuadCost(C.double(), c.double()),
                                      LinDx(self.F_hat.repeat(T-1, 1, 1, 1), ft))
        return x_pred[1, 0, :], u_pred[0, 0, :] # Dim.
    def predict(self, x_init, action, cur_time):
        """One-step prediction of the next state under the learned linear model."""
        dt = np.array(self.disturbance.loc[cur_time]) # n_dist
        dt = torch.tensor(dt).unsqueeze(1) # n_dist x 1
        ft = torch.mm(self.Bd_hat, dt) # n_state x 1
        tau = torch.stack([x_init, action]) # (n_state + n_ctrl) x 1
        next_state = torch.mm(self.F_hat, tau) + ft # n_state x 1
        return next_state
    def update_parameters(self, x_true, u_true, x_pred, u_pred):
        """One Adam step on the imitation loss; returns detached
        (state_loss, action_loss) scalars for logging."""
        # Every thing in T x Dim.
        state_loss = torch.mean((x_true.double() - x_pred)**2)
        action_loss = torch.mean((u_true.double() - u_pred)**2)
        # Note: eta balances the importance between predicting states and predicting actions
        traj_loss = eta_int*state_loss + action_loss
        print("From state {}, From action {}".format(state_loss, action_loss))
        self.optimizer.zero_grad()
        traj_loss.backward()
        self.optimizer.step()
        # print(self.F_hat)
        # print(self.Bd_hat)
        return state_loss.detach(), action_loss.detach()
def evaluate_performance(x_true, u_true, x_pred, u_pred):
    """Compute mean-squared errors of predicted states and actions.

    Returns a (state_loss, action_loss) tuple of scalar tensors.
    """
    def _mse(truth, pred):
        return torch.mean((truth.double() - pred) ** 2)
    return _mse(x_true, x_pred), _mse(u_true, u_pred)
def main():
    """Offline imitation-learning driver.

    For numOfEpoches epochs: sample random transitions from the training set,
    roll the differentiable-MPC learner forward, update (F_hat, Bd_hat) every
    batch_size samples, then evaluate on the held-out test set and save the
    per-epoch records, weights and losses under results_offline_raw/.
    """
    dir = 'results_offline_raw'  # NOTE: name shadows the builtin dir()
    if not os.path.exists(dir):
        os.mkdir(dir)
    if not os.path.exists('results_offline_raw/weights'):
        os.mkdir('results_offline_raw/weights')
    perf = []
    n_step = 96 # n_step: Number of Steps per Day
    numOfEpoches = 20
    timeStamp = []
    record_name =["Learner nState", "Expert nState", "Learner action", "Expert action"]
    losses = []
    losses_name = ["train_state_loss", "train_action_loss", "val_state_loss", "val_action_loss"]
    # Initialize the learner
    u_upper = 5
    u_lower = 0
    learner = Learner(n_state, n_ctrl, n_dist, disturbance, target, u_upper, u_lower)
    for epoch in range(numOfEpoches):
        x_true = []
        u_true = []
        x_pred = []
        u_pred = []
        train_state_loss = []
        train_action_loss = []
        for i in range(n_train): # By number of entries in the historical data
            # Draw a random (obs, next obs) transition from the training set.
            idx = np.random.randint(n_train)
            cur_time = train_set.index[idx]
            expert_moves = train_set[cur_time:cur_time+pd.Timedelta(seconds = step)]
            if len(expert_moves)<2:
                # No successor row within one timestep (e.g. end of data): skip.
                print(cur_time)
                continue
            expert_state = torch.tensor(expert_moves[state_name].values).reshape(-1, n_state) # 2 x n_state
            expert_action = torch.tensor(expert_moves[ctrl_name].values).reshape(-1, n_ctrl) # 2 x n_ctrl
            x_true.append(expert_state[-1])
            u_true.append(expert_action[0])
            obs = train_set.loc[cur_time]
            x_init = torch.tensor(np.array([obs[name] for name in state_name])).unsqueeze(0) # n_batch x n_state, i.e. 1 x n_state
            C, c = learner.Cost_function(cur_time)
            learner_state, learner_action = learner.forward(x_init, C, c, cur_time)
            # Predict next state based on expert's action
            next_state = learner.predict(x_init.squeeze(0), expert_action[0], cur_time)
            x_pred.append(next_state)
            u_pred.append(learner_action)
            if (i % batch_size == 0) & (i>0):
                # Mini-batch is full: take one gradient step and reset buffers.
                x_true = torch.stack(x_true).reshape(-1, n_state)
                u_true = torch.stack(u_true).reshape(-1, n_ctrl)
                x_pred = torch.stack(x_pred).reshape(-1, n_state)
                u_pred = torch.stack(u_pred).reshape(-1, n_ctrl)
                b_state_loss, b_action_loss = learner.update_parameters(x_true, u_true, x_pred, u_pred)
                train_state_loss.append(b_state_loss)
                train_action_loss.append(b_action_loss)
                x_true = []
                u_true = []
                x_pred = []
                u_pred = []
        # Evaluate performance at the end of each epoch
        x_true = []
        u_true = []
        x_pred = []
        u_pred = []
        timeStamp = []
        for idx in range(n_test):
            cur_time = test_set.index[idx]
            expert_moves = test_set[cur_time:cur_time+pd.Timedelta(seconds = step)]
            if len(expert_moves)<2:
                print(cur_time)
                continue
            expert_state = torch.tensor(expert_moves[state_name].values).reshape(-1, n_state) # 2 x n_state
            expert_action = torch.tensor(expert_moves[ctrl_name].values).reshape(-1, n_ctrl) # 2 x n_ctrl
            x_true.append(expert_state[-1])
            u_true.append(expert_action[0])
            timeStamp.append(cur_time+pd.Timedelta(seconds = step))
            obs = test_set.loc[cur_time]
            x_init = torch.tensor(np.array([obs[name] for name in state_name])).unsqueeze(0) # 1 x n_state
            C, c = learner.Cost_function(cur_time)
            learner_state, learner_action = learner.forward(x_init, C, c, cur_time)
            next_state = learner.predict(x_init.squeeze(0), expert_action[0], cur_time)
            x_pred.append(next_state.detach())
            u_pred.append(learner_action.detach())
        x_true = torch.stack(x_true).reshape(-1, n_state)
        u_true = torch.stack(u_true).reshape(-1, n_ctrl)
        x_pred = torch.stack(x_pred).reshape(-1, n_state)
        u_pred = torch.stack(u_pred).reshape(-1, n_ctrl)
        val_state_loss, val_action_loss = evaluate_performance(x_true, u_true, x_pred, u_pred)
        print("At Epoch {0}, the loss from the state is {1} and from the action is {2}".format(epoch, val_state_loss, val_action_loss))
        losses.append((np.mean(train_state_loss), np.mean(train_action_loss), val_state_loss, val_action_loss))
        # Persist this epoch's predictions/targets for the plotting cells below.
        record = pd.DataFrame(torch.cat((x_pred, x_true, u_pred, u_true), dim = 1).numpy(), index = np.array(timeStamp), columns = record_name)
        record_df = pd.DataFrame(np.array(record), index = np.array(timeStamp), columns = record_name)
        record_df.to_pickle("results_offline_raw/Imit_{}_{}.pkl".format(save_name, epoch))
        # Save weights
        F_hat = learner.F_hat.detach().numpy()
        Bd_hat = learner.Bd_hat.detach().numpy()
        np.save("results_offline_raw/weights/F-{}.npy".format(epoch), F_hat)
        np.save("results_offline_raw/weights/Bd-{}.npy".format(epoch), Bd_hat)
    # Save losses at each epoch
    losses_df = pd.DataFrame(np.array(losses), index = np.arange(numOfEpoches), columns = losses_name)
    losses_df.to_pickle("results_offline_raw/Imit_loss_"+save_name+".pkl")
main()
```
# Plots
```
import matplotlib.pyplot as plt
def plotPerf(rl, baseline, start_time, end_time):
    """Plot Gnu-RL against the EnergyPlus baseline over [start_time, end_time].

    Top panel: indoor temperature (state) with the setpoint dashed.
    Bottom panel: supply-air temperature (action) with the occupancy flag
    (scaled by 30 for visibility) overlaid.
    """
    fig = plt.figure(figsize=(20,6))
    plt.subplot(2,1,1)
    plt.plot(baseline["Indoor Temp."], 'b-', label="EnergyPlus")
    plt.plot(rl["Indoor Temp."],'r-', label="Gnu-RL")
    plt.plot(rl["Indoor Temp. Setpoint"], 'k--')
    plt.legend()
    plt.ylabel("State\nIndoor Temp.", fontsize = 16)
    plt.xlim([start_time, end_time])
    plt.subplot(2,1,2)
    plt.plot(baseline["Sys Out Temp."],'b', label ="EnergyPlus")
    plt.plot(rl["Sys Out Temp."], 'r', label ="Gnu-RL")
    plt.plot(baseline["Occupancy Flag"]*30, 'k--', label= "Occupancy Flag")
    plt.xlim([start_time, end_time])
    plt.ylabel("Action\nSupply Air Temp.", fontsize = 16)
    plt.legend()
# Where main() wrote its outputs.
results_dir = 'results_offline_raw'
filePath = "{}/".format(results_dir)
lam = 5  # weight of the state loss in the combined imitation-loss plot
imit_loss = pd.read_pickle(filePath+"Imit_loss_rl.pkl")
# Training curves: state loss, action loss, and the combined imitation loss.
fig = plt.figure(figsize=(16,4))
plt.subplot(1,3,1)
plt.title("$\mathcal{L}_{state}$")
plt.plot(imit_loss["train_state_loss"], label="train")
plt.plot(imit_loss["val_state_loss"], label = "val")
plt.legend()
plt.xlim((0, 19))
plt.xlabel("Number of Epoches")
plt.subplot(1,3,2)
plt.title("$\mathcal{L}_{action}$")
plt.plot(imit_loss["train_action_loss"], label="train")
plt.plot(imit_loss["val_action_loss"], label = "val")
plt.legend()
plt.xlim((0, 19))
plt.xlabel("Number of Epoches")
plt.subplot(1,3,3)
plt.title("$\mathcal{L}_{Imit}$")
plt.plot(lam* imit_loss["train_state_loss"]+ imit_loss["train_action_loss"], label="train")
plt.plot(lam* imit_loss["val_state_loss"]+imit_loss["val_action_loss"], label = "val")
plt.legend()
plt.xlim((0, 19))
plt.xlabel("Number of Epoches")
# Selected Epoch
epoch = 16
imit_record = pd.read_pickle(filePath+"Imit_rl_{}.pkl".format(epoch))
# Reconstruct occupancy (8:00-18:00) and the corresponding setpoint schedule.
imit_record["Hour"] = pd.Series([time.hour for time in imit_record.index], index = imit_record.index)
imit_record["Occupancy Flag"] = (imit_record["Hour"]>=8) & (imit_record["Hour"]< 18)
imit_record["Indoor Temp. Setpoint"] = pd.Series([22.5 if occupied==1 else 12.8 for occupied in imit_record["Occupancy Flag"]], index = imit_record.index)
# `pd.datetime` was deprecated and removed in pandas 1.0; use the stdlib class.
from datetime import datetime
start_time = datetime(year = 1991, month = 3, day = 6)
end_time = start_time + pd.Timedelta(days = 7)
fig = plt.figure(figsize=(20,5))
plt.subplot(2,1,1)
plt.plot(imit_record["Expert nState"], label = "Expert")
plt.plot(imit_record["Learner nState"], label = "Learner")
plt.plot(imit_record["Indoor Temp. Setpoint"], "k--", label = "Heating Setpoint")
plt.ylabel("Next State", fontsize = 16)
plt.xlim((start_time, end_time))
plt.legend()
plt.subplot(2,1,2)
plt.plot(imit_record["Expert action"], label = "Expert")
plt.plot(imit_record["Learner action"], label = "Learner")
plt.plot(imit_record["Occupancy Flag"]*5, 'k--', label = "Occupancy Flag")
plt.ylabel("Action", fontsize = 16)
plt.xlim((start_time, end_time))
plt.legend()
```
| github_jupyter |
# EECS C106A HW 0: Python Intro
### EECS C106A: Introduction to Robotics, Fall 2019
# Introduction
We will be using the Python programming language for labs in EECS C106a. Some hw assignments will entail matrix calculations where Python will come in handy, but you are welcome to use something like Matlab instead. This hw is meant to be a mini bootcamp on Python for students who have had experience programming in another language already (e.g. Matlab) or need a quick refresher on Python. We will be using a few popular libraries (numpy, scipy) that are very useful. If you have experience with Matlab but not Python, we recommend checking out the [numpy for Matlab users guide](https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html).
# Table of Contents
* [IPython Basics](#IPython-Basics)
* [Python Data Types](#Data-Types)
* [Python Functions](#Functions)
# How to Submit the Notebook
Every place in the notebook that you are required to answer a question will be marked with "TODO". When you are completed with the notebook, `ctrl + p (or cmd + p)` and save as a pdf. Submit the pdf file to Gradescope. Make sure to periodically save your notebook progress by clicking File/Save and Checkpoint.
# IPython Basics
For those who have never used IPython, it is a command shell for interactive Python programming. You can imagine it as allowing you to run blocks of Python code that you would execute in a single python script (python [script_name].py) in the terminal. Benefits of using IPython include easier visualization and debugging. The purpose of this bootcamp in IPython is to give you an idea of basic Python syntax and semantics. For labs you are still expected to write and execute actual Python scripts to work with ROS.
### Executing Cells
ipython notebooks are constituted of cells, that each have text, code, or html scripts. To run a cell, click on the cell and press Shift+Enter or the run button on the toolbar. Run the cell below, you should expect an output of 6:
```
1+2+3
```
### Stopping or Restarting Kernel
To debug, or to terminate a process, you can interrupt the kernel by clicking Kernel/interrupt on the toolbar. If interrupting doesn't work, or you would like to restart all the processes in the notebook, click Kernel/restart. Try interrupting the following block:
```
import time
while True:
print("bug")
time.sleep(1.5)
```
### Import a library
To import a certain library `x`, just type `import x`
Calling function `y` from that library is simply `x.y`
To give the library a different name (e.g. abbreviation), type `import x as z`
```
import numpy as np
np.add(3, 4)
```
# Python
## Data Types
### Integers and Floats
In Python2, integer division returns the floor. In Python3, `/` performs true division, and you must use the double-slash operator `//` to get floor division. You can [check out](https://wiki.python.org/moin/Python2orPython3) the differences between Python2 and Python3, but we will be using Python2 in this class.
```
5 / 4
5.0 / 4
```
### Booleans
Python implements all usual operators for Boolean logic. English, though, is used rather than symbols (no &, ||, etc.). Pay attention to the following syntax, try to guess what the output for each print statement should be before running the cell.
```
print(0 == False)
t = True
print(1 == t)
print(0 != t)
print(t is not 1)
if t is True:
print(0 != 0)
```
### Strings
Strings are supported very well. To concatenate strings we can do the following:
```
hello = 'hello'
robot = 'robot'
print(hello + ' ' + robot + str(1))
```
To find the length of a string use `len(...)`
```
print(len(hello + robot))
```
### Lists
A list is a mutable array of data, meaning we can alter it after instantiating it. To create a list, use the square brackets [] and fill it with elements.
Key operations:
- `'+'` appends lists
- `len(y)` to get length of list y
- `y[0]` to index into 1st element of y **Python indices start from 0
- `y[1:6]` to slice elements 1 through 5 of y
```
y = ["Robots are c"] + [0, 0, 1]
y
len(y)
y[0]
# TODO: slice the first three elements of list 'y' and
# store in a new list, then print the 2nd element of this
# new list
```
### Loops
You can loop over the elements of a list like this:
```
robots = ['baxter', 'sawyer', 'turtlebot']
for robot in robots:
print(robot)
# Prints "baxter", "sawyer", "turtlebot", each on its own line.
```
If you want access to the index of each element within the body of a loop, use the built-in [`enumerate`](https://docs.python.org/2.7/library/functions.html#enumerate) function:
```
robots = ['baxter', 'sawyer', 'turtlebot']
# TODO: Using a for loop and the python built-in enumerate function,
# Print "#1: baxter", "#2: sawyer", "#3: turtlebot",
# each on its own line
```
### Numpy Array
The numpy array is like a list with multidimensional support and more functions (which can all be found [here](https://docs.scipy.org/doc/numpy/reference/index.html)).
NumPy arrays can be manipulated with all sorts of arithmetic operations. You can think of them as more powerful lists. Many of the functions that already work with lists extend to numpy arrays.
To use functions in NumPy, we have to import NumPy to our workspace. by declaring `import numpy`, which we have done previously above in this notebook already. We typically rename `numpy` as `np` for ease of use.
### Making a Numpy Array
```
x = np.array([[1, 2, 3], [4 , 5, 6], [7, 8, 9]])
print(x)
# x is a 3x3 matrix here
```
### Finding the shape of a Numpy Array
```
x.shape # returns the dimensions of the numpy array
```
### Elementwise operations
Arithmetic operations on numpy arrays correspond to elementwise operations.
```
print(x)
print
print(x * 5) # numpy carries operation on all elements!
```
### Matrix multiplication
```
print(np.dot(x, x))
```
### Slicing numpy arrays
Numpy uses pass-by-reference semantics so it creates views into the existing array, without implicit copying. This is particularly helpful with very large arrays because copying can be slow. Although be wary that you may be mutating an array when you don't intend to, so make sure to make a copy in these situations.
```
orig = np.array([0, 1, 2, 3, 4, 5])
print(orig)
```
Slicing an array is just like slicing a list
```
sliced = orig[1:4]
print(sliced)
```
Note, since slicing does not copy the array, mutating `sliced` mutates `orig`. Notice how the 4th element in `orig` changes to 9 as well.
```
sliced[2] = 9
print(orig)
print(sliced)
```
We should use `np.copy()` to actually copy `orig` if we don't want to mutate it.
```
orig = np.array([0, 1, 2, 3, 4, 5])
copy = np.copy(orig)
sliced_copy = copy[1:4]
sliced_copy[2] = 9
print(orig)
print(sliced_copy)
A = np.array([[5, 6, 8], [2, 4, 5], [3, 1, 10]])
B = np.array([[3, 5, 0], [3, 1, 1]])
# TODO: multiply matrix A with matrix B padded with 1's to the
# same dimensions as A; sum this result with the identiy matrix
# (you may find np.concatenate, np.vstack, np.hstack, or np.eye useful).
# Make sure you don't alter the original contents of B. Print the result
```
### Handy Numpy function: arange
We use `arange` to instantiate integer sequences in numpy arrays. It's similar to the built-in range function in Python for lists. However, it returns the result as a numpy array, rather than a simple list.
`arange(0,N)` instantiates an array listing every integer from 0 to N-1.
`arange(0,N,i)` instantiates an array listing every `i` th integer from 0 to N-1 .
```
print(np.arange(-3,4)) # every integer from -3 ... 3
# TODO: print every other integer from 0 ... 6 multiplied by 2
# as a list
```
## Functions
Python functions are defined using the `def` keyword. For example:
```
def hello_robot(robot_name, yell=True):
    """Print a greeting for *robot_name*, shouting in uppercase unless yell=False."""
    if not yell:
        print('hello, %s' % robot_name)
    else:
        print('HELLO, %s!' % robot_name.upper())
hello_robot('Baxter') # Prints "HELLO, BAXTER!"
hello_robot('Sawyer', yell=False) # Prints "hello, Sawyer"
```
## Rodrigues' Formula
The Rodrigues' Formula is a useful formula that allows us to calculate the corresponding rotation matrix R when given an axis $\omega$ and an angle $\theta$ of rotation:
$$R = I_{3} + \frac{\hat{\omega}}{\left\|\omega\right\|}\sin{(\left\|\omega\right\|\theta)} + \frac{\hat{\omega}^{2}}{\left\|\omega\right\|^{2}}(1 - \cos{(\left\|\omega\right\|\theta}))$$
where
$$\hat{\omega} = \hat{
\begin{bmatrix} \omega_{1} \\
\omega_{2} \\
\omega_{3}
\end{bmatrix}}
= \begin{bmatrix}
0 & -\omega_{3} & \omega_{2} \\
\omega_{3} & 0 & -\omega_{1} \\
-\omega_{2} & \omega_{1} & 0
\end{bmatrix}$$
$\hat{\omega}$ is known as the skew-symmetric matrix form of $\omega$. For now, you don't have to worry about the exact details and derivation of this formula since it will be discussed in class and the given formula alone should be enough to complete this problem. A sanity check for your rodrigues implementation is provided for your benefit.
```
# TODO: define a function that converts a rotation vector in 3D
# of shape (3,) to its corresponding skew-symmetric representation
# of shape (3, 3). This function will prove useful in the next question.
def skew_3d(omega):
    """
    Converts a rotation vector in 3D to its corresponding skew-symmetric matrix.

    Args:
    omega - (3,) ndarray: the rotation vector

    Returns:
    omega_hat - (3,3) ndarray: the corresponding skew symmetric matrix

    Raises:
    TypeError: if omega does not have shape (3,)
    """
    if not omega.shape == (3,):
        raise TypeError('omega must be a 3-dim column vector')
    # Standard hat map: skew_3d(w).dot(v) == np.cross(w, v) for any 3-vector v.
    return np.array([[0.0, -omega[2], omega[1]],
                     [omega[2], 0.0, -omega[0]],
                     [-omega[1], omega[0], 0.0]])
# TODO: define a function that when given an axis of rotation omega
# and angle of rotation theta, uses the Rodrigues' Formula to compute
# and return the corresponding 3D rotation matrix R.
# The Function has already been partially defined out for you below.
def rodrigues(omega, theta):
    """
    Computes a 3D rotation matrix given a rotation axis and angle of rotation.

    Implements R = I + (w_hat/||w||) sin(||w||t) + (w_hat^2/||w||^2)(1 - cos(||w||t)).

    Args:
    omega - (3,) ndarray: the axis of rotation
    theta: the angle of rotation

    Returns:
    R - (3,3) ndarray: the resulting rotation matrix

    Raises:
    TypeError: if omega does not have shape (3,)
    """
    if not omega.shape == (3,):
        raise TypeError('omega must be a 3-dim column vector')
    norm = np.linalg.norm(omega)
    if norm == 0:
        # A zero axis means no rotation at all.
        return np.eye(3)
    # Skew-symmetric (hat) form of omega, inlined so this cell stands alone.
    omega_hat = np.array([[0.0, -omega[2], omega[1]],
                          [omega[2], 0.0, -omega[0]],
                          [-omega[1], omega[0], 0.0]])
    return (np.eye(3)
            + (omega_hat / norm) * np.sin(norm * theta)
            + omega_hat.dot(omega_hat) / (norm ** 2) * (1.0 - np.cos(norm * theta)))
arg1 = np.array([2.0, 1, 3])
arg2 = 0.587
ret_desired = np.array([[-0.1325, -0.4234, 0.8962],
[ 0.8765, -0.4723, -0.0935],
[ 0.4629, 0.7731, 0.4337]])
print("sanity check for rodrigues:")
if np.allclose(rodrigues(arg1, arg2), ret_desired, rtol=1e-2):
print("passed")
```
# References
- [1] EE 120 lab1
- [2] EECS 126 Lab01
- [3] EE 16a Python Bootcamp
- [4] CS 231n Python Numpy Tutorial. [Link](http://cs231n.github.io/python-numpy-tutorial/)
| github_jupyter |
# Read and write CSV files with pandas DataFrames
You can load data from a CSV file directly into a pandas DataFrame
```
import pandas as pd
```
## Reading a CSV file into a pandas DataFrame
**read_csv** allows you to read the contents of a csv file into a DataFrame
airports.csv contains the following:
Name,City,Country
Seattle-Tacoma,Seattle,USA
Dulles,Washington,USA
Heathrow,London,United Kingdom
Schiphol,Amsterdam,Netherlands
Changi,Singapore,Singapore
Pearson,Toronto,Canada
Narita,Tokyo,Japan
```
airports_df = pd.read_csv('airports.csv')
airports_df
```
## Handling rows with errors
By default rows with an extra , or other issues cause an error
Note the extra , in the row for Heathrow London in airportsInvalidRows.csv:
Name,City,Country
Seattle-Tacoma,Seattle,USA
Dulles,Washington,USA
Heathrow,London,,United Kingdom
Schiphol,Amsterdam,Netherlands
Changi,Singapore,Singapore
Pearson,Toronto,Canada
Narita,Tokyo,Japan
```
airports_df = pd.read_csv('airportsInvalidRows.csv', error_bad_lines=False)
airports_df
```
Specify **error_bad_lines=False** to skip any rows with errors
```
airports_df = pd.read_csv(
'Data/airportsInvalidRows.csv',
error_bad_lines=False
)
airports_df
```
## Handling files which do not contain column headers
If your file does not have the column headers in the first row by default, the first row of data is treated as headers
airportsNoHeaderRows.csv contains airport data but does not have a row specifying the column headers:
Seattle-Tacoma,Seattle,USA
Dulles,Washington,USA
Heathrow,London,United Kingdom
Schiphol,Amsterdam,Netherlands
Changi,Singapore,Singapore
Pearson,Toronto,Canada
Narita,Tokyo,Japan
```
airports_df = pd.read_csv('airportsNoHeaderRows.csv',
header = None,
names=['Name', 'City', 'Country'])
airports_df
```
Specify **header=None** if you do not have a Header row to avoid having the first row of data treated as a header row
```
airports_df = pd.read_csv(
'airportsNoHeaderRows.csv',
header=None
)
airports_df
```
If you do not have a header row you can use the **names** parameter to specify column names when data is loaded
```
airports_df = pd.read_csv(
'airportsNoHeaderRows.csv',
header=None,
names=("Name", "City", "Country")
)
airports_df
```
## Missing values in Data files
Missing values appear in DataFrames as **NaN**
There is no city listed for Schiphol airport in airportsBlankValues.csv :
Name,City,Country
Seattle-Tacoma,Seattle,USA
Dulles,Washington,USA
Heathrow,London,United Kingdom
Schiphol,,Netherlands
Changi,Singapore,Singapore
Pearson,Toronto,Canada
Narita,Tokyo,Japan
```
airports_df = pd.read_csv('airportsBlankValues.csv')
airports_df
```
## Writing DataFrame contents to a CSV file
**to_csv** will write the contents of a pandas DataFrame to a CSV file
```
airports_df
airports_df.to_csv('Data/MyNewCSVFile.csv')
```
The index column is written to the csv file
Specify **index=False** if you do not want the index column to be included in the csv file
```
airports_df.to_csv(
'Data/MyNewCSVFileNoIndex.csv',
# index=False
)
no_index_file = pd.read_csv("Data/MyNewCSVFileNoIndex.csv")
no_index_file
no_index_file.iloc[2]
no_index_file.iloc[0:2]
no_index_file.iloc[1:2, 1:]
```
| github_jupyter |
## W3 - UNC Example:
Author: Chris Kennedy
```
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, roc_curve, roc_auc_score, auc
from sklearn import tree
from sklearn.tree import export_graphviz
from graphviz import Source
```
### Data Preparation and Loading
```
df = pd.read_excel(r'W3 - UNC Choice Data.xlsx')
y = df['UNC?']
X = df.drop(columns=['Applicant','Choice','UNC?'])
X.head()
```
## Logistic Regression
### Model Build
```
clr = LogisticRegression(solver="lbfgs", penalty='none', random_state=42)
clr.fit(X, y)
```
Output coefficients
```
print("[Intercept] ", X.columns)
print(clr.intercept_, clr.coef_)
```
Prediction and scoring
```
yp = clr.predict(X)
y_score = clr.decision_function(X)
print(y_score)
```
### Performance Metrics
```
tn, fp, fn, tp = confusion_matrix(y, yp).ravel()
print("Confusion Matrix:")
print("%32s" % "Predicted")
print("%17s" % " ", "%8s" % "UNC", "%8s" % "Duke")
print("%8s" % "Actual", "%8s" % "UNC", "%8i" % tp, "%8i" % fn)
print("%8s" % " ", "%8s" % "Duke", "%8i" % fp, "%8i" % tn)
print("")
print("Accuracy: %6.1f%%" % ((tp+tn)/(tp+tn+fp+fn)*100))
print("Sensitivity: %6.1f%%" % (tp/(tp+fn)*100))
print("Specificity: %6.1f%%" % (tn/(tn+fp)*100))
fpr, tpr, thresholds = roc_curve(y, y_score)
roc_auc = auc(fpr, tpr)
```
### Plots
```
import matplotlib.pyplot as plt
plt.figure()
plt.plot([0, 1],[1,1], color='black', linestyle='--', lw=1)
plt.plot(fpr,tpr, color='darkorange', label='ROC curve (area = %0.2f)' % roc_auc, lw=2)
plt.plot([0, 1],[0,1], color='navy', linestyle='--', lw=2)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic (ROC)')
plt.legend(loc="lower right")
plt.show()
```
## Decision Trees
```
dt = tree.DecisionTreeClassifier(random_state = 42, criterion="gini", splitter="best", max_depth=2)
dt = dt.fit(X, y)
```
#### Describe the tree
```
export_graphviz(dt, out_file="outfile.dot", feature_names=X.columns)
Source.from_file("outfile.dot")
```
For each box [a, b] corresponds to counts for [false, true] or [0, 1]
#### Prediction and Scoring
```
ypt = dt.predict(X)
ypt_raw = dt.predict_proba(X)
```
#### Performance Metrics
```
tnt, fpt, fnt, tpt = confusion_matrix(y, ypt).ravel()
print("Confusion Matrix:")
print("%32s" % "Predicted")
print("%17s" % " ", "%8s" % "UNC", "%8s" % "Duke")
print("%8s" % "Actual", "%8s" % "UNC", "%8i" % tpt, "%8i" % fnt)
print("%8s" % " ", "%8s" % "Duke", "%8i" % fpt, "%8i" % tnt)
print("")
print("Accuracy: %6.1f%%" % ((tpt+tnt)/(tpt+tnt+fpt+fnt)*100))
print("Sensitivity: %6.1f%%" % (tpt/(tpt+fnt)*100))
print("Specificity: %6.1f%%" % (tnt/(tnt+fpt)*100))
fprt, tprt, thresholdst = roc_curve(y, ypt_raw[:,1])
roc_auct = auc(fprt, tprt)
```
#### Plots
```
plt.figure()
plt.plot([0, 1],[1,1], color='black', linestyle='--', lw=1)
plt.plot(fprt,tprt, color='darkorange', label='ROC curve (area = %0.2f)' % roc_auct, lw=2)
plt.plot([0, 1],[0,1], color='navy', linestyle='--', lw=2)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic (ROC)')
plt.legend(loc="lower right")
plt.show()
```
### End of Notebook!
| github_jupyter |
# 1 - Введение в Pandas
**Pandas** это очень мощная библиотека с множеством полезных функций, ею можно пользоваться много лет, так и не использовав весь ее потенциал. Цель воркшопа — ознакомить вас с основами, это:
- Чтение и запись данных.
- Пониманимание разных типов данных в Pandas.
- Работа с текстовыми данными и timeseries.
- Выбор данных.
- Группировка.
## Dataset
Мы будем использовать датасет [Amazon Product](http://jmcauley.ucsd.edu/data/amazon/) с отзывами о продуктах на Амазоне, его собрал [Julian McAuley](http://cseweb.ucsd.edu/~jmcauley/). <br/>
Датасет выглядит таким образом:
reviewerID - ID of the reviewer, e.g. A2SUAM1J3GNN3B
asin - ID of the product, e.g. 0000013714
reviewerName - name of the reviewer
helpful - helpfulness rating of the review, e.g. 2/3
reviewText - text of the review
overall - rating of the product
summary - summary of the review
unixReviewTime - time of the review (unix time)
reviewTime - time of the review (raw)
## Импорт pandas
```
import pandas as pd
print("Pandas version: {}".format(pd.__version__))
# опции отображения
pd.options.display.max_rows = 6
pd.options.display.max_columns = 6
pd.options.display.width = 100
```
## Чтение и запись данных
```
import gzip
# датасет на 47 мегабайт, мы возьмем только 10
review_lines = gzip.open('data/reviews/reviews_Clothing_Shoes_and_Jewelry_5.json.gz', 'rt').readlines(10*1024*1024)
len(review_lines)
```
Теперь мы получили `list` с текстовыми строками, нам нужно преобразовать их в `dict` и передать в `DataFrame`. <br/>
Здесь `json.loads` - преобразует текстовые строки в `dict`.
```
import json
df = pd.DataFrame(list(map(json.loads, review_lines)))
```
Теперь мы можем взглянуть, что собой представляют наши данные. DataFrame позволяет их вывести в такой наглядной таблице.
```
df
```
Данные вначале нашего `df`
```
df.head()
```
Данные вконце df
```
df.tail()
df.describe()
```
### Упражнение: Сохраните и загрузите датасет в разные форматы (CSV, JSON...)
```
# ваш код здесь, используйте tab для того, чтобы увидеть список доступных для вызова функций
```
http://pandas.pydata.org/pandas-docs/stable/io.html
Pandas I/O API это набор высокоуровневых функций, которые можно вызвать как pd.read_csv().
to_csv
to_excel
to_hdf
to_sql
to_json
...
read_csv
read_excel
read_hdf
read_sql
read_json
...
## Типы данных Pandas и их преобразование
**`df.info`** позволяет нам получить сводную информацию про **`df`**: сколько в нем строк, названия и типы столбцов, сколько он занимает памяти...
<br/>
Мы видим, что столбец **`unixReviewTime`** (время, когда ревью было оставленно) имеет тип **`int64`**, давайте преобразуем его в **`datetime64`** для более удобной работы с временными данными.
```
df.info()
df['unixReviewTime'] = pd.to_datetime(df['unixReviewTime'], unit='s')
pd.to_datetime?
```
Теперь мы видим, что столбец был преобразован в нужный нам тип данных.
```
df.info()
```
## Работа с текстовыми данными.
http://pandas.pydata.org/pandas-docs/stable/text.html
### .str accessor
.str accessor - позволяет вызывать методы для работы с текстовыми строками для всего столбца сразу. <br/><br/>
Это очень мощная штука, так как она позволяет легко создавать новые `features`, которые могут как-то описывать ваши данные.<br/>
```
df.summary
```
Таким простым вызовом мы получаем новый столбец с длиной строки описания товара, который может быть хорошим индикатором для вашей модели.
```
df.summary.str.len()
```
### Упражнение: Попробуйте использовать разные строковые методы: lower(), upper(), strip()...
http://pandas.pydata.org/pandas-docs/stable/text.html#method-summary
```
# Your code here
```
**Нижний регистр**.
```
df.summary.str.lower()
```
**Верхний регистр**.
```
df.summary.str.upper()
```
Поиск строк, которые содержат определенную подстроку или **regex**
```
pattern = 'durable'
df.summary.str.contains(pattern)
```
## Работа с timeseries
### .dt accessor
Также как и .str, .dt позволяет вызывать методы для работы с временными данными для всего столбца.
**День недели**
```
df.unixReviewTime.dt.dayofweek
```
**Неделя в году**
```
df.unixReviewTime.dt.weekofyear
```
### Упражнение: Получите столбец с кварталом года, в котором был оставлен отзыв. (qua...)
```
# ваш код
```
## Выбор данных
**DataFrame** имеет очень мощный функционал для поиска необходимых данных. <br/>
Таким простым вызовом мы можем выбрать индексы всех строк отзывов, у которых оценка ниже 5.
```
df.overall < 5
```
Передав их как ключ, мы получим сами строки.
```
df[df.overall < 5]
```
Полученные индексы мы можем передать в метод `loc`, вторым аргументом он принимает список столбцов, которые мы хотим видеть.
```
df.loc[df.overall < 5, ['overall', 'reviewText']]
```
Также мы можем передать более сложные условия для выборки, например, здесь мы выбираем отзывы с оценкой 5, содержащие слово `awesome` и отзывы с оценкой 1, содержащие слово `terrible`.
```
df.loc[((df.overall == 5) & (df.reviewText.str.contains('awesome'))) | ((df.overall == 1) & (df.reviewText.str.contains('terrible'))), ['overall', 'reviewText']]
```
### Упражнение: Выберите строки с оценкой 5, которые были написанны во вторник и содержат слово `love` в summary.
```
# Your code here
```
## `isin`
`isin` работает по такому принцип: мы ему передаем набор значений, а он выбирает строки, которые им соответствуют.
```
# возвращает столбец, содержащий количество уникальных значений asin
products = df.asin.value_counts()
products
products[0:3].index
```
Выбираем строки, которые содержат топ 3 популярные товары.
```
df[df.asin.isin(products[0:3].index)]
# df[df.asin.isin(['B0000C321X', 'B0001ZNZJM', 'B00012O12A'])] - даст тот же результат
```
### Упражнение: Выберите отзывы, которые были оставлены в дни, когда было оставлено больше всего отзывов :D
```
# ваш код
days = df.unixReviewTime.value_counts()
days
df[df.unixReviewTime.isin(days[0:1].index)]
```
## Группировка
http://pandas.pydata.org/pandas-docs/stable/groupby.html
**groupby** работает по такому принципу:
- Таблица делится на группы
- К каждой группе применяется определенная функция
- Результаты объединяются
```
df.groupby( grouper ).agg('mean')
```
```
df.groupby('asin')['reviewText'].agg('count').sort_values()
```
### Упражнение: Вычислите среднюю оценку по каждому уникальному продукту.
```
# ваш код
```
### Упражение: Вычислите среднюю оценку, которую оставил каждый уникальный пользователь.
```
# ваш код
```
pd.Grouper
```
df.groupby([pd.Grouper(key='unixReviewTime',freq='D')])['reviewerID'].count()
df.groupby([pd.Grouper(key='unixReviewTime',freq='M')])['reviewerID'].count()
```
## Plotting
```
%matplotlib inline
import seaborn as sns; sns.set()
df.groupby([pd.Grouper(key='unixReviewTime',freq='A')])['reviewerID'].count().plot(figsize=(6,6))
```
### EXERCISE: Plot the number of reviews timeseries by month, year
```
# Your code here
```
### EXERCISE: Draw two plots to compare average review rating per day of the week between 2013 and 2014
```
# Your code here
import matplotlib.pyplot as plt
by_weekday = df.groupby([df.unixReviewTime.dt.year,
df.unixReviewTime.dt.dayofweek]).mean()
by_weekday.columns.name = None # remove label for plot
fig, ax = plt.subplots(1, 2, figsize=(16, 6), sharey=True)
by_weekday.loc[2013].plot(title='Average Reviews Rating by Day of Week (2013)', ax=ax[0]);
by_weekday.loc[2014].plot(title='Average Reviews Rating by Day of Week (2014)', ax=ax[1]);
for axi in ax:
axi.set_xticklabels(['Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun'])
```
### EXERCISE: Draw two plots to compare number of reviews per day of the month between 2012 and 2013
```
import matplotlib.pyplot as plt
by_month = df.groupby([df.unixReviewTime.dt.year,
df.unixReviewTime.dt.day])['reviewerID'].count()
fig, ax = plt.subplots(1, 2, figsize=(16, 6), sharey=True)
by_month.loc[2012].plot(title='Average Reviews by Month (2012)', ax=ax[0]);
by_month.loc[2013].plot(title='Average Reviews by Month (2013)', ax=ax[1]);
```
## Summary
### Learning objectives:
- Reading and writing data
```
pd.DataFrame()
pd.read_json()
pd.read_csv()
df.to_csv()
df.to_json()
...
```
- Understading and formatting pandas data types
```
df.info()
pd.to_datetime()
df.astype('category')
...
```
- Working with text data
```
.str accessor
.str.len()
...
```
- Working with timeseries data
```
.dt accessor
.str.dayofweek
...
```
- Indexing
```
df.loc[]
df.iloc[]
.isin()
```
- Grouping
```
df.groupby(grouper).agg('mean')
...
```
| github_jupyter |
```
%matplotlib inline
```
迁移学习教程
==========================
**作者**: `Sasank Chilamkurthy <https://chsasank.github.io>`_
这个教程将教你如何使用迁移学习训练你的网络.
你可以在 `cs231n 笔记 <http://cs231n.github.io/transfer-learning/>`__ 中
阅读更多有关迁移学习的信息.
引用自该笔记,
事实上, 很少有人从头(随机初始化)开始训练一个卷积网络, 因为拥有一个足够大的数据库是比较少见的.
替代的是, 通常会从一个大的数据集(例如 ImageNet, 包含120万的图片和1000个分类)预训练一个卷积网络,
然后将这个卷积网络作为初始化的网络, 或者是感兴趣任务的固定的特征提取器.
如下是两种主要的迁移学习的使用场景:
- **微调卷积网络**: 取代随机初始化网络, 我们从一个预训练的网络初始化,
比如从 imagenet 1000 数据集预训练的网络. 其余的训练就像往常一样.
- **卷积网络作为固定的特征提取器**: 在这里, 我们固定网络中的所有权重, 最后的全连接层除外.
最后的全连接层被新的随机权重替换, 并且, 只有这一层是被训练的.
```
# License: BSD
# Author: Sasank Chilamkurthy
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
```
加载数据
---------
我们用 torchvision 和 torch.utils.data 包加载数据.
我们今天要解决的问题是, 训练一个可以区分 **ants** (蚂蚁) 和 **bees** (蜜蜂) 的模型.
用于训练的 ants 和 bees 图片各120张. 每一类用于验证的图片各75张.
通常, 如果从头开始训练, 这个非常小的数据集不足以进行泛化.
但是, 因为我们使用迁移学习, 应该可以取得很好的泛化效果.
这个数据集是一个非常小的 imagenet 子集
.. Note ::
从`这里 <https://download.pytorch.org/tutorial/hymenoptera_data.zip>`_ 下载数据, 然后解压到当前目录.
```
# 训练要做数据增强和数据标准化
# 验证只做数据标准化
data_transforms = {
'train': transforms.Compose([
transforms.RandomSizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Scale(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
data_dir = 'hymenoptera_data'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
shuffle=True, num_workers=4)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
use_gpu = torch.cuda.is_available()
```
显示一些图片
^^^^^^^^^^^^^^^^^^^^^^
让我们显示一些训练中的图片, 以便了解数据增强.
```
def imshow(inp, title=None):
    """Display a (C, H, W) tensor as an image, undoing ImageNet normalization."""
    img = inp.numpy().transpose((1, 2, 0))
    channel_mean = np.array([0.485, 0.456, 0.406])
    channel_std = np.array([0.229, 0.224, 0.225])
    img = np.clip(img * channel_std + channel_mean, 0, 1)
    plt.imshow(img)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # brief pause so the figure gets a chance to redraw
# 获得一批训练数据
inputs, classes = next(iter(dataloaders['train']))
# 从这批数据生成一个方格
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
```
训练模型
------------------
现在, 让我们写一个通用的函数来训练模型. 这里, 我们将会举例说明:
- 调度学习率
- 保存最佳的学习模型
下面函数中, ``scheduler`` 参数是 ``torch.optim.lr_scheduler`` 中的 LR scheduler 对象.
```
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train ``model`` and return it loaded with the best-validation-accuracy weights.

    Args:
        model: the network to train.
        criterion: loss function.
        optimizer: torch optimizer updating ``model``'s parameters.
        scheduler: ``torch.optim.lr_scheduler`` object, stepped once per epoch.
        num_epochs: number of training epochs.

    Returns:
        The model with the weights from the epoch that scored the highest
        validation accuracy.

    NOTE(review): relies on the notebook globals ``dataloaders``,
    ``dataset_sizes`` and ``use_gpu`` defined in earlier cells, and uses the
    pre-0.4 PyTorch API (``Variable``, ``loss.data[0]``).
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and a validation phase.
        for phase in ['train', 'val']:
            if phase == 'train':
                scheduler.step()
                model.train(True)  # set model to training mode
            else:
                model.train(False)  # set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over the data.
            for data in dataloaders[phase]:
                # Unpack the batch.
                inputs, labels = data
                # Wrap inputs in Variable (pre-0.4 PyTorch API).
                if use_gpu:
                    inputs = Variable(inputs.cuda())
                    labels = Variable(labels.cuda())
                else:
                    inputs, labels = Variable(inputs), Variable(labels)
                # Zero the parameter gradients.
                optimizer.zero_grad()
                # Forward pass.
                outputs = model(inputs)
                _, preds = torch.max(outputs.data, 1)
                loss = criterion(outputs, labels)
                # Backward pass + optimize, only in the training phase.
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
                # Statistics.
                running_loss += loss.data[0] * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # Deep-copy the weights whenever validation accuracy improves.
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # Load the best weights before returning the model.
    model.load_state_dict(best_model_wts)
    return model
```
显示模型的预测结果
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
写一个处理少量图片, 并显示预测结果的通用函数
```
def visualize_model(model, num_images=6):
    """Plot ``num_images`` validation images annotated with the model's predictions.

    Args:
        model: trained network used to predict labels.
        num_images: how many images to show, arranged in a 2-column grid.

    NOTE(review): relies on the notebook globals ``dataloaders``, ``use_gpu``,
    ``class_names`` and the ``imshow`` helper defined in earlier cells.
    """
    images_so_far = 0
    fig = plt.figure()
    for i, data in enumerate(dataloaders['val']):
        inputs, labels = data
        # Wrap the batch in Variable (pre-0.4 PyTorch API).
        if use_gpu:
            inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
        else:
            inputs, labels = Variable(inputs), Variable(labels)
        outputs = model(inputs)
        _, preds = torch.max(outputs.data, 1)
        for j in range(inputs.size()[0]):
            images_so_far += 1
            ax = plt.subplot(num_images//2, 2, images_so_far)
            ax.axis('off')
            ax.set_title('predicted: {}'.format(class_names[preds[j]]))
            imshow(inputs.cpu().data[j])
            # Stop as soon as the requested number of images is shown.
            if images_so_far == num_images:
                return
```
调整卷积网络
----------------------
加载一个预训练的网络, 并重置最后一个全连接层.
```
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 2)
if use_gpu:
model_ft = model_ft.cuda()
criterion = nn.CrossEntropyLoss()
# 如你所见, 所有参数都将被优化
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# 每 7 个迭代, 让 LR 衰减 0.1 因素
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
```
训练和评估
^^^^^^^^^^^^^^^^^^
如果使用 CPU, 这将花费 15-25 分钟. 但使用 GPU 的话, 需要的时间将少于1分钟.
```
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=25)
visualize_model(model_ft)
```
卷积网络作为固定的特征提取器
----------------------------------
这里, 我们固定网络中除最后一层外的所有权重. 为了固定这些参数, 我们需要设置 ``requires_grad == False`` ,
然后在 ``backward()`` 中就不会计算梯度.
你可以在 `这里 <http://pytorch.org/docs/notes/autograd.html#excluding-subgraphs-from-backward>`__ 阅读更多相关信息.
```
model_conv = torchvision.models.resnet18(pretrained=True)
for param in model_conv.parameters():
param.requires_grad = False
# 新构建的 module 的参数中, 默认设置了 requires_grad=True.
num_ftrs = model_conv.fc.in_features
model_conv.fc = nn.Linear(num_ftrs, 2)
if use_gpu:
model_conv = model_conv.cuda()
criterion = nn.CrossEntropyLoss()
# 如你所见, 和我们前面提出的一样, 只有最后一层的参数被优化.
optimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.001, momentum=0.9)
# 每 7 个迭代, 让 LR 衰减 0.1 因素
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)
```
训练和评估
^^^^^^^^^^^^^^^^^^
在使用 CPU 的情况下, 和前一个方案相比, 这将花费的时间是它的一半.
期望中, 网络的大部分是不需要计算梯度的. 前向传递依然要计算梯度.
```
model_conv = train_model(model_conv, criterion, optimizer_conv,
exp_lr_scheduler, num_epochs=25)
visualize_model(model_conv)
plt.ioff()
plt.show()
```
| github_jupyter |
```
#import
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
#import lightgbm as lgb
from scipy import stats
import matplotlib.pyplot as plt
from numpy import mean
from numpy import std
import math
import scipy
from scipy.stats import stats
from numpy.random import choice
import random
from google.colab import drive
drive.mount('/content/gdrive')
map_object_cols={}
map_object_cols['ProductCD']='object'
map_object_cols['DeviceInfo']='object'
map_object_cols['DeviceType']='object'
map_object_cols['addr1']='object'
map_object_cols['addr2']='object'
map_object_cols['device_name']='object'
map_object_cols['had_id']='object'
map_object_cols['P_emaildomain']='object'
map_object_cols['P_emaildomain_bin']='object'
map_object_cols['P_emaildomain_suffix']='object'
map_object_cols['R_emaildomain']='object'
map_object_cols['R_emaildomain_bin']='object'
map_object_cols['R_emaildomain_suffix']='object'
map_object_cols['_Month']='object'
map_object_cols['_Weekdays']='object'
map_object_cols['_Days']='object'
map_object_cols['_Hours']='object'
for i in range(12,39):
col_name= 'id_'+str(i)
map_object_cols[col_name]='object'
for i in range(1,10):
col_name= 'M'+str(i)
map_object_cols[col_name]='object'
for i in range(1,7):
col_name= 'card'+str(i)
map_object_cols[col_name]='object'
train=pd.read_csv('gdrive/My Drive/Colab Notebooks/Fraud/Data/fraud_data_filteredColumnsWithHigherThank85PercentMissing.csv',compression='gzip',dtype=map_object_cols)
train.shape
count_integers=len(train.select_dtypes(include=['integer']).columns)
print('There are ' + str(count_integers) +' columns of type integer')
count_float=len(train.select_dtypes(include=['floating']).columns)
print('There are ' + str(count_float) +' columns of type float')
count_category=len(train.select_dtypes(include=['object']).columns)
print('There are ' + str(count_category) +' columns of type object')
print('Total ' + str(count_category+count_float+count_integers))
train
train.isnull().sum().sum()
train3=train.copy()
int_cols=set(train.select_dtypes(include=['integer']).columns)
int_cols
int_cols.remove('isFraud')
float_cols = train.select_dtypes(include=['floating']).columns
len(float_cols)
float_to_int_cols=set()
for col in float_cols:
col_df = train[col].dropna()
col_should_be_int = col_df.map(float.is_integer).all()
if col_should_be_int:
float_to_int_cols.add(col)
len(float_to_int_cols)
int_cols=int_cols.union(float_to_int_cols)
len(int_cols)
df_integers= train[list(int_cols)]
df_integers.shape
df_integers
len(df_integers)
train3=train.copy()
# One-column demo: draw a random value for V36 according to the empirical
# distribution of its observed (non-null) values. value_counts() excludes
# NaN and, with normalize=True, gives probabilities that sum to 1 and are
# aligned index-for-index with the candidate values.
v36_counts = df_integers['V36'].value_counts(normalize=True)
values = v36_counts.index.tolist()
values__probs = v36_counts.values
randomnumber = choice(values, p=values__probs)
randomnumber
# Fill every integer-like column the same way. BUG FIXES vs the previous
# version: (1) it assigned into an undefined frame `train4` (NameError);
# (2) `values` was built without dropna() while the probabilities excluded
# NaN, so the value and probability vectors had different lengths;
# (3) the probabilities were divided by the full column length (NaNs
# included) and did not sum to 1 -- numpy.random.choice rejects both;
# (4) unique() returns values in order of appearance while groupby() sorts
# keys, so values and probabilities were not aligned with each other.
for col in int_cols:
    col_counts = df_integers[col].value_counts(normalize=True)
    randomnumber = choice(col_counts.index.tolist(), p=col_counts.values)
    train3[col] = train3[col].fillna(randomnumber)
# TODO: check if this code is ok
#for col in int_cols:
# df_integers_dropna=df_integers[col].dropna()
# values=df_integers_dropna.unique().tolist()
#print('values size: '+ str(len(values)))
# values__probs = df_integers.groupby(col).size().div(len(df_integers_dropna))
#print('values prob size: '+ str(len(values__probs)))
#randomnumber = choice(values, p=values__probs)
# df_nan=train.loc[(train[col].isnull())]
# indexes=set(df_nan.index)
#values_list=random.sample(choice(values, p=values__probs),k=len(indexes))
# values_to_fill={}
# n=0;
# for i in indexes:
# values_to_fill[i]=choice(values, p=values__probs)
# n+=1
# train3[col] = train3[col].fillna(value=values_to_fill)
#train3[col] = train3[col].fillna(randomnumber)
train3.head()
# Float columns proper: floating dtype minus the integer-like ones above.
float_cols=set(train.select_dtypes(include=['floating']).columns) - int_cols
len(float_cols)
# Fill NaNs in each float column with draws from a kernel density estimate
# fitted to the observed values, so the imputed values follow the
# empirical distribution of the column.
for col in float_cols:
    vals=pd.Series(train[col][train[col].notnull()])
    density = scipy.stats.gaussian_kde(vals)
    nan_indexes = train.index[train[col].isnull()]
    # resample() returns an array of shape (1, n); take its single row.
    sample = density.resample(len(nan_indexes))[0]
    # BUG FIX: the previous code called random.sample() on a *set* of the
    # draws -- a TypeError on Python 3.11+ -- and the set could shrink below
    # the number of NaN rows when the KDE produced duplicate values, which
    # raised ValueError. The draws are already random; use them directly,
    # one per missing row.
    values_to_fill = dict(zip(nan_indexes, sample))
    train3[col] = train3[col].fillna(value=values_to_fill)
train3.isnull().sum().sum()
train3
train3.to_csv('gdrive/My Drive/Colab Notebooks/Fraud/Data/fraud_data_remove_nulls_fill_random_distribution.csv',index=False,compression='gzip')
```
| github_jupyter |
# Week 8 worksheet: Numerical solution of hyperbolic PDEs
$$\newcommand{\vect}[1]{\bm #1}
\newcommand{\grad}{\nabla}
\newcommand{\pderiv}[2]{\frac{\partial #1}{\partial #2}}
\newcommand{\pdderiv}[2]{\frac{\partial^2 #1}{\partial #2^2}}
\newcommand{\deriv}[2]{\frac{\mathrm{d} #1}{\mathrm{d} #2}}
\newcommand{\Deriv}[3]{\frac{\mathrm{d}^#3 #1}{\mathrm{d} #2^#3}}$$
In this laboratory we will look at hyperbolic problems, solving the one dimensional, linear advection equation
$$\pderiv{u}{t}+a\pderiv{u}{x}=0,$$
where $u$ is a scalar quantity being advected at a uniform speed, $a$, over a periodic domain, $x\in[0,1]$.
The initial conditions are
$$u(x,0) = \begin{cases}
\sin^4 2\pi x & 0 \le x \le \frac12 \\
1 & \frac46 \le x \le \frac56 \\
0 & \text{otherwise}
\end{cases}$$
If the advection speed is an integer then every second the wave form will be back at the starting position. This means we can make a direct comparison between the initial conditions and the results from the solver.
You are going to develop a simple one dimensional solver using Python to solve the problem. In this question you are going to write the key parts of the driver routines needed to create a grid, calculate the time step, set up the initial conditions, solve the equations using the first order upwind scheme and plot the results. The code starts with the following imports and definitions:
```
import numpy as np
from matplotlib import pyplot as plt
N = 500 # Number of grid points being used
AdvectionSpeed = 1.0 # Advection speed (celerity) of the wave
CourantNumber = 0.95 # Courant number (CFL); the explicit schemes below require nu <= 1
%run scripts/create_widgets.py W07
```
*How it works: You will see cells located below each exercise, each containing a command starting with `%run scripts/show_solutions.py`. You don't need to run those yourself; the command above runs a script which automatically runs these specific cells for you. The commands in each of these cells each create the button for the corresponding exercise. The Python code to achieve this is contained in `scripts/show_solutions.py`, and relies on [IPython widgets](https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20Basics.html) --- feel free to take a look at the code if you are curious.*
```
%%javascript
MathJax.Hub.Config({
TeX: { equationNumbers: { autoNumber: "AMS" } }
});
```
## Exercise 1 - Grid generation
### Part a)
Write a function _GridGen(N)_ which creates an array $x$ which contains a uniformly spaced grid of $N$ grid points over the interval $[0,1]$. The function must return both the mesh spacing $\Delta x$ and the $N$ ordinates $x_i$.
You should fill in the following template.
```
def GridGen(N):
    '''Generate a uniform 1D grid over [0.0, 1.0) with N grid points.

    The domain is periodic (x = 1.0 is identified with x = 0.0), so the
    right-hand end point is excluded from the grid.

    Returns
    -------
    dx : float
        The uniform mesh spacing, 1/N.
    x_ord : numpy.ndarray
        The N ordinates x_i = i * dx for i = 0 .. N-1.
    '''
    dx = 1.0 / N
    x_ord = np.linspace(0.0, 1.0, N, endpoint=False)
    return dx, x_ord
%run scripts/show_solutions.py W08_ex1_parta
```
Write a function _InitialConditions(c)_ which returns an array $u$ so that
$$u = \begin{cases}
\sin^4 2\pi x & 0 \le x \le \frac12 \\
1 & \frac46 \le x \le \frac56 \\
0 & \text{otherwise}
\end{cases}$$
You should fill in the following template.
def InitialConditions(x_ord):
    ''' set up the initial conditions so that u(x)=sin^4(2 Pi x) for x
    in [0.0,0.5] and u(x) = 1 for x in [4/6,5/6] and u(x)=0
    otherwise.  These are the initial conditions for the classical
    test problem.'''
    u = np.zeros_like(x_ord)
    # Smooth wave on the left half of the domain.
    wave = (x_ord >= 0.0) & (x_ord <= 0.5)
    u[wave] = np.sin(2.0 * np.pi * x_ord[wave]) ** 4
    # Top-hat (discontinuous) profile on [4/6, 5/6].
    top_hat = (x_ord >= 4.0 / 6.0) & (x_ord <= 5.0 / 6.0)
    u[top_hat] = 1.0
    return u
```
%run scripts/show_solutions.py W08_ex1_partb
```
Write a function _TimeStep($\Delta x$)_ which computes the stable time step, $$\Delta t = \nu \frac{\Delta x}{|a|}$$ where $\nu$ is the Courant number, and $a$ is the advection speed. Remember that the advection speed, $a$, is given by the Python constant <code>AdvectionSpeed</code> and the Courant number, $\nu$ by the Python constant <code>CourantNumber</code>.
```
%run scripts/show_solutions.py W08_ex1_partc
```
### Part b)
The following Python function is the driver for the 1D solver. It is passed the parameters <code>t\_stop</code> the time at which the solution is required, <code>u</code> a vector containing the initial conditions, <code>dx</code> the mesh spacing $\Delta x$ and <code>method</code>, a function with parameters <code>(u,dx,dt)</code> which performs **one** time step with an advection solver and which returns both the updated solution $u^{n+1}$ and the name of the method used.
```
def OneDSolver(t_stop, u, dx, method):
    '''One dimensional advection solver.  This calculates the time
    step and then performs the appropriate number of time steps to
    reach t_stop using the method passed in.

    Parameters
    ----------
    t_stop : float - time at which the solution is required
    u : array - initial conditions
    dx : float - mesh spacing
    method : callable(u, dx, dt) -> (u, name) - performs ONE time step
        and returns the updated solution plus the scheme's name
    '''
    # calculate the time step
    dt = TimeStep(dx)
    MaxStep = int(t_stop / dt) + 1 # maximum number of time steps
    print('1D solver - performing {} time steps, dt = {:.4e}'
          .format(MaxStep, dt))
    time = 0.0
    for i in range(MaxStep):
        # shrink the final step so the run lands exactly on t_stop
        dt = min(dt, t_stop - time)
        # There is no need to implement periodic boundary conditions
        # here as these will be dealt with within the method using the
        # np.roll() function which rotates the contents of an array.
        # call the solver
        u, scheme = method(u,dx,dt)
        # update time
        time = time + dt
        if (time >= t_stop):
            print('Done. {} seconds simulated using the {} scheme.'
                  .format(time, scheme))
            break
    return u, scheme
```
Implement the first order upwind scheme
$$u^{n+1}_i = u^n_i - \frac{a\Delta t}{\Delta x}\left(u^n_i - u^n_{i-1}\right)$$
You should use the following template and should make use of the <code>np.roll()</code> function to obtain the values of $u^n_{i-1}$. Remember that the advection speed, $a$, is given by the Python constant <code>AdvectionSpeed</code>.
```
def FOU(u,dx,dt):
    ''' this is the classical First Order Upwind scheme, this is first
    order in time and space and has a stability condition that
    nu<=1.

    The periodic boundary conditions are handled using np.roll() which
    shifts an array left or right rolling round the end values.'''
    # Courant number for this step; the scheme as written upwinds to the
    # left and therefore assumes AdvectionSpeed > 0 (a = 1 in this worksheet).
    nu = AdvectionSpeed * dt / dx
    # u_i^{n+1} = u_i^n - nu * (u_i^n - u_{i-1}^n); np.roll(u, 1) gives u_{i-1}.
    u = u - nu * (u - np.roll(u, 1))
    return u, 'FOU'
%run scripts/show_solutions.py W08_ex1_partd
```
The final step is to write the <code>main</code> program. This needs to set up the grid and the initial conditions, save a copy of the initial conditions, call the solver and plot the results. It can be coded as follows:
```
'''Main Program'''
# set up the grid and the initial conditions
dx,x = GridGen(N)
u = InitialConditions(x)
u0 = u.copy()  # keep the initial profile for comparison in the plot
# call the solver
tOut=5  # with an integer advection speed the exact solution equals u0 at integer times
u, scheme = OneDSolver(tOut, u, dx, FOU)
# Plot the results
fig = plt.figure()
plt.plot(x,u0,'-r')  # initial conditions in red
if (N>120):
    plt.plot(x,u,'-b')   # fine grid: draw the solution as a line
else:
    plt.plot(x,u,'ob')   # coarse grid: draw individual markers
plt.title('Linear advection {} scheme, N={}, t={}'.format(scheme,N,tOut))
plt.show()
```
Use the solver to explore what happens if the solution is evolved for 0.5, 1.0, 10.0 and 50.0 seconds on a grid with $N=500$ points. Briefly comment on what you observe.
```
%run scripts/show_solutions.py W08_ex1_parte
```
## Exercise 2
Implement the Lax-Friedrichs scheme,
$$u^{n+1}_i = \frac{u^n_{i+1}+u^n_{i-1}}{2} - \frac{a\Delta t}{2\Delta x}\left(u^n_{i+1}-u^n_{i-1}\right).$$
You are reminded that the <code>np.roll()</code> function should be used to obtain the values of $u^n_{i-1}$ and $u^n_{i+1}$. Remember that the advection speed, $a$, is given by the Python constant <code>AdvectionSpeed</code>. You should complete the following Python template
```
def LaxFriedrichs(u,dx,dt):
    '''Classical Lax-Friedrichs scheme:
    u_i^{n+1} = (u_{i+1}^n + u_{i-1}^n)/2 - (nu/2) (u_{i+1}^n - u_{i-1}^n).
    Periodic boundaries are handled with np.roll().'''
    nu = AdvectionSpeed * dt / dx
    u_plus = np.roll(u, -1)   # u_{i+1}
    u_minus = np.roll(u, 1)   # u_{i-1}
    u = 0.5 * (u_plus + u_minus) - 0.5 * nu * (u_plus - u_minus)
    return u, 'Lax-Friedrichs'
```
Calculate the solution at 1.0, 10.0 and 50.0 seconds on a grid with $N=500$ points. Briefly comment on what you observe.
```
%run scripts/show_solutions.py W08_ex2_parta
```
Calculate the solution at 1.0 on a grid with $N=50$ points. What do you observe?
```
%run scripts/show_solutions.py W08_ex2_partb
```
## Exercise 3
Implement the Lax-Wendroff scheme,
$$u^{n+1}_i = u^n_i-\frac{a\Delta t}{2\Delta x}\left(u^n_{i+1}-u^n_{i-1}\right)
+ \frac{a^2\Delta t^2}{2\Delta x^2}\left(u^n_{i+1}-2u^n_i+u^n_{i-1}\right)$$
You are reminded that the <code>np.roll()</code> function should be used to obtain the values of $u^n_{i-1}$ and $u^n_{i+1}$. Remember that the advection speed, $a$, is given by the Python constant <code>AdvectionSpeed</code>. You should complete the following Python template
```
def LaxWendroff(u,dx,dt):
    '''Classical, 2nd order, Lax-Wendroff scheme:
    u_i^{n+1} = u_i^n - (nu/2)(u_{i+1}^n - u_{i-1}^n)
                      + (nu^2/2)(u_{i+1}^n - 2 u_i^n + u_{i-1}^n).
    Periodic boundaries are handled with np.roll().'''
    nu = AdvectionSpeed * dt / dx
    u_plus = np.roll(u, -1)   # u_{i+1}
    u_minus = np.roll(u, 1)   # u_{i-1}
    u = u - 0.5 * nu * (u_plus - u_minus) + 0.5 * nu**2 * (u_plus - 2.0*u + u_minus)
    return u, 'Lax-Wendroff'
```
Calculate the solution at 1.0, 10.0 and 50.0 seconds on a grid with $N=500$ points. Briefly comment on what you observe.
```
%run scripts/show_solutions.py W08_ex3_parta
```
## Exercise 4
Implement the MacCormack predictor-corrector scheme,
$$\begin{align*}
u^p_i &= u ^n_i - \frac{a\Delta t}{\Delta x}\left(u^n_i - u^n_{i-1}\right) \\
u^{n+1}_i &= \frac{u^p_i+u^n_i}{2} - \frac{a\Delta t}{2\Delta x}\left(u^p_{i+1} - u^p_i\right)
\end{align*}
$$
You are reminded that the <code>np.roll()</code> function should be used to obtain the values of $u^n_{i-1}$ and $u^n_{i+1}$. Remember that the advection speed, $a$, is given by the Python constant <code>AdvectionSpeed</code>. You should complete the following Python template
```
def MacCormack(u,dx,dt):
    '''2nd order MacCormack predictor corrector method which is
    equivalent to the classical Lax-Wendroff scheme.
    Periodic boundaries are handled with np.roll().'''
    nu = AdvectionSpeed * dt / dx
    # Predictor: a first-order upwind (backward-difference) step.
    up = u - nu * (u - np.roll(u, 1))
    # Corrector: average the predictor with a forward-difference step on it.
    u = 0.5 * (up + u) - 0.5 * nu * (np.roll(up, -1) - up)
    return u, 'MacCormack'
```
Check that the solution at 10.0 seconds on a grid with $N=500$ points is equivalent to the solution obtained with the Lax-Wendroff Scheme.
```
%run scripts/show_solutions.py W08_ex4_parta
### Part b)
Using either the MacCormack or the Lax-Wendroff scheme see what happens when $a=-1$,
compare the solution at $t=10.0$ seconds with the case where $a=1$
%run scripts/show_solutions.py W08_ex4_partb
```
| github_jupyter |
# MPC Tensor
### With Duet
In this tutorial we will show you how to perform secure multiparty computation with data you cannot see. There are three notebooks:
* [POC-MPCTensor-Duet-Alice](POC-MPCTensor-Duet-Alice.ipynb). Alice will store data in her Duet server, where it will be available to the data scientist.
* [POC-MPCTensor-Duet-Bob](POC-MPCTensor-Duet-Bob.ipynb). Bob will store data in his Duet server, where it will be available to the data scientist.
* [POC-MPCTensor-Duet-DS](POC-MPCTensor-Duet-DS.ipynb) (this notebook). The data scientist will be responsible for performing any secure computation.
## 0 - Libraries
Import main libraries
```
import syft as sy
```
## 1 - Connect to the Data owners
### 1.1 - Connect to Alice
```
duet_alice = sy.join_duet("f6c13409f9336891b26d6be3c23a09c5")  # connect to Alice's Duet server via its session id
```
## 1.2 - Connect to Bob
```
duet_bob = sy.join_duet("637630d01b9e2217e1bd007933c7d3ed")  # connect to Bob's Duet server via its session id
```
## 2 - Secure MultiParty Computation
### 2.1 - Create a session
The session is used to send some config information only once between the parties.
This information can be:
* the ring size in which we do the computation
* the precision and base
* the approximation methods we are using for different functions (TODO)
```
from sympc.session import Session
from sympc.tensor import MPCTensor
# One session shared by both data owners; per the notes above it carries the
# MPC configuration (ring size, precision and base) sent once between parties.
session = Session(parties=[duet_alice, duet_bob])
print(session)
```
### 2.2 - Send the session to all the parties
```
Session.setup_mpc(session)  # distribute the session configuration to every party
```
## 2.3 - Private Operations
Now we are ready to perform private operations. First of all let's check which datasets are stored in the Data Owner Duet server
```
# List the datasets each data owner has made available in their Duet store.
duet_alice.store.pandas
duet_bob.store.pandas
```
### 2.3.1 - Sum, Subtract and Multiply operations
Let's first do some basic operations. Notice that the difference here is that these operations are performed via SMPC, so the raw data is not leaving the data owner server!
```
x_secret = duet_alice.store[0] # secret data to test sum, subtract and multiply
y_secret = duet_bob.store[0]
x = MPCTensor(secret=x_secret, shape=(1,), session=session) # MPC Tensor from x_secret
y = MPCTensor(secret=y_secret, shape=(1,), session=session) # MPC Tensor from y_secret
# BUG FIX: the addition line previously printed the label "X - Y".
print("[Priv + Priv] X + Y =", (x + y).reconstruct())
print("[Priv - Priv] X - Y =", (x - y).reconstruct())
print("[Priv * Priv] X * Y =", (x * y).reconstruct())
```
### 2.3.2 - Matrix multiplication
Bit more complex operations such as matrix multiplications are valid as well.
Remember that linear algebra is the basis of Deep Learning!
```
x_secret = duet_alice.store[1] # secret data to test matmul
y_secret = duet_bob.store[1]
x = MPCTensor(secret=x_secret, shape=(2,2), session=session) # MPC Tensor from x_secret
y = MPCTensor(secret=y_secret, shape=(2,2), session=session) # MPC Tensor from y_secret
# NOTE(review): y is created but unused -- the demo multiplies x by itself;
# possibly `x @ y` was intended. reconstruct() only reveals the final result.
print("X @ X =\n", (x @ x).reconstruct())
```
## Congratulations!!! - Time to Join the Community!
Congratulations on completing this notebook tutorial! If you enjoyed this and would like to join the movement toward privacy preserving, decentralized ownership of AI and the AI supply chain (data), you can do so in the following ways!
### Star PySyft and SyMPC on GitHub
The easiest way to help our community is just by starring the GitHub repos! This helps raise awareness of the cool tools we're building.
* [Star PySyft](https://github.com/OpenMined/PySyft)
* [Star SyMPC](https://github.com/OpenMined/SyMPC/)
### Join our Slack!
The best way to keep up to date on the latest advancements is to join our community! You can do so by filling out the form at http://slack.openmined.org
### Join a Code Project!
The best way to contribute to our community is to become a code contributor! At any time you can go to PySyft GitHub Issues page and filter for "Projects". This will show you all the top level Tickets giving an overview of what projects you can join! If you don't want to join a project, but you would like to do a bit of coding, you can also look for more "one off" mini-projects by searching for GitHub issues marked "good first issue".
* [PySyft Good First Issue Tickets](https://github.com/OpenMined/PySyft/labels/Good%20first%20issue%20%3Amortar_board%3A)
* [SyMPC Good First Issue Tickets](https://github.com/OpenMined/SyMPC/labels/good%20first%20issue)
### Donate
If you don't have time to contribute to our codebase, but would still like to lend support, you can also become a Backer on our Open Collective. All donations go toward our web hosting and other community expenses such as hackathons and meetups!
* [OpenMined's Open Collective Page](https://opencollective.com/openmined)
| github_jupyter |
### Set GPU clocks
```
!sudo nvidia-persistenced
!sudo nvidia-smi -ac 877,1530
from core import *
from torch_backend import *
```
### Network definition
```
def conv_bn(c_in, c_out, bn_weight_init=1.0, **kw):
    """3x3 conv -> batch norm -> ReLU building block, as an ordered layer dict."""
    block = {}
    block['conv'] = nn.Conv2d(c_in, c_out, kernel_size=3, stride=1, padding=1, bias=False)
    block['bn'] = batch_norm(c_out, bn_weight_init=bn_weight_init, **kw)
    block['relu'] = nn.ReLU(True)
    return block
def residual(c, **kw):
    """Residual branch: two conv_bn blocks whose output is added back to the input."""
    branch = {}
    branch['in'] = Identity()
    branch['res1'] = conv_bn(c, c, **kw)
    branch['res2'] = conv_bn(c, c, **kw)
    branch['add'] = (Add(), [rel_path('in'), rel_path('res2', 'relu')])
    return branch
def basic_net(channels, weight, pool, **kw):
    """Backbone: a prep conv block, three pooled conv layers, then global
    max-pool, flatten and a weight-scaled linear classifier head."""
    network = {'prep': conv_bn(3, channels['prep'], **kw)}
    for prev, name in [('prep', 'layer1'), ('layer1', 'layer2'), ('layer2', 'layer3')]:
        network[name] = dict(conv_bn(channels[prev], channels[name], **kw), pool=pool)
    network['pool'] = nn.MaxPool2d(4)
    network['flatten'] = Flatten()
    network['linear'] = nn.Linear(channels['layer3'], 10, bias=False)
    network['classifier'] = Mul(weight)
    return network
def net(channels=None, weight=0.125, pool=nn.MaxPool2d(2), extra_layers=(), res_layers=('layer1', 'layer3'), **kw):
    """Assemble the full network: basic_net() plus residual branches on
    res_layers and an extra conv_bn block on extra_layers."""
    channels = channels or {'prep': 64, 'layer1': 128, 'layer2': 256, 'layer3': 512}
    model = basic_net(channels, weight, pool, **kw)
    for name in res_layers:
        model[name]['residual'] = residual(channels[name], **kw)
    for name in extra_layers:
        model[name]['extra'] = conv_bn(channels[name], channels[name], **kw)
    return model
# Named loss/metric heads evaluated on the classifier output against targets.
losses = {
    # NOTE(review): `reduce=False` is deprecated in modern PyTorch in favour
    # of `reduction='none'` -- confirm against the torch version in use.
    'loss': (nn.CrossEntropyLoss(reduce=False), [('classifier',), ('target',)]),
    'correct': (Correct(), [('classifier',), ('target',)]),
}
```
### Download and preprocess data
```
DATA_DIR = './data'
dataset = cifar10(root=DATA_DIR)  # CIFAR-10, stored under DATA_DIR
t = Timer()
print('Preprocessing training data')
# pad(..., 4): 4-pixel border so the random 32x32 crops used later have room;
# then normalise and transpose (presumably HWC -> CHW -- confirm torch_backend).
train_set = list(zip(transpose(normalise(pad(dataset['train']['data'], 4))), dataset['train']['labels']))
print(f'Finished in {t():.2} seconds')
print('Preprocessing test data')
# Test data is normalised/transposed only -- no padding, no augmentation.
test_set = list(zip(transpose(normalise(dataset['test']['data'])), dataset['test']['labels']))
print(f'Finished in {t():.2} seconds')
```
### Network visualisation
```
display(DotGraph(net()))  # render the network defined above as a graph
```
### Training
NB: on the first run, the first epoch will be slower as initialisation and Cudnn benchmarking take place.
```
epochs=24
# Triangular learning-rate schedule: ramp 0 -> 0.4 over 5 epochs, then decay to 0.
lr_schedule = PiecewiseLinear([0, 5, epochs], [0, 0.4, 0])
batch_size = 512
transforms = [Crop(32, 32), FlipLR(), Cutout(8, 8)]  # train-time augmentation
N_runs = 5
train_batches = Batches(Transform(train_set, transforms), batch_size, shuffle=True, set_random_choices=True, drop_last=True)
test_batches = Batches(test_set, batch_size, shuffle=False, drop_last=False)
# Per-step LR: the schedule is indexed in epochs and divided by batch size
# (the weight decay below is multiplied by batch size to compensate).
lr = lambda step: lr_schedule(step/len(train_batches))/batch_size
summaries = []
for i in range(N_runs):
    print(f'Starting Run {i} at {localtime()}')
    # half(): train in fp16 on the GPU
    model = Network(union(net(), losses)).to(device).half()
    opt = SGD(trainable_params(model), lr=lr, momentum=0.9, weight_decay=5e-4*batch_size, nesterov=True)
    summaries.append(train(model, opt, train_batches, test_batches, epochs, loggers=(TableLogger(),)))
# Aggregate final test accuracy across the independent runs.
test_accs = np.array([s['test acc'] for s in summaries])
print(f'mean test accuracy: {np.mean(test_accs):.4f}')
print(f'median test accuracy: {np.median(test_accs):.4f}')
print(f'{np.sum(test_accs>=0.94)}/{N_runs} >= 94%')
```
| github_jupyter |
```
# libraries
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Activation, Dropout, Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
import matplotlib.pyplot as plt
from glob import glob
# Dataset locations; flow_from_directory below expects one sub-directory per class.
train_path = r"******your training path ******"
test_path = r"*******your test path *******"
# Show one sample image as a sanity check of the path.
img = load_img(train_path + "Apple Braeburn/0_100.jpg")
plt.imshow(img)
plt.axis("off")
plt.show()
x = img_to_array(img)  # reference array; x.shape becomes the model's input shape
print(x.shape)
# Number of classes = number of class sub-directories under train_path.
className = glob(train_path + '/*' )
numberOfClass = len(className)
print("NumberOfClass: ",numberOfClass)
# CNN Model: three conv/ReLU/max-pool stages followed by a dense classifier
# with dropout and a softmax output over the fruit classes.
model = Sequential([
    Conv2D(32, (3, 3), input_shape=x.shape),
    Activation("relu"),
    MaxPooling2D(),
    Conv2D(32, (3, 3)),
    Activation("relu"),
    MaxPooling2D(),
    Conv2D(64, (3, 3)),
    Activation("relu"),
    MaxPooling2D(),
    Flatten(),
    Dense(1024),
    Activation("relu"),
    Dropout(0.5),
    Dense(numberOfClass),   # one output unit per class
    Activation("softmax"),
])
model.compile(loss = "categorical_crossentropy",
              optimizer = "rmsprop",
              metrics = ["accuracy"])
batch_size = 32
# Data Generation - Train - Test
# Training images are augmented (shear / horizontal flip / zoom) and rescaled
# to [0, 1]; test images are only rescaled.
train_datagen = ImageDataGenerator(rescale= 1./255,
                   shear_range = 0.3,
                   horizontal_flip=True,
                   zoom_range = 0.3)
test_datagen = ImageDataGenerator(rescale= 1./255)
# Read batches straight from the class sub-directories, resized to the
# sample image's height/width and one-hot ("categorical") labelled.
train_generator = train_datagen.flow_from_directory(
        train_path,
        target_size=x.shape[:2],
        batch_size = batch_size,
        color_mode= "rgb",
        class_mode= "categorical")
test_generator = test_datagen.flow_from_directory(
        test_path,
        target_size=x.shape[:2],
        batch_size = batch_size,
        color_mode= "rgb",
        class_mode= "categorical")
# NOTE(review): fit_generator is deprecated (removed in TF 2.x Keras, where
# Model.fit accepts generators directly) -- confirm the installed version.
hist = model.fit_generator(
        generator = train_generator,
        steps_per_epoch = 1600 // batch_size,
        epochs=100,
        validation_data = test_generator,
        validation_steps = 800 // batch_size)
# model save
model.save("fruit.h5")  # architecture + weights, reloaded in the prediction section below
# model evaluation
print(hist.history.keys())
# Loss curves per epoch: training vs validation.
plt.plot(hist.history["loss"], label = "Train Loss")
plt.plot(hist.history["val_loss"], label = "Validation Loss")
plt.legend()
plt.show()
plt.figure()
# Accuracy curves per epoch: training vs validation.
plt.plot(hist.history["accuracy"], label = "Train acc")
plt.plot(hist.history["val_accuracy"], label = "Validation acc")
plt.legend()
plt.show()
# example of converting an image with the Keras API
import numpy as np  # BUG FIX: np is used below but is not imported in any visible cell of this notebook
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import array_to_img
from keras.models import load_model
model = load_model("fruit.h5")
# load the image
img = load_img('10_100.jpg')# , grayscale=False, target_size = (100,100)) if we want, add different parameters
print("Orignal image type:" ,type(img))
img.show()
# convert to numpy array
img_array = img_to_array(img)
print("original is converted to " + str(type(img_array)))
print("type:",img_array.dtype)
print("shape:",img_array.shape)
# convert back to image
img_pil = array_to_img(img_array)
print("converting NumPy array to :",type(img_pil))
# add the batch dimension expected by predict(): (1, height, width, channels)
img_array = np.reshape(img_array,[1,100,100,3])
cl = model.predict(img_array)
# reuse the prediction already computed instead of calling predict() twice
classes = np.argmax(cl, axis = -1)
print(cl)
print(classes)
```
| github_jupyter |
TSG090 - Yarn nodemanager logs
==============================
Steps
-----
### Parameters
```
import re
tail_lines = 2000  # number of trailing log lines to fetch per file
pod = None # All   # None => inspect every pod in the namespace
container = "hadoop"  # only look inside this container
log_files = [ "/var/log/supervisor/log/nodemanager*.log" ]
expressions_to_analyze = [
    # Skip the first 23 characters (presumably the log timestamp -- confirm
    # against the nodemanager log format) and match WARN/ERROR entries.
    re.compile(".{23} WARN "),
    re.compile(".{23} ERROR ")
]
```
### Instantiate Kubernetes client
```
# Instantiate the Python Kubernetes client into 'api' variable
import os
try:
    from kubernetes import client, config
    from kubernetes.stream import stream
    # These env vars are present when running inside a pod: use the
    # in-cluster service-account config there, otherwise the local kubeconfig.
    if "KUBERNETES_SERVICE_PORT" in os.environ and "KUBERNETES_SERVICE_HOST" in os.environ:
        config.load_incluster_config()
    else:
        config.load_kube_config()
    api = client.CoreV1Api()
    print('Kubernetes client instantiated')
except ImportError:
    # kubernetes module missing: point the user at the install SOP and re-raise.
    from IPython.display import Markdown
    display(Markdown(f'SUGGEST: Use [SOP059 - Install Kubernetes Python module](../install/sop059-install-kubernetes-module.ipynb) to resolve this issue.'))
    raise
```
### Get the namespace for the big data cluster
Get the namespace of the big data cluster from the Kuberenetes API.
NOTE: If there is more than one big data cluster in the target
Kubernetes cluster, then set \[0\] to the correct value for the big data
cluster.
```
# Place Kubernetes namespace name for BDC into 'namespace' variable
try:
    # Big data cluster namespaces carry the MSSQL_CLUSTER label; take the first.
    namespace = api.list_namespace(label_selector='MSSQL_CLUSTER').items[0].metadata.name
except IndexError:
    # No labelled namespace found: surface the relevant troubleshooting guides.
    from IPython.display import Markdown
    display(Markdown(f'SUGGEST: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
    display(Markdown(f'SUGGEST: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
    display(Markdown(f'SUGGEST: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
    raise
print('The kubernetes namespace for your big data cluster is: ' + namespace)
```
### Get tail for log
```
# Display the last 'tail_lines' of files in 'log_files' list
pods = api.list_namespaced_pod(namespace)
entries_for_analysis = []
# Walk every (pod, container) pair that matches the filters above, exec
# `tail` inside the container for each log-file pattern, and collect any
# WARN/ERROR lines for the analysis step below.
for p in pods.items:
    if pod is None or p.metadata.name == pod:
        for c in p.spec.containers:
            if container is None or c.name == container:
                for log_file in log_files:
                    print (f"- LOGS: '{log_file}' for CONTAINER: '{c.name}' in POD: '{p.metadata.name}'")
                    try:
                        # Remote exec of tail; stdout and stderr are captured as one string.
                        output = stream(api.connect_get_namespaced_pod_exec, p.metadata.name, namespace, command=['/bin/sh', '-c', f'tail -n {tail_lines} {log_file}'], container=c.name, stderr=True, stdout=True)
                    except Exception:
                        print (f"FAILED to get LOGS for CONTAINER: {c.name} in POD: {p.metadata.name}")
                    else:
                        for line in output.split('\n'):
                            for expression in expressions_to_analyze:
                                if expression.match(line):
                                    entries_for_analysis.append(line)
                                    print(line)
print("")
print(f"{len(entries_for_analysis)} log entries found for further analysis.")
```
### Analyze log entries and suggest relevant Troubleshooting Guides
```
# Analyze log entries and suggest further relevant troubleshooting guides
from IPython.display import Markdown
# (match-pattern, notebook-link, title) triples; empty by default for this TSG.
tsgs = []
suggestions = 0
for entry in entries_for_analysis:
    print (entry)
    for tsg in tsgs:
        if entry.find(tsg[0]) != -1:
            # BUG FIX: the link target must be interpolated -- the previous
            # f-string emitted the literal text "tsg[1]" as the markdown link.
            display(Markdown(f'SUGGEST: Use [{tsg[2]}]({tsg[1]}) to resolve this issue.'))
            suggestions = suggestions + 1
print("")
print(f"{len(entries_for_analysis)} log entries analyzed. {suggestions} further troubleshooting suggestions made inline.")
print('Notebook execution complete.')
```
| github_jupyter |
1. Import pandas; import TfidfVectorizer, split_train_test, confusion_matrix, classification_report, LogisticRegression from scikit-learn; and import yellowbrick
```
import warnings
from collections import Counter
import pandas as pd
import numpy as np
import sklearn
from sklearn.feature_extraction.text import TfidfVectorizer,CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report
# NOTE(review): sklearn.preprocessing.Imputer was removed in scikit-learn 0.22
# (replaced by sklearn.impute.SimpleImputer); it is unused below and this
# import will fail on modern scikit-learn -- confirm/remove.
from sklearn.preprocessing import Imputer
from yellowbrick.classifier import ClassPredictionError, ClassificationReport, ConfusionMatrix
import matplotlib
matplotlib.rcParams['figure.figsize'] = [8, 6]  # default figure size in inches
warnings.filterwarnings('ignore')  # suppress all warning output in the notebook
```
2. Read customer_complaint.csv into dataframe using pandas
```
# Load the consumer complaints dataset from the working directory.
customer_complaint_df = pd.read_csv('Consumer_Complaints.csv')
customer_complaint_df.head()
```
3. Create new dataframe with Issue and Product only from question
```
# Keep only the text feature (Issue) and the label (Product).
df = customer_complaint_df[['Issue','Product']]
df.sample(5)
```
4. Check for missing values in the dataframe from question 3
```
df.isnull().sum()  # per-column missing-value counts
```
5. Impute all missing values in Issue and Product from question 4 dataframe
```
def create_mode(val):
    """Return the most frequent element of *val* (ties go to the first seen)."""
    counts = Counter(val)
    mode_value, _count = counts.most_common(1)[0]
    return mode_value
# Impute missing Issue strings with the modal (most frequent) Issue.
df['Issue'] = df['Issue'].fillna(create_mode(df['Issue']))
df.isnull().sum()
```
6. Visualize the distribution of Product variable in the dataframe from question 5
```
# Class balance of the target: counts per Product, plus a bar chart.
print(df['Product'].value_counts())
df.groupby(['Product'])['Issue'].count().plot(kind='bar')
```
7. Encode Issue from question 5 into features using TfidfVectorizer, limit the features to 100 and write it into new dataframe
```
# TF-IDF encode the Issue text: at most 100 word features, English stop
# words removed, terms must appear in at least 5 documents.
tfidf_vectorizer = TfidfVectorizer(analyzer='word',
                       stop_words='english',
                       lowercase=True,
                       max_features=100,
                       min_df=5,
                       norm='l2',
                       sublinear_tf=True)
issue_vectorizer = tfidf_vectorizer.fit_transform(df['Issue'])
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in
# favour of get_feature_names_out() -- confirm the installed version.
feature_name = tfidf_vectorizer.get_feature_names()
# Dense DataFrame with one column per TF-IDF vocabulary term.
vectorised_df = pd.DataFrame(issue_vectorizer.toarray(), columns=feature_name)
vectorised_df.head()
```
8. Factorise submitted_via and add it to new dataframe from question 8 with a name ‘target’
```
# Integer-encode the Product label; factorize()[0] is the array of codes.
vectorised_df['target'] = pd.factorize(df['Product'])[0]
vectorised_df.head()
```
9. Split dataframe from question 6 into training and test set using train_test_split function with 25% of the data as test
```
X= vectorised_df.drop('target', axis=1)  # TF-IDF features
y= vectorised_df['target']               # encoded Product label
# 75/25 train/test split with a fixed seed for reproducibility.
xtrain,xtest,ytrain,ytest = train_test_split(X,y, test_size=0.25, random_state=0)
xtrain.shape
```
10. Train multiclass classifier using LogisticRegression class using outcomes of question 8
```
# Logistic regression classifier over the TF-IDF features.
classifier = LogisticRegression(random_state=0)
classifier.fit(xtrain,ytrain)
prediction = classifier.predict(xtest)
```
11. Evaluate the classifier using classification_report and confusion_metrix from yellowbrick
```
accuracy_score(ytest, prediction)
print(classification_report(ytest,prediction))
##### Visualize Classification Report
# pd.factorize(...)[1] is the array of unique class labels, matching the
# integer codes used as the target above.
classes = pd.factorize(df['Product'])[1]
visualizer = ClassificationReport(classifier, classes=classes, support=True)
visualizer.fit(xtrain, ytrain)  # Fit the visualizer and the model
visualizer.score(xtest, ytest)  # Evaluate the model on the test data
visualizer.poof()
##### Visualize Confusion Matrix
# BUG FIX: this previously passed pd.factorize(...)[0] -- the per-row code
# array -- as `classes`; the class *labels* are element [1] of factorize().
cm = ConfusionMatrix(classifier, classes=classes)
cm.fit(xtrain, ytrain)
cm.score(xtest, ytest)
# How did we do?
cm.poof()
##### Visualize Class Prediction Error
visualizer = ClassPredictionError(classifier, classes=classes)
# Fit the training data to the visualizer
visualizer.fit(xtrain, ytrain)
# Evaluate the model on the test data
visualizer.score(xtest, ytest)
# Draw visualization
visualizer.poof()
```
| github_jupyter |
# Machine Learning to Predict Earnings for Stocks: Neural Networks
**Hugh Donnelly, CFA**<br>
*AlphaWave Data*
**September 2021**
### Introduction
In this article, we are going to cover Neural Networks (NN). Let's begin by laying down the theoretical foundation of the algorithm.
Jupyter Notebooks are available on [Google Colab](https://colab.research.google.com/drive/1_6X_AXuuRxOB1rwGZP_CZrbflDiEeJQQ?usp=sharing) and [Github](https://github.com/AlphaWaveData/Jupyter-Notebooks/blob/master/AlphaWave%20Data%20Machine%20Learning%20to%20Predict%20Earnings%20for%20Stocks%20using%20Neural%20Networks%20example.ipynb).
For this project, we use several Python-based scientific computing technologies listed below.
```
import re
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
```
You may have heard various buzz words associated with Neural Networks (NN) like Artificial Intelligence, [Machine Learning](https://hdonnelly6.medium.com/list/machine-learning-for-investing-7f2690bb1826), and Deep Learning. For clarification, Deep Learning is a sub-field of Neural Networks which is a sub-field of Machine Learning and they all fall under the umbrella of Artificial Intelligence.
<h4>AI versus Machine Learning versus Neural Networks versus Deep Learning</h4>
<img src='ML Photos/1_NN_ML_Graph.PNG'>
Neural Networks (NN) is a prediction algorithm where you define a set of features to make predictions on a label. These labels can be binary (e.g. Is this email spam?), multi-label classification (e.g. handwritten text), or [regression](https://hdonnelly6.medium.com/introduction-to-machine-learning-regression-fee4200132f0) (e.g. What is the price of an equity option?). NN can also be used in adaptive control problems (e.g. autonomous driving).
NN are ubiquitous. Sophisticated market participants are using NN for alpha generation, hedging, and scenario analysis. Technology companies are using NN to suggest which video you should watch next and what type of news you should be consuming. So how can the NN algorithm be so flexible that it can be used to predict such a wide variety of labels?
NN are loosely based on how the brain learns. First, you set up a NN architecture that you believe is commensurate with the complexity of your problem. The simplest NN architecture will include an input neuron layer, an output layer, and an activation function. Next, you give the NN a set of features you believe are important when predicting an outcome. The NN will then determine the relationship and patterns between each neuron to generate the most plausible outcome. Before we use the NN to make predictions, we need to train the NN.
<h4>Neural Network Equation</h4>
<img src='ML Photos/2_NN_Equation.PNG'>
<h4>Neural Network Layer Graph</h4>
<img src='ML Photos/2a_NN_Layers_Graph.PNG'>
Some of the biggest misconceptions and misuses of NN is that people will feed more features or increase the complexity of the NN thinking this will allow the algorithm to detect more patterns and make better predictions. This is not necessarily true as your NN is more likely to be overfit and therefore the predictions cannot be trusted.
For NN to perform well, one must pre-process and normalize quality features, choose an appropriate architecture, and properly tune parameters. Above all, domain knowledge is paramount. There is no substitution for experience and intuition when working with data and making predictions. Assuming one has domain experience and solid knowledge of how NN work, there are several advantages of using NN over other machine learning algorithms.
NN can learn and adapt on their own, which makes them useful for real-time applications. NN also have fault tolerance, meaning the algorithm will continue to operate if one or more neurons fail. They also do a good job at handling nonlinear relationships. NN can also handle multiple tasks in parallel, lending themselves to distributed computing.
Let's look at how we can use NN to make binary predictions on earnings.
### Initial Setup
Now let's retrieve simulated quarterly fundamentals data over a ten year period for anonymized members of the S&P 500 from a saved pickle file for this analysis. This pickle file contains more than 40 features that we will use to predict the direction of the next quarter's earnings based on the current quarter's fundamental data.
If you wish, you can also use real financial data provided by [AlphaWave Data](https://www.alphawavedata.com/) in this analysis.
```
# Load equity dataframe from the saved pickle file
# SECURITY: unpickling executes arbitrary code — only load pickle files you trust.
data = pd.read_pickle("./nn_data.pkl")
# Echo the DataFrame (last expression in the cell).
data
```
Before we can put our data into the NN, we need to pre-process our data. Let's begin by outlining the steps we will take to make this prediction.
### Earnings movement prediction
#### Forecast direction of next quarter earnings based on accounting information of the current quarter
#### Steps:
- Enhance data with additional information
- Preprocess the data
- Learn how to apply Neural Network algorithm on our dataset
Let's have a look at the first seven rows of the data.
```
data.head(7)
```
Let's begin by enriching our data with some additional columns. In a typical machine learning workflow, the majority of the effort is usually dedicated to data cleaning and data preparation. In order for us to run the NN successfully, we need to do a lot of the necessary work before we can actually feed the data into the model. To enhance the data, we follow the below steps.
#### Enhance data:
- Change in Earnings per share : (Current Period EPS - Prior Period EPS)
- Assign 1 to positive change in EPS and 0 to negative change
- Shift data index by -1: we will be using current financial data to predict future change in earnings
```
# Create binary column of positive and negative earnings changes.
# Vectorized comparison replaces the original row-wise iterrows() loop;
# NaN > 0 evaluates to False, so NaNs map to 0 exactly as before.
data['binary change'] = (data['change in EPS'] > 0).astype(int)
# Shift date index by -1 so we are predicting future changes: 1 or 0
data['Future change'] = data['binary change'].shift(-1)
# Goal is to anticipate the sign of future earnings change from the financial data of the current quarter.
# If the future earnings changes is + , we assign 1, otherwise 0, to Future change value of the current quarter
data[['EPS','change in EPS','Future change']].head(6)
```
Using pandas describe function to examine our data, you can see there are a number of columns that have negative and positive infinity.
```
# Examine data
# Summary statistics; the min/max rows reveal the +/- infinity values handled next.
data.describe()
```
We will replace negative and positive infinity with NaN.
```
# Replace infinity with nan
# Infinities (e.g. from division by zero in ratio features) become NaN so they
# can be handled by the missing-value logic below.
data = data.replace([np.inf, -np.inf], np.nan)
```
We will also drop the rows where the change in earnings per share is NaN. We do this because we are trying to predict the change in earnings, so rows with NaN, or missing values, would not be useful information in our analysis.
```
#Drop rows where change in EPS is nan: they are no use to us
# Rows missing either the current change or the shifted future label are dropped.
data = data.dropna(subset = ['change in EPS', 'Future change'])
```
We are also going to drop three columns, EPS, change in EPS, and binary change. We no longer need these columns to continue examining the missing data.
```
# We no longer need these columns
# EPS-derived columns are removed so they cannot leak the label into the features.
data = data.drop(columns = ['EPS','change in EPS','binary change'])
```
As you can see almost every column, other than future change, has some percentage of missing values and some columns have a substantial amount of missing values. We have to deal with these missing values before proceeding.
```
# Examine missing data
# Percentage of NaN values per column (0-100 scale).
missing_column_data = 100*(data.isnull().sum() / data.shape[0]).round(3)
print('Percent of missing values per column:\n', missing_column_data)
```
Real world data often has missing values which require careful attention. The handling of missing values is very important during the preprocessing step because many machine learning algorithms do not work with missing data. There are two general ways of thinking about how to handle missing data. One way is to delete the rows with the missing data, but we risk losing valuable information doing this. The alternative is to try to compute the missing values using an array of different methods like mean or median imputation, neural networks, or Multiple Imputation by Chained Equations (MICE).
In this exercise, we will drop columns that have more than 35% of data missing.
```
# Drop 10 columns that have more than 35% of data missing
# Select (name, pct) pairs above the 35% threshold; the actual drop happens below.
columns_to_drop = missing_column_data[missing_column_data > 35]
columns_to_drop
```
This will result in us dropping ten columns.
```
# Number of columns dropped, 10
# Drop the sparsely-populated columns identified above.
data = data.drop(columns = list(columns_to_drop.index))
print( f'New Dataframe shape : {data.shape}')
```
Let's continue with preprocessing our data.
#### Preprocess data:
- Handle remaining missing values
- Minimize influence of outliers by performing Winsorization
- Standardize data
Handle remaining missing data by replacing NaN by mean of the column.
```
# Keep in mind that this is a naive way to handle missing values.
# This method can cause data leakage and does not factor the covariance between features.
# For more robust methods, take a look at MICE or KNN
# Mean-impute each column. Plain assignment replaces the original chained
# `data[col].fillna(..., inplace=True)`, which is deprecated and a silent
# no-op under pandas Copy-on-Write.
for col in data.columns:
    data[col] = data[col].fillna(data[col].mean())
# Check for missing values
missing_column_data = 100*(data.isnull().sum()/ data.shape[0]).round(3)
print('Percent of missing values per column:\n',missing_column_data)
```
Before we proceed further, we need to split the data into train and test. Splitting data into train and test is absolutely necessary in machine learning to avoid overfitting. It allows us to see how good our model really is and how well it performs on the new data we feed it. We train the model on the training data and then make a prediction using the model that we learned in the training phase. The prediction is made on the unlabeled test data.
Here we split the data into train and test by allocating 80% of the data to train and 20% of the data to test.
```
# First we need to split our data into train and test.
from sklearn.model_selection import train_test_split
# Independent values/features
# All columns except the last one ('Future change').
X = data.iloc[:,:-1].values
# Dependent values
# The last column is the binary label 'Future change'.
y = data.iloc[:,-1].values
# Create test and train data sets, split data randomly into 20% test and 80% train
# random_state=0 pins the split so results are reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
```
We also need to winsorize the data to limit the influence of the extreme values, typically by setting all outliers to a specified percentile of data. Notice how we are winsorizing train data and test data separately. If you winsorize all of your data together first and then partition it later into training and testing afterwards, you are allowing future data (i.e. test data) to influence your cutoff values. Since you won't know what the future is when you use your model, you cannot use data manipulation affected by your future test data.
```
from scipy.stats import mstats
# Winsorize top 1% and bottom 1% of points
# Apply on X_train and X_test separately
# NOTE(review): with the default axis=None, winsorize clips over the flattened
# array (all features pooled together); per-feature clipping would need axis=0.
# Confirm which behavior was intended.
X_train = mstats.winsorize(X_train, limits = [0.01, 0.01])
X_test = mstats.winsorize(X_test, limits = [0.01, 0.01])
```
There is one last thing that we have to do before we train the algorithm and that is to standardize the data.
$$z = \frac{x - \mu}{\sigma}$$ where $\mu$ is the mean and $\sigma$ is the standard deviation.
Standardization of a dataset is a common requirement for many machine learning estimators. The reason for this is that these algorithms may not behave well if the individual features are not standard normally distributed data (e.g. Gaussian with 0 mean and unit variance). This means there should be a mean of zero and unit variance.
For instance many elements used in the objective function of a machine learning algorithm (such as the RBF kernel of Support-vector Machines (SVM) or the L1 and L2 regularizers of linear models) assume that all features are centered around 0 and have variance in the same order. If a feature has a variance that is orders of magnitude larger than others, it might dominate the objective function and make the estimator unable to learn from other features correctly as expected.
```
# IMPORTANT: During testing, it is important to construct the test feature vectors using the means and standard deviations saved from
# the training data, rather than computing it from the test data. You must scale your test inputs using the saved means
# and standard deviations, prior to sending them to your Neural Networks library for classification.
from sklearn.preprocessing import StandardScaler
# Per-feature standardization: z = (x - mean) / std.
sc = StandardScaler()
# Fit to training data and then transform it
X_train = sc.fit_transform(X_train)
# Perform standardization on testing data using mu and sigma from training data
X_test = sc.transform(X_test)
```
Let's take a deeper dive into how NN work.
### Artificial Neural Networks
The most basic unit of a NN is called a perceptron. A perceptron is a single layer NN used for binary classification. It has an input layer, a bias, an activation function, and an output layer. The input layer is made of neurons and each neuron has a weight. NN that have more than three layers of neurons, including the input and output layers, are considered deep NN or deep learning.
So how do we use a NN? First, data is fed into the NN. Input data is multiplied by the neuron's weight and is summed. A bias is then added to the sum and that value is sent to the activation function. The results from the activation function will determine your binary output.
When we are training our NN, we are trying to determine optimal weights for each neuron and bias. As you can see, the algorithm is computationally efficient since we are performing simple vector multiplication. The flexibility, simplicity, and scalability of NN is the reason why it is the most active area of research in machine learning.
### A perceptron is a single-layer neural network; a multi-layer perceptron is called a Neural Network.
* First it sums values of each input x multiplied by weight w
* Weighted sum is passed through an activation function
* Activation function "converts" output to binary output of 0 or 1
* Weights are a measure of influence that each input has on the final output
<img src='img/perceptron.JPG'>
### What is an Activation Function?
The activation function introduces non-linearity into a NN. This is the key to turning a linear combination of inputs from neurons to a non-trivial output. In a binary classification problem, we want an activation function that will act as a switch. Given our inputs, will our activation function output an off or on result? Depending on the type of problem, whether binary classification or regression, we need to choose an appropriate activation function. Below are two commonly used activation functions for binary classification, a sigmoid function and a tangent function.
### Sigmoid function
* Activation function has "switch on" and "switch off" characteristic
* Moves from 0 to 1 depending on the input values of x
* Activation function adds non-linearity to the network
```
# The main reason why we use sigmoid function is because it exists between (0 to 1).
# Therefore, it is especially used for models where we have to predict the probability as an output.
# Since probability of anything exists only between the range of 0 and 1, sigmoid is the right choice.
# The function is differentiable. That means, we can find the slope of the sigmoid curve at any two points.
# There are four commonly used and popular activation functions — sigmoid, hyperbolic tangent(tanh), ReLU and Softmax.
# Plot the logistic curve 1 / (1 + e^-x) over [-8, 8).
x = np.arange(-8, 8, 0.1)
f = 1 / (1 + np.exp(-x))
plt.plot(x, f)
plt.xlabel('x')
plt.ylabel('f(x)')
plt.title('Sigmoid function')
plt.show()
```
### Tanh function
* Maps values between -1 and 1
* tanh is also sigmoidal (s - shaped)
```
# Plot tanh over [-8, 8): an s-shaped curve mapping into (-1, 1).
x = np.arange(-8, 8, 0.1)
f = np.tanh(x)
plt.plot(x, f)
plt.xlabel('x')
plt.ylabel('f(x)')
plt.title('Tanh function')
plt.show()
# Sigmoid activation, kept for reuse in the network below:
# sigmoid(w*x + b) = 1 / (1 + e^-(wT x + b)), where z is (w*x + b).
def sigmoid(z):
    """Element-wise logistic function mapping z into the open interval (0, 1)."""
    return 1.0 / (1.0 + np.exp(-z))
```
### Building blocks:
Now that we have an understanding of the main components of a NN, let's build one from scratch.
### Structure of an Artificial Neural Network (ANN)
* Input Layer is where data enters the network
* Hidden Layers (on the picture there are two) is where function applies weights (w) to the inputs and directs them through the activation function like sigmoid or relu
* Output Layer is where function returns the outputs from the last layer
<img src='img/nn_structure.jpg'>
### The general methodology to build a Neural Network is to:
1. Define the neural network structure ( # of input units, # of hidden layers, etc).
2. Initialize the model's parameters
3. Loop:
- Implement forward propagation
- Compute loss
- Implement backward propagation to get the gradients
- Update parameters (gradient descent)
### 1 & 2 Define and Initialize model's parameters
- n_x : size of the input layer
- n_h : size of the hidden layer
- n_y : size of the output layer
Initialize weights (w) with random values and bias (b) as zeros.
If we initialize weights with 0, the derivative with respect to a loss function will be the same for every w.
```
# Start with a basic network initialization
# Size of the input layer
n_x = 3
# Size of the hidden layer
n_h = 3
# Size of the output layer
n_y = 1
# Weights are small random values (scaled by 0.01) to break symmetry between
# neurons; biases can safely start at zero.
# W1 - weight matrix of shape (n_h, n_x)
W1 = np.random.randn(n_h,n_x) * 0.01
# b1 - bias vector of shape (n_h, 1)
b1 = np.zeros((n_h,1))
# W2 - weight matrix of shape (n_y, n_h)
W2 = np.random.randn(n_y,n_h) * 0.01
# b2 - bias vector of shape (n_y, 1)
b2 = np.zeros((n_y,1))
print("W1 = " + str(W1))
print("b1 = " + str(b1))
print("W2 = " + str(W2))
print("b2 = " + str(b2))
# Build function to initialize and store parameters for later use
def model_parameters(n_x, n_h, n_y):
    """Randomly initialize a two-layer network.

    Weights are drawn from a standard normal scaled by 0.01 (breaks symmetry
    between neurons); biases start at zero.

    Returns a dict keyed 'W1', 'b1', 'W2', 'b2'.
    """
    hidden_weights = np.random.randn(n_h, n_x) * 0.01
    hidden_bias = np.zeros((n_h, 1))
    output_weights = np.random.randn(n_y, n_h) * 0.01
    output_bias = np.zeros((n_y, 1))
    return {
        "W1": hidden_weights,
        "b1": hidden_bias,
        "W2": output_weights,
        "b2": output_bias,
    }
```
### Forward propagation
* Calculations in the model that take us from an input layer all the way to the output (how NN make predictions)
* Each independent feature x will be passed to the 1st hidden layer combined with some randomized weight
* 1st hidden layer applies an activation function resulting in an output which then becomes an input for next hidden layer
* Next hidden layer, repeats step above and progresses forward
* The weights of a neuron can be thought of as weights between 2 layers
<img src='img/forward_nn.JPG'>
```
# Implement forward pass
def forward_propagation(X, parameters):
    """Run one forward pass through the two-layer network.

    Parameters
    ----------
    X : input data, shape (n_x, m)
    parameters : dict with weights 'W1', 'W2' and biases 'b1', 'b2'

    Returns
    -------
    A2 : sigmoid output of the second layer, shape (n_y, m)
    fwd_pass_values : dict caching Z1, A1, Z2, A2 for backpropagation
    """
    # Hidden layer: affine transform followed by tanh activation.
    Z1 = np.dot(parameters["W1"], X) + parameters["b1"]
    A1 = np.tanh(Z1)
    # Output layer: affine transform followed by the logistic sigmoid
    # (inlined here so the pass is self-contained).
    Z2 = np.dot(parameters["W2"], A1) + parameters["b2"]
    A2 = 1.0 / (1.0 + np.exp(-Z2))
    cache = {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2}
    return A2, cache
```
Once the first forward pass has been completed and we have our prediction, how do we evaluate its accuracy?
### Loss function
* It measures cost associated with an incorrect prediction
* Our goal is to find coefficients that minimize the loss function
* Cross entropy loss is used in classification problems
```
# Implement loss function
# cost = -(1/m) * Sum(y*log(a^[2](i)) + (1-y)*log(1-a^[2](i)))
def entropy_loss(A2, Y, parameters=None):
    """Mean binary cross-entropy between predictions and labels.

    Parameters
    ----------
    A2 : array of predicted probabilities (sigmoid output), shape (1, m)
    Y : array of true 0/1 labels, shape (1, m)
    parameters : unused; kept (now optional) for backward compatibility with
        existing call sites that still pass it.

    Returns
    -------
    float : scalar cost
    """
    m = Y.shape[1]
    log_prob = np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), 1 - Y)
    cost = -(1 / m) * np.sum(log_prob)
    # squeeze removes axes of length one so we can return a plain float
    cost = float(np.squeeze(cost))
    return cost
```
### Backpropagation
* Mechanism for tuning the weights based on the loss function
* During training we want to find weights and biases that minimize the error (loss function)
* To measure change in the loss function, we need to take the derivative of a function with respect to all the weights and biases
```
# Implement the backward pass: gradients of the cross-entropy loss with
# respect to every parameter, given the cached forward-pass activations.
def backward_propagation(parameters, fwd_pass_values, X, Y):
    """Return gradients {'dW1', 'db1', 'dW2', 'db2'} for one training batch.

    parameters : dict holding current weights (W2 is needed for the chain rule)
    fwd_pass_values : activations cached by forward_propagation
    X : inputs, shape (n_x, m); Y : labels, shape (n_y, m)
    """
    m = X.shape[1]  # number of training examples
    W2 = parameters["W2"]
    A1 = fwd_pass_values["A1"]
    A2 = fwd_pass_values["A2"]
    # Output layer: d(loss)/dZ2 for sigmoid + cross-entropy simplifies to A2 - Y.
    dZ2 = A2 - Y
    dW2 = np.dot(dZ2, A1.T) / m
    db2 = np.sum(dZ2, axis=1, keepdims=True) / m
    # Hidden layer: chain rule through tanh, whose derivative is 1 - tanh^2.
    dZ1 = np.dot(W2.T, dZ2) * (1.0 - A1 ** 2)
    dW1 = np.dot(dZ1, X.T) / m
    db1 = np.sum(dZ1, axis=1, keepdims=True) / m
    return {"dW1": dW1, "db1": db1, "dW2": dW2, "db2": db2}
```
Now that we have derivatives (i.e. sensitivity of the loss function to change in parameters) how do we use them to update our weights and biases in order to decrease our loss?
### Gradient Descent
* Optimization algorithm used to find the values of parameters that minimize a cost function
* We can use it to recursively update the weights by iterating over all training samples
* It takes into account learning rate and initial parameter values
* Learning rate controls size of the step on each iteration
* parameter = parameter - learning rate * (derivative of loss function with reference to parameter)
* Derivative, slope of loss function, updates the change you want to make to the parameter
* Ideally we want Gradient Descent converging to global optimum where derivative equals to zero
<img src='img/gradient_nn.JPG'>
```
# parameters - dictionary with current parameters
# gradients - derivatives from backward_propagation function
# Gradient-descent rule: parameter = parameter - learning_rate * gradient
def update_parameters(parameters, gradients, learning_rate = 1.1):
    """Apply one gradient-descent step and return the updated parameter dict.

    The learning rate controls the step size taken along each gradient.
    """
    return {
        name: parameters[name] - learning_rate * gradients["d" + name]
        for name in ("W1", "b1", "W2", "b2")
    }
```
### Combine functions above and build your first Neural Network model
```
# Recall our dataset
print ('The shape of X_train: ' + str(X_train.shape))
print ('The shape of y_train: ' + str(y_train.shape))
print ('The shape of X_test: ' + str(X_test.shape))
print ('The shape of y_test: ' + str(y_test.shape))
# Reshape the data
# The network convention here is (features, examples), so features are
# transposed into rows and labels become 1 x m row vectors.
X_train_new = X_train.T
y_train_new = y_train.reshape(1, y_train.shape[0])
X_test_new = X_test.T
y_test_new = y_test.reshape(1, y_test.shape[0])
print ('The shape of X_train_new: ' + str(X_train_new.shape))
print ('The shape of y_train_new: ' + str(y_train_new.shape))
print ('The shape of X_test_new: ' + str(X_test_new.shape))
print ('The shape of y_test_new: ' + str(y_test_new.shape))
# size of input layer
n_x = X_train_new.shape[0] # size of input layer
# size of hidden layer
n_h = 4
# size of output layer
n_y = y_train_new.shape[0]
print("The size of the input layer is: n_x = " + str(n_x))
print("The size of the hidden layer is: n_h = " + str(n_h))
print("The size of the output layer is: n_y = " + str(n_y))
```
### Use model_parameter functions to initialize parameters
```
# Initialize weights and biases for the (n_x, n_h, n_y) architecture defined above.
parameters = model_parameters(n_x, n_h, n_y)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
```
### Train Neural Network model
```
# Number of iterations used in gradient descent for loop
num_iterations = 10000
# Full-batch gradient descent; update_parameters uses its default learning rate.
for i in range(0, num_iterations):
    # Apply our forward propagation function
    A2, fwd_pass_values = forward_propagation(X_train_new, parameters)
    # Calculate cost associated with an incorrect prediction
    cost = entropy_loss(A2, y_train_new, parameters)
    # Apply backpropagation function to measure sensitivity of a loss function to parameters
    gradients = backward_propagation(parameters, fwd_pass_values, X_train_new, y_train_new)
    # Update parameters using Gradient descent
    parameters = update_parameters(parameters, gradients)
    # Print cost for every 1000th iteration
    if i % 1000 == 0:
        print(i,cost)
```
### Prediction
Now that we have our updated parameters that minimize the entropy loss, use forward propagation to make a prediction.
A2 is a vector of probabilities, recall it is a sigmoid().
if A2 > 0.5 then 1 and 0 otherwise. A prediction of 1 indicates a predicted increase in earnings.
```
# Pass test data into forward_propagation function along with newly optimized parameters
A2, fwd_pass_values = forward_propagation(X_test_new, parameters)
# Threshold the sigmoid probabilities at 0.5 to get boolean class predictions.
predictions = (A2 > 0.5)
# Accuracy
# y.pred counts true positives; (1-y).(1-pred) counts true negatives;
# their sum over the number of test examples is the accuracy percentage.
print ('Accuracy: %d' % float((np.dot(y_test_new , predictions.T) + np.dot(1 - y_test_new,1 - predictions.T))/float(y_test_new.size)*100) + '%')
```
### Neural Networks with scikit-learn
Now that we have successfully built a NN from scratch, we have a better appreciation for scikit-learn's built-in support for NN models.
```
# Import accuracy score
from sklearn.metrics import accuracy_score
# Multi-layer Perceptron classifier contains one or more hidden layers and can learn non-linear functions.
from sklearn.neural_network import MLPClassifier
# hidden_layer_sizes allows us to set the number of layers and the number of nodes we wish to have in the Neural Network Classifier
# max_iter denotes the number of epochs.
# activation function for the hidden layers.
# solver specifies the algorithm for weight optimization across the nodes.
# Note: trained on the original (samples, features) X_train/y_train, not the
# transposed arrays used by the from-scratch network above.
mlp = MLPClassifier(hidden_layer_sizes = (150,100,50), max_iter=300,activation = 'relu',solver = 'adam', random_state = 0)
# Train
mlp.fit(X_train,y_train)
# Predict
y_pred = mlp.predict(X_test)
# Accuracy
accuracy = accuracy_score(y_test, y_pred)
print('Accuracy: {:.2f}'.format(accuracy))
```
### Additional Resources
#### Helpful Blog Posts
Machine Learning for Investing: https://hdonnelly6.medium.com/list/machine-learning-for-investing-7f2690bb1826
| github_jupyter |
# Some common 'tricks'
When modelling problems in ASP, it turns out that there are some 'tricks' that come in handy a lot of the time. Here we'll run through some of the most common of these tricks.
Let's start with setting up a function to print answer sets of a program:
```
import clingo

def print_answer_sets(program):
    """Ground and solve the given ASP `program`, printing every answer set.

    Each model's shown atoms are printed sorted, one answer set per line.
    """
    ctl = clingo.Control()
    ctl.add("base", [], program)
    ctl.ground([("base", [])])
    # 0 = enumerate all models instead of stopping at the first one.
    ctl.configuration.solve.models = 0
    for model in ctl.solve(yield_=True):
        atoms = sorted(str(atom) for atom in model.symbols(shown=True))
        print("Answer set: {{{}}}".format(", ".join(atoms)))
```
## Generating
The following tricks are useful for generating the right search space.
### Generating assignments to binary variables
You can generate all assignments to a set of binary variables as follows (e.g., truth assignments).
```
# Each var is independently true or false via negation-as-failure: 2^3 = 8 answer sets.
print_answer_sets("""
var(1..3).
true(X) :- not false(X), var(X).
false(X) :- not true(X), var(X).
#show true/1.
#show false/1.
""")
```
Or alternatively like this:
```
# Same 8 answer sets, expressed with a 1..1 choice rule per variable.
print_answer_sets("""
var(1..3).
1 { true(X); false(X) } 1 :- var(X).
#show true/1.
#show false/1.
""")
```
### Generating assignments to $n$-ary variables
Generating all assignments to variables with domains of more than two elements, you can do like this:
```
# Assign exactly one of 4 values to each of 2 variables via conditional negation.
print_answer_sets("""
var(1..2).
value(1..4).
assign(X,V) :- var(X), value(V), not assign(X,V') : value(V'), V' != V.
#show assign/2.
""")
```
Or a bit more compactly/intuitively, like this:
```
# Same assignments, expressed with a 1..1 choice rule per variable.
print_answer_sets("""
var(1..2).
value(1..4).
1 { assign(X,V) : value(V) } 1 :- var(X).
#show assign/2.
""")
```
### Generating one-to-one assignments
If you have two sets of equal size, and you want to generate all one-to-one assignments between these sets, you can do that as follows:
```
# Bijections between two equal-size sets: exactly one partner on each side.
print_answer_sets("""
side1(1..3).
side2(a;b;c).
1 { match(S1,S2) : side1(S1) } 1 :- side2(S2).
1 { match(S1,S2) : side2(S2) } 1 :- side1(S1).
#show match/2.
""")
```
### Generating one-to-many assignments
If you have two sets $S_1$ and $S_2$ (possibly of different size), and you want to generate all assignments where every element in $S_1$ is assigned to exactly one element in $S_2$ (but not vice versa), then this is a way to do that:
```
# Each side1 element gets exactly one side2 partner; side2 is unconstrained.
print_answer_sets("""
side1(1..2).
side2(a;b;c).
1 { match(S1,S2) : side2(S2) } 1 :- side1(S1).
#show match/2.
""")
```
### Generating arbitrary assignments
If you have two sets $S_1$ and $S_2$ (possibly of different size), and you want to generate *all possible* (partial) assignments, you can do this:
```
# Unbounded choice rule: any subset of side1 x side2 pairs is a valid assignment.
print_answer_sets("""
side1(1..2).
side2(a;b).
{ match(S1,S2) : side1(S1) } :- side2(S2).
#show match/2.
""")
```
### Generating injective assignments
If you have two sets $S_1$ and $S_2$ (possibly of different size), and you want to generate all assignments where every element in $S_1$ is assigned to exactly one element in $S_2$ (but not vice versa) that are *injective* (i.e., no two elements in $S_1$ are assigned to the same element of $S_2$), you can do that as follows:
```
# Injective total functions side1 -> side2: each side2 element used at most once.
print_answer_sets("""
side1(1..2).
side2(a;b;c).
{ match(S1,S2) : side1(S1) } 1 :- side2(S2).
1 { match(S1,S2) : side2(S2) } 1 :- side1(S1).
#show match/2.
""")
```
### Generating arbitrary subsets
Selecting an arbitrary subset of elements from a given set can be done as follows:
```
# Arbitrary subsets: each element is freely selected or not (2^3 answer sets).
print_answer_sets("""
element(a;b;c).
{ select(E) } :- element(E).
#show select/1.
""")
```
Or alternatively like this:
```
# Same subsets, written as a single aggregate choice rule.
print_answer_sets("""
element(a;b;c).
{ select(E) : element(E) }.
#show select/1.
""")
```
### Generating subsets of size $k$
If you want to generate all subsets that are of size *exactly* $k$, you can do this:
```
# Subsets of size exactly 2 (lower bound = upper bound = 2).
print_answer_sets("""
element(a;b;c).
2 { select(E) : element(E) } 2.
#show select/1.
""")
```
### Generating subsets of size $\leq k$
If you want to generate all subsets that are of size *at most* $k$, you can do this:
```
# Subsets of size at most 2 (upper bound only).
print_answer_sets("""
element(a;b;c).
{ select(E) : element(E) } 2.
#show select/1.
""")
```
### Generating subsets of size $\geq k$
If you want to generate all subsets that are of size *at least* $k$, you can do this:
```
# Subsets of size at least 2 (lower bound only).
print_answer_sets("""
element(a;b;c).
2 { select(E) : element(E) }.
#show select/1.
""")
```
## Constraints
The following tricks are useful for filtering out incorrect solutions, after you have generated a search space.
We will illustrate these with the example case where we generated an arbitrary subset of elements, but the same tricks apply also to the other cases.
### Basic constraints
If you want to ensure that `something` is **true**, you can add the constraint `:- not something.` (which can be read as: "It is not the case that `something` is not true").
For example, if you want to ensure that `a` is selected, you can use `:- not select(a).`.
```
# Integrity constraint requiring select(a): models where a is not selected are discarded.
print_answer_sets("""
element(a;b;c).
{ select(E) } :- element(E).
:- not select(a).
#show select/1.
""")
```
If you want to ensure that `something` is **false**, you can add the constraint `:- something.` (which can be read as: "It is not the case that `something` is true").
For example, if you want to ensure that `a` is **not** selected, you can use `:- select(a).`.
```
# Integrity constraint forbidding select(a): models containing it are discarded.
print_answer_sets("""
element(a;b;c).
{ select(E) } :- element(E).
:- select(a).
#show select/1.
""")
```
### AND-constraints
If you want to ensure that both `thing1` and `thing2` are true, you can define a new predicate (e.g., `my_property`), add rules that express that `my_property` is true if both `thing1` and `thing2` are true, and add a constraint that says that `my_property` must be true.
For example:
```
# AND via an auxiliary predicate: my_property holds iff both a and b are selected,
# and the constraint forces it to hold.
print_answer_sets("""
element(a;b;c).
{ select(E) } :- element(E).
my_property :- select(a), select(b).
:- not my_property.
#show select/1.
""")
```
This strategy (of defining a new predicate, defining when exactly this predicate is true, and requiring it to be true) works for more complicated cases as well.
In this simple example, we could easily have done without the new predicate too, e.g.:
```
# Equivalent AND requirement expressed as two separate constraints.
print_answer_sets("""
element(a;b;c).
{ select(E) } :- element(E).
:- not select(a).
:- not select(b).
#show select/1.
""")
```
### OR-constraints
If you want to ensure that `thing1` is true **or** `thing2` is true, you can use the strategy of introducing a new predicate, like this:
```
# OR via an auxiliary predicate: two rules make my_property hold when a or b is
# selected; the constraint requires it.
print_answer_sets("""
element(a;b;c).
{ select(E) } :- element(E).
my_property :- select(a).
my_property :- select(b).
:- not my_property.
#show select/1.
""")
```
Or you can add a constraint `:- not thing1, not thing2.` (which can be read as: "It is not the case that both `thing1` is false and `thing2` is false").
For example:
```
# Equivalent OR expressed directly: rule out models where both a and b are absent.
print_answer_sets("""
element(a;b;c).
{ select(E) } :- element(E).
:- not select(a), not select(b).
#show select/1.
""")
```
### IMPLIES-constraints
If you want to express that if `thing1` is true, then also `thing2` must be true, you can do that like this, for example:
```
print_answer_sets("""
element(a;b;c).
{ select(E) } :- element(E).
:- select(a), not select(b).
#show select/1.
""")
```
Again, also here the strategy of defining a new predicate would work well.
### IF-AND-ONLY-IF-constraints
If you want to express that two things must either both be true, or both be false, you can do that by using two 'if-then' constraints. For example like this:
```
print_answer_sets("""
element(a;b;c).
{ select(E) } :- element(E).
:- select(a), not select(b).
:- not select(a), select(b).
#show select/1.
""")
```
| github_jupyter |
```
# !pip3 install -r requirements.txt
# !pip install transformers
import pandas as pd
import numpy as np
import datetime
import time
import matplotlib.pyplot as plt
#import ipdb
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report
from sklearn.manifold import TSNE
from sklearn.metrics import confusion_matrix
import torch
from torch.utils.data import TensorDataset, DataLoader
from transformers import BertTokenizer, BertModel, BertConfig
import os
```
## Parameters
```
## BERT parameters
bert_tokenizer_model_id = 'bert-base-uncased'
# bert_pretrained_model_id = 'google/bert_uncased_L-2_H-128_A-2' # tiny
# bert_pretrained_model_id = 'google/bert_uncased_L-4_H-256_A-4' # mini
# bert_pretrained_model_id = 'google/bert_uncased_L-4_H-512_A-8' # small
# bert_pretrained_model_id = 'google/bert_uncased_L-8_H-512_A-8' # medium
bert_pretrained_model_id = 'google/bert_uncased_L-12_H-768_A-12' # base
## other training parameters
max_doc_length = 256 # max in train data is 62 in main and 258 in extra data
clip = 0.25 #gradient clipping
lr = 0.00003 #initial learning rate
wdecay=1.2e-6 #weight decay applied to all weights
epochs = 30 #maximum number of epochs
batch_size = 4 #batch size
save = 'model.pt' #path to save the final model
use_extra_data = True #if extra data should be used
train_max_number_batches = -1 # only for the sake of debugging. Set to -1 to be ignored
inference_max_number_batches = -1 # only for the sake of debugging. Set to -1 to be ignored
## log parameters
log_interval = 100 #log interval during training
log_interval_val = 100 #log interval during validation
# Check PyTorch GPU capabilities and pick the device used by all later cells.
print("\nPyTorch:")
if torch.cuda.is_available():
    device = torch.device("cuda")
    print('%d GPU(s) available.' % torch.cuda.device_count())
    print('GPU-Name:', torch.cuda.get_device_name(0))
else:
    print('No GPU available, using the CPU instead.')
    device = torch.device("cpu")
```
## Preparing Data
### Loading Captions
### Reading Data, tokenization, and loading into Tensors
```
# Map sentiment class names to the integer labels used throughout the notebook.
labelcaptions = {"negative": 0, "neutral": 1, "positive": 2}
# Main dataset: headerless CSV with (label, text) columns, Latin-1 encoded.
df = pd.read_csv('./all-data.csv', header=None, sep=",", encoding='ISO-8859-1', names=["label","text"])
df["label"] = df["label"].apply(lambda x: labelcaptions[x])
# Assign every sentence a unique id (0..len-1).
df["sentence_id"] = np.array(list(range(len(df))))
# Extra dataset, mixed into training later when use_extra_data is True.
df_extra = pd.read_csv('./public-test-set.csv', header=None, sep=",", encoding='UTF-8', names=["label","text"])
df_extra["label"] = df_extra["label"].apply(lambda x: {"negative": 0, "neutral": 1, "positive": 2}[x])
# Continue the id sequence so ids stay unique across both frames.
df_extra["sentence_id"] = np.array(list(range(len(df),len(df)+len(df_extra))))
```
#### Split into train, val, test
```
# Fractions of the main dataframe assigned to each split.
train_size = 0.8
val_size = 0.2
test_size = 0
assert train_size + val_size + test_size == 1.
# Contiguous index ranges over the shuffled dataframe for each split.
train_ind = slice(0,int(len(df)*train_size))
val_ind = slice(int(len(df)*train_size),int(len(df)*train_size)+int(len(df)*val_size))
# NOTE(review): the '++' below parses as 'x + (+y)' and is harmless, but it
# looks like a typo for '+' — confirm the intended expression.
test_ind = slice(int(len(df)*train_size)+int(len(df)*val_size),int(len(df)*train_size)+int(len(df)*val_size)++int(len(df)*test_size))
# Shuffle rows before splitting (no fixed seed, so splits are not reproducible).
df_shuffle = df.sample(frac=1)
# Split
df_train_, df_val, df_test = df_shuffle[train_ind], df_shuffle[val_ind], df_shuffle[test_ind]
df_train_["label"] = df_train_["label"].to_numpy().astype(np.int64)
def get_extended_df(df1, df2):
    """Concatenate the 'label' and 'text' columns of two dataframes into one
    shuffled dataframe with a fresh 'sentence_id' column (0..len-1).

    Shuffling uses pandas' default RNG, so row order is not reproducible
    unless a seed is set elsewhere.
    """
    combined_labels = np.array(list(df1["label"].to_numpy()) + list(df2["label"].to_numpy()))
    combined_texts = np.array(list(df1["text"].to_numpy()) + list(df2["text"].to_numpy()))
    merged = pd.DataFrame({"label": combined_labels, "text": combined_texts})
    merged["sentence_id"] = np.array(list(range(len(merged))))
    return merged.sample(frac=1)
if use_extra_data: df_train = get_extended_df(df_train_, df_extra)
else: df_train = df_train_
df_train, df_train["text"].apply(lambda x: len(x.split(" "))).max()
```
#### Bert stuff
```
tokenizer = BertTokenizer.from_pretrained(bert_tokenizer_model_id, do_lower_case=True)
# sample subword tokenization
_sample_text = 'From a deceptively simple premise , this deeply moving French drama develops a startling story that works both as a detailed personal portrait and as a rather frightening examination of modern times .'
print (tokenizer.tokenize(_sample_text))
{id:word for word, id in tokenizer.get_vocab().items()}[1300]
def convert_text_bertids_tensor(texts):
    """Tokenize a list of raw strings with the module-level BERT `tokenizer`.

    Returns the tokenizer's encoding dict of PyTorch tensors containing
    'input_ids', 'token_type_ids' and 'attention_mask'. Sequences are padded
    to the longest item in the batch and truncated to `max_doc_length`.
    """
    encoded = tokenizer(
        texts,
        padding=True,
        truncation=True,
        max_length=max_doc_length,
        return_tensors="pt",
    )
    return encoded
# sample
_sample_text1 = 'From a deceptively simple premise , this deeply moving French drama develops a startling story that works both as a detailed personal portrait and as a rather frightening examination of modern times .'
_sample_text2 = "It 's a trifle of a movie , with a few laughs surrounding an unremarkable soft center ."
print (convert_text_bertids_tensor([_sample_text1, _sample_text2]))
def get_document_label_tensor(df):
    """Turn a dataframe with 'text' and 'label' columns into a pair of
    (BERT encoding dict, numpy label array)."""
    # Strip surrounding whitespace from every document before tokenization.
    documents = [text.strip() for text in df['text'].values]
    labels = np.array(list(df['label'].values))
    return convert_text_bertids_tensor(documents), labels
x_train, y_train = get_document_label_tensor(df_train)
print ('Train data items:', str(x_train['input_ids'].shape), str(y_train.shape))
x_val, y_val = get_document_label_tensor(df_val)
print ('Validation data items:', str(x_val['input_ids'].shape), str(y_val.shape))
# x_test, y_test = get_document_label_tensor(df_test)
# print ('Test data items:', str(x_test['input_ids'].shape), str(y_test.shape))
# dataloaders
dataset_train = TensorDataset(x_train['input_ids'], x_train['token_type_ids'], x_train['attention_mask'], torch.LongTensor(y_train))
dataloader_train = DataLoader(dataset_train, batch_size=batch_size, shuffle=True, num_workers=7, drop_last=True)
dataset_val = TensorDataset(x_val['input_ids'], x_val['token_type_ids'], x_val['attention_mask'], torch.LongTensor(y_val))
dataloader_val = DataLoader(dataset_val, batch_size=batch_size, shuffle=True, num_workers=7, drop_last=True)
dataset_test = dataset_val #TensorDataset(x_test['input_ids'], x_test['token_type_ids'], x_test['attention_mask'], torch.LongTensor(y_test))
dataloader_test = DataLoader(dataset_test, batch_size=batch_size, shuffle=True, num_workers=7, drop_last=True)
```
### Practical Functions
```
def model_save(fn, model, criterion, optimizer):
    """Serialize the state dicts of model, criterion and optimizer to `fn`
    as a single 3-element list."""
    states = [model.state_dict(), criterion.state_dict(), optimizer.state_dict()]
    with open(fn, 'wb') as f:
        torch.save(states, f)
def model_load(fn):
    """Read back the (model, criterion, optimizer) state dicts written by
    model_save and return them as a 3-tuple."""
    with open(fn, 'rb') as f:
        states = torch.load(f)
    return states[0], states[1], states[2]
```
### Model
```
class BERTClassifierModel(torch.nn.Module):
    """Sentence classifier: a BERT encoder followed by one linear projection
    of the [CLS] token representation onto `nout` classes."""

    def __init__(self, bert, nout):
        super(BERTClassifierModel, self).__init__()
        self.bert = bert
        self.embedding_size = self.bert.config.hidden_size
        self.output_projection_layer = torch.nn.Linear(self.embedding_size, nout)

    def forward(self, input_batch):
        """Run BERT on `input_batch` (dict with 'input_ids', 'token_type_ids'
        and 'attention_mask' tensors of shape (batch, seq_len)).

        Returns:
            log_probs: log of the predicted class probabilities, (batch, nout)
            final_representations: the [CLS] output vectors the prediction is
                based on (kept for visualization), (batch, hidden_size)
        """
        bert_out = self.bert.forward(
            input_ids=input_batch["input_ids"],
            attention_mask=input_batch["attention_mask"],
            token_type_ids=input_batch["token_type_ids"],
        )
        # The vector of the first ([CLS]) token summarizes the sequence.
        final_representations = bert_out["last_hidden_state"][:, 0, :]
        logits = self.output_projection_layer(final_representations)
        log_probs = torch.nn.functional.log_softmax(logits, dim=1)
        return log_probs, final_representations
## DUMMY TEST
bert = BertModel.from_pretrained(bert_pretrained_model_id, cache_dir="cache")
_model = BERTClassifierModel(bert=bert, nout=4)
_input_ids = torch.LongTensor(np.random.randint(low=0, high=1000, size=(max_doc_length, batch_size)))
_token_type_ids = torch.LongTensor(np.zeros(shape=(max_doc_length, batch_size)))
_attention_mask = torch.LongTensor(np.ones(shape=(max_doc_length, batch_size)))
print ('input_ids shape: %s' % str(_input_ids.shape))
_input_batch = {'input_ids': _input_ids, 'token_type_ids': _token_type_ids, 'attention_mask': _attention_mask}
_output, _representations = _model.forward(_input_batch)
print ('output shape: %s' % str(_output.shape))
print ('representations shape: %s' % str(_representations.shape))
print ('done!')
```
### Instantiating Model
```
bert = BertModel.from_pretrained(bert_pretrained_model_id, cache_dir="cache", output_attentions=True)
model = BERTClassifierModel(bert=bert, nout=len(labelcaptions.keys()))
model.to(device)
print('Model:', model)
criterion = torch.nn.NLLLoss()
params = list(model.parameters())
optimizer = torch.optim.Adam(params, lr=lr, weight_decay=wdecay)
stored_res = 0
```
### Training ...
```
def softmax(X):
    """Row-wise softmax over an iterable of score vectors.

    Each row's maximum is subtracted before exponentiating for numerical
    stability (the naive exp(x)/sum(exp(x)) overflows for large logits);
    the result is mathematically identical.

    Returns a list of numpy probability vectors, one per row of X.
    """
    return [np.exp(x - np.max(x)) / np.sum(np.exp(x - np.max(x))) for x in X]
def predict(dataloader, model):
    """Run `model` over all batches of `dataloader` in eval mode.

    Returns (predicted class ids, softmax probability vectors, gold labels,
    array of [CLS] representations), accumulated over the processed batches.
    Stops early after `inference_max_number_batches` batches unless that
    module-level setting is -1. Progress is printed every
    `log_interval_val` batches.
    """
    model.to(device)
    # Turn on evaluation mode which disables dropout.
    model.eval()
    predictions, outputs, labels, representations = [], [], [], []
    for batch_idx, batch in enumerate(dataloader):
        input_ids, token_type_ids, attention_mask, gold = [e.to(device) for e in batch]
        input_batch = {'input_ids': input_ids,
                       'token_type_ids': token_type_ids,
                       'attention_mask': attention_mask}
        with torch.no_grad():
            log_probs, batch_representations = model.forward(input_batch)
        batch_predictions = torch.argmax(log_probs, dim=1)
        outputs.extend(softmax(log_probs.cpu().numpy()))
        predictions.extend(batch_predictions.cpu().numpy())
        labels.extend(gold.cpu().numpy())
        representations.extend(batch_representations.cpu().numpy())
        if batch_idx % log_interval_val == 0 and batch_idx > 0:
            print('Prediction | %5d batches | %5d data |' % (batch_idx, batch_idx*batch_size))
        if (batch_idx > inference_max_number_batches) and (inference_max_number_batches != -1):
            break
    return predictions, outputs, labels, np.array(representations)
def train(dataloader, model, criterion, optimizer):
    """Train `model` for one pass over `dataloader`.

    Per batch: forward pass, loss via `criterion`, back-propagation, gradient
    clipping and an optimizer step. Running loss is printed every
    `log_interval` batches. Relies on module-level `device`, `clip`,
    `log_interval`, `batch_size`, `epoch` and `train_max_number_batches`.
    """
    model.to(device)
    # Turn on training mode which enables dropout.
    model.train()
    start_time = time.time()
    log_interval_loss = 0
    for i, batch in enumerate(dataloader):
        _input_ids, _token_type_ids, _attention_mask, _label = [e.to(device) for e in batch]
        _input_batch = {'input_ids': _input_ids,
                        'token_type_ids': _token_type_ids,
                        'attention_mask': _attention_mask}
        # Forward pass, loss, back-propagation, parameter update.
        optimizer.zero_grad()
        _output, _ = model.forward(_input_batch)
        loss = criterion(_output, _label)
        loss.backward()
        if clip:
            # Clip the gradients of THIS model's parameters. The original code
            # clipped the module-level `params` list, which silently clips the
            # wrong tensors whenever a different model instance is passed in.
            torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        log_interval_loss += loss.item()
        if i % log_interval == 0 and i > 0:
            cur_loss = log_interval_loss / log_interval
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d} batches | {:5d} data | ms/batch {:5.2f} | loss {:5.3f}'.
                  format(epoch, i, i*batch_size, elapsed * 1000 / log_interval, cur_loss))
            log_interval_loss = 0
            start_time = time.time()
        if (i > train_max_number_batches) and (train_max_number_batches != -1):
            break
print('=' * 89)
print('Start training')
EVAL_MEASURE = 'accuracy'
# Loop over epochs.
best_val_res = []
try: stored_res = stored_res
except: stored_res = 0
for epoch in range(1, epochs+1):
epoch_start_time = time.time()
train(dataloader_train, model, criterion, optimizer)
print('Epoch %d validation' % epoch)
yhat, _, y, _ = predict(dataloader_val, model)
val_results = classification_report(y, yhat, output_dict=True)
val_res = val_results[EVAL_MEASURE]
print('-' * 89)
print('| end of epoch %3d | time: %5.2fs | validation %s %.3f | ' %
(epoch, (time.time() - epoch_start_time), EVAL_MEASURE, val_res))
print('-' * 89)
if val_res > stored_res:
save = f"models/BERT_valacc={val_res*100:.2f}%_bs={batch_size}_doc_len={max_doc_length}_epoch={epoch}_lr={lr}_valsize={val_size*100:.0f}_{bert_pretrained_model_id.split('/')[-1]}"
model_save(save, model, criterion, optimizer)
print('Saving model (new best validation)')
stored_res = val_res
best_val_res.append(val_res)
print('End of training')
```
### Test set Evaluation
```
# Load the best saved model.
model_state, criterion_state, optimizer_state = model_load(save)
model.load_state_dict(model_state)
criterion.load_state_dict(criterion_state)
optimizer.load_state_dict(optimizer_state)
# Run on test data.
yhat, yhat_proba, y, x_test_representations = predict(dataloader_test, model)
results = classification_report(y, yhat, output_dict=True)
print (classification_report(y, yhat))
print('=' * 89)
print('| End of testing | test %s %.3f ' % (EVAL_MEASURE, results[EVAL_MEASURE]))
print('=' * 89)
```
### Plots
```
# Can somehow distinguish between positive and negative, but not very good
scores = (yhat_proba * np.array([-1,0,1])).sum(axis=1)
plt.figure(figsize=(15,10))
plt.hist(scores[np.array(y)==0], color="red", alpha=0.5,label="negative", bins=30)
plt.hist(scores[np.array(y)==1], color="gray", alpha=0.5,label="neutral", bins=30)
plt.hist(scores[np.array(y)==2], color="green", alpha=0.5,label="positive", bins=30)
plt.legend()
plt.show()
#code from https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        # Divide each row by its total so cells show per-class rates.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    cell_format = '.2f' if normalize else 'd'
    threshold = cm.max() / 2.
    # Annotate every cell, flipping text colour for contrast on dark cells.
    for row, col in np.ndindex(cm.shape):
        plt.text(col, row, format(cm[row, col], cell_format),
                 horizontalalignment="center",
                 color="white" if cm[row, col] > threshold else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
tuples=[(key, value) for key, value in labelcaptions.items()]
tuples.sort(key=lambda x: x[1])
labelcaptions_inorder = [t[0] for t in tuples]
# Compute confusion matrix
cnf_matrix = confusion_matrix(y, yhat)
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
#plt.figure(figsize=(8,6))
#plot_confusion_matrix(cnf_matrix, classes=labelcaptions_inorder,
# title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure(figsize=(8,6))
plot_confusion_matrix(cnf_matrix, classes=labelcaptions_inorder, normalize=True,
title='Normalized confusion matrix')
plt.show()
```
| github_jupyter |
```
import os
import numpy as np
from skimage.transform import resize, rescale
from skimage.io import imread, imsave, imread_collection
import matplotlib.pyplot as plt
%matplotlib inline
# imgs = imread_collection('../data/icons/*.png')
icons_dir = '/Users/universome/Downloads/chosen-icons'
imgs = [imread(f'{icons_dir}/{f}') for f in os.listdir(f'{icons_dir}') if not f.startswith('.')]
def to_binary(img, threshold: float = 0):
    """Binarize `img`: pixels strictly above `threshold` become 255, all others 0 (uint8)."""
    mask = img > threshold
    return mask.astype(np.uint8) * 255
def downscale_to_fit(img, max_size):
    """Rescale `img` so that its largest dimension equals `max_size` pixels."""
    # A single scale factor keeps the aspect ratio intact.
    scale_factor = max_size / max(img.shape)
    return rescale(img, scale_factor, multichannel=False)
pytorch_logo = imread(f'{icons_dir}/pytorch-logo.png')
imsave(f'{icons_dir}/pytorch-logo.png', resize(pytorch_logo[:,:,3], (512, 512)))
plt.imshow(resize(pytorch_logo[:,:,3], (512, 512)), cmap='gray')
processed = [None] * len(imgs)
processed[0] = ((imgs[1] > 100) * 255).astype(np.uint8)
processed[1] = imgs[0][:, :, 3]
processed[2] = ((imgs[2] > 200) * 255).astype(np.uint8)
processed[3] = ((imgs[3][:, :, 3] < 250) * 255).astype(np.uint8)
processed[4] = ((imgs[4][:, :, 3] < 250) * 255).astype(np.uint8)
# imsave('/Users/universome/Downloads/custom-icons/processed/mona-lisa.png', processed[0])
# imsave('/Users/universome/Downloads/custom-icons/processed/batman-logo-1.png', processed[1])
# imsave('/Users/universome/Downloads/custom-icons/processed/goose.png', processed[2])
# imsave('/Users/universome/Downloads/custom-icons/processed/putin.png', processed[3])
# imsave('/Users/universome/Downloads/custom-icons/processed/batman-logo-2.png', processed[4])
i = 0
threshold = 0.5
plt.imshow(to_binary(downscale_to_fit(processed[i], 300), threshold)[30:180, 65:155], cmap='gray');
resized = [None] * len(imgs)
# resized[0] = to_binary(downscale_to_fit(processed[0], 100), 0.6)
# resized[0] = to_binary(downscale_to_fit(processed[0], 300), 0.6)
resized[0] = to_binary(downscale_to_fit(processed[i], 300), threshold)[30:180, 65:155]
resized[1] = to_binary(downscale_to_fit(processed[1], 100), 0.6)
resized[2] = to_binary(downscale_to_fit(processed[2], 100), 0.7)
resized[3] = to_binary(downscale_to_fit(processed[3], 100), 0.6)
resized[4] = to_binary(downscale_to_fit(processed[4], 100), 0.)
imsave('/Users/universome/Downloads/custom-icons/resized/mona-lisa-face.png', resized[0])
imsave('/Users/universome/Downloads/custom-icons/resized/batman-logo-1.png', resized[1])
imsave('/Users/universome/Downloads/custom-icons/resized/goose.png', resized[2])
imsave('/Users/universome/Downloads/custom-icons/resized/putin.png', resized[3])
imsave('/Users/universome/Downloads/custom-icons/resized/batman-logo-2.png', resized[4])
import os
# NOTE(review): `target_size` is not defined anywhere in this notebook as shown —
# this cell raises NameError unless it is set (e.g. target_size = (64, 64)) in a
# prior cell; confirm before running.
size_name = f'{target_size[0]}x{target_size[1]}'
os.makedirs(f'../data/icons-resized/{size_name}', exist_ok=True)
# NOTE(review): `imgs.files` only exists when imgs was loaded via
# imread_collection (the commented-out loader near the top of this notebook);
# with the list built from os.listdir this attribute access fails. Verify
# which loader is active.
for img, filename in zip(imgs, imgs.files):
    result = to_binary(resize(img, target_size, mode='constant'))
    imsave(f'../data/icons-resized/{size_name}/{os.path.basename(filename)}', result)
# Binarize all resized icons and plot the fraction of white pixels per icon.
resized = [to_binary(resize(img, target_size, mode='constant')) for img in imgs]
num_whites = [(x > 0).sum() / x.size for x in resized]
plt.hist(num_whites, bins=20);
plt.grid()
```
| github_jupyter |
```
import sys
sys.path.append('../scripts/')
from ideal_robot import *
from scipy.stats import expon, norm, uniform
class Robot(IdealRobot):
    """IdealRobot extended with realistic motion errors: heading noise,
    fixed velocity bias, temporary stuck states and random kidnapping.

    IdealRobot comes from ideal_robot (imported with *); attributes such as
    self.r, self.pose, self.agent, self.sensor and state_transition are
    assumed to be defined there — confirm against ideal_robot.py.
    """
    def __init__(self, pose, agent=None, sensor=None, color="black", \
                 noise_per_meter=5, noise_std=math.pi/60, bias_rate_stds=(0.1,0.1), \
                 expected_stuck_time=1e100, expected_escape_time = 1e-100,\
                 expected_kidnap_time=1e100, kidnap_range_x = (-5.0,5.0), kidnap_range_y = (-5.0,5.0)):  # parameters added over IdealRobot
        super().__init__(pose, agent, sensor, color)
        # Travel distance between heading glitches ~ exponential; the 1e-100
        # term avoids division by zero when noise_per_meter == 0.
        self.noise_pdf = expon(scale=1.0/(1e-100 + noise_per_meter))
        self.distance_until_noise = self.noise_pdf.rvs()
        self.theta_noise = norm(scale=noise_std)
        # Per-robot multiplicative biases on forward and angular velocity,
        # drawn once at construction and then fixed.
        self.bias_rate_nu = norm.rvs(loc=1.0, scale=bias_rate_stds[0])
        self.bias_rate_omega = norm.rvs(loc=1.0, scale=bias_rate_stds[1])
        # Timers for entering/leaving the stuck state, both exponential.
        self.stuck_pdf = expon(scale=expected_stuck_time)
        self.escape_pdf = expon(scale=expected_escape_time)
        self.is_stuck = False
        self.time_until_stuck = self.stuck_pdf.rvs()
        self.time_until_escape = self.escape_pdf.rvs()
        # Kidnapping: at random times teleport to a uniform pose in the ranges.
        self.kidnap_pdf = expon(scale=expected_kidnap_time)
        self.time_until_kidnap = self.kidnap_pdf.rvs()
        rx, ry = kidnap_range_x, kidnap_range_y
        self.kidnap_dist = uniform(loc=(rx[0], ry[0], 0.0), scale=(rx[1]-rx[0], ry[1]-ry[0], 2*math.pi ))
    def noise(self, pose, nu, omega, time_interval):
        # Consume the distance budget; self.r comes from IdealRobot (presumably
        # the robot radius, converting omega to arc length — confirm).
        # NOTE(review): omega is not wrapped in abs() here, so negative omega
        # lengthens the budget; verify this is intended.
        self.distance_until_noise -= abs(nu)*time_interval + self.r*omega*time_interval
        if self.distance_until_noise <= 0.0:
            self.distance_until_noise += self.noise_pdf.rvs()
            pose[2] += self.theta_noise.rvs()  # perturb the heading only
        return pose
    def bias(self, nu, omega):
        """Apply the robot's fixed multiplicative velocity biases."""
        return nu*self.bias_rate_nu, omega*self.bias_rate_omega
    def stuck(self, nu, omega, time_interval):
        """Zero both velocities while stuck; toggle the stuck state when the
        corresponding exponential timer expires."""
        if self.is_stuck:
            self.time_until_escape -= time_interval
            if self.time_until_escape <= 0.0:
                self.time_until_escape += self.escape_pdf.rvs()
                self.is_stuck = False
        else:
            self.time_until_stuck -= time_interval
            if self.time_until_stuck <= 0.0:
                self.time_until_stuck += self.stuck_pdf.rvs()
                self.is_stuck = True
        return nu*(not self.is_stuck), omega*(not self.is_stuck)
    def kidnap(self, pose, time_interval):
        """Teleport to a random pose when the kidnap timer expires; otherwise
        return the pose unchanged."""
        self.time_until_kidnap -= time_interval
        if self.time_until_kidnap <= 0.0:
            self.time_until_kidnap += self.kidnap_pdf.rvs()
            return np.array(self.kidnap_dist.rvs()).T
        else:
            return pose
    def one_step(self, time_interval):
        """Advance one simulation step: sense, decide, then apply bias, stuck,
        state transition, noise and kidnap — in that order."""
        if not self.agent: return
        obs =self.sensor.data(self.pose) if self.sensor else None
        nu, omega = self.agent.decision(obs)
        nu, omega = self.bias(nu, omega)
        nu, omega = self.stuck(nu, omega, time_interval)
        self.pose = self.state_transition(nu, omega, time_interval, self.pose)
        self.pose = self.noise(self.pose, nu, omega, time_interval)
        self.pose = self.kidnap(self.pose, time_interval)
class Camera(IdealCamera): ###camera_third### (noise helper shown; see text)
    """IdealCamera extended with per-measurement Gaussian noise and a fixed
    per-camera systematic bias on distance and direction readings.

    IdealCamera comes from ideal_robot (imported with *); self.map,
    observation_function, visible and lastdata are assumed to be defined
    there — confirm against ideal_robot.py.
    """
    def __init__(self, env_map,
                 distance_range=(0.5, 6.0),
                 direction_range=(-math.pi/3, math.pi/3),
                 distance_noise_rate=0.1, direction_noise=math.pi/90,
                 distance_bias_rate_stddev=0.1, direction_bias_stddev=math.pi/90):  # parameters added over IdealCamera
        super().__init__(env_map, distance_range, direction_range)
        self.distance_noise_rate = distance_noise_rate
        self.direction_noise = direction_noise
        # Bias terms are drawn once per camera instance and then stay fixed.
        self.distance_bias_rate_std = norm.rvs(scale=distance_bias_rate_stddev)  # added
        self.direction_bias = norm.rvs(scale=direction_bias_stddev)  # added
    def noise(self, relpos):
        # relpos = (distance ell, direction phi); the distance noise scale
        # grows proportionally with the measured distance.
        ell = norm.rvs(loc=relpos[0], scale=relpos[0]*self.distance_noise_rate)
        phi = norm.rvs(loc=relpos[1], scale=self.direction_noise)
        return np.array([ell, phi]).T
    def bias(self, relpos):  # added
        """Apply this camera's fixed systematic error to (distance, direction)."""
        return relpos + np.array([relpos[0]*self.distance_bias_rate_std,
                                  self.direction_bias]).T
    def data(self, cam_pose):
        """Observe all visible landmarks from cam_pose.

        Returns a list of ((distance, direction), landmark_id) pairs with the
        fixed bias applied first, then the random noise. Also stores the
        result in self.lastdata."""
        observed = []
        for lm in self.map.landmarks:
            z = self.observation_function(cam_pose, lm.pos)
            if self.visible(z):
                z = self.bias(z)  # systematic error first
                z = self.noise(z)  # then random per-measurement noise
                observed.append((z, lm.id))
        self.lastdata = observed
        return observed
world = World(30, 0.1)
### 地図を生成して3つランドマークを追加 ###
m = Map()
m.append_landmark(Landmark(-4,2))
m.append_landmark(Landmark(3,-3))
m.append_landmark(Landmark(3,3))
m.append_landmark(Landmark(3,-2))
m.append_landmark(Landmark(3,0))
m.append_landmark(Landmark(3,1))
world.append(m)
### ロボットを作る ###
straight = Agent(0.2, 0.0)
r = Robot( np.array([ 0, 0, 0]).T, sensor=Camera(m), agent=straight)
world.append(r)
### アニメーション実行 ###
world.draw()
```
| github_jupyter |
```
# If you run on colab uncomment the following line
#!pip install git+https://github.com/clementchadebec/benchmark_VAE.git
import torch
import torchvision.datasets as datasets
%load_ext autoreload
%autoreload 2
mnist_trainset = datasets.MNIST(root='../../data', train=True, download=True, transform=None)
train_dataset = mnist_trainset.data[:-10000].reshape(-1, 1, 28, 28) / 255.
eval_dataset = mnist_trainset.data[-10000:].reshape(-1, 1, 28, 28) / 255.
from pythae.models import MSSSIM_VAE, MSSSIM_VAEConfig
from pythae.trainers import BaseTrainerConfig
from pythae.pipelines.training import TrainingPipeline
from pythae.models.nn.benchmarks.mnist import Encoder_VAE_MNIST, Decoder_AE_MNIST
config = BaseTrainerConfig(
output_dir='my_model',
learning_rate=1e-3,
batch_size=100,
num_epochs=100
)
model_config = MSSSIM_VAEConfig(
input_dim=(1, 28, 28),
latent_dim=16,
beta=1e-2,
window_size=3
)
model = MSSSIM_VAE(
model_config=model_config,
encoder=Encoder_VAE_MNIST(model_config),
decoder=Decoder_AE_MNIST(model_config)
)
pipeline = TrainingPipeline(
training_config=config,
model=model
)
pipeline(
train_data=train_dataset,
eval_data=eval_dataset
)
import os
last_training = sorted(os.listdir('my_model'))[-1]
trained_model = MSSSIM_VAE.load_from_folder(os.path.join('my_model', last_training, 'final_model'))
from pythae.samplers import NormalSampler
# create normal sampler
normal_samper = NormalSampler(
model=trained_model
)
# sample
gen_data = normal_samper.sample(
num_samples=25
)
import matplotlib.pyplot as plt
# show results with normal sampler
fig, axes = plt.subplots(nrows=5, ncols=5, figsize=(10, 10))
for i in range(5):
for j in range(5):
axes[i][j].imshow(gen_data[i*5 +j].cpu().squeeze(0), cmap='gray')
axes[i][j].axis('off')
plt.tight_layout(pad=0.)
from pythae.samplers import GaussianMixtureSampler, GaussianMixtureSamplerConfig
# set up GMM sampler config
gmm_sampler_config = GaussianMixtureSamplerConfig(
n_components=10
)
# create gmm sampler
gmm_sampler = GaussianMixtureSampler(
sampler_config=gmm_sampler_config,
model=trained_model
)
# fit the sampler
gmm_sampler.fit(train_dataset)
# sample
gen_data = gmm_sampler.sample(
num_samples=25
)
# show results with gmm sampler
fig, axes = plt.subplots(nrows=5, ncols=5, figsize=(10, 10))
for i in range(5):
for j in range(5):
axes[i][j].imshow(gen_data[i*5 +j].cpu().squeeze(0), cmap='gray')
axes[i][j].axis('off')
plt.tight_layout(pad=0.)
```
## ... the other samplers work the same
| github_jupyter |
<a href="https://colab.research.google.com/github/aubricot/computer_vision_with_eol_images/blob/master/object_detection_for_image_cropping/multitaxa/multitaxa_split_train_test.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Split EOL user crops dataset into train and test for all taxa
---
*Last Updated 29 March 2020*
Instead of creating image annotations from scratch, EOL user-generated cropping coordinates are used to build training and testing datasets: the training data teach the object detection models (YOLO via darkflow, SSD, and Faster-RCNN), and the testing data evaluate each model's accuracy.
Following the [Pareto principle](https://en.wikipedia.org/wiki/Pareto_principle), for each taxon 80% of the original EOL crops dataset are randomly selected to be training data and the remaining 20% will be used to test model accuracy.
Resulting train and test datasets for each taxon are exported for further pre-processing in [multitaxa_preprocessing.ipynb](https://github.com/aubricot/computer_vision_with_eol_images/tree/master/object_detection_for_image_cropping/multitaxa/multitaxa_preprocessing.ipynb), before they are ready to use with the object detection models.
```
# Mount google drive to import/export files
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
```
Run for each taxon (Coleoptera, Anura, Squamata and Carnivora), change names where you see '# TO-DO'
```
import pandas as pd
import numpy as np

# Read in EOL user-generated cropping data
# TO-DO: Change to anura, coleoptera, squamata, and carnivora _crops.tsv
crops = pd.read_csv('drive/My Drive/fall19_smithsonian_informatics/train/carnivora_crops.tsv', sep="\t", header=0)
print(crops.head())

# Randomly select 80% of data to use for training
# set seed with random_state=2 for reproducible results
# BUGFIX: sample().index returns row *labels*, so select with label-based .loc,
# not position-based .iloc — the two only coincide for a default RangeIndex.
idx = crops.sample(frac = 0.8, random_state=2).index
train = crops.loc[idx]
print(train.head())

# Select the remaining 20% of data for testing using the inverse index from above
test = crops.loc[crops.index.difference(idx)]
print(test.head())

# Write test and train to tsvs
# TO-DO: Change to anura, coleoptera, squamata, and carnivora _crops.tsv _crops_train.tsv and _crops_test.tsv
train.to_csv('drive/My Drive/fall19_smithsonian_informatics/train/carnivora_crops_train.tsv', sep='\t', header=True, index=False)
test.to_csv('drive/My Drive/fall19_smithsonian_informatics/train/carnivora_crops_test.tsv', sep='\t', header=True, index=False)
```
| github_jupyter |
```
# Load plotting libraries quietly.
suppressWarnings(suppressPackageStartupMessages(library(ggplot2)))
suppressWarnings(suppressPackageStartupMessages(library(ggthemes)))
# Per-stage eQTL results: iPSC (day0), mesendoderm, definitive endoderm.
res.ips = read.csv("/nfs/leia/research/stegle/acuomo/mean/day0/all_expts/allresults.csv", row.names = 1)
res.mes = read.csv("/nfs/leia/research/stegle/acuomo/mean/mesendo_est_June20/allresults.csv", row.names = 1)
res.defendo = read.csv("/nfs/leia/research/stegle/acuomo/mean/defendo_est_June20/allresults.csv", row.names = 1)
# Combined self-tagging annotation across all stages.
selftag_filename = "/nfs/leia/research/stegle/dseaton/hipsci/singlecell_endodiff/data/qtl_annotation/all_results_combined.self_tagging.endodiff_donor_ref.txt"
selftag = read.csv(selftag_filename, sep = "\t", row.names = 1)
# retain only genes that are present more than once
dupl = selftag[selftag$feature %in% selftag$feature[duplicated(selftag$feature)],]
dim(selftag)
dim(dupl)
dupl = dupl[order(dupl$feature),]
head(dupl)
# Strip braces and replace stage tags with "-" to normalise GWAS annotations
# into "snp-gene" style eQTL identifiers.
dupl$eQTL = gsub("\\}","",gsub("\\{","",gsub(":ips:","-",gsub(":mesendo:","-",gsub(":defendo:","-",dupl$GWAS_Annotation)))))
head(dupl)
# Pass 1: mark genes whose lead SNP is identical across all stages as "F"
# (no lead switch); everything else stays "not_sure" for pass 2.
dupl$lead.switch = "not_sure"
for(i in 1:length(unique(dupl$feature))){
    # print (i)
    gene = unique(dupl$feature)[i]
    res = dupl[dupl$feature == gene,]
    # Two stages sharing one lead SNP -> no switch.
    if(nrow(res) == 2 & res$snp_id[1] == res$snp_id[2]){
        dupl$lead.switch[dupl$feature == gene] <- "F"
    }
    # Three stages all sharing the same lead SNP -> no switch.
    if(nrow(res) == 3 & res$snp_id[1] == res$snp_id[2] & res$snp_id[1] == res$snp_id[3]){
        dupl$lead.switch[dupl$feature == gene] <- "F"
    }
}
# Candidate genes whose lead SNP may differ between stages.
dupl_diffsnps = dupl[dupl$lead.switch != "F",]
dim(dupl)
dim(dupl_diffsnps)
# Pass 2: mark a gene as a confirmed lead switch ("T") when every stage's
# annotation is a single eQTL and each stage's own lead SNP matches it.
for(i in 1:length(unique(dupl_diffsnps$feature))){
    # print (i)
    gene = unique(dupl_diffsnps$feature)[i]
    res = dupl_diffsnps[dupl_diffsnps$feature == gene,]
    # Two-stage case: both annotations must be single entries.
    if(nrow(res) == 2 &
       length(unlist(strsplit(res$eQTL[1],";"))) == 1 &
       length(unlist(strsplit(res$eQTL[2],";"))) == 1){
        # print(res[,c("snp_id","feature","eQTL")])
        # BUGFIX: second comparison used res[3, "snp_id"] — but res has only
        # two rows in this branch, so that is NA and if() fails on a missing
        # value. The intended row is res[2, ].
        if(paste0(res[1,"snp_id"],"-",res[1,"feature"]) == res[1,"eQTL"] |
           paste0(res[2,"snp_id"],"-",res[2,"feature"]) == res[2,"eQTL"]){
            dupl$lead.switch[dupl$feature == gene] <- "T"
        }
    }
    # Three-stage case: all three annotations single, all three must match.
    if(nrow(res) == 3 &
       length(unlist(strsplit(res$eQTL[1],";"))) == 1 &
       length(unlist(strsplit(res$eQTL[2],";"))) == 1 &
       length(unlist(strsplit(res$eQTL[3],";"))) == 1){
        # print(res[,c("snp_id","feature","eQTL")])
        if(paste0(res[1,"snp_id"],"-",res[1,"feature"]) == res[1,"eQTL"] &
           paste0(res[2,"snp_id"],"-",res[2,"feature"]) == res[2,"eQTL"] &
           paste0(res[3,"snp_id"],"-",res[3,"feature"]) == res[3,"eQTL"]){
            dupl$lead.switch[dupl$feature == gene] <- "T"
        }
    }
}
# Genes flagged as confirmed lead-SNP switches.
lead_switches = dupl[dupl$lead.switch == "T",]
# Pick one example gene (index 33) for the locus plot below.
i = 33
gene = unique(lead_switches$feature)[i]
gene
# Gene-body coordinates used to draw the gene rectangle (CPZ locus,
# per the annotate() label further down).
gene_start = 8594387
gene_end = 8621488
res = lead_switches[lead_switches$feature == gene,]
res
# Colour palette: one colour per differentiation day and per stage.
col_day0 = canva_pal("Pool party")(4)[1]
col_day1 = canva_pal("Pool party")(4)[2]
col_day2 = canva_pal("Pool party")(4)[3]
col_day3 = canva_pal("Pool party")(4)[4]
col_ips = canva_pal("Modern and clean")(4)[2]
col_mesendo = canva_pal("Modern and clean")(4)[4]
col_defendo = canva_pal("Warm tones")(4)[3]
# Stack per-stage association results for this gene into one data frame.
df0 = res.ips[res.ips$feature == gene,]
df0$stage = "ips"
df1 = res.mes[res.mes$feature == gene,]
df1$stage = "mesendo"
df2 = res.defendo[res.defendo$feature == gene,]
df2$stage = "defendo"
df = rbind(df0,df1,df2)
# Locus zoom plot: -log10(P) per SNP, coloured by stage, with lead SNPs
# marked by vertical lines/diamonds and annotation tracks drawn below y=0.
m = min(df$p_value)
p = ggplot(df, aes(x = pos, y = -log10(p_value), colour = stage))
p = p + geom_point(size = 1, alpha = 0.8)
p = p + scale_colour_manual(values = c(col_defendo,col_ips,col_mesendo))
p = p + xlab(paste0("genomic position on Chr", df$chrom[1])) + ylab("-log10(P)")
# Negative y-range leaves room for the gene/chromatin tracks below the data.
p = p + ylim(c(-3,-log10(m)+0.2))
# p = p + ggtitle(gene)
# Vertical lines at each stage's lead SNP position.
p = p + geom_vline(xintercept = res[res$stage == "defendo","snp_position"], col = col_defendo, alpha = 0.4, size = 1)
p = p + geom_vline(xintercept = res[res$stage == "mesendo","snp_position"], col = col_mesendo, alpha = 0.4, size = 1)
p = p + geom_vline(xintercept = res[res$stage == "ips","snp_position"], col = col_ips, alpha = 0.4, size = 1)
# Diamond markers at the lead SNPs' mean p-values.
p = p + geom_point(aes(x = res[res$stage == "defendo","snp_position"],
                       y = -log10(res[res$stage == "defendo","p_value.mean"])), col = col_defendo, pch = 23,
                   size = 3, fill = col_defendo, alpha = 0.6)
p = p + geom_point(aes(x = res[res$stage == "ips","snp_position"],
                       y = -log10(res[res$stage == "ips","p_value.mean"])), col = col_ips, pch = 23,
                   size = 3, fill = col_ips, alpha = 0.6)
# Gene-body rectangle.
p = p + geom_rect(mapping = aes(xmin = gene_start, xmax = gene_end, ymin = -0.7, ymax = -0.3),
                  color = "black", alpha = 0.2, fill = "lightgrey")
# p = p + geom_rect(mapping = aes(xmin = min(df$pos), xmax = max(df$pos), ymin = -3, ymax = -1),
#                  color = "white", fill = "gray89")
# Per-day annotation boxes (+/- 3kb) around the defendo lead SNP...
p = p + geom_rect(mapping = aes(xmin = res[res$stage == "defendo","snp_position"]-3000,
                                xmax = res[res$stage == "defendo","snp_position"]+3000, ymin = -1.5, ymax = -1.2),
                  color = col_day3, fill = col_day3)
p = p + geom_rect(mapping = aes(xmin = res[res$stage == "defendo","snp_position"]-3000,
                                xmax = res[res$stage == "defendo","snp_position"]+3000, ymin = -2, ymax = -1.7),
                  color = col_day2, fill = col_day2)
p = p + geom_rect(mapping = aes(xmin = res[res$stage == "defendo","snp_position"]-3000,
                                xmax = res[res$stage == "defendo","snp_position"]+3000, ymin = -2.5, ymax = -2.2),
                  color = col_day1, fill = col_day1)
p = p + geom_rect(mapping = aes(xmin = res[res$stage == "defendo","snp_position"]-3000,
                                xmax = res[res$stage == "defendo","snp_position"]+3000, ymin = -3, ymax = -2.7),
                  color = col_day0, fill = col_day0)
# ...and around the ips lead SNP.
p = p + geom_rect(mapping = aes(xmin = res[res$stage == "ips","snp_position"]-3000,
                                xmax = res[res$stage == "ips","snp_position"]+3000, ymin = -2, ymax = -1.7),
                  color = col_day2, fill = col_day2)
p = p + geom_rect(mapping = aes(xmin = res[res$stage == "ips","snp_position"]-3000,
                                xmax = res[res$stage == "ips","snp_position"]+3000, ymin = -2.5, ymax = -2.2),
                  color = col_day1, fill = col_day1)
p = p + geom_rect(mapping = aes(xmin = res[res$stage == "ips","snp_position"]-3000,
                                xmax = res[res$stage == "ips","snp_position"]+3000, ymin = -3, ymax = -2.7),
                  color = col_day0, fill = col_day0)
# Text labels for the gene, the day tracks, and the chromatin mark.
p <- p + annotate("text", x = gene_end + 13000, y = -0.5, label = "CPZ", col = "black", size = 6)
p <- p + annotate("text", x = res[res$stage == "defendo","snp_position"]-18000, y = -1.4, label = "day3",
                  col = col_day3, size = 4.5)
p <- p + annotate("text", x = res[res$stage == "defendo","snp_position"]-18000, y = -1.9, label = "day2",
                  col = col_day2, size = 4.5)
p <- p + annotate("text", x = res[res$stage == "defendo","snp_position"]-18000, y = -2.4, label = "day1",
                  col = col_day1, size = 4.5)
p <- p + annotate("text", x = res[res$stage == "defendo","snp_position"]-18000, y = -2.9, label = "day0",
                  col = col_day0, size = 4.5)
p <- p + annotate("text", x = res[res$stage == "ips","snp_position"]+60000, y = -2, label = "H3K4me1",
                  col = "black", size = 5)
options(repr.plot.width = 8, repr.plot.height = 5)
p = p + theme_classic()
p + theme(legend.position = "none", text = element_text(size = 20))
# axis.ticks.y = element_blank(),
# # panel.border = element_rect(colour = "gray88", fill = NA, size = 0.75),
# panel.grid.major = element_line(size = 0.5, linetype = 'solid', colour = "gray95"),
# panel.grid.minor = element_line(size = 0.25, linetype = 'solid',colour = "gray88"))
```
| github_jupyter |
### Numba does something quite different
[Numba](http://numba.pydata.org/) is a library that enables just-in-time (JIT)
compiling of Python code. It uses the [LLVM](http://llvm.org/) tool chain to do
this. Briefly, LLVM takes an intermediate representation of your code
and compiles it down to highly optimized machine code, as the code is running.
That means that if you can get to this IR, you can get your code to run really
fast.
Numba is the bridge between the Python code and this intermediate
representation. Along the way, it does some clever type inference (for example,
if the code can take different types as input, integers vs. floats for example),
which allows it to be even faster. And there is a bunch of additional
cleverness. In particular, Numba is designed with scientific/numerical code in
mind, so it can sometimes leverage the fact that you are using Numpy. But we
won't get into that here.
### Installing
Numba can be installed using `conda`:
```
conda install numba
```
### Just-in-time compiling
Let's look again at the Fibonacci example we used before:
```
def fib(n):
    """Advance a Fibonacci pair n times and return the leading value."""
    lead, trail = 1, 1
    for _ in range(n):
        lead, trail = lead + trail, lead
    return lead
```
To get it to just-in-time compile on the first time it's run, we use Numba's
`jit` function:
```
from numba import jit
# Call form of jit(): wraps fib; compilation happens on the first call.
fibj = jit(fib)
```
Comparing timings we see a roughly ten-fold speedup. But note that we have to run this more than once to see this speedup. This is because the first time the code is run, it is compiled.
```
%timeit fibj(10)
%timeit fib(10)
```
Another way to use `jit` is as a decorator:
```
# Same Fibonacci loop as fib above, JIT-compiled via the decorator form.
@jit
def fibj(n):
    a, b = 1, 1
    for i in range(n):
        a, b = a+b, a
    return a
```
> ## Python Decorators
>
> Decorators are a way to uniformly modify functions in a particular way. You
> can think of them as functions that take functions as input and produce a
> function as output (as explained on [this](http://matthew-brett.github.io/pydagogue/decorating_for_dummies.html) page by Matthew Brett).
>
> But the most concise explanation (as pointed out by MB) actually comes
> from the [Python reference documentation](https://docs.python.org/3/reference/compound_stmts.html#function-definitions):
>
> A function definition may be wrapped by one or more
> [*decorator*](http://docs.python.org/glossary.html#term-decorator)
> expressions. Decorator expressions are evaluated when the function is
> defined, in the scope that contains the function definition. The result
> must be a callable, which is invoked with the function object as the
> only argument. The returned value is bound to the function name
> instead of the function object. Multiple decorators are applied in
> nested fashion. For example, the following code:
>
> @f1(arg)
> @f2
> def func(): pass
>
> is equivalent to:
>
> def func(): pass
> func = f1(arg)(f2(func))
>
> As pointed out there, they are not limited necessarily to function
> definitions, and [can also be used on class definitions](https://docs.python.org/3/reference/compound_stmts.html#class-definitions).
{: .callout}
## How does Numba work?
To understand a little bit about how Numba works, let's see where it fails to
work. Let's rewrite the `fib` function using a custom Python object:
```
class MyInt(object):
    """Minimal box holding one integer in the ``int`` attribute."""

    def __init__(self, n):
        self.int = n


def fib_obj(n):
    """Fibonacci iteration identical to ``fib``, but shuffling boxed MyInt values."""
    lead, trail = MyInt(1), MyInt(1)
    for _ in range(n.int):
        lead.int, trail.int = lead.int + trail.int, lead.int
    return lead.int
```
This looks odd, but it works in the same way that the function above does.
It's a bit slower, though (why do you think that is?).
Now, let's try to speed this up with Numba:
```
# JIT-compiling code that manipulates a custom Python object: Numba cannot
# infer native types for MyInt, so it falls back to (slower) object mode.
@jit
def fib_obj_j(n):
    a, b = MyInt(1), MyInt(1)
    for i in range(n.int):
        a.int, b.int = a.int+b.int, a.int
    return a.int
```
Timing this, we find it to be substantially *slower* than the non-jitted Python
version. The reason for this is that Numba is unable to do any type inference
here. Instead it defaults to the non-optimized Python code.
```
%timeit fib_obj_j(MyInt(10))
```
### What is it really good for?
Let's look at an example where Numba really shines (taken from [a blog post by Jake Vanderplas](https://jakevdp.github.io/blog/2012/08/08/memoryview-benchmarks/)).
Consider a numpy function to calculate the pairwise Euclidean distances between
two sets of coordinates:
```
def pdist_numpy(xs):
    """Pairwise Euclidean distances between the rows of `xs`, via broadcasting."""
    diffs = xs[:, None, :] - xs
    squared_dists = (diffs ** 2).sum(-1)
    return np.sqrt(squared_dists)
```
We can use Numba to get this function to JIT (notice this is another way of `jit`-ing):
```
pdist_numba = jit(pdist_numpy)
```
Let's compare to Cython as well (this also demonstrates how to use numpy in
Cython code!):
```
%load_ext cython
%%cython
# NOTE(review): `cnp` is plain numpy under an alias (a Python-level import,
# not a cimport), so this still goes through the numpy Python API.
import numpy as cnp
def pdistx(xs):
    return cnp.sqrt(((xs[:,None,:] - xs)**2).sum(-1))
```
Timing all of these:
```
import numpy as np
# Benchmark all three implementations; `-o` captures the TimeitResult object.
time_pdist_numpy = %timeit -o pdist_numpy(np.random.randn(5, 100))
time_pdist_numba = %timeit -o pdist_numba(np.random.randn(5, 100))
time_pdistx = %timeit -o pdistx(np.random.randn(5, 100))
# Speed ratios relative to numpy (values > 1 mean faster than numpy).
print(time_pdist_numpy.best/time_pdist_numba.best)
print(time_pdist_numpy.best/time_pdistx.best)
```
We can see that we really can't beat numpy doing any of these.
But consider the following naive implementation of the same function in Python:
```
def pdist_python(xs):
    """Naive triple-loop pairwise Euclidean distance matrix for the rows of `xs`."""
    n_rows, n_cols = xs.shape
    dists = np.empty((n_rows, n_rows))
    for row_a in range(n_rows):
        for row_b in range(n_rows):
            acc = 0.0
            for col in range(n_cols):
                delta = xs[row_a, col] - xs[row_b, col]
                acc += delta * delta
            dists[row_a, row_b] = acc ** 0.5
    return dists
# Time the pure-Python version and compare against numpy (ratio < 1: slower).
time_pdist_python = %timeit -o pdist_python(np.random.randn(5, 100))
print(time_pdist_python.best/time_pdist_numpy.best)
```
This is terrible! But this function can be substantially sped up with `jit`:
```
# JIT the naive loop version; Numba excels on nested scalar loops like this.
pdist_python_numba = jit(pdist_python)
time_pdist_python_numba = %timeit -o pdist_python_numba(np.random.randn(5, 100))
print(time_pdist_numpy.best/time_pdist_python_numba.best)
```
This is tremendously powerful, because there are many physical and statistical
functions that would be very hard to vectorize. Instead, in Numba, you are
*encouraged* to use nested loops, because Numba can leverage these, together
with type inference to do things blazingly fast.
### Using annotations
It is possible to use annotations in Numba as well.
To do that, we will need to create a Python module of our code (say `fib.py`)
and then add a `__main__` block, so that the code gets run (remember, it's
just-in-time compilation!) and can be annotated. For example:
```
# Contents of fib.py — run under `numba --annotate-html` to inspect inference.
from numba import jit
class MyInt(object):
    def __init__(self, n):
        self.int = n
# Object-mode candidate: Numba cannot type MyInt attributes.
@jit
def fib_obj(n):
    a, b = MyInt(1), MyInt(1)
    for i in range(n.int):
        a.int, b.int = a.int+b.int, a.int
    return a.int
# Fully typeable integer loop: Numba compiles this to native code.
@jit
def fib(n):
    a, b = 1, 1
    for i in range(n):
        a, b = a + b, a
    return a
# The main block actually executes (and therefore compiles) both functions
# when the file is run as a script, which the annotation tooling requires.
if __name__ == "__main__":
    fib_obj(MyInt(10))
    fib(10)
```
> ### What does `if __name__ == "__main__":` do?
>
> This block can be added to a Python module to make it work both as a module
> and as a script. When a module is imported all of the function definitions
> in this module get executed, but the code in the functions does not get run.
> If, however, there is some code that is just sitting there at the top level of the module, it will be run whenever the module is imported.
>
> Defining a main block (fencing it inside an indented block under the
> `if __name__ == "__main__":` statement) allows us to define a specific part
> of the code that does not get run on importing, but does get run when the
> code is interpreted. For example, when `python fib.py` is called from the
> command line
>
Annotations can then be done using:
```
numba --annotate-html fib.html fib.py
```
In this case, the code that interacts with Python objects that can't be
optimized is marked in red. If you click on the `show numba IR` text, you can
view the intermediate representation used by Numba to pass to LLVM. In general,
the more you see `pyobject` in there, the less Numba can do in terms of type
inference to optimize your code. But whenever you see types inferred (e.g.
`int64`), the better Numba can do.
> # Numba Annotations
>
> Annotate the code we used for Euclidean distance calculations. Can you point
> out in the annotation why Numba-izing the naive implementation works better
> than operating on the Numpy-based implementation?
>
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Text classification with an RNN
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/text/text_classification_rnn"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/text/text_classification_rnn.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/text/text_classification_rnn.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/text/text_classification_rnn.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
This text classification tutorial trains a [recurrent neural network](https://developers.google.com/machine-learning/glossary/#recurrent_neural_network) on the [IMDB large movie review dataset](http://ai.stanford.edu/~amaas/data/sentiment/) for sentiment analysis.
## Setup
```
# Install TFDS, then import the stack used throughout this tutorial.
!pip install -q tensorflow_datasets
import numpy as np
import tensorflow_datasets as tfds
import tensorflow as tf
# Keep notebook output clean during dataset downloads.
tfds.disable_progress_bar()
```
Import `matplotlib` and create a helper function to plot graphs:
```
import matplotlib.pyplot as plt
def plot_graphs(history, metric):
  """Plot a training metric and its validation counterpart from a Keras History."""
  val_key = 'val_' + metric
  plt.plot(history.history[metric])
  plt.plot(history.history[val_key], '')
  plt.xlabel("Epochs")
  plt.ylabel(metric)
  plt.legend([metric, val_key])
```
## Setup input pipeline
The IMDB large movie review dataset is a *binary classification* dataset—all the reviews have either a *positive* or *negative* sentiment.
Download the dataset using [TFDS](https://www.tensorflow.org/datasets). See the [loading text tutorial](../load_data/text.ipynb) for details on how to load this sort of data manually.
```
# Download IMDB reviews as supervised (text, label) pairs plus metadata.
dataset, info = tfds.load('imdb_reviews', with_info=True,
                          as_supervised=True)
train_dataset, test_dataset = dataset['train'], dataset['test']
# Inspect the element structure (text/label tensor specs).
train_dataset.element_spec
```
Initially this returns a dataset of (text, label) pairs:
```
# Peek at one raw (text, label) example.
for example, label in train_dataset.take(1):
  print('text: ', example.numpy())
  print('label: ', label.numpy())
```
Next shuffle the data for training and create batches of these `(text, label)` pairs:
```
BUFFER_SIZE = 10000
BATCH_SIZE = 64
# Shuffle, batch, and prefetch for training throughput.
train_dataset = train_dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE)
test_dataset = test_dataset.batch(BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE)
# Show the first few texts/labels of one batch.
for example, label in train_dataset.take(1):
  print('texts: ', example.numpy()[:3])
  print()
  print('labels: ', label.numpy()[:3])
```
## Create the text encoder
The raw text loaded by `tfds` needs to be processed before it can be used in a model. The simplest way to process text for training is using the `experimental.preprocessing.TextVectorization` layer. This layer has many capabilities, but this tutorial sticks to the default behavior.
Create the layer, and pass the dataset's text to the layer's `.adapt` method:
```
# Cap the vocabulary at 1000 tokens; adapt() builds it from the training text.
VOCAB_SIZE=1000
encoder = tf.keras.layers.experimental.preprocessing.TextVectorization(
    max_tokens=VOCAB_SIZE)
encoder.adapt(train_dataset.map(lambda text, label: text))
```
The `.adapt` method sets the layer's vocabulary. Here are the first 20 tokens. After the padding and unknown tokens they're sorted by frequency:
```
# First 20 vocabulary entries (padding/unknown tokens first, then by frequency).
vocab = np.array(encoder.get_vocabulary())
vocab[:20]
```
Once the vocabulary is set, the layer can encode text into indices. The tensors of indices are 0-padded to the longest sequence in the batch (unless you set a fixed `output_sequence_length`):
```
# Encode a few raw strings into 0-padded token-index arrays.
encoded_example = encoder(example)[:3].numpy()
encoded_example
```
With the default settings, the process is not completely reversible. There are two main reasons for that:
1. The default value for `preprocessing.TextVectorization`'s `standardize` argument is `"lower_and_strip_punctuation"`.
2. The limited vocabulary size and lack of character-based fallback results in some unknown tokens.
```
# Round-trip a few examples to show the encoding is lossy (lowercasing, OOV).
for n in range(3):
  print("Original: ", example[n].numpy())
  print("Round-trip: ", " ".join(vocab[encoded_example[n]]))
  print()
```
## Create the model

Above is a diagram of the model.
1. This model can be built as a `tf.keras.Sequential`.
1. The first layer is the `encoder`, which converts the text to a sequence of token indices.
2. After the encoder is an embedding layer. An embedding layer stores one vector per word. When called, it converts the sequences of word indices to sequences of vectors. These vectors are trainable. After training (on enough data), words with similar meanings often have similar vectors.
This index-lookup is much more efficient than the equivalent operation of passing a one-hot encoded vector through a `tf.keras.layers.Dense` layer.
3. A recurrent neural network (RNN) processes sequence input by iterating through the elements. RNNs pass the outputs from one timestep to their input on the next timestep.
The `tf.keras.layers.Bidirectional` wrapper can also be used with an RNN layer. This propagates the input forward and backwards through the RNN layer and then concatenates the final output.
* The main advantage to a bidirectional RNN is that the signal from the beginning of the input doesn't need to be processed all the way through every timestep to affect the output.
* The main disadvantage of a bidirectional RNN is that you can't efficiently stream predictions as words are being added to the end.
1. After the RNN has converted the sequence to a single vector the two `layers.Dense` do some final processing, and convert from this vector representation to a single logit as the classification output.
The code to implement this is below:
```
# Encoder -> masked embedding -> bidirectional LSTM -> dense head -> one logit.
model = tf.keras.Sequential([
    encoder,
    tf.keras.layers.Embedding(
        input_dim=len(encoder.get_vocabulary()),
        output_dim=64,
        # Use masking to handle the variable sequence lengths
        mask_zero=True),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(1)
])
```
Please note that Keras sequential model is used here since all the layers in the model only have single input and produce single output. In case you want to use stateful RNN layer, you might want to build your model with Keras functional API or model subclassing so that you can retrieve and reuse the RNN layer states. Please check [Keras RNN guide](https://www.tensorflow.org/guide/keras/rnn#rnn_state_reuse) for more details.
The embedding layer [uses masking](../../guide/keras/masking_and_padding) to handle the varying sequence-lengths. All the layers after the `Embedding` support masking:
```
print([layer.supports_masking for layer in model.layers])
```
To confirm that this works as expected, evaluate a sentence twice. First, alone so there's no padding to mask:
```
# predict on a sample text without padding.
# Single-element batch: nothing is padded, so masking has no effect here.
sample_text = ('The movie was cool. The animation and the graphics '
               'were out of this world. I would recommend this movie.')
predictions = model.predict(np.array([sample_text]))
print(predictions[0])
```
Now, evaluate it again in a batch with a longer sentence. The result should be identical:
```
# predict on a sample text with padding
# The long second element forces sample_text to be 0-padded in the batch;
# with mask_zero=True the prediction should match the unpadded run above.
padding = "the " * 2000
predictions = model.predict(np.array([sample_text, padding]))
print(predictions[0])
```
Compile the Keras model to configure the training process:
```
# Binary cross-entropy on raw logits (from_logits=True); Adam with small LR.
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              optimizer=tf.keras.optimizers.Adam(1e-4),
              metrics=['accuracy'])
```
## Train the model
```
# Train, evaluate on the held-out test set, then plot the learning curves.
history = model.fit(train_dataset, epochs=10,
                    validation_data=test_dataset,
                    validation_steps=30)
test_loss, test_acc = model.evaluate(test_dataset)
print('Test Loss: {}'.format(test_loss))
print('Test Accuracy: {}'.format(test_acc))
plt.figure(figsize=(16,8))
plt.subplot(1,2,1)
plot_graphs(history, 'accuracy')
plt.ylim(None,1)
plt.subplot(1,2,2)
plot_graphs(history, 'loss')
plt.ylim(0,None)
```
Run a prediction on a new sentence:
If the prediction is >= 0.0, it is positive else it is negative.
```
# A logit >= 0 indicates positive sentiment; < 0 indicates negative.
sample_text = ('The movie was cool. The animation and the graphics '
               'were out of this world. I would recommend this movie.')
predictions = model.predict(np.array([sample_text]))
```
## Stack two or more LSTM layers
Keras recurrent layers have two available modes that are controlled by the `return_sequences` constructor argument:
* If `False` it returns only the last output for each input sequence (a 2D tensor of shape (batch_size, output_features)). This is the default, used in the previous model.
* If `True` the full sequences of successive outputs for each timestep is returned (a 3D tensor of shape `(batch_size, timesteps, output_features)`).
Here is what the flow of information looks like with `return_sequences=True`:

The interesting thing about using an `RNN` with `return_sequences=True` is that the output still has 3-axes, like the input, so it can be passed to another RNN layer, like this:
```
# Stacked variant: the first BiLSTM returns full sequences (3D output) so a
# second BiLSTM can consume them; dropout is added before the final logit.
model = tf.keras.Sequential([
    encoder,
    tf.keras.layers.Embedding(len(encoder.get_vocabulary()), 64, mask_zero=True),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64,  return_sequences=True)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(1)
])
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              optimizer=tf.keras.optimizers.Adam(1e-4),
              metrics=['accuracy'])
history = model.fit(train_dataset, epochs=10,
                    validation_data=test_dataset,
                    validation_steps=30)
test_loss, test_acc = model.evaluate(test_dataset)
print('Test Loss: {}'.format(test_loss))
print('Test Accuracy: {}'.format(test_acc))
# predict on a sample text without padding.
sample_text = ('The movie was not good. The animation and the graphics '
               'were terrible. I would not recommend this movie.')
predictions = model.predict(np.array([sample_text]))
print(predictions)
plt.figure(figsize=(16,6))
plt.subplot(1,2,1)
plot_graphs(history, 'accuracy')
plt.subplot(1,2,2)
plot_graphs(history, 'loss')
```
Check out other existing recurrent layers such as [GRU layers](https://www.tensorflow.org/api_docs/python/tf/keras/layers/GRU).
If you're interested in building custom RNNs, see the [Keras RNN Guide](../../guide/keras/rnn.ipynb).
| github_jupyter |
```
import torch
from torch import optim
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
from torch.autograd import Variable
from sklearn.preprocessing import OneHotEncoder
import os, math, glob, argparse
from utils.torch_utils import *
from utils.utils import *
from apa_predictor_pytorch import *
import matplotlib.pyplot as plt
import utils.language_helpers
#plt.switch_backend('agg')
import numpy as np
from models import *
from wgan_gp_apa_analyzer import *
# Run on the first GPU when available, otherwise fall back to CPU.
use_cuda = torch.cuda.is_available()
device = torch.device('cuda:0' if use_cuda else 'cpu')
from torch.distributions import Normal as torch_normal
class IdentityEncoder:
    """One-hot encoder/decoder over a fixed character-to-channel map.

    channel_map maps characters to channel indices, e.g.
    {'A': 0, 'C': 1, 'G': 2, 'T': 3}; characters outside the map encode
    to all-zero rows.
    """

    def __init__(self, seq_len, channel_map):
        self.seq_len = seq_len
        self.n_channels = len(channel_map)
        self.encode_map = channel_map
        # Inverse map: channel index -> character. (The original built the
        # same dict with swapped loop-variable names, which read backwards.)
        self.decode_map = {
            ix: nt for nt, ix in self.encode_map.items()
        }

    def encode(self, seq):
        """Return a (seq_len, n_channels) one-hot matrix for `seq`."""
        encoding = np.zeros((self.seq_len, self.n_channels))
        self.encode_inplace(seq, encoding)
        return encoding

    def encode_inplace(self, seq, encoding):
        """Write the one-hot encoding of `seq` into a preallocated matrix."""
        for i in range(len(seq)):
            if seq[i] in self.encode_map:
                channel_ix = self.encode_map[seq[i]]
                encoding[i, channel_ix] = 1.

    def encode_inplace_sparse(self, seq, encoding_mat, row_index):
        # BUGFIX: was `raise NotImplementError()` — a NameError at call time;
        # the builtin is NotImplementedError.
        raise NotImplementedError()

    def decode(self, encoding):
        """Return the per-position argmax character string for `encoding`."""
        seq = ''
        for pos in range(0, encoding.shape[0]):
            argmax_nt = np.argmax(encoding[pos, :])
            seq += self.decode_map[argmax_nt]
        return seq

    def decode_sparse(self, encoding_mat, row_index):
        # BUGFIX: same NotImplementError -> NotImplementedError correction.
        raise NotImplementedError()
class ActivationMaximizer(nn.Module) :
    """Generator + predictor stack for activation maximization.

    Maps a latent z through a pretrained GAN generator, optionally clamps
    fixed template positions, and scores the result with an APA predictor.
    Relies on Generator_lang / APAClassifier defined in imported modules.
    """
    def __init__(self, generator_dir, batch_size=1, seq_len=205, latent_size=128, sequence_template=None):
        super(ActivationMaximizer, self).__init__()
        self.generator = Generator_lang(4, seq_len, batch_size, 512)
        self.predictor = APAClassifier(batch_size=batch_size).cnn
        self.load_generator(generator_dir)
        self.use_cuda = torch.cuda.is_available()
        self.x_mask = None
        self.x_template = None
        # Build mask/template one-hots: 'N' positions are free (mask=1),
        # fixed letters are forced via the template added after masking.
        if sequence_template is not None :
            onehot_mask = np.zeros((seq_len, 4))
            onehot_template = np.zeros((seq_len, 4))
            for j in range(len(sequence_template)) :
                if sequence_template[j] == 'N' :
                    onehot_mask[j, :] = 1.
                elif sequence_template[j] == 'A' :
                    onehot_template[j, 0] = 1.
                elif sequence_template[j] == 'C' :
                    onehot_template[j, 1] = 1.
                elif sequence_template[j] == 'G' :
                    onehot_template[j, 2] = 1.
                elif sequence_template[j] == 'T' :
                    onehot_template[j, 3] = 1.
            self.x_mask = Variable(torch.FloatTensor(onehot_mask).unsqueeze(0))
            self.x_template = Variable(torch.FloatTensor(onehot_template).unsqueeze(0))
            if self.use_cuda :
                self.x_mask = self.x_mask.to(device)
                self.x_template = self.x_template.to(device)
        if self.use_cuda :
            self.generator.cuda()
            self.predictor.cuda()
            self.cuda()
    def load_generator(self, directory, iteration=None) :
        """Load the most recently written G*.pth checkpoint from `directory`."""
        # NOTE(review): `iteration` is accepted but unused — always loads the
        # newest checkpoint by file ctime.
        list_generator = glob.glob(directory + "G*.pth")
        generator_file = max(list_generator, key=os.path.getctime)
        self.generator.load_state_dict(torch.load(generator_file))
    def forward(self, z) :
        """Generate from z, apply the template clamp, and return predictor scores."""
        x = self.generator.forward(z)
        if self.x_mask is not None :
            x = x * self.x_mask + self.x_template
        return self.predictor.forward(x.unsqueeze(2).transpose(1, 3))
    def get_pattern(self, z) :
        """Return the clamped generator output (one-hot-like pattern) for z."""
        x = self.generator.forward(z)
        if self.x_mask is not None :
            x = x * self.x_mask + self.x_template
        return x
#Sequence length
seq_len = 205
batch_size = 64
#Sequence decoder
acgt_encoder = IdentityEncoder(seq_len, {'A':0, 'C':1, 'G':2, 'T':3})
#Sequence template
# 'N' positions are free to optimize; fixed letters are clamped by the mask.
sequence_template = 'TCCCTACACGACGCTCTTCCGATCTNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNAATAAANNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNAATAAATTGTTCGTTGGTCGGCTTGAGTGCGTGTGTCTCGTTTAGATGCTGCGCCTAACCCTAAGCAGATTCTTCATGCAATTG'
#Activation maximization model (pytorch)
act_maximizer = ActivationMaximizer(batch_size=batch_size, seq_len=seq_len, generator_dir='./checkpoint/' + 'apa_simple_sample' + '/', sequence_template=sequence_template)
#Function for optimizing n sequences for a target predictor
def optimize_sequences(act_maximizer, n_seqs, batch_size=1, latent_size=128, n_iters=100, eps1=0., eps2=0.1, noise_std=1e-6, use_adam=True, run_name='default', store_intermediate_n_seqs=None, store_every_iter=100) :
z = Variable(torch.randn(batch_size, latent_size, device="cuda"), requires_grad=True)
norm_var = torch_normal(0, 1)
optimizer = None
if use_adam :
optimizer = optim.Adam([z], lr=eps2)
else :
optimizer = optim.SGD([z], lr=1)
z.register_hook(lambda grad, batch_size=batch_size, latent_size=latent_size, noise_std=noise_std: grad + noise_std * torch.randn(batch_size, latent_size, device="cuda"))
seqs = []
fitness_histo = []
n_batches = n_seqs // batch_size
for batch_i in range(n_batches) :
if batch_i % 4 == 0 :
print("Optimizing sequence batch " + str(batch_i))
#Re-initialize latent GAN seed
z.data = torch.randn(batch_size, latent_size, device="cuda")
fitness_scores_batch = [act_maximizer(z)[:, 0].data.cpu().numpy().reshape(-1, 1)]
for curr_iter in range(n_iters) :
fitness_score = act_maximizer(z)[:, 0]
fitness_loss = -torch.sum(fitness_score)
z_prior = -torch.sum(norm_var.log_prob(z))
loss = None
if use_adam :
loss = fitness_loss
else :
loss = eps1 * z_prior + eps2 * fitness_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
fitness_scores_batch.append(fitness_score.data.cpu().numpy().reshape(-1, 1))
if store_intermediate_n_seqs is not None and batch_i * batch_size < store_intermediate_n_seqs and curr_iter % store_every_iter == 0 :
onehot_batch = act_maximizer.get_pattern(z).data.cpu().numpy()
seq_batch = [
acgt_encoder.decode(onehot_batch[k]) for k in range(onehot_batch.shape[0])
]
with open(run_name + "_curr_iter_" + str(curr_iter) + ".txt", "a+") as f :
for i in range(len(seq_batch)) :
seq = seq_batch[i]
f.write(seq + "\n")
onehot_batch = act_maximizer.get_pattern(z).data.cpu().numpy()
seq_batch = [
acgt_encoder.decode(onehot_batch[k]) for k in range(onehot_batch.shape[0])
]
seqs.extend(seq_batch)
fitness_histo.append(np.concatenate(fitness_scores_batch, axis=1))
fitness_histo = np.concatenate(fitness_histo, axis=0)
return seqs, fitness_histo
# Total sequences to optimize and gradient iterations per batch.
n_seqs = 960
n_iters = 1000
# File-name prefix that encodes the run configuration.
run_name = 'killoran_apa_' + str(n_seqs) + "_sequences" + "_" + str(n_iters) + "_iters"
# Run activation maximization; intermediate sequences for all 960 seeds
# are dumped every 100 iterations (see optimize_sequences).
seqs, fitness_scores = optimize_sequences(
    act_maximizer,
    n_seqs,
    batch_size=64,
    latent_size=128,
    n_iters=n_iters,
    eps1=0.,
    eps2=0.1,
    noise_std=1e-6,
    use_adam=True,
    run_name=run_name,
    store_intermediate_n_seqs=960,
    store_every_iter=100
)
#Plot fitness statistics of optimization runs
#Plot k trajectories
# Overlay the per-iteration fitness of the first few optimized sequences.
plot_n_traj = 10
f = plt.figure(figsize=(8, 6))
for i in range(min(plot_n_traj, n_seqs)) :
    plt.plot(fitness_scores[i, :], linewidth=2, alpha=0.75)
plt.xlabel("Training iteration", fontsize=14)
plt.ylabel("Fitness score", fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlim(0, n_iters)
plt.ylim(0, 1)
plt.tight_layout()
plt.show()
#Plot mean trajectory
# Average fitness across all optimized sequences at each iteration.
f = plt.figure(figsize=(8, 6))
plt.plot(np.mean(fitness_scores, axis=0), linewidth=2, alpha=0.75)
plt.xlabel("Training iteration", fontsize=14)
plt.ylabel("Fitness score", fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlim(0, n_iters)
plt.ylim(0, 1)
plt.tight_layout()
plt.show()
#Save sequences to file
# One decoded sequence per line.
with open(run_name + ".txt", "wt") as f :
    for i in range(len(seqs)) :
        seq = seqs[i]
        f.write(seq + "\n")
```
| github_jupyter |
# Data Manipulation in R using `dplyr`
## What is `dplyr`?
dplyr is a new package which provides a set of tools for efficiently manipulating datasets in R. dplyr is the next iteration of plyr, focusing only on data frames. With `dplyr`, anything you can do to a local data frame you can also do to a remote database table.
## Why `dplyr`?
- Great for data exploration and transformation
- Intuitive to write and easy to read, especially when using the “chaining” syntax (covered below)
- Fast on data frames
## `dplyr` functionality
- Five basic verbs: `filter`, `select`, `arrange`, `mutate`, and `summarise` (plus the `group_by` helper)
- Can work with data stored in databases and data tables
- Joins: inner join, left join, semi-join, anti-join
- Window functions for calculating ranking, offsets, and more
- Better than plyr if you’re only working with data frames (though it doesn’t yet duplicate all of the plyr functionality)
## Load Packages and Data
```
install.packages('hflights', repos = 'http://cran.us.r-project.org')
# Load packages
library(dplyr)
library(hflights)
# Explore data
data(hflights)
head(hflights)
#`tbl_df` creates a “local data frame”
# Local data frame is simply a wrapper for a data frame that prints nicely
flights <- tbl_df(hflights)
# Examine first few rows
head(flights)
# Examine last few rows
tail(flights)
```
## Command structure (for all dplyr verbs)
- first argument is a **data frame**
- return value is a data frame
- nothing is modified in place
- Note: dplyr generally does not **preserve row names**
## `filter`: Keep rows matching criteria
### `AND` Operator( & )
```
# Note: you can use comma or ampersand (&) to represent an AND condition
filter(flights, Month==1, DayofMonth==1)
# Using &
filter(flights, Month==1 & DayofMonth==1)
```
### `OR` Operator( | )
```
# Pipe for OR operation
filter(flights, UniqueCarrier == "AA" | UniqueCarrier == "UA" )
```
### `%in%` Operator
```
# Use of %in% operator
filter(flights, UniqueCarrier %in% c("AA", "UA"))
```
## `select`: Pick columns by name
- dplyr approach uses similar syntax to filter
```
# Selecting columns
select(flights, DepTime, ArrTime, FlightNum)
```
### `contains`
```
# Use colon to select multiple contiguous columns, and use `contains` to match columns by name
# Note: `starts_with`, `ends_with`, and `matches`
# (for regular expressions) can also be used to match columns by name
select(flights, Year:DayofMonth, contains("Taxi"), contains("Delay"))
```
## "Chaining” or “Pipelining"
- Usual way to perform multiple operations in one line is by nesting
- Can write commands in a natural order by using the %>% infix operator (which can be pronounced as “then”)
```
# Nesting Method
filter(select(flights, UniqueCarrier, DepDelay), DepDelay > 60)
# Chaining method
flights %>%
select(UniqueCarrier, DepDelay) %>%
filter(DepDelay > 60) %>%
head()
```
## `arrange`: Reorder rows
```
# Ascending order
flights %>%
select(UniqueCarrier, DepDelay) %>%
arrange(DepDelay) %>%
head()
# Use `desc` for descending
flights %>%
select(UniqueCarrier, DepDelay) %>%
arrange(desc(DepDelay)) %>%
head()
```
## `mutate`: Add new variable
```
# Add a new variable and prints the new variable but does not store it
flights %>%
select(Distance, AirTime) %>%
mutate(Speed = Distance/AirTime * 60) %>%
head()
# Store the new variable
flights <- flights %>% mutate(Speed = Distance/AirTime * 60)
# See Dataset
head(flights)
```
## `summarise`: Reduce variables to value
- Primarily useful with data that has been grouped by one or more variables
- `group_by` creates the groups that will be operated on
- `summarise` uses the provided aggregation function to summarise each group
```
# create a table grouped by Dest, and then summarise each group by taking the mean of ArrDelay
flights %>%
group_by(Dest) %>%
summarise(avg_delay = mean(ArrDelay, na.rm=TRUE)) %>%
head()
# summarise_each allows you to apply the same summary function to multiple columns at once
# Note: mutate_each is also available
# for each carrier, calculate the percentage of flights cancelled or diverted
flights %>%
group_by(UniqueCarrier) %>%
summarise_each(funs(mean), Cancelled, Diverted) %>%
head()
# for each carrier, calculate the minimum and maximum arrival and departure delays
flights %>%
group_by(UniqueCarrier) %>%
summarise_each(funs(min(., na.rm=TRUE), max(., na.rm=TRUE)), matches("Delay")) %>%
head()
```
- Helper function n() counts the number of rows in a group
- Helper function n_distinct(vector) counts the number of unique items in that vector
```
# for each day of the year, count the total number of flights and sort in descending order
flights %>%
group_by(Month, DayofMonth) %>%
summarise(flight_count = n()) %>%
arrange(desc(flight_count)) %>%
head()
# rewrite more simply with the `tally` function
flights %>%
group_by(Month, DayofMonth) %>%
tally(sort = TRUE) %>%
head()
# for each destination, count the total number of flights and the number of distinct planes that flew there
flights %>%
group_by(Dest) %>%
summarise(flight_count = n(), plane_count = n_distinct(TailNum)) %>%
head()
# for each destination, show the number of cancelled and not cancelled flights
flights %>%
group_by(Dest) %>%
select(Cancelled) %>%
table() %>%
head()
```
## `window` Functions
- Aggregation function (like `mean`) takes n inputs and returns 1 value
- Window function takes n inputs and returns n values
- Includes ranking and ordering functions (like `min_rank`), `offset` functions (lead and lag), and cumulative aggregates (like cummean).
```
# for each carrier, calculate which two days of the year they had their longest departure delays
# note: smallest (not largest) value is ranked as 1, so you have to use `desc` to rank by largest value
flights %>%
group_by(UniqueCarrier) %>%
select(Month, DayofMonth, DepDelay) %>%
filter(min_rank(desc(DepDelay)) <= 2) %>%
arrange(UniqueCarrier, desc(DepDelay))
# rewrite more simply with the `top_n` function
flights %>%
group_by(UniqueCarrier) %>%
select(Month, DayofMonth, DepDelay) %>%
top_n(2) %>%
arrange(UniqueCarrier, desc(DepDelay))
# for each month, calculate the number of flights and the change from the previous month
flights %>%
group_by(Month) %>%
summarise(flight_count = n()) %>%
mutate(change = flight_count - lag(flight_count))
# rewrite more simply with the `tally` function
flights %>%
group_by(Month) %>%
tally() %>%
mutate(change = n - lag(n))
```
## Other Useful Convenience Functions
```
# randomly sample a fixed number of rows, without replacement
flights %>% sample_n(5)
# randomly sample a fraction of rows, with replacement
flights %>% sample_frac(0.25, replace=TRUE)
str(flights)
# dplyr approach: better formatting, and adapts to your screen width
glimpse(flights)
```
## References
- https://rpubs.com/justmarkham/dplyr-tutorial
- https://rpubs.com/justmarkham/dplyr-tutorial-part-2
- https://rafalab.github.io/dsbook/
- [Official dplyr reference manual and vignettes on CRAN](http://cran.r-project.org/web/packages/dplyr/index.html)
| github_jupyter |
Before you turn this problem in, make sure everything runs as expected. First, **restart the kernel** (in the menubar, select Kernel$\rightarrow$Restart) and then **run all cells** (in the menubar, select Cell$\rightarrow$Run All).
Make sure you fill in any place that says `YOUR CODE HERE` or "YOUR ANSWER HERE", as well as your name below:
```
let name = ""
let rollno = ""
```
## Important notes about grading:
1. **Compiler errors:** All code you submit must compile. Programs that do not compile will probably receive an automatic zero. If you are having trouble getting your assignment to compile, please visit consulting hours. If you run out of time, it is better to comment out the parts that do not compile, than hand in a more complete file that does not compile.
2. **Late assignments:** Please carefully review the course website's policy on late assignments, as all assignments handed in after the deadline will be considered late. Verify on moodle that you have submitted the correct version, before the deadline. Submitting the incorrect version before the deadline and realizing that you have done so after the deadline will be counted as a late submission.
# Lambda Calculus Interpreter
In this assignment, you will implement lambda calculus interpreters that use different reduction strategies. The abstract syntax tree (AST) for lambda expressions is the one that we have seen in class:
```ocaml
type expr =
| Var of string
| Lam of string * expr
| App of expr * expr
```
You are provided a parser function `parse_string` that converts a string to this AST and a printer function `string_of_expr` that converts the AST to string. For example,
```
#use "init.ml"
open Syntax
let parse_string = Lambda_parse.parse_string
let string_of_expr = string_of_expr
let _ = parse_string "(\\x.x) (\\y.y)"
let _ = string_of_expr (App (Var "x",Lam("y",App(Var "y", Var "x"))))
```
You will need some helper functions to operate over sets. Since we have not studied set data structure in OCaml. We will use lists instead and implement set functionality on top of lists. You can use the functions from the OCaml list standard library for this assignment. The documentation for OCaml list module is available in the [OCaml manual](https://caml.inria.fr/pub/docs/manual-ocaml/libref/List.html).
## Problem 1
Implement a function
```ocaml
mem : 'a -> 'a list -> bool
```
`mem e l` returns `true` if the element `e` is present in the list. Otherwise, it returns false.
```
let mem e l =
(* YOUR CODE HERE *)
raise (Failure "Not implemented")
(* 5 points *)
assert (mem "b" ["a";"b";"c"] = true);
assert (mem "x" ["a";"b";"c"] = false)
```
## Problem 2
Implement a function
```ocaml
remove : 'a -> 'a list -> 'a list
```
`remove e l` returns a list `l'` with all the element in `l` except `e`. `remove` also preserves the order of the elements not removed. If `e` is not present in `l`, then return `l`.
```
let remove e l =
(* YOUR CODE HERE *)
raise (Failure "Not implemented")
(* 5 points *)
assert (remove "b" ["a";"b";"c"] = ["a";"c"]);
assert (remove "x" ["a";"b";"c"] = ["a";"b";"c"])
```
## Problem 3
Implement a function
```ocaml
union : string list -> string list -> string list
```
`union l1 l2` performs set union of elements in `l1` and `l2`. The elements in the result list `l` must be lexicographically sorted. Hint: You may want to use the functions `List.sort` and `remove_stutter` from assignment 1 to implement union. Here is an example of using `List.sort`.
```
assert (List.sort String.compare ["x";"a";"b";"m"] = ["a";"b";"m";"x"])
let union l1 l2 =
(* YOUR CODE HERE *)
raise (Failure "Not implemented")
(* 5 points *)
assert (union ["a"; "c"; "b"] ["d"; "b"; "x"; "a"] = ["a"; "b"; "c"; "d"; "x"])
```
## Problem 4
Implement a function
```ocaml
add : 'a -> 'a list -> 'a list
```
`add e l` does a set addition of element `e` to list `l` and returns a list. The resultant list is sorted.
```
let add e l =
(* YOUR CODE HERE *)
raise (Failure "Not implemented")
(* 5 points *)
assert (add "b" ["a";"c"] = ["a";"b";"c"]);
assert (add "a" ["c"; "a"] = ["a";"c"])
```
## Substitution
At the heart of reducing lambda expressions is substitution. Recall from the lecture that substitution requires us to generate fresh variable names that is different from every other name used in the current context. We will use the following helper function to generate fresh names.
```
let r = ref 0
let fresh s =
let v = !r in
r := !r + 1;
s ^ (string_of_int v)
```
It uses mutability features of OCaml which we will study in later lectures. You can use the `fresh` function as follows:
```
let a = fresh "a"
let b = fresh "b"
```
## Problem 5
Implement a function
```ocaml
free_variables: expr -> string list
```
that returns the free variables in the given lambda term.
```
let rec free_variables e =
(* YOUR CODE HERE *)
raise (Failure "Not implemented")
(* 20 points *)
assert (free_variables (parse_string "\\x.x") = []);
assert (free_variables (parse_string "\\x.y") = ["y"])
```
## Problem 6
Implement the function
```ocaml
substitute : expr -> string -> expr -> expr
```
where `substitute e x v` does `e[v/x]`. For renaming `x` in `Lam(x,y)` with a fresh name, use `Lam (fresh x, ...)`.
```
let rec substitute expr a b =
(* YOUR CODE HERE *)
raise (Failure "Not implemented")
(* 20 points *)
assert (alpha_equiv
(substitute (parse_string "\\y.x") "x" (parse_string "\\z.z w"))
(parse_string "λy.λz.z w"));
assert (alpha_equiv
(substitute (parse_string "\\x.x") "x" (parse_string "y"))
(parse_string "λx.x"));
assert (alpha_equiv
(substitute (parse_string "\\x.y") "y" (parse_string "x"))
(parse_string "λx0.x"))
```
## Problem 7
Implement a single step of the call-by-value reduction. Implement the function
```ocaml
reduce_cbv : expr -> expr option
```
which does a single step of the call-by-value reduction. Recall that call-by-value reduction is deterministic. Hence, if reduction is possible, then a single rule applies. `reduce e` returns `Some e'` if reduction is possible and `e'` is the new expression. `reduce e` returns `None` if reduction is not possible.
```
let rec reduce_cbv e =
(* YOUR CODE HERE *)
raise (Failure "Not implemented")
(* 20 points *)
begin match reduce_cbv (parse_string "(\\x.x) ((\\x.x) (\\z.(\\x.x) z))") with
| Some expr -> assert (alpha_equiv expr (parse_string "(λx.x) (λz.(λx.x) z)"))
| None -> assert false
end;
begin match reduce_cbv (parse_string "(λx.x) (λz.(λx.x) z)") with
| Some expr -> assert (alpha_equiv expr (parse_string "λz.(λx.x) z"))
| None -> assert false
end;
assert (reduce_cbv (parse_string "λz.(λx.x) z") = None);
begin match reduce_cbv (parse_string "(λx.y) ((λx.x x) (λx.x x))") with
| Some expr -> assert (alpha_equiv expr (parse_string "(λx.y) ((λx.x x) (λx.x x))"))
| None -> assert false
end;
assert (reduce_cbv (parse_string "x y z") = None)
```
## Problem 8
Implement a single step of the call-by-name reduction. Implement the function
```ocaml
reduce_cbn : expr -> expr option
```
The rest of the instructions are same as `reduce_cbv`.
```
let rec reduce_cbn e =
(* YOUR CODE HERE *)
raise (Failure "Not implemented")
(* 20 points *)
begin match reduce_cbn (parse_string "(\\x.x) ((\\x.x) (\\z.(\\x.x) z))") with
| Some expr -> assert (alpha_equiv expr (parse_string "(λx.x) (λz.(λx.x) z)"))
| None -> assert false
end;
begin match reduce_cbn (parse_string "(λx.x) (λz.(λx.x) z)") with
| Some expr -> assert (alpha_equiv expr (parse_string "λz.(λx.x) z"))
| None -> assert false
end;
assert (reduce_cbn (parse_string "λz.(λx.x) z") = None);
begin match reduce_cbn (parse_string "(λx.y) ((λx.x x) (λx.x x))") with
| Some expr -> assert (alpha_equiv expr (parse_string "y"))
| None -> assert false
end;
begin match reduce_cbn (parse_string "(\\x.x x) ((\\z.z) y)") with
| Some expr -> assert (alpha_equiv expr (parse_string "(λz.z) y ((λz.z) y)"))
| None -> assert false
end;
assert (reduce_cbn (parse_string "x y z") = None)
```
## Problem 9
Implement a single step of the normal order reduction. Implement the function
```ocaml
reduce_normal : expr -> expr option
```
The rest of the instructions are same as `reduce_cbv`.
```
let rec reduce_normal e =
(* YOUR CODE HERE *)
raise (Failure "Not implemented")
(* 20 points *)
begin match reduce_normal (parse_string "(\\x.x) ((\\x.x) (\\z.(\\x.x) z))") with
| Some expr -> assert (alpha_equiv expr (parse_string "(λx.x) (λz.(λx.x) z)"))
| None -> assert false
end;
begin match reduce_normal (parse_string "(λx.x) (λz.(λx.x) z)") with
| Some expr -> assert (alpha_equiv expr (parse_string "λz.(λx.x) z"))
| None -> assert false
end;
begin match reduce_normal (parse_string "λz.(λx.x) z") with
| Some expr -> assert (alpha_equiv expr (parse_string "λz. z"))
| None -> assert false
end;
begin match reduce_normal (parse_string "(λx.y) ((λx.x x) (λx.x x))") with
| Some expr -> assert (alpha_equiv expr (parse_string "y"))
| None -> assert false
end;
begin match reduce_normal (parse_string "(\\x.x x) ((\\z.z) y)") with
| Some expr -> assert (alpha_equiv expr (parse_string "(λz.z) y ((λz.z) y)"))
| None -> assert false
end;
begin match reduce_normal (parse_string "f (\\x.x x) ((\\z.z) y)") with
| Some expr -> assert (alpha_equiv expr (parse_string "f (λx.x x) y"))
| None -> assert false
end;
begin match reduce_normal (parse_string "(\\x.(\\z.z) y) (\\x.x x)") with
| Some expr -> assert (alpha_equiv expr (parse_string "(λz.z) y"))
| None -> assert false
end
let rec eval log depth reduce expr =
if depth = 0 then failwith "non-termination?"
else
begin match reduce expr with
| None -> expr
| Some expr' ->
if log then print_endline ("= " ^ (string_of_expr expr'));
eval log (depth-1) reduce expr'
end
let eval_cbv = eval true 1000 reduce_cbv
let eval_cbn = eval true 1000 reduce_cbn
let eval_normal = eval true 1000 reduce_normal
(* 10 points *)
let zero = parse_string "\\f.\\x. x" in
let one = parse_string "\\f.\\x. f x" in
let two = parse_string "\\f.\\x. f (f x)" in
let three = parse_string "\\f.\\x. f (f (f x))" in
let plus = parse_string "λm. λn. λs. λz. m s (n s z)" in
let mult = parse_string "λm. λn. λs. λz. m (n s) z" in
assert (alpha_equiv (eval_normal (App (App (plus, one), two))) three);
print_endline "";
assert (alpha_equiv (eval_normal (App (App (mult, one), three))) three);
print_endline "";
assert (alpha_equiv (eval_normal (App (App (mult, zero), three))) zero)
```
| github_jupyter |
# Creating Vocubalary
```
import re
from collections import Counter
def createVocabulary(reviews):
vocabulary = []
for i in range(0,len(reviews)):
tweet2 = re.sub(r'^RT[\s]+', '', reviews[i])
# remove hyperlinks
tweet2 = re.sub(r'https?:\/\/.*[\r\n]*', '', tweet2)
# remove hashtags
# only removing the hash # sign from the word
tweet2 = re.sub(r'#', '', tweet2)
word = tweet2.split(' ')
for j in range(0,len(word)):
join_word = ''.join(word[j])
un_word = Counter(join_word)
s = ''.join(un_word.keys())
if s not in vocabulary:
vocabulary.append(s)
return vocabulary
corpus = ['I love this hotel, I love it','I am in heaven , great hotel','What an amazing food','I dont like the food! HAte it' ]
voc = createVocabulary(corpus) # ---> This is called corpus
print(voc)
```
# Feature Extraction
```
sample_positive = 'I love this food'
sample_negative = 'I HAte this food'
```
Now creating positive and negative frequencies based on the sample data
```
import pandas as pd
def createFrequency(words):
l=[]
s = words.split(' ')
for i in range(0,len(voc)):
l.append(words.count(voc[i]))
return l
pos_result = createFrequency(sample_positive)
neg_result = createFrequency(sample_negative)
print("Positive Sample : ",pos_result)
print("Negative Sample : ",neg_result)
d = {'Vocubulary':voc,'Positive Frequency':pos_result,'Negative Frequency':neg_result}
d = pd.DataFrame(d)
print(d)
```
# Preprocessing
1. Stemming
2. Stop Words
```
#Importing Regular Expression and NLTK
import re
import nltk
# Downloading Tweet Data from Twitter to perform Setimental analysis
nltk.download('twitter_samples')
positive_tweets = pd.read_json('./positive_tweets.json',lines=True)
negative_tweets = pd.read_json('./negative_tweets.json',lines=True)
print("Number of positive tweets : ",len(positive_tweets))
print("Number of negative tweets : ",len(negative_tweets))
positive_tweets.text
```
## Preprocess raw text for Sentiment analysis
Data preprocessing is one of the critical steps in any machine learning project. It includes cleaning and formatting the data before feeding into a machine learning algorithm. For NLP, the preprocessing steps are comprised of the following tasks:
* Tokenizing the string
* Lowercasing
* Removing stop words and punctuation
* Stemming
The videos explained each of these steps and why they are important. Let's see how we can do these to a given tweet. We will choose just one and see how this is transformed by each preprocessing step.
```
# download the stopwords from NLTK
nltk.download('stopwords')
import re # library for regular expression operations
import string # for string operations
from nltk.corpus import stopwords # module for stop words that come with NLTK
from nltk.stem import PorterStemmer # module for stemming
from nltk.tokenize import TweetTokenizer # module for tokenizing strings
res = createVocabulary(positive_tweets.text)
# print(res)
stopwords_english = stopwords.words('english')
tweets_clean = []
stemmer = PorterStemmer()
for word in res: # Go through every word in your tokens list
if (word not in stopwords_english and # remove stopwords
word not in string.punctuation): # remove punctuation
tweets_clean.append(word)
print(tweets_clean)
tweets_stem = []
for word in tweets_clean:
stem_word = stemmer.stem(word) # stemming word
tweets_stem.append(stem_word) # append to the list
print(tweets_stem)
```
| github_jupyter |
```
import keras
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Dropout, Activation, Flatten, Input, Lambda
from keras.layers import Conv2D, MaxPooling2D, Conv1D, MaxPooling1D, LSTM, ConvLSTM2D, GRU, BatchNormalization, LocallyConnected2D, Permute
from keras.layers import Concatenate, Reshape, Softmax, Conv2DTranspose, Embedding, Multiply
from keras.callbacks import ModelCheckpoint, EarlyStopping, Callback
from keras import regularizers
from keras import backend as K
import keras.losses
import tensorflow as tf
#tf.compat.v1.enable_eager_execution()
from tensorflow.python.framework import ops
import isolearn.keras as iso
import numpy as np
import tensorflow as tf
import logging
logging.getLogger('tensorflow').setLevel(logging.ERROR)
import pandas as pd
import os
import pickle
import numpy as np
import random
import scipy.sparse as sp
import scipy.io as spio
import matplotlib.pyplot as plt
import isolearn.io as isoio
import isolearn.keras as isol
from genesis.visualization import *
from genesis.generator import *
from genesis.predictor import *
from genesis.optimizer import *
from definitions.generator.aparent_deconv_conv_generator_concat_trainmode import load_generator_network
from definitions.predictor.aparent import load_saved_predictor
import sklearn
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from scipy.stats import pearsonr
import seaborn as sns
from matplotlib import colors
from scipy.stats import norm
from genesis.vae import *
def set_seed(seed_value) :
    """Seed every RNG used in this notebook for reproducible runs.

    Covers Python's hash seed, the `random` module, NumPy, and
    TensorFlow (TF1-style graph seed), then installs a fresh
    single-threaded TF session so op-level parallelism cannot
    introduce nondeterministic reduction order.
    """
    # 1. Set the `PYTHONHASHSEED` environment variable at a fixed value
    os.environ['PYTHONHASHSEED']=str(seed_value)
    # 2. Set the `python` built-in pseudo-random generator at a fixed value
    random.seed(seed_value)
    # 3. Set the `numpy` pseudo-random generator at a fixed value
    np.random.seed(seed_value)
    # 4. Set the `tensorflow` pseudo-random generator at a fixed value
    tf.set_random_seed(seed_value)
    # 5. Configure a new global `tensorflow` session
    session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
    sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
    K.set_session(sess)
def load_data(data_name, valid_set_size=0.05, test_set_size=0.05, batch_size=32) :
    """Load the cached APA plasmid dataset and return one-hot sequence arrays.

    Parameters
    ----------
    data_name : str
        Path to a pickled dict containing a 'plasmid_df' dataframe (and
        'plasmid_cuts', which is loaded but unused here).
    valid_set_size, test_set_size : float
        Fractions of the dataframe reserved for validation / test; the
        split is positional (no shuffling before splitting).
    batch_size : int
        Batch size used by the isolearn data generators.

    Returns
    -------
    (x_train, x_test) : tuple of np.ndarray
        One-hot encoded sequences, one row per plasmid; the 81 nt region
        of interest is padded to length 128 with 'G'.
    """
    #Load cached dataframe
    # Use a context manager so the file handle is closed deterministically
    # (the original `pickle.load(open(...))` leaked the handle).
    with open(data_name, 'rb') as pickle_f :
        cached_dict = pickle.load(pickle_f)
    plasmid_df = cached_dict['plasmid_df']
    plasmid_cuts = cached_dict['plasmid_cuts']  # loaded for parity with the cache format; unused below
    #Generate training and test set indexes
    # NOTE: `np.int` was removed in NumPy 1.24 — use the builtin int dtype.
    plasmid_index = np.arange(len(plasmid_df), dtype=int)
    plasmid_train_index = plasmid_index[:-int(len(plasmid_df) * (valid_set_size + test_set_size))]
    plasmid_valid_index = plasmid_index[plasmid_train_index.shape[0]:-int(len(plasmid_df) * test_set_size)]
    plasmid_test_index = plasmid_index[plasmid_train_index.shape[0] + plasmid_valid_index.shape[0]:]
    # Build one generator per split; only 'train' shuffles.
    data_gens = {
        gen_id : iso.DataGenerator(
            idx,
            {'df' : plasmid_df},
            batch_size=batch_size,
            inputs = [
                {
                    'id' : 'seq',
                    'source_type' : 'dataframe',
                    'source' : 'df',
                    # Crop the 81 nt window of interest and pad to 128 with 'G'.
                    'extractor' : lambda row, index: row['padded_seq'][180 + 40: 180 + 40 + 81] + "G" * (128-81),
                    'encoder' : iso.OneHotEncoder(seq_length=128),
                    'dim' : (1, 128, 4),
                    'sparsify' : False
                }
            ],
            outputs = [
                {
                    'id' : 'dummy_output',
                    'source_type' : 'zeros',
                    'dim' : (1,),
                    'sparsify' : False
                }
            ],
            randomizers = [],
            shuffle = True if gen_id == 'train' else False
        ) for gen_id, idx in [('all', plasmid_index), ('train', plasmid_train_index), ('valid', plasmid_valid_index), ('test', plasmid_test_index)]
    }
    # Materialize the generators into dense arrays.
    x_train = np.concatenate([data_gens['train'][i][0][0] for i in range(len(data_gens['train']))], axis=0)
    x_test = np.concatenate([data_gens['test'][i][0][0] for i in range(len(data_gens['test']))], axis=0)
    return x_train, x_test
#Specify problem-specific parameters
experiment_suffix = '_strong_vae_very_high_kl_epoch_35_lower_fitness'
vae_model_prefix = "vae/saved_models/vae_apa_max_isoform_doubledope_strong_cano_pas_len_128_50_epochs_very_high_kl"
vae_model_suffix = "_epoch_35"#""#
#VAE model path
saved_vae_encoder_model_path = vae_model_prefix + "_encoder" + vae_model_suffix + ".h5"
saved_vae_decoder_model_path = vae_model_prefix + "_decoder" + vae_model_suffix + ".h5"
#Padding for the VAE
vae_upstream_padding = ''
vae_downstream_padding = 'G' * 47
#VAE sequence template
# Fixed flanks plus 'N' free positions; 'G'-padded from 81 nt up to 128.
vae_sequence_template = 'ATCCANNNNNNNNNNNNNNNNNNNNNNNNNAATAAANNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNCAGCC' + 'G' * (128 - 81)
#VAE latent dim
vae_latent_dim = 100
#Oracle predictor model path
saved_predictor_model_path = '../../../aparent/saved_models/aparent_plasmid_iso_cut_distalpas_all_libs_no_sampleweights_sgd.h5'
#Substring indices for VAE
# The VAE only sees PWM columns [40, 121) of the full sequence.
vae_pwm_start = 40
vae_pwm_end = 121
#VAE parameter collection
vae_params = [
    saved_vae_encoder_model_path,
    saved_vae_decoder_model_path,
    vae_upstream_padding,
    vae_downstream_padding,
    vae_latent_dim,
    vae_pwm_start,
    vae_pwm_end
]
#Load data set
vae_data_path = "vae/apa_doubledope_cached_set_strong_short_cano_pas.pickle"
# Tiny validation split; ~9.5% held out as the test set used below.
_, x_test = load_data(vae_data_path, valid_set_size=0.005, test_set_size=0.095)
#Evaluate ELBO distribution on test set
#Load VAE models
# custom_objects supplies the sampling ops / normalization layers the
# saved models reference (from the genesis package).
vae_encoder_model = load_model(saved_vae_encoder_model_path, custom_objects={'st_sampled_softmax':st_sampled_softmax, 'st_hardmax_softmax':st_hardmax_softmax, 'min_pred':min_pred, 'InstanceNormalization':InstanceNormalization})
vae_decoder_model = load_model(saved_vae_decoder_model_path, custom_objects={'st_sampled_softmax':st_sampled_softmax, 'st_hardmax_softmax':st_hardmax_softmax, 'min_pred':min_pred, 'InstanceNormalization':InstanceNormalization})
#Compute multi-sample ELBO on test set
# 128 importance samples per sequence: evaluation-quality estimate.
log_mean_p_vae_test, mean_log_p_vae_test, log_p_vae_test = evaluate_elbo(vae_encoder_model, vae_decoder_model, x_test, n_samples=128)
print("mean log(likelihood) = " + str(mean_log_p_vae_test))
#Log Likelihood Plot
plot_min_val = None
plot_max_val = None
f = plt.figure(figsize=(6, 4))
log_p_vae_test_hist, log_p_vae_test_edges = np.histogram(log_mean_p_vae_test, bins=50, density=True)
bin_width_test = log_p_vae_test_edges[1] - log_p_vae_test_edges[0]
# Center each bar on its bin midpoint.
plt.bar(log_p_vae_test_edges[1:] - bin_width_test/2., log_p_vae_test_hist, width=bin_width_test, linewidth=2, edgecolor='black', color='orange')
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
if plot_min_val is not None and plot_max_val is not None :
    plt.xlim(plot_min_val, plot_max_val)
plt.xlabel("VAE Log Likelihood", fontsize=14)
plt.ylabel("Data Density", fontsize=14)
# Dashed red line marks the mean log-likelihood.
plt.axvline(x=mean_log_p_vae_test, linewidth=2, color='red', linestyle="--")
plt.tight_layout()
plt.show()
#Evaluate ELBO distribution on test set (training-level no. of samples)
#Load VAE models
# Models are re-loaded to reset any state before the second evaluation.
vae_encoder_model = load_model(saved_vae_encoder_model_path, custom_objects={'st_sampled_softmax':st_sampled_softmax, 'st_hardmax_softmax':st_hardmax_softmax, 'min_pred':min_pred, 'InstanceNormalization':InstanceNormalization})
vae_decoder_model = load_model(saved_vae_decoder_model_path, custom_objects={'st_sampled_softmax':st_sampled_softmax, 'st_hardmax_softmax':st_hardmax_softmax, 'min_pred':min_pred, 'InstanceNormalization':InstanceNormalization})
#Compute multi-sample ELBO on test set
# Only 32 samples here — matches the sample count used during training.
log_mean_p_vae_test, mean_log_p_vae_test, log_p_vae_test = evaluate_elbo(vae_encoder_model, vae_decoder_model, x_test, n_samples=32)
print("mean log(likelihood) = " + str(mean_log_p_vae_test))
#Log Likelihood Plot
plot_min_val = None
plot_max_val = None
f = plt.figure(figsize=(6, 4))
log_p_vae_test_hist, log_p_vae_test_edges = np.histogram(log_mean_p_vae_test, bins=50, density=True)
bin_width_test = log_p_vae_test_edges[1] - log_p_vae_test_edges[0]
plt.bar(log_p_vae_test_edges[1:] - bin_width_test/2., log_p_vae_test_hist, width=bin_width_test, linewidth=2, edgecolor='black', color='orange')
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
if plot_min_val is not None and plot_max_val is not None :
    plt.xlim(plot_min_val, plot_max_val)
plt.xlabel("VAE Log Likelihood", fontsize=14)
plt.ylabel("Data Density", fontsize=14)
plt.axvline(x=mean_log_p_vae_test, linewidth=2, color='red', linestyle="--")
plt.tight_layout()
plt.show()
#Load models
#Load VAE models
vae_encoder_model = load_model(saved_vae_encoder_model_path, custom_objects={'st_sampled_softmax':st_sampled_softmax, 'st_hardmax_softmax':st_hardmax_softmax, 'min_pred':min_pred, 'InstanceNormalization':InstanceNormalization})
vae_decoder_model = load_model(saved_vae_decoder_model_path, custom_objects={'st_sampled_softmax':st_sampled_softmax, 'st_hardmax_softmax':st_hardmax_softmax, 'min_pred':min_pred, 'InstanceNormalization':InstanceNormalization})
# APARENT oracle used to score sequences.
predictor_model = load_model(saved_predictor_model_path)
#Setup predictor function
# Library names in the order the predictor's one-hot library input expects.
libraries = ['tomm5_up_n20c20_dn_c20', 'tomm5_up_c20n20_dn_c20', 'tomm5_up_n20c20_dn_n20', 'tomm5_up_c20n20_dn_n20', 'doubledope', 'simple', 'atr', 'hsp', 'snh', 'sox', 'wha', 'array', 'aar']
library_dict = { lib : i for i, lib in enumerate(libraries) }
# Score everything under the 'doubledope' library context.
lib_ix = library_dict['doubledope']
# Fixed flanks prepended/appended so inputs match the predictor's
# training-time sequence context.
predictor_upstream_padding = "CTTCCGATCTCTCGCTCTTTCTATGGCATTCATTACTCGC"
predictor_downstream_padding = "AATTAAGCCTGTCGTCGTGGGTGTCGAAAATGAAATAAAACAAGTCAATTGCGTAGTTTATTCAGACGTACCCCGTGGACCTAC"
# Window of the generated PWM fed to the predictor (5 + 71 + 5 columns).
predictor_pwm_start = 0
predictor_pwm_end = 5 + 71 + 5
# Pre-encode the fixed flanks once; one_hot_encode is a project helper —
# presumably returns (1, len, 4, 1) arrays; confirm in genesis.
predictor_upstream_padding_one_hots = None
if len(predictor_upstream_padding) > 0 :
    predictor_upstream_padding_one_hots = one_hot_encode(predictor_upstream_padding, n=1, singleton_axis=-1)
predictor_downstream_padding_one_hots = None
if len(predictor_downstream_padding) > 0 :
    predictor_downstream_padding_one_hots = one_hot_encode(predictor_downstream_padding, n=1, singleton_axis=-1)
#Predict fitness score
def _predict_func(sequence_one_hots, predictor_model, lib_ix=lib_ix) :
    """Score one-hot sequences with the APA predictor; return isoform log-odds."""
    n_seqs = sequence_one_hots.shape[0]
    # Move one-hot channel to the last axis and crop to the predictor's PWM window.
    seq_input = np.moveaxis(sequence_one_hots, 1, -1)[:, predictor_pwm_start:predictor_pwm_end, :, :]
    if len(predictor_upstream_padding) > 0 :
        up_flank = np.tile(predictor_upstream_padding_one_hots, (n_seqs, 1, 1, 1))
        seq_input = np.concatenate([up_flank, seq_input], axis=1)
    if len(predictor_downstream_padding) > 0 :
        down_flank = np.tile(predictor_downstream_padding_one_hots, (n_seqs, 1, 1, 1))
        seq_input = np.concatenate([seq_input, down_flank], axis=1)
    # One-hot library indicator plus a constant auxiliary input.
    lib_input = np.zeros((n_seqs, 13))
    lib_input[:, lib_ix] = 1.
    const_input = np.ones((n_seqs, 1))
    iso_pred, _ = predictor_model.predict(x=[seq_input, lib_input, const_input], batch_size=32, verbose=False)
    # Convert the predicted isoform fraction to log-odds.
    return np.log(iso_pred[:, 0] / (1. - iso_pred[:, 0]))
#Evaluate ELBOs
# Latent samples per sequence for the multi-sample ELBO, and number of
# sequences evaluated per data source.
n_z_samples = 128
n = 1024
#Evaluate VAE Likelihood on test data
log_mean_p_vae_test, mean_log_p_vae_test, _ = evaluate_elbo(vae_encoder_model, vae_decoder_model, x_test[:n], n_samples=n_z_samples)
# NOTE(review): fitness is scored on the full x_test while the ELBO above uses
# only x_test[:n] — confirm this asymmetry is intentional.
fitness_score_test = _predict_func(x_test, predictor_model)
#Evaluate VAE Likelihood on random data
x_random = np.zeros((n, 1, 128, 4))
nt_channel = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
for i in range(n) :
    # Fill the template's 'N' positions with uniformly random nucleotides,
    # keeping all fixed template characters as-is.
    random_seq = ''
    for template_char in vae_sequence_template :
        if template_char == 'N' :
            random_seq += np.random.choice(['A', 'C', 'G', 'T'], replace=False)
        else :
            random_seq += template_char
    # One-hot encode; characters outside ACGT leave an all-zero column.
    for j, nt in enumerate(random_seq) :
        if nt in nt_channel :
            x_random[i, 0, j, nt_channel[nt]] = 1.
log_mean_p_vae_random, mean_log_p_vae_random, _ = evaluate_elbo(vae_encoder_model, vae_decoder_model, x_random, n_samples=n_z_samples)
fitness_score_random = _predict_func(x_random, predictor_model)
batch_size = 64
# Suffixes naming the four trained GENESIS generator variants to compare.
experiment_suffixes = [
    '_strong_vae_very_high_kl_epoch_35_margin_neg_2_lower_fitness',
    '_strong_vae_very_high_kl_epoch_35_margin_0_lower_fitness',
    '_strong_vae_very_high_kl_epoch_35_margin_pos_2_lower_fitness',
    '_weak_vae_very_high_kl_epoch_35_only_fitness',
]
#Evaluate VAE Likelihood on generated data
log_mean_p_vae_new_dict = {}
mean_log_p_vae_new_dict = {}
fitness_score_new_dict = {}
for temp_ix, temp_suffix in enumerate(experiment_suffixes) :
    print("Evaluating on genesis" + temp_suffix + ".")
    den_name = 'genesis_apa_max_isoform_doubledope' + temp_suffix + '_vae_kl'
    den_model = load_model("saved_models/" + den_name + "_predictor.h5", custom_objects={'st_sampled_softmax': st_sampled_softmax, 'st_hardmax_softmax': st_hardmax_softmax})
    # Single sequence class (0) for all samples; two independent noise inputs.
    sequence_class = np.array([0] * n).reshape(-1, 1) #np.random.uniform(-6, 6, (n, 1)) #
    noise_1 = np.random.uniform(-1, 1, (n, 100))
    noise_2 = np.random.uniform(-1, 1, (n, 100))
    # Generator output index 5 is used as the sampled sequences
    # (per the downstream one-hot handling below).
    sampled_sequences_new = den_model.predict([sequence_class, noise_1, noise_2], batch_size=batch_size, verbose=False)[5]
    sampled_sequences_new = np.moveaxis(sampled_sequences_new[:, 0, ...], 3, 1)
    # Re-wrap the generated PWM window with the VAE's fixed flanking sequence.
    x_new = np.concatenate([
        one_hot_encode(vae_upstream_padding, n=n, singleton_axis=1),
        sampled_sequences_new[:, :, vae_pwm_start:vae_pwm_end, :],
        one_hot_encode(vae_downstream_padding, n=n, singleton_axis=1)
    ], axis=2)
    log_mean_p_vae_new, mean_log_p_vae_new, _ = evaluate_elbo(vae_encoder_model, vae_decoder_model, x_new, n_samples=n_z_samples)
    fitness_score_new = _predict_func(x_new, predictor_model)
    log_mean_p_vae_new_dict[temp_suffix] = log_mean_p_vae_new
    mean_log_p_vae_new_dict[temp_suffix] = mean_log_p_vae_new
    fitness_score_new_dict[temp_suffix] = fitness_score_new
# Histogram groups: test-set values first, then the three margin variants
# (-2 / 0 / +2) and the fitness-only baseline, in plotting order.
likelihood_histos = [
    log_mean_p_vae_test,
    log_mean_p_vae_new_dict['_strong_vae_very_high_kl_epoch_35_margin_neg_2_lower_fitness'],
    log_mean_p_vae_new_dict['_strong_vae_very_high_kl_epoch_35_margin_0_lower_fitness'],
    log_mean_p_vae_new_dict['_strong_vae_very_high_kl_epoch_35_margin_pos_2_lower_fitness'],
    log_mean_p_vae_new_dict['_weak_vae_very_high_kl_epoch_35_only_fitness']
]
fitness_histos = [
    fitness_score_test,
    fitness_score_new_dict['_strong_vae_very_high_kl_epoch_35_margin_neg_2_lower_fitness'],
    fitness_score_new_dict['_strong_vae_very_high_kl_epoch_35_margin_0_lower_fitness'],
    fitness_score_new_dict['_strong_vae_very_high_kl_epoch_35_margin_pos_2_lower_fitness'],
    fitness_score_new_dict['_weak_vae_very_high_kl_epoch_35_only_fitness']
]
# Legend labels and bar colors, aligned index-for-index with the lists above.
name_list = [
    'Test',
    '-2',
    '0',
    '+2',
    'Fitness'
]
color_list = [
    'orange',
    'whitesmoke',
    'silver',
    'dimgray',
    'black'
]
#Joint histograms
#Compare VAE Log Likelihoods (ELBO)
# NOTE(review): `experiment_suffix` (singular) is not defined in this cell —
# presumably set in an earlier cell; verify it names the intended figure files.
plot_joint_histo(
    likelihood_histos,
    name_list,
    color_list,
    'VAE Log Likelihood',
    'Data Density',
    min_val=-40.5,
    max_val=-35,
    max_y_val=0.85,
    n_bins=40,
    figsize=(6, 4),
    save_fig=True,
    fig_name="apa_doubledope_" + experiment_suffix + "_likelihood"
)
#Compare Fitness Scores
plot_joint_histo(
    fitness_histos,
    name_list,
    color_list,
    'Predicted Fitness Score',
    'Data Density',
    min_val=-4,
    max_val=8,
    max_y_val=0.675,
    n_bins=40,
    figsize=(6, 4),
    save_fig=True,
    fig_name="apa_doubledope_" + experiment_suffix + "_fitness"
)
#Individual histograms: one Test-vs-Generated pair of plots per experiment.
for temp_ix, temp_suffix in enumerate(experiment_suffixes) :
    log_mean_p_vae_new = log_mean_p_vae_new_dict[temp_suffix]
    mean_log_p_vae_new = mean_log_p_vae_new_dict[temp_suffix]
    fitness_score_new = fitness_score_new_dict[temp_suffix]
    #Compare VAE Log Likelihoods (ELBO)
    plot_joint_histo(
        [log_mean_p_vae_test, log_mean_p_vae_new],
        ['Test', 'Gen'],
        ['orange', 'red'],
        'VAE Log Likelihood',
        'Data Density',
        min_val=-40.5,
        max_val=-35,
        max_y_val=0.85,
        n_bins=40,
        figsize=(6, 4),
        save_fig=True,
        fig_name="apa_doubledope_" + temp_suffix + "_likelihood"
    )
    #Compare Fitness Scores
    plot_joint_histo(
        [fitness_score_test, fitness_score_new],
        ['Test', 'Gen'],
        ['orange', 'red'],
        'Predicted Fitness Score',
        'Data Density',
        min_val=-4,
        max_val=8,
        max_y_val=0.675,
        n_bins=40,
        figsize=(6, 4),
        save_fig=True,
        fig_name="apa_doubledope_" + temp_suffix + "_fitness"
    )
```
| github_jupyter |
```
import csv
import json
import numpy as np
import colorspacious
# Palette sizes analyzed throughout this notebook.
ALL_NUM_COLORS = [6, 8, 10]
```
## Generate LaTeX markup for results table
```
# Top aesthetic-model cycles keyed by number of colors; JSON keys are strings,
# so convert them back to ints.
with open("../aesthetic-models/top-cycles.json") as infile:
    top_cycles = json.load(infile)
top_cycles = {int(i): top_cycles[i] for i in top_cycles}
# Color-name model arrays: a name per 24-bit RGB value (index = r + g*256 + b*256^2).
npz = np.load("../color-name-model/colornamemodel.npz")
COLOR_NAMES = list(npz["names"])
BCT_IDX = npz["bct_idxs"]
COLOR_NAME_IDX = npz["name_idxs"]
# Calculate the running min delta E (normal vision plus all CVD severities)
# for each position in each cycle.
min_dists = {}
for nc in ALL_NUM_COLORS:
    # Decode "rrggbb" hex strings into an (nc, 3) array of sRGB1 values.
    rgb = (
        np.array(
            [(int(i[:2], 16), int(i[2:4], 16), int(i[4:], 16)) for i in top_cycles[nc]]
        )
        / 255
    )
    # The first color has no predecessor; by convention its distance is 100.
    min_dists[nc] = [100]
    min_dist = 100
    # min_dist is deliberately carried across i: entry i is the minimum
    # pairwise distance among the first i+1 colors of the cycle.
    for i in range(1, nc):
        # Normal-vision distances do not depend on CVD severity, so compute
        # them once per color pair instead of once per severity level
        # (the original recomputed each of these 100 times).
        for j in range(i):
            min_dist = min(min_dist, colorspacious.deltaE(rgb[i], rgb[j]))
        for severity in range(1, 101):
            # Simulate the three anomalous-trichromacy types at this severity.
            deut = colorspacious.cspace_convert(
                rgb[: i + 1],
                {
                    "name": "sRGB1+CVD",
                    "cvd_type": "deuteranomaly",
                    "severity": severity,
                },
                "sRGB1",
            )
            prot = colorspacious.cspace_convert(
                rgb[: i + 1],
                {"name": "sRGB1+CVD", "cvd_type": "protanomaly", "severity": severity},
                "sRGB1",
            )
            trit = colorspacious.cspace_convert(
                rgb[: i + 1],
                {"name": "sRGB1+CVD", "cvd_type": "tritanomaly", "severity": severity},
                "sRGB1",
            )
            for j in range(i):
                min_dist = min(min_dist, colorspacious.deltaE(deut[i], deut[j]))
                min_dist = min(min_dist, colorspacious.deltaE(prot[i], prot[j]))
                min_dist = min(min_dist, colorspacious.deltaE(trit[i], trit[j]))
        min_dists[nc].append(min_dist)
# LaTeX table: three 5-column groups (name+swatch, R, G, B, min dE) for the
# six-, eight-, and ten-color cycles side by side.
table_output = r"""\begin{tabular}{@{}rrrrrrrrrrrrrrr@{}}
\toprule
\multicolumn{5}{c}{Six Colors} & \multicolumn{5}{c}{Eight Colors} & \multicolumn{5}{c}{Ten Colors} \\ \cmidrule(r){1-5}\cmidrule(rl){6-10}\cmidrule(l){11-15}
 & \multicolumn{1}{c}{R} & \multicolumn{1}{c}{G} & \multicolumn{1}{c}{B} & $\min\Delta E_\text{cvd}$ & & \multicolumn{1}{c}{R} & \multicolumn{1}{c}{G} & \multicolumn{1}{c}{B} & $\min\Delta E_\text{cvd}$ & & \multicolumn{1}{c}{R} & \multicolumn{1}{c}{G} & \multicolumn{1}{c}{B} & $\min\Delta E_\text{cvd}$ \\ \midrule
"""
for i in range(10):
    for nc in ALL_NUM_COLORS:
        if i < nc:
            color = top_cycles[nc][i]
            r = int(color[0:2], 16)
            g = int(color[2:4], 16)
            b = int(color[4:], 16)
            # Look up the human-readable color name by 24-bit RGB index.
            name = COLOR_NAMES[COLOR_NAME_IDX[r + g * 256 + b * 256 ** 2]]
            table_output += (
                name + r" \textcolor[HTML]{" + color + r"}{$\blacksquare$} & "
            )
            table_output += f"{r} & {g} & {b} & {min_dists[nc][i]:.1f} & "
        else:
            # Shorter cycles get empty cells in the remaining rows.
            table_output += "& & & & & "
    # Drop the trailing "& " before terminating the row.
    table_output = table_output[:-2]
    table_output += "\\\\\n"
table_output += r"""\bottomrule
\end{tabular}"""
print(table_output)
```
## Generate LaTeX markup for sequential-search cycles table
```
def _read_seq_cycle(path):
    """Parse a sequential-search cycle file: skip the 4 header lines, read
    (hex color, distance) rows, and prepend white with distance 100."""
    with open(path) as csv_file:
        for _ in range(4):
            csv_file.readline()
        csv_reader = csv.reader(csv_file, delimiter=" ")
        return [["ffffff", 100.0]] + [[row[0], float(row[1])] for row in csv_reader]


seq_cycle_a = _read_seq_cycle("../set-generation/maxdistinct_nc11_cvd100_minj0_maxj100.txt")
seq_cycle_b = _read_seq_cycle("../set-generation/maxdistinct_nc11_cvd100_minj40_maxj90.txt")
table_output = r"""\begin{tabular}{@{}crrrrcrrrr@{}}
\toprule
\multicolumn{5}{c}{$J' \in [0, 100]$} & \multicolumn{5}{c}{$J' \in [40, 90]$} \\ \cmidrule(r){1-5}\cmidrule(l){6-10}
 & \multicolumn{1}{c}{R} & \multicolumn{1}{c}{G} & \multicolumn{1}{c}{B} & $\Delta E_\text{cvd}$ & & \multicolumn{1}{c}{R} & \multicolumn{1}{c}{G} & \multicolumn{1}{c}{B} & $\Delta E_\text{cvd}$ \\ \midrule
"""
# Rows 1..10 (index 0 is the white placeholder): one swatch/R/G/B/distance
# group per cycle, the two cycles side by side in each row.
for i in range(1, 11):
    for cycle, tail in ((seq_cycle_a, "& "), (seq_cycle_b, "\\\\\n")):
        hex_color, dist = cycle[i]
        r = int(hex_color[0:2], 16)
        g = int(hex_color[2:4], 16)
        b = int(hex_color[4:], 16)
        table_output += r"\textcolor[HTML]{" + hex_color + r"}{$\blacksquare$} & "
        table_output += f"{r} & {g} & {b} & {dist:.1f} " + tail
table_output += r"""\bottomrule
\end{tabular}"""
print(table_output)
```
## Teaser figure with visualization of top cycles
```
svg = """<svg width="7in" height="2.25in" version="1.1" viewBox="0 0 7 2.25" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<path id="a" d="m0 0c0.4-0.13 0.6-0.13 1 0 0.4 0.13 0.6 0.13 1 0" fill="none" stroke-linecap="round" stroke-width=".02"/>
</defs>
<use transform="translate(.019088 .1169)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(.019088 .47974)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(.019088 .84259)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(.019088 1.2054)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(.019088 1.5683)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(.019088 1.9311)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(2.5 .1169)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(2.5 .37607)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(2.5 .63525)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(2.5 .89443)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(2.5 1.1536)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(2.5 1.4128)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(2.5 1.672)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(2.5 1.9311)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(4.9811 .1169)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(4.9811 .31848)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(4.9811 .52006)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(4.9811 .72164)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(4.9811 .92323)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(4.9811 1.1248)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(4.9811 1.3264)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(4.9811 1.528)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(4.9811 1.7296)" xlink:href="#a" stroke="#{}"/>
<use transform="translate(4.9811 1.9311)" xlink:href="#a" stroke="#{}"/>
<rect x=".01" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x=".39164" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x=".77327" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x="1.1549" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x="1.5365" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x="1.9182" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x="2.4959" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x="2.7685" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x="3.0411" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x="3.3137" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x="3.5863" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x="3.8589" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x="4.1315" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x="4.4041" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x="4.982" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x="5.194" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x="5.406" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x="5.6181" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x="5.8301" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x="6.0421" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x="6.2541" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x="6.4661" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x="6.6782" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
<rect x="6.8902" y="2.14" width=".1" height=".1" rx=".01" ry=".01" fill="#{}"/>
</svg>"""
print(svg.format(*((top_cycles[6] + top_cycles[8] + top_cycles[10]) * 2)))
```
## Generate LaTeX markup for best / worst color sets
```
# Candidate color-set files keyed by set size (ns10000 in the filename
# indicates 10000 sets per file).
COLOR_FILE = {
    6: "../set-generation/colors_mcd20.0_mld5.0_nc6_cvd100_minj40_maxj80_ns10000_f.txt",
    8: "../set-generation/colors_mcd18.0_mld4.2_nc8_cvd100_minj40_maxj82_ns10000_f.txt",
    10: "../set-generation/colors_mcd16.0_mld3.6_nc10_cvd100_minj40_maxj84_ns10000_f.txt",
}
with open("max-min-dist-sets.json") as infile:
    max_min_dist_sets = json.load(infile)
max_min_dist_sets = {int(i): np.array(max_min_dist_sets[i]) for i in max_min_dist_sets}
# Sort each max-min-distance reference set by hue, then chroma, then lightness.
# lexsort's primary key is the LAST row, hence the reversed columns before .T.
for nc in ALL_NUM_COLORS:
    rgb = [
        (int(i[:2], 16), int(i[2:4], 16), int(i[4:], 16)) for i in max_min_dist_sets[nc]
    ]
    jab = [colorspacious.cspace_convert(i, "sRGB255", "CAM02-UCS") for i in rgb]
    # (hue angle, chroma, lightness) derived from CAM02-UCS (J', a', b').
    hcl = np.array(
        [[np.arctan2(i[2], i[1]), np.sqrt(i[1] ** 2 + i[2] ** 2), i[0]] for i in jab]
    )
    max_min_dist_sets[nc] = max_min_dist_sets[nc][np.lexsort(hcl[:, ::-1].T)]
# Load color data and sort by hue, chroma, lightness
colors_rgb = {}
for num_colors in ALL_NUM_COLORS:
    with open(COLOR_FILE[num_colors]) as csv_file:
        # Skip header rows
        csv_file.readline()
        csv_file.readline()
        csv_file.readline()
        csv_reader = csv.reader(csv_file, delimiter=" ")
        colors_rgb[num_colors] = []
        for row in csv_reader:
            row = [i.strip() for i in row]
            rgb = [(int(i[:2], 16), int(i[2:4], 16), int(i[4:], 16)) for i in row]
            # Convert to CAM02-UCS and derive (hue angle, chroma, lightness).
            jab = [colorspacious.cspace_convert(i, "sRGB255", "CAM02-UCS") for i in rgb]
            hcl = np.array(
                [
                    [np.arctan2(i[2], i[1]), np.sqrt(i[1] ** 2 + i[2] ** 2), i[0]]
                    for i in jab
                ]
            )
            # lexsort uses the last row as the primary key, so the reversed
            # columns sort by hue first, then chroma, then lightness.
            new_row = np.array(row)[np.lexsort(hcl[:, ::-1].T)]
            colors_rgb[num_colors].append(new_row)
    colors_rgb[num_colors] = np.array(colors_rgb[num_colors])
# Aesthetic-model outputs per set size; each set's score is mean * saliency.
npz = np.load("../aesthetic-models/set-scores.npz")
scores = {}
for i in ALL_NUM_COLORS:
    mean = npz[f"mean{i:02d}"]
    saliency = npz[f"saliency{i:02d}"]
    scores[i] = mean * saliency
# Ascending score order: argsorts[nc][0] is the worst set, [-1] the best.
argsorts = {nc: np.argsort(scores[nc]) for nc in ALL_NUM_COLORS}
print(
    r"""\begin{tabular}{@{}rccc@{}}
\toprule
Rank & Six Colors & Eight Colors & Ten Colors \\
\midrule"""
)


def _print_ranked_rows(index_range):
    """Print one LaTeX row per score index, highest-scoring index first.

    Each row shows the rank (10000 - i) followed by the swatches of the
    corresponding six/eight/ten color sets. Extracted to remove the
    duplicated top-10 / bottom-10 loops.
    """
    lines = []
    for i in index_range:
        line = f"{10000 - i} & "
        for nc in ALL_NUM_COLORS:
            for c in colors_rgb[nc][argsorts[nc][i]]:
                line += r"\textcolor[HTML]{" + c + r"}{$\blacksquare$}"
            line += r" & "
        # Drop the trailing "& " and terminate the row.
        lines.append(line[:-2] + r"\\")
    for l in reversed(lines):
        print(l)


# Top-10 sets by score (indices 9990..9999 of the ascending argsort).
_print_ranked_rows(range(9990, 10000))
print(r"\midrule")
# Bottom-10 sets by score.
_print_ranked_rows(range(10))
print(r"\midrule")
# Final row: the max-min-distance reference sets.
line = r"$\max\min(\Delta E_\text{cvd})$ & "
for nc in ALL_NUM_COLORS:
    for c in max_min_dist_sets[nc]:
        line += r"\textcolor[HTML]{" + c + r"}{$\blacksquare$}"
    line += r" & "
print(line[:-2] + r"\\")
print(
    r"""\bottomrule
\end{tabular}"""
)
```
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Federated Learning for Text Generation
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/federated/tutorials/federated_learning_for_text_generation"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/federated/blob/master/docs/tutorials/federated_learning_for_text_generation.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/federated/blob/master/docs/tutorials/federated_learning_for_text_generation.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/federated/docs/tutorials/federated_learning_for_text_generation.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
**NOTE**: This colab has been verified to work with the [latest released version](https://github.com/tensorflow/federated#compatibility) of the `tensorflow_federated` pip package, but the Tensorflow Federated project is still in pre-release development and may not work on `master`.
This tutorial builds on the concepts in the [Federated Learning for Image Classification](federated_learning_for_image_classification.ipynb) tutorial, and demonstrates several other useful approaches for federated learning.
In particular, we load a previously trained Keras model, and refine it using federated training on a (simulated) decentralized dataset. This is practically important for several reasons. The ability to use serialized models makes it easy to mix federated learning with other ML approaches. Further, this allows use of an increasing range of pre-trained models --- for example, training language models from scratch is rarely necessary, as numerous pre-trained models are now widely available (see, e.g., [TF Hub](https://www.tensorflow.org/hub)). Instead, it makes more sense to start from a pre-trained model, and refine it using Federated Learning, adapting to the particular characteristics of the decentralized data for a particular application.
For this tutorial, we start with a RNN that generates ASCII characters, and refine it via federated learning. We also show how the final weights can be fed back to the original Keras model, allowing easy evaluation and text generation using standard tools.
```
#@test {"skip": true}
!pip install --quiet --upgrade tensorflow-federated-nightly
!pip install --quiet --upgrade nest-asyncio

# nest_asyncio lets TFF's asyncio event loop run inside the notebook's loop.
import nest_asyncio
nest_asyncio.apply()

import collections
import functools
import os
import time

import numpy as np
import tensorflow as tf
import tensorflow_federated as tff

# Fix the NumPy seed so the random-baseline evaluation below is reproducible.
np.random.seed(0)

# Test the TFF is working:
tff.federated_computation(lambda: 'Hello, World!')()
```
## Load a pre-trained model
We load a model that was pre-trained following the TensorFlow tutorial
[Text generation using a RNN with eager execution](https://www.tensorflow.org/tutorials/sequences/text_generation). However,
rather than training on [The Complete Works of Shakespeare](http://www.gutenberg.org/files/100/100-0.txt), we pre-trained the model on the text from the Charles Dickens'
[A Tale of Two Cities](http://www.ibiblio.org/pub/docs/books/gutenberg/9/98/98.txt)
and
[A Christmas Carol](http://www.ibiblio.org/pub/docs/books/gutenberg/4/46/46.txt).
Other than expanding the vocabulary, we didn't modify the original tutorial, so this initial model isn't state-of-the-art, but it produces reasonable predictions and is sufficient for our tutorial purposes. The final model was saved with `tf.keras.models.save_model(include_optimizer=False)`.
We will use federated learning to fine-tune this model for Shakespeare in this tutorial, using a federated version of the data provided by TFF.
### Generate the vocab lookup tables
```
# A fixed vocabulary of the ASCII chars that occur in the works of
# Shakespeare and Dickens.
vocab = list('dhlptx@DHLPTX $(,048cgkoswCGKOSW[_#\'/37;?bfjnrvzBFJNRVZ"&*.26:\naeimquyAEIMQUY]!%)-159\r')
# Lookup tables between characters and their integer ids.
char2idx = {ch: ix for ix, ch in enumerate(vocab)}
idx2char = np.array(vocab)
```
### Load the pre-trained model and generate some text
```
def load_model(batch_size):
    """Download (with caching) and load the pre-trained char-RNN.

    Only batch sizes 1 and 8 were saved; the model was serialized without
    its optimizer, so it is loaded uncompiled.
    """
    urls = {
        1: 'https://storage.googleapis.com/tff-models-public/dickens_rnn.batch1.kerasmodel',
        8: 'https://storage.googleapis.com/tff-models-public/dickens_rnn.batch8.kerasmodel'}
    assert batch_size in urls, 'batch_size must be in ' + str(urls.keys())
    model_url = urls[batch_size]
    cached_path = tf.keras.utils.get_file(os.path.basename(model_url), origin=model_url)
    return tf.keras.models.load_model(cached_path, compile=False)
def generate_text(model, start_string):
    """Sample 200 characters from the char-RNN, seeded with `start_string`.

    Adapted from https://www.tensorflow.org/tutorials/sequences/text_generation
    """
    num_generate = 200
    temperature = 1.0  # scales the logits before sampling
    # Encode the seed string and add a batch dimension.
    next_input = tf.expand_dims([char2idx[s] for s in start_string], 0)
    generated_chars = []
    model.reset_states()
    for _ in range(num_generate):
        # Temperature-scaled logits for the next character.
        predictions = tf.squeeze(model(next_input), 0) / temperature
        predicted_id = tf.random.categorical(
        predictions, num_samples=1)[-1, 0].numpy()
        # Feed the sampled character back in as the next input.
        next_input = tf.expand_dims([predicted_id], 0)
        generated_chars.append(idx2char[predicted_id])
    return (start_string + ''.join(generated_chars))
# Text generation requires a batch_size=1 model.
keras_model_batch1 = load_model(batch_size=1)
# Smoke-test the pre-trained model by sampling a continuation of a prompt.
print(generate_text(keras_model_batch1, 'What of TensorFlow Federated, you ask? '))
```
## Load and Preprocess the Federated Shakespeare Data
The `tff.simulation.datasets` package provides a variety of datasets that are split into "clients", where each client corresponds to a dataset on a particular device that might participate in federated learning.
These datasets provide realistic non-IID data distributions that replicate in simulation the challenges of training on real decentralized data. Some of the pre-processing of this data was done using tools from the [Leaf project](https://arxiv.org/abs/1812.01097) ([github](https://github.com/TalwalkarLab/leaf)).
```
# Federated Shakespeare dataset: one client per (play, character) pair.
train_data, test_data = tff.simulation.datasets.shakespeare.load_data()
```
The datasets provided by `shakespeare.load_data()` consist of a sequence of
string `Tensors`, one for each line spoken by a particular character in a
Shakespeare play. The client keys consist of the name of the play joined with
the name of the character, so for example `MUCH_ADO_ABOUT_NOTHING_OTHELLO` corresponds to the lines for the character Othello in the play *Much Ado About Nothing*. Note that in a real federated learning scenario
clients are never identified or tracked by ids, but for simulation it is useful
to work with keyed datasets.
Here, for example, we can look at some data from King Lear:
```
# Here the play is "The Tragedy of King Lear" and the character is "King".
raw_example_dataset = train_data.create_tf_dataset_for_client(
    'THE_TRAGEDY_OF_KING_LEAR_KING')
# To allow for future extensions, each entry x
# is an OrderedDict with a single key 'snippets' which contains the text.
for x in raw_example_dataset.take(2):
    print(x['snippets'])
```
We now use `tf.data.Dataset` transformations to prepare this data for training the char RNN loaded above.
```
# Input pre-processing parameters
SEQ_LENGTH = 100
BATCH_SIZE = 8
BUFFER_SIZE = 100  # For dataset shuffling

# Construct a lookup table to map string chars to indexes,
# using the vocab loaded above:
# (unknown characters map to index 0 via default_value)
table = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(
        keys=vocab, values=tf.constant(list(range(len(vocab))),
                                       dtype=tf.int64)),
    default_value=0)
def to_ids(x):
    """Convert an example's 'snippets' string into a vector of char ids."""
    snippet = tf.reshape(x['snippets'], shape=[1])
    # Split into individual bytes, then map each byte to its vocab index.
    return table.lookup(tf.strings.bytes_split(snippet).values)
def split_input_target(chunk):
    """Split a length-(SEQ_LENGTH+1) sequence into (input, one-step-ahead target)."""
    inputs = tf.map_fn(lambda seq: seq[:-1], chunk)
    targets = tf.map_fn(lambda seq: seq[1:], chunk)
    return (inputs, targets)
def preprocess(dataset):
    """Turn raw snippet examples into shuffled (input, target) char-id batches."""
    # Map ASCII chars to int64 vocab indexes, then flatten to a char stream.
    char_ids = dataset.map(to_ids).unbatch()
    # Group into sequences of SEQ_LENGTH + 1 (input plus shifted target).
    sequences = char_ids.batch(SEQ_LENGTH + 1, drop_remainder=True)
    # Shuffle and form minibatches.
    batches = sequences.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
    # Finally split each sequence into (input, target) pairs of length SEQ_LENGTH.
    return batches.map(split_input_target)
```
Note that in the formation of the original sequences and in the formation of
batches above, we use `drop_remainder=True` for simplicity. This means that any
characters (clients) that don't have at least `(SEQ_LENGTH + 1) * BATCH_SIZE`
chars of text will have empty datasets. A typical approach to address this would
be to pad the batches with a special token, and then mask the loss to not take
the padding tokens into account.
This would complicate the example somewhat, so for this tutorial we only use full batches, as in the
[standard tutorial](https://www.tensorflow.org/tutorials/sequences/text_generation).
However, in the federated setting this issue is more significant, because many
users might have small datasets.
Now we can preprocess our `raw_example_dataset`, and check the types:
```
# Apply the preprocessing pipeline and inspect the resulting element spec.
example_dataset = preprocess(raw_example_dataset)
print(example_dataset.element_spec)
```
## Compile the model and test on the preprocessed data
We loaded an uncompiled keras model, but in order to run `keras_model.evaluate`, we need to compile it with a loss and metrics. We will also compile in an optimizer, which will be used as the on-device optimizer in Federated Learning.
The original tutorial didn't have char-level accuracy (the fraction
of predictions where the highest probability was put on the correct
next char). This is a useful metric, so we add it.
However, we need to define a new metric class for this because
our predictions have rank 3 (a vector of logits for each of the
`BATCH_SIZE * SEQ_LENGTH` predictions), and `SparseCategoricalAccuracy`
expects only rank 2 predictions.
```
class FlattenedCategoricalAccuracy(tf.keras.metrics.SparseCategoricalAccuracy):
  """Char-level accuracy over rank-3 predictions.

  SparseCategoricalAccuracy expects rank-2 predictions, so the
  (batch * seq) logits are flattened into one prediction per character.
  """

  def __init__(self, name='accuracy', dtype=tf.float32):
    super().__init__(name, dtype=dtype)

  def update_state(self, y_true, y_pred, sample_weight=None):
    flat_true = tf.reshape(y_true, [-1, 1])
    flat_pred = tf.reshape(y_pred, [-1, len(vocab), 1])
    return super().update_state(flat_true, flat_pred, sample_weight)
```
Now we can compile a model, and evaluate it on our `example_dataset`.
```
BATCH_SIZE = 8  # The training and eval batch size for the rest of this tutorial.
keras_model = load_model(batch_size=BATCH_SIZE)
keras_model.compile(
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=[FlattenedCategoricalAccuracy()])

# Confirm that loss is much lower on Shakespeare than on random data
loss, accuracy = keras_model.evaluate(example_dataset.take(5), verbose=0)
# Fixed format spec: '{a:3f}' (a field width of 3) was evidently meant to be
# '{a:.3f}' (3 decimal places), matching the other accuracy printouts below.
print(
    'Evaluating on an example Shakespeare character: {a:.3f}'.format(a=accuracy))

# As a sanity check, we can construct some completely random data, where we expect
# the accuracy to be essentially random:
random_guessed_accuracy = 1.0 / len(vocab)
print('Expected accuracy for random guessing: {a:.3f}'.format(
    a=random_guessed_accuracy))
random_indexes = np.random.randint(
    low=0, high=len(vocab), size=1 * BATCH_SIZE * (SEQ_LENGTH + 1))
data = collections.OrderedDict(
    snippets=tf.constant(
        ''.join(np.array(vocab)[random_indexes]), shape=[1, 1]))
random_dataset = preprocess(tf.data.Dataset.from_tensor_slices(data))
loss, accuracy = keras_model.evaluate(random_dataset, steps=10, verbose=0)
print('Evaluating on completely random data: {a:.3f}'.format(a=accuracy))
```
## Fine-tune the model with Federated Learning
TFF serializes all TensorFlow computations so they can potentially be run in a
non-Python environment (even though at the moment, only a simulation runtime implemented in Python is available). Even though we are running in eager mode (TF 2.0), currently TFF serializes TensorFlow computations by constructing the
necessary ops inside the context of a "`with tf.Graph.as_default()`" statement.
Thus, we need to provide a function that TFF can use to introduce our model into
a graph it controls. We do this as follows:
```
# Clone the keras_model inside `create_tff_model()`, which TFF will
# call to produce a new copy of the model inside the graph that it will
# serialize. Note: we want to construct all the necessary objects we'll need
# _inside_ this method.
def create_tff_model():
  """Build a fresh TFF-wrapped clone of `keras_model` for serialization.

  TFF calls this inside the graph it controls, so every object the model
  needs is constructed here rather than captured from outside.
  """
  # `input_spec` tells TFF the types and shapes the model expects.
  cloned = tf.keras.models.clone_model(keras_model)
  return tff.learning.from_keras_model(
      cloned,
      input_spec=example_dataset.element_spec,
      loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
      metrics=[FlattenedCategoricalAccuracy()])
```
Now we are ready to construct a Federated Averaging iterative process, which we will use to improve the model (for details on the Federated Averaging algorithm, see the paper [Communication-Efficient Learning of Deep Networks from Decentralized Data](https://arxiv.org/abs/1602.05629)).
We use a compiled Keras model to perform standard (non-federated) evaluation after each round of federated training. This is useful for research purposes when doing simulated federated learning and there is a standard test dataset.
In a realistic production setting this same technique might be used to take models trained with federated learning and evaluate them on a centralized benchmark dataset for testing or quality assurance purposes.
```
# This command builds all the TensorFlow graphs and serializes them.
# FIX: use `learning_rate=` — the `lr=` alias is deprecated in
# tf.keras optimizers.
fed_avg = tff.learning.build_federated_averaging_process(
    model_fn=create_tff_model,
    client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.5))
```
Here is the simplest possible loop, where we run federated averaging for one round on a single client on a single batch:
```
# Run a single round of Federated Averaging on one client / one dataset of
# 5 batches, just to verify the iterative process works end to end.
state = fed_avg.initialize()
state, metrics = fed_avg.next(state, [example_dataset.take(5)])
train_metrics = metrics['train']
print('loss={l:.3f}, accuracy={a:.3f}'.format(
    l=train_metrics['loss'], a=train_metrics['accuracy']))
```
Now let's write a slightly more interesting training and evaluation loop.
So that this simulation still runs relatively quickly, we train on the same two clients each round, only considering five minibatches for each.
```
def data(client, source=train_data):
  """Return the first 5 preprocessed batches of `client`'s dataset."""
  return preprocess(source.create_tf_dataset_for_client(client)).take(5)
clients = [
    'ALL_S_WELL_THAT_ENDS_WELL_CELIA', 'MUCH_ADO_ABOUT_NOTHING_OTHELLO',
]
train_datasets = [data(client) for client in clients]
# We concatenate the test datasets for evaluation with Keras by creating a
# Dataset of Datasets, and then identity flat mapping across all the examples.
test_dataset = tf.data.Dataset.from_tensor_slices(
    [data(client, test_data) for client in clients]).flat_map(lambda x: x)
```
The initial state of the model produced by `fed_avg.initialize()` is based
on the random initializers for the Keras model, not the weights that were loaded,
since `clone_model()` does not clone the weights. To start training
from a pre-trained model, we set the model weights in the server state
directly from the loaded model.
```
NUM_ROUNDS = 5

# The state of the FL server, containing the model and optimization state.
state = fed_avg.initialize()

# Load our pre-trained Keras model weights into the global model state.
# (`clone_model()` inside `create_tff_model` does not copy weights, so the
# initial server state would otherwise be randomly initialized.)
state = tff.learning.state_with_new_model_weights(
    state,
    trainable_weights=[v.numpy() for v in keras_model.trainable_weights],
    non_trainable_weights=[
        v.numpy() for v in keras_model.non_trainable_weights
    ])
def keras_evaluate(state, round_num):
  """Evaluate the global model in `state` with Keras' standard `.evaluate()`.

  Builds a fresh compiled Keras model, pushes the server weights into it,
  and prints loss/accuracy on `example_dataset`. `round_num` is unused but
  kept so the training loop can pass it uniformly.
  """
  eval_model = load_model(batch_size=BATCH_SIZE)
  eval_model.compile(
      loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
      metrics=[FlattenedCategoricalAccuracy()])
  # Push the global (server) weights back into the Keras model.
  state.model.assign_weights_to(eval_model)
  loss, accuracy = eval_model.evaluate(example_dataset, steps=2, verbose=0)
  print('\tEval: loss={l:.3f}, accuracy={a:.3f}'.format(l=loss, a=accuracy))
# Main simulation loop: evaluate the current global model with Keras, then
# run one round of Federated Averaging over the fixed client datasets.
for round_num in range(NUM_ROUNDS):
  print('Round {r}'.format(r=round_num))
  keras_evaluate(state, round_num)
  state, metrics = fed_avg.next(state, train_datasets)
  train_metrics = metrics['train']
  print('\tTrain: loss={l:.3f}, accuracy={a:.3f}'.format(
      l=train_metrics['loss'], a=train_metrics['accuracy']))
print('Final evaluation')
keras_evaluate(state, NUM_ROUNDS + 1)
```
With the default changes, we haven't done enough training to make a big difference, but if you train longer on more Shakespeare data, you should see a difference in the style of the text generated with the updated model:
```
# Set our newly trained weights back in the originally created model.
# (NOTE(review): `keras_model_batch1` is presumably the batch_size=1 model
# built in an earlier cell — weights are batch-size independent; confirm.)
keras_model_batch1.set_weights([v.numpy() for v in keras_model.weights])
# Text generation requires batch_size=1
print(generate_text(keras_model_batch1, 'What of TensorFlow Federated, you ask? '))
```
## Suggested extensions
This tutorial is just the first step! Here are some ideas for how you might try extending this notebook:
* Write a more realistic training loop where you sample clients to train on randomly.
* Use "`.repeat(NUM_EPOCHS)`" on the client datasets to try multiple epochs of local training (e.g., as in [McMahan et. al.](https://arxiv.org/abs/1602.05629)). See also [Federated Learning for Image Classification](federated_learning_for_image_classification.ipynb) which does this.
* Change the `compile()` command to experiment with using different optimization algorithms on the client.
* Try the `server_optimizer` argument to `build_federated_averaging_process` to try different algorithms for applying the model updates on the server.
* Try the `client_weight_fn` argument to `build_federated_averaging_process` to try different weightings of the clients. The default weights client updates by the number of examples on the client, but you can do e.g. `client_weight_fn=lambda _: tf.constant(1.0)`.
| github_jupyter |
```
import pickle
import os
import numpy as np
import pandas as pd
# pd.set_option('display.max_rows', None)
import matplotlib.pyplot as plt
import country_converter as coco
cc = coco.CountryConverter()
df_weights = pd.read_csv('../data/ref/parsed_intnt_pop.csv')
df_region = pd.read_csv('../data/ref/parsed_country_region.csv')
class Transform:
    """Aggregate monthly pickled per-country frames into one time series.

    Walks a directory of '<year>-<month>...' pickles, normalizes country
    names to ISO3 codes, aggregates the numeric columns (arithmetic or
    internet-population-weighted mean, globally or per region), and stores
    the result in ``self.df`` with plotting/export helpers.

    Requirements:
      import os
      import pickle
      import numpy as np
      import pandas as pd
      import matplotlib.pyplot as plt
    """

    def __init__(self, keyword):
        # Search keyword the raw data was collected for; used in file names.
        self.keyword = keyword

    def agg(self, df, kind, **kwargs):
        """Aggregate ``df`` according to ``kind``.

        kind (str): 'arithmetic mean' or 'weighted mean'. For the weighted
            mean, kwargs must supply ``weights`` (a frame with an
            'intnt_pop' column) and ``key`` (the join column name).
        """
        if (kind=='arithmetic mean'):
            return df.mean(axis=0)
        if kind=='weighted mean':
            temp = df.merge(kwargs['weights'], how='left', on=kwargs['key'])
            # Normalize the population weights so they sum to 1.
            # (Vectorized; the previous .apply recomputed the sum per row.)
            temp['intnt_pop_norm'] = temp['intnt_pop'] / temp['intnt_pop'].sum()
            for col in df.convert_dtypes().select_dtypes(include=['int', 'float']).columns:
                temp[col] = temp[col] * temp['intnt_pop_norm']
            if self.grouping == 'global':
                return temp.drop(['intnt_pop_norm', 'intnt_pop'], axis=1).mean(numeric_only=True)
            grouped = temp.drop(['intnt_pop_norm', 'intnt_pop'], axis=1).groupby('region').mean()
            return grouped

    def cleanColumns(self, df, grouping):
        """Put 'date' first and rename columns to '<denom>_<brand>[_<region>]'."""
        df.columns = df.columns.str.replace('\"', '').str.replace(' ', '_')
        df['date'] = pd.to_datetime(df['date']).sort_values()
        df = df.reset_index(drop=True)
        # Reordering columns to have 'date' as the first column
        # (assumes 'date' is currently the last column).
        col_order = ['date'] + (df.columns.tolist()[:-1])
        df = df[col_order]
        # Converting all column names to lower case and shuffling keyword order
        new_cols = ['date']
        for col in df.columns[1:]:
            if grouping == 'global':
                brand, denom = col.lower().split('_')
                new_col = denom + '_' + brand
            else:
                region, brand, denom = col.lower().split('_')
                new_col = denom + '_' + brand + '_' + region
            new_cols.append(new_col)
        new_df = df.rename(columns=dict(zip(df.columns,new_cols)))
        return new_df

    def aggFromPickles(self, inputPath, aggFunc, grouping=pd.DataFrame({'Global': [0,0]}), **kwargs):
        """Build ``self.df`` by aggregating every pickle under ``inputPath``.

        grouping (pd.DataFrame): regions to group by; the default sentinel
            frame means global (no regional grouping).
        aggFunc (str): aggregation method of choice (e.g. 'arithmetic mean').
        """
        df = pd.DataFrame()
        for root, _, files in os.walk(inputPath):
            for file in sorted(files):
                if file.endswith(".pkl"):
                    # File names look like '<year>-<month>...pkl'.
                    year = file.split('-')[0]
                    month = file.split('-')[1]
                    with open(os.path.join(root, file), 'rb') as f:
                        df_temp = pickle.load(f)
                    # Normalize country identifiers to ISO3, dropping rows
                    # that cannot be resolved.
                    df_temp = df_temp.rename(columns={'geoName': 'country_code'})
                    df_temp = df_temp.loc[df_temp['country_code']!='U.S. Outlying Islands', :]
                    df_temp['country_code'] = cc.convert(names=df_temp['country_code'].tolist(),
                                                         to='ISO3',
                                                         not_found='not there')
                    df_temp = df_temp.loc[df_temp['country_code']!='not there', :]
                    if grouping.equals(pd.DataFrame({'Global': [0,0]})):
                        self.grouping = 'global'
                        if aggFunc == 'weighted mean':
                            df_temp = pd.DataFrame(self.agg(df_temp,
                                                            aggFunc,
                                                            weights=kwargs['weights'],
                                                            key=kwargs['key'])).T
                        else:
                            df_temp = pd.DataFrame(self.agg(df_temp.iloc[:, 1:], aggFunc)).T
                    else:
                        # BUGFIX: record the mode so agg()/cleanColumns()
                        # below do not read an unset attribute on regional
                        # runs (previously raised AttributeError).
                        self.grouping = 'regional'
                        df_temp_merged = df_temp.merge(grouping, on='country_code')
                        if aggFunc == 'weighted mean':
                            df_temp = self.agg(df_temp_merged,
                                               aggFunc,
                                               weights=kwargs['weights'],
                                               key=kwargs['key'])
                            df_temp = df_temp.reset_index() \
                                             .melt(id_vars='region')
                        else:
                            df_temp = df_temp_merged.groupby(['region']) \
                                                    .apply(self.agg, aggFunc) \
                                                    .reset_index() \
                                                    .melt(id_vars='region')
                        df_temp['Group'] = df_temp['region'] + '_' + df_temp['variable']
                        df_temp = df_temp.rename(columns={'value': aggFunc})
                        df_temp = df_temp[['Group', aggFunc]].set_index('Group').T
                    df_temp['date'] = year + '-' + month
                    # DataFrame.append was removed in pandas 2.0; concat is
                    # the supported equivalent.
                    df = pd.concat([df, df_temp])
        df = self.cleanColumns(df, self.grouping)
        self.df = df

    def lineplot(self):
        """Plot every aggregated series in ``self.df`` against time."""
        fig, ax = plt.subplots(figsize=(10,7))
        for i in range(1, len(self.df.columns)):
            ax.plot(self.df.iloc[:,0], self.df.iloc[:, i], label=self.df.columns[i])
        ax.set_ylim(bottom=0)
        ax.set_xlabel('Year')
        ax.set_ylabel('Mean')
        ax.set_title('Keyword: \'{}\''.format(self.keyword.capitalize()))
        ax.legend()

    def toPickle(self, outputPath):
        """Serialize ``self.df`` to '<outputPath>/global-arimean-<keyword>.pkl'."""
        # NOTE(review): the file name hard-codes 'global-arimean' regardless
        # of how the frame was actually aggregated — confirm before relying
        # on it.
        fileName = 'global-arimean-{}'.format(self.keyword)
        pathName = '{}/{}.pkl'.format(outputPath, fileName)
        with open(pathName, 'wb') as f:
            pickle.dump(self.df, f)

    def toCSV(self, outputPath, aggFunc, grouping):
        """Write ``self.df`` to '<outputPath>/<keyword>-<aggFunc>-<grouping>.csv'."""
        fileName = '{}-{}-{}'.format(self.keyword, aggFunc, grouping)
        pathName = '{}/{}.csv'.format(outputPath, fileName)
        self.df.to_csv(pathName, index=False)
class Transform_keyword(Transform):
    """Subclass of Transform bound to the conventional raw-data layout.

    Looks up pickles under '../data/raw/<keyword>' so callers only need to
    supply the keyword.
    """

    def __init__(self, keyword):
        super().__init__(keyword)

    def path(self):
        # Conventional location of the raw pickles for this keyword.
        return '../data/raw/{}'.format(self.keyword)

    def aggFromPickles(self, aggFunc='arithmetic mean'):
        # BUGFIX: the parent signature requires `aggFunc`; the previous
        # super() call omitted it and raised TypeError on every call
        # (main() below would crash). Default matches the 'arimean' CSVs
        # written by main().
        super().aggFromPickles(self.path(), aggFunc)
# Example run: weighted-mean global aggregation for the 'mirrorless'
# keyword, then plot and export it.
t = Transform('mirrorless')
t.aggFromPickles(inputPath='../data/raw/mirrorless', aggFunc='weighted mean', weights= df_weights, key='country_code')
t.df
t.lineplot()
t.toCSV('../data/agged', 'wtdmean', 'global')
def main():
    """Run the arithmetic-mean global pipeline for each camera keyword."""
    for keyword in ('camera', 'dslr', 'mirrorless'):
        transform = Transform_keyword(keyword)
        transform.aggFromPickles()
        transform.lineplot()
        transform.toCSV('../data/agged/', 'arimean', 'global')

if __name__ == '__main__':
    main()
```
| github_jupyter |
# 目标检测数据集
:label:`sec_object-detection-dataset`
目标检测领域没有像 MNIST 和 Fashion-MNIST 那样的小数据集。
为了快速测试目标检测模型,[**我们收集并标记了一个小型数据集**]。
首先,我们拍摄了一组香蕉的照片,并生成了 1000 张不同角度和大小的香蕉图像。
然后,我们在一些背景图片的随机位置上放一张香蕉的图像。
最后,我们在图片上为这些香蕉标记了边界框。
## [**下载数据集**]
包含所有图像和 csv 标签文件的香蕉检测数据集可以直接从互联网下载。
```
%matplotlib inline
import os
import pandas as pd
import paddle
import paddle.vision as vision
from d2l import torch as d2l
#@save
d2l.DATA_HUB['banana-detection'] = (
d2l.DATA_URL + 'banana-detection.zip',
'5de26c8fce5ccdea9f91267273464dc968d20d72')
```
## 读取数据集
通过 `read_data_bananas` 函数,我们[**读取香蕉检测数据集**]。
该数据集包括一个的 csv 文件,内含目标类别标签和位于左上角和右下角的真实边界框坐标。
```
#@save
def read_data_bananas(is_train=True):
    """Read the images and labels of the banana detection dataset."""
    data_dir = d2l.download_extract('banana-detection')
    csv_fname = os.path.join(data_dir,
                             'bananas_train' if is_train else 'bananas_val',
                             'label.csv')
    csv_data = pd.read_csv(csv_fname)
    csv_data = csv_data.set_index('img_name')
    images, targets = [], []
    for img_name, target in csv_data.iterrows():
        images.append(
            vision.image.image_load(
                os.path.join(data_dir,
                             'bananas_train' if is_train else 'bananas_val',
                             'images', f'{img_name}')))
        # Here `target` contains (class, upper-left x, upper-left y,
        # lower-right x, lower-right y), where all the images have the same
        # banana class (index 0)
        targets.append(list(target))
    # BUGFIX: this Paddle port referenced `torch.tensor`, but torch is never
    # imported in this notebook — build the label tensor with Paddle.
    # Dividing by 256 rescales the pixel coordinates to [0, 1].
    return images, paddle.to_tensor(targets, dtype='float32').unsqueeze(1) / 256
```
通过使用 `read_data_bananas` 函数读取图像和标签,以下 `BananasDataset` 类别将允许我们[**创建一个自定义 `Dataset` 实例**]来加载香蕉检测数据集。
```
#@save
class BananasDataset(paddle.io.Dataset):
    """A customized dataset to load the banana detection dataset."""
    # BUGFIX: the base class was `torch.utils.data.Dataset`, but torch is
    # never imported here and the loader below is `paddle.io.DataLoader`,
    # which expects a `paddle.io.Dataset`.
    def __init__(self, is_train):
        self.features, self.labels = read_data_bananas(is_train)
        print('read ' + str(len(self.features)) + (
            f' training examples' if is_train else f' validation examples'))

    def __getitem__(self, idx):
        # NOTE(review): `vision.image.image_load` may return an ndarray,
        # which has no `.float()`; if so this should be
        # `paddle.to_tensor(self.features[idx], dtype='float32')` — confirm.
        return (self.features[idx].float(), self.labels[idx])

    def __len__(self):
        return len(self.features)
```
最后,我们定义 `load_data_bananas` 函数,来[**为训练集和测试集返回两个数据加载器实例**]。对于测试集,无需按随机顺序读取它。
```
#@save
def load_data_bananas(batch_size):
    """Load the banana detection dataset and return (train, val) loaders."""
    loaders = []
    for training in (True, False):
        dataset = BananasDataset(is_train=training)
        # Only the training loader shuffles; validation keeps file order.
        loaders.append(
            paddle.io.DataLoader(dataset, batch_size, shuffle=training))
    return tuple(loaders)
```
让我们[**读取一个小批量,并打印其中的图像和标签的形状**]。
图像的小批量的形状为(批量大小、通道数、高度、宽度),看起来很眼熟:它与我们之前图像分类任务中的相同。
标签的小批量的形状为(批量大小,$m$,5),其中 $m$ 是数据集的任何图像中边界框可能出现的最大数量。
小批量计算虽然高效,但它要求每张图像含有相同数量的边界框,以便放在同一个批量中。
通常来说,图像可能拥有不同数量个边界框;因此,在达到 $m$ 之前,边界框少于 $m$ 的图像将被非法边界框填充。
这样,每个边界框的标签将被长度为 5 的数组表示。
数组中的第一个元素是边界框中对象的类别,其中 -1 表示用于填充的非法边界框。
数组的其余四个元素是边界框左上角和右下角的 ($x$, $y$) 坐标值(值域在0到1之间)。
对于香蕉数据集而言,由于每张图像上只有一个边界框,因此 $m=1$。
```
# Read one minibatch and inspect the shapes: images are
# (batch, channels, height, width); labels are (batch, m, 5) with m the
# maximum number of boxes per image (m = 1 for this dataset).
batch_size, edge_size = 32, 256
train_iter, _ = load_data_bananas(batch_size)
batch = next(iter(train_iter))
batch[0].shape, batch[1].shape
```
## [**示范**]
让我们展示 10 幅带有真实边界框的图像。
我们可以看到在所有这些图像中香蕉的旋转角度、大小和位置都有所不同。
当然,这只是一个简单的人工数据集,实践中真实世界的数据集通常要复杂得多。
```
# Show 10 images with their ground-truth boxes: move channels last and
# rescale pixels to [0, 1] for display; box coordinates (stored in [0, 1])
# are scaled back to pixels via `edge_size`.
imgs = (batch[0][0:10].permute(0, 2, 3, 1)) / 255
axes = d2l.show_images(imgs, 2, 5, scale=2)
for ax, label in zip(axes, batch[1][0:10]):
    d2l.show_bboxes(ax, [label[0][1:5] * edge_size], colors=['w'])
```
## 小结
* 我们收集的香蕉检测数据集可用于演示目标检测模型。
* 用于目标检测的数据加载与图像分类的数据加载类似。但是,在目标检测中,标签还包含真实边界框的信息,它不出现在图像分类中。
## 练习
1. 在香蕉检测数据集中演示其他带有真实边界框的图像。它们在边界框和目标方面有什么不同?
1. 假设我们想要将数据增强(例如随机裁剪)应用于目标检测。它与图像分类中的有什么不同?提示:如果裁剪的图像只包含物体的一小部分会怎样?
[Discussions](https://discuss.d2l.ai/t/3202)
| github_jupyter |
# Entrainment-LF17
This notebook runs [GOTM](https://gotm.net/) simulating the entrainment of an initial mixed layer under various constant wind, waves, and destabilizing surface buoyancy flux forcing as described in [Li and Fox-Kemper, 2017](https://doi.org/10.1175/JPO-D-17-0085.1) (LF17). The idealized initial conditions and surface forcing are consistent with the LES in LF17 so the results are directly comparable. See Section 3.3 and 4.4.1 of [Li et al., 2019](https://doi.org/10.1029/2019MS001810).
Note that this notebook will setup 54 GOTM runs for each chosen turbulence closure method, which take up to 500 MB space.
```
import sys
import copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
sys.path.append("../../../gotmtool")
from gotmtool import *
from gotmtool.stokesdrift import stokes_drift_dhh85
```
## Create a model
Create a model with environment file `../../.gotm_env.yaml`, which is created by `gotm_env_init.py`.
```
m = Model(name='Entrainment-LF17', environ='../../.gotm_env.yaml')
```
Take a look at what are defined in the environment file.
```
for key in m.environ:
print('{:>15s}: {}'.format(key, m.environ[key]) )
```
## Build the model
```
%%time
m.build()
```
## Configuration
Initialize the GOTM configuration
```
cfg = m.init_config()
```
Update the configuration
```
# setup: 2-day run of a 163.84 m column resolved with 256 levels, 60 s step
title = 'Entrainment - LF17'
nlev = 256
depth = 163.84
cfg['title'] = title
cfg['location']['name'] = 'OSMOSIS'
cfg['location']['latitude'] = 45.0
cfg['location']['longitude'] = 0.0
cfg['location']['depth'] = depth
cfg['time']['start'] = '2000-01-01 00:00:00'
cfg['time']['stop'] = '2000-01-03 00:00:00'
cfg['time']['dt'] = 60.0
cfg['grid']['nlev'] = nlev
# output: all variables over the full column, every 15 steps (15 min)
cfg['output'] = {}
cfg['output']['gotm_out'] = {}
cfg['output']['gotm_out']['use'] = True
cfg['output']['gotm_out']['title'] = title
cfg['output']['gotm_out']['k1_stop'] = nlev+1
cfg['output']['gotm_out']['k_stop'] = nlev
cfg['output']['gotm_out']['time_unit'] = 'dt'
cfg['output']['gotm_out']['time_step'] = 15
cfg['output']['gotm_out']['variables'] = [{}]
cfg['output']['gotm_out']['variables'][0]['source'] = '*'
# forcing: initial T/S profiles from files; constant surface fluxes whose
# per-case values are filled in later; Stokes drift profiles from files
datadir = m.environ['gotmdir_data']+'/examples/Entrainment-LF17'
cfg['temperature']['method'] = 'file'
cfg['temperature']['file'] = datadir+'/t_prof.dat'
cfg['salinity']['method'] = 'file'
cfg['salinity']['file'] = datadir+'/s_prof.dat'
cfg['surface']['fluxes']['heat']['method'] = 'constant'
cfg['surface']['fluxes']['tx']['method'] = 'constant'
cfg['surface']['fluxes']['ty']['method'] = 'constant'
cfg['surface']['swr']['method'] = 'constant'
cfg['surface']['precip']['method'] = 'constant'
cfg['waves']['stokes_drift']['us']['method'] = 'file'
cfg['waves']['stokes_drift']['vs']['method'] = 'file'
cfg['waves']['stokes_drift']['us']['file'] = 'us_prof.dat'
cfg['waves']['stokes_drift']['us']['column'] = 1
cfg['waves']['stokes_drift']['vs']['file'] = 'us_prof.dat'
cfg['waves']['stokes_drift']['vs']['column'] = 2
# EOS -- use linear equation of state around T0=12, S0=35
cfg['eq_state']['form'] = 'linear_custom'
cfg['eq_state']['linear']['T0'] = 12.0
cfg['eq_state']['linear']['S0'] = 35.0
cfg['eq_state']['linear']['dtr0'] = -0.20
cfg['eq_state']['linear']['dsr0'] = 0.75
cfg['physical_constants']['rho_0'] = 1000.0
```
## Set the turbulence methods
- GLS-C01A: the generic length scale (GLS; [Umlauf and Burchard, 2003](https://doi.org/10.1357/002224003322005087)) model in the $k$-$\epsilon$ formulation with the weak-equilibrium stability function by [Canuto et al., 2001](https://doi.org/10.1175/1520-0485(2001)031%3C1413:OTPIOP%3E2.0.CO;2) (C01A).
- KPP-CVMix: KPP implementation in CVMix ([Large et al., 1994](https://doi.org/10.1029/94RG01872), [Griffies et al., 2015](https://github.com/CVMix/CVMix-description/raw/master/cvmix.pdf))
- KPPLT-VR12: KPP with Langmuir mixing ([Li et al., 2016](https://doi.org/10.1016%2Fj.ocemod.2015.07.020))
- KPPLT-LF17: KPP with Lanmguir turbulence enhanced entrainment ([Li and Fox-Kemper, 2017](https://doi.org/10.1175%2FJPO-D-17-0085.1))
```
turbmethods = [
'GLS-C01A',
'KPP-CVMix',
'KPPLT-VR12',
'KPPLT-LF17',
]
```
## Create a list of configurations
```
# heat flux (W/m^2)
heatflux = {'BF05': -5.0, 'BF10': -10.0, 'BF25': -25.0, 'BF50': -50.0,
'BF1h': -100.0, 'BF2h': -200.0, 'BF3h': -300.0, 'BF5h':-500.0}
# 10-meter wind (m/s)
u10 = {'WD05': 5.0, 'WD08': 8.0, 'WD10': 10.0}
v10 = {'WD05': 0.0, 'WD08': 0.0, 'WD10': 0.0}
taux = {'WD05': 0.036, 'WD08': 0.09216, 'WD10': 0.144}
tauy = {'WD05': 0.0, 'WD08': 0.0, 'WD10': 0.0}
# wave age (unitless)
wave_age = {'WV00': 1.2, 'WV01': 1.2, 'WV02': 1.0, 'WV03': 0.8, 'WV04': 0.6}
# LF17 case list
cases = [
'BF05WD05WV00',
'BF05WD05WV01',
'BF05WD05WV02',
'BF05WD05WV03',
'BF05WD05WV04',
'BF05WD08WV00',
'BF05WD08WV01',
'BF05WD08WV02',
'BF05WD08WV03',
'BF05WD08WV04',
'BF05WD10WV00',
'BF05WD10WV01',
'BF05WD10WV02',
'BF05WD10WV03',
'BF05WD10WV04',
'BF10WD05WV00',
'BF10WD05WV01',
'BF10WD05WV03',
'BF1hWD05WV00',
'BF1hWD05WV01',
'BF1hWD05WV03',
'BF1hWD08WV00',
'BF1hWD08WV01',
'BF1hWD08WV03',
'BF1hWD10WV00',
'BF1hWD10WV01',
'BF1hWD10WV03',
'BF25WD05WV00',
'BF25WD05WV01',
'BF25WD05WV03',
'BF25WD08WV00',
'BF25WD08WV01',
'BF25WD08WV03',
'BF25WD10WV00',
'BF25WD10WV01',
'BF25WD10WV03',
'BF2hWD05WV00',
'BF2hWD05WV01',
'BF2hWD05WV03',
'BF3hWD05WV00',
'BF3hWD05WV01',
'BF3hWD05WV03',
'BF50WD05WV00',
'BF50WD05WV01',
'BF50WD05WV03',
'BF50WD08WV00',
'BF50WD08WV01',
'BF50WD08WV03',
'BF50WD10WV00',
'BF50WD10WV01',
'BF50WD10WV03',
'BF5hWD05WV00',
'BF5hWD05WV01',
'BF5hWD05WV03',
]
```
Set the depth and time for `stokes_drift_dhh85()` and `dat_dump_pfl()`. The former computes Stokes drift using the [Donelan et al., 1985](https://doi.org/10.1098/rsta.1979.0079) spectrum, and the latter write profile data in the GOTM input format.
```
# Build one GOTM configuration per (LF17 case, turbulence method) pair.
# Case names encode the forcing: BFxx = surface heat (buoyancy) flux,
# WDxx = 10-m wind, WVxx = wave age.
dz = depth/nlev
# Cell-center depths (negative downward), from -dz/2 to -depth+dz/2.
z = np.linspace(-0.5*dz, -depth+0.5*dz, nlev)
time = pd.date_range(cfg['time']['start'], freq='D', periods=1)
nt = len(time)
cfgs = []
labels = []
for i, casename in enumerate(cases):
    print(casename)
    # set surface fluxes (looked up from the label dictionaries above)
    heatflux_label = casename[:4]
    wind_label = casename[4:8]
    wave_label = casename[8:]
    wind_speed = np.sqrt(u10[wind_label]**2+v10[wind_label]**2)
    cfg['surface']['fluxes']['heat']['constant_value'] = heatflux[heatflux_label]
    cfg['surface']['fluxes']['tx']['constant_value'] = taux[wind_label]
    cfg['surface']['fluxes']['ty']['constant_value'] = tauy[wind_label]
    # compute Stokes drift (DHH85 spectrum), aligned with the wind direction
    xcomp = u10[wind_label]/wind_speed
    ycomp = v10[wind_label]/wind_speed
    stokes_drift = stokes_drift_dhh85(z, wind_speed, wave_age[wave_label])
    # Replicate the profile for each time record and split into components.
    us_arr = np.tile(stokes_drift, (nt,1)) * xcomp
    vs_arr = np.tile(stokes_drift, (nt,1)) * ycomp
    # set turbulence methods
    for turbmethod in turbmethods:
        run_label = casename+'/'+turbmethod
        labels.append(run_label)
        run_dir = m.environ['gotmdir_run']+'/'+m.name+'/'+run_label
        os.makedirs(run_dir, exist_ok=True)
        # Write the Stokes drift profile where the configuration above
        # expects it ('us_prof.dat' in the run directory).
        dat_dump_pfl(time, z, [us_arr, vs_arr], run_dir+'/us_prof.dat')
        if turbmethod == 'GLS-C01A':
            cfg['turbulence']['turb_method'] = 'second_order'
            cfg['turbulence']['tke_method'] = 'tke'
            cfg['turbulence']['len_scale_method'] = 'gls'
            cfg['turbulence']['scnd']['method'] = 'weak_eq_kb_eq'
            cfg['turbulence']['scnd']['scnd_coeff'] = 'canuto-a'
            cfg['turbulence']['turb_param']['length_lim'] = 'false'
            cfg['turbulence']['turb_param']['compute_c3'] = 'true'
            cfg['turbulence']['turb_param']['Ri_st'] = 0.25
            cfg['turbulence']['generic']['gen_m'] = 1.5
            cfg['turbulence']['generic']['gen_n'] = -1.0
            cfg['turbulence']['generic']['gen_p'] = 3.0
            cfg['turbulence']['generic']['cpsi1'] = 1.44
            cfg['turbulence']['generic']['cpsi2'] = 1.92
            cfg['turbulence']['generic']['cpsi3minus'] = -0.63
            cfg['turbulence']['generic']['cpsi3plus'] = 1.0
            cfg['turbulence']['generic']['sig_kpsi'] = 1.0
            cfg['turbulence']['generic']['sig_psi'] = 1.3
        elif turbmethod == 'KPP-CVMix':
            cfg['turbulence']['turb_method'] = 'cvmix'
            cfg['cvmix']['surface_layer']['kpp']['langmuir_method'] = 'none'
        elif turbmethod == 'KPPLT-VR12':
            cfg['turbulence']['turb_method'] = 'cvmix'
            cfg['cvmix']['surface_layer']['kpp']['langmuir_method'] = 'lwf16'
        elif turbmethod == 'KPPLT-LF17':
            cfg['turbulence']['turb_method'] = 'cvmix'
            cfg['cvmix']['surface_layer']['kpp']['langmuir_method'] = 'lf17'
        else:
            raise ValueError('Turbulence closure method \'{}\' not defined.'.format(turbmethod))
        # Deep copy: `cfg` is mutated every iteration, so each run must
        # keep its own snapshot.
        cfgs.append(copy.deepcopy(cfg))
```
## Run the model
```
%%time
sims = m.run_batch(configs=cfgs, labels=labels, nproc=4)
```
| github_jupyter |
# Overview
This project is an implementation of the streaming, one-pass histograms described in Ben-Haim's [Streaming Parallel Decision Trees](http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf). The histograms act as an approximation of the underlying dataset. The histogram bins do not have a preset size, so as values stream into the histogram, bins are dynamically added and merged as needed. One particularly nice feature of streaming histograms is that they can be used to approximate quantiles without sorting (or even individually storing) values. Additionally, they can be used for learning, visualization, discretization, or analysis. The histograms may be built independently and merged, making them convenient for parallel and distributed algorithms.
This `Python` version of the algorithm combines ideas and code from [BigML](https://bigml.com)'s [Streaming Histograms for Clojure/Java](https://github.com/bigmlcom/histogram) and [VividCortex](https://vividcortex.com)'s [Streaming approximate histograms in Go](https://github.com/VividCortex/gohistogram).
# Installation
`streamhist` has not yet been uploaded to [PyPi](https://pypi.python.org/pypi), as we are currently at the 'pre-release' stage. Having said that you should be able to install it via `pip` directly from the GitHub repository with:
```bash
pip install git+git://github.com/carsonfarmer/streamhist.git
```
You can also install `streamhist` by cloning the [GitHub repository](https://github.com/carsonfarmer/streamhist) and using the setup script:
```bash
git clone https://github.com/carsonfarmer/streamhist.git
cd streamhist
python setup.py install
```
# Testing
`streamhist` comes with a relatively comprehensive range of tests, including unit tests and regression tests. To run the tests, you can use `pytest`, which can be installed via `pip` using the `recommended.txt` file (note, this will also install `numpy`, `matplotlib`, and `IPython` which are used for tests and examples):
```bash
pip install -r recommended.txt
pytest streamhist
```
# Features
In the following examples we use `numpy` to generate data and `matplotlib` for plotting.
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use("ggplot") # Makes for nicer looking plots
try:
from functools import reduce
except ImportError:
pass
from streamhist import StreamHist
```
## Basics
The simplest way to use a `StreamHist` is to initialize one and then `update` it with data points. In this first example, we create a sequence of 200k samples from a normal distribution (mean=0; variance=1) and add them to the histogram:
```
# 200k draws from N(0, 1), streamed into a 32-bin histogram.
normal_data = np.random.normal(0.0, 1.0, 200000)
h1 = StreamHist(maxbins=32) # Create histogram with 32 bins
h1.update(normal_data) # Add points all at once
```
You can use the `sum` method to find the approximate number of points less than a given threshold:
```
h1.sum(0.0)
```
The `density` method gives us an estimate of the point density at the given location:
```
h1.density(0.0)
```
The `count`, `mean`, `median`, `min`, `max`, and `var` methods return useful summary statistics for the underlying dataset (some methods return approximate results). There is also a `describe` method that produces multiple summary statistics:
```
h1.describe()
```
Arbritrary quantiles/percentiles can be found using `quantile`:
```
h1.quantiles(0.5, 0.95, 0.99) # Supports multiple quantile inputs
```
## Sums and densities
We can plot the sums and density estimates as functions. First we compute the data bounds and then we create 100 linearly spaced numbers whithin those bounds for plotting:
```
l, u = h1.bounds()
x = np.linspace(l, u, 100)
plt.figure()
y1 = [h1.sum(z) for z in x]
y2 = [h1.density(z) for z in x]
plt.plot(x, y1, label="Sum")
plt.plot(x, y2, label="Density")
plt.title("Sum and density")
plt.ylabel("Frequency")
plt.xlabel("Data")
plt.legend(loc="best")
plt.ylim(-5000, 205000)
plt.show()
```
If we normalized the values (dividing by 200K), these lines approximate the [cumulative distribution function](http://en.wikipedia.org/wiki/Cumulative_distribution_function) (CDF) and the [probability density function](http://en.wikipedia.org/wiki/Probability_density_function) (PDF) for the normal distribution. Alternatively, we can compute the CDF and PDF directly:
```
plt.figure()
y1 = [h1.cdf(z) for z in x]
y2 = [h1.pdf(z) for z in x]
plt.plot(x, y1, label="CDF")
plt.plot(x, y2, label="PDF")
plt.title("CDF and PDF")
plt.ylabel("Density")
plt.xlabel("Data")
plt.legend(loc="best")
plt.ylim(-0.03, 1.03)
plt.show()
```
## Bin counts
The histogram approximates distributions using a constant number of bins. This bin limit can be specified as a parameter when creating a `StreamHist` object (`maxbins` defaults to 64). A bin contains a `count` of the points within the bin along with the `mean` for the values in the bin. The edges of the bin aren't explicitly captured. Instead the histogram assumes that points of a bin are distributed with half the points less than the bin mean and half greater. This explains the fractional sum in the following example.
```
h2 = StreamHist(maxbins=3).update([1, 2, 3])
list(h2.bins)
h2.sum(2.)
```
As mentioned earlier, the bin limit constrains the number of unique bins a histogram can use to capture a distribution. The histogram above was created with a limit of just three bins. When we add a fourth unique value it will create a fourth bin and then merge the nearest two.
```
h2.update(0.5)
list(h2.bins)
```
A larger bin limit means a higher quality picture of the distribution, but it also means a larger memory footprint. In the following example, we create two new histograms based on a sequence of 300K samples from a mixture of four Gaussian distributions (means=0, 1, 2, 3; variance=0.2):
```
# 300k samples from a 4-component Gaussian mixture (means 0..3, sd 0.2),
# shuffled so the stream is not ordered by component.
mixed_normal_data = np.concatenate((
    np.random.normal(0.0, 0.2, 160000),
    np.random.normal(1.0, 0.2, 80000),
    np.random.normal(2.0, 0.2, 40000),
    np.random.normal(3.0, 0.2, 20000)
))
np.random.shuffle(mixed_normal_data)
h3 = StreamHist(maxbins=8).update(mixed_normal_data)
h4 = StreamHist(maxbins=64).update(mixed_normal_data)
```
In the plot below, the red line represents the PDF for the histogram with 8 bins and the blue line represents the PDF for the histogram with 64 bins.
```
l, u = h4.bounds()
x = np.linspace(l, u, 100)
plt.figure()
y1 = [h3.pdf(z) for z in x]
y2 = [h4.pdf(z) for z in x]
plt.plot(x, y1, label="8 Bins")
plt.plot(x, y2, label="64 Bins")
plt.legend(loc="best")
plt.title("Bin (max) counts")
plt.ylabel("Density")
plt.xlabel("Data")
plt.xlim(-1.2, 4)
plt.ylim(-0.05, None)
plt.show()
```
## Bin weighting
Another option when creating a histogram is to use *gap weighting*. When `weighted` is `True`, the histogram is encouraged to spend more of its bins capturing the densest areas of the distribution. For the normal distribution that means better resolution near the mean and less resolution near the tails. The chart below shows a histogram with gap weighting in red and without gap weighting in blue. Near the center of the distribution, red uses more bins and better captures the Gaussian distribution's true curve.
```
h5 = StreamHist(maxbins=8, weighted=True).update(normal_data)
h6 = StreamHist(maxbins=8, weighted=False).update(normal_data)
l, u = h5.bounds()
x = np.linspace(l, u, 100)
plt.figure()
y1 = [h5.pdf(z) for z in x]
y2 = [h6.pdf(z) for z in x]
plt.plot(x, y1, label="Weighted")
plt.plot(x, y2, label="Unweighted")
plt.legend(loc="best")
plt.title("Bin weighting")
plt.ylabel("Density")
plt.xlabel("Data")
plt.xlim(-4.5, 4.5)
plt.ylim(-0.02, None)
plt.show()
```
## Merging
A strength of the streaming histograms is their ability to merge with one another. Histograms can be built on separate data streams (and/or nodes, processes, clusters, etc) and then combined to give a better overall picture.
In this example, we first create 300 samples from the mixed Gaussian data, and then stream each sample through its own `StreamHist` instance (for a total of 300 unique `StreamHist` objects). We then merge the 300 noisy histograms to form a single merged histogram:
```
# Create 300 samples from the mixed Gaussian data
samples = np.split(mixed_normal_data, 300)
# Create 300 histograms from the noisy samples
# This might take a few seconds...
hists = [StreamHist().update(s) for s in samples]
# Merge the 300 histograms: sum() folds the list with StreamHist's
# addition (merge) support.
h7 = sum(hists) # How cool is that!
```
In the following plot, the red line shows the density distribution from the merged histogram, and the blue line shows one of (the last one in the list) the original histograms:
```
min, max = h7.bounds()
x = np.linspace(min, max, 100)
plt.figure()
y1 = [h7.pdf(z) for z in x]
y2 = [hists[-1].pdf(z) for z in x]
plt.plot(x, y1, label="Merged")
plt.plot(x, y2, label="Single")
plt.legend(loc="best")
plt.title("Bin merging")
plt.ylabel("Density")
plt.xlabel("Data")
plt.xlim(-1.2, 4)
plt.ylim(-0.05, None)
plt.show()
```
## Missing Values
Information about missing values is captured whenever the input value is `None`. The `missing_count` property retrieves the number of instances with a missing input. For a basic histogram, this count is likely sufficient. It is provided in the case that this type of information is relevant for more complex summaries.
```
h8 = StreamHist().update([None, 7, None])
h8.missing_count
```
## Performance-related concerns
### Freezing a `StreamHist`
While the ability to adapt to non-stationary data streams is a strength of the histograms, it is also computationally expensive. If your data stream is stationary, you can increase the histogram's performance by setting the `freeze` threshold parameter. After the number of inserts into the histogram have exceeded the `freeze` parameter, the histogram bins are 'locked' into place. As the bin means no longer shift, inserts become computationally cheap. However the quality of the histogram can suffer if the `freeze` parameter is too small.
```
# This takes quite a while (~2.7s each run for the 'frozen' histogram)...
%timeit StreamHist().update(normal_data)
%timeit StreamHist(freeze=1024).update(normal_data)
```
### Sorted list
The bin reservoir used to store the `StreamHist` bins is a sorted list as implemented in the [`SortedContainers`](https://github.com/grantjenks/sorted_containers) library. There are many performance-related reasons for using this library, and [implementation details](http://www.grantjenks.com/docs/sortedcontainers/implementation.html) and [performance comparisons](http://www.grantjenks.com/docs/sortedcontainers/performance.html) are available for those who are interested.
### Update speeds
Currently, `StreamHist` has minimal dependencies. The only non-standard library dependency is [`SortedContainers`](https://github.com/grantjenks/sorted_containers). This has been a conscious design choice. However, in order to improve update speeds (and other bottlenecks), we are exploring other options, including the use of [`NumPy`](http://www.numpy.org), which provides fast, powerful array-like objects, useful linear algebra, and other features which may improve scalability and efficiency.
## Rendering/plotting
There are multiple ways to visualize a `StreamHist` histogram. Several of the examples here provide ways of plotting the outputs via `matplotlib`. In addition, there are two methods which provide quick access to histogram plotting functionality: `compute_breaks` which provides histogram breaks similarly to `numpy.histogram` and `print_breaks`, which 'prints' the histogram breaks to the console for quick visualization.
```
# Compare StreamHist's approximate histogram breaks against numpy's exact ones.
# NOTE(review): relies on `normal_data`, `StreamHist` and `plt` defined in
# earlier notebook cells — this cell is not runnable standalone.
from numpy import histogram, allclose
length = normal_data.shape[0]
bins = 25
h9 = StreamHist().update(normal_data)
hist1, bins1 = h9.compute_breaks(bins)
hist2, bins2 = histogram(normal_data, bins=bins)
if allclose(bins1, bins2):
    print("The bin breaks are all close")
# Loose tolerance: the streaming counts are only approximate, so allow an
# absolute error that scales with data size per bin.
if allclose(hist1, hist2, rtol=1, atol=length/(bins**2)):
    print("The bin counts are all close")
width = 0.7 * (bins2[1] - bins2[0])
# Bar centers = midpoints of consecutive break points.
c1 = [(a + b)/2. for a, b in zip(bins1[:-1], bins1[1:])]
c2 = [(a + b)/2. for a, b in zip(bins2[:-1], bins2[1:])]
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, figsize=(10, 4))
ax1.bar(c1, hist1, align='center', width=width)
ax2.bar(c2, hist2, align='center', width=width)
ax1.set_title("compute_breaks")
ax2.set_title("numpy.histogram")
ax1.set_ylabel("Frequency")
ax1.set_xlabel("Data")
ax2.set_xlabel("Data")
plt.show()
# Console rendering of the same breaks for quick inspection.
h9.print_breaks(bins)
```
# License
Copyright © 2015 Carson Farmer [carsonfarmer@gmail.com](mailto:carsonfarmer@gmail.com)
Copyright © 2013 VividCortex
All rights reserved. MIT Licensed.
Copyright © 2013 BigML
Licensed under the Apache License, Version 2.0
| github_jupyter |
```
# utilities
import pandas as pd
import numpy as np
from datetime import date
# figure plotting
from bokeh.io import show, curdoc
from bokeh.layouts import column, gridplot
from bokeh.models import ColumnDataSource, RangeTool, DatetimeTickFormatter, LabelSet
from bokeh.plotting import figure, show
# widgets
from bokeh.layouts import column, widgetbox
from bokeh.models.widgets import Button, Select, DateRangeSlider
# execute backtest script
import os
import sys
import glob
# Temporarily extend sys.path so the star import below can find decode_logs;
# `get_current_time` and `Decoder` come from that module.
sys.path.append("../jupyter-py/")
from decode_logs import *
sys.path.pop()
# Timestamped output directory so each run gets its own results folder.
output_dir = "../jupyter-py/output/" + get_current_time()
strategy_type = "kalman"
execution_command = "python ../jupyter-py/backtest_pair.py --strategy_type {} --output_dir {}".format(strategy_type, output_dir)
# Wipe previous results, then run the backtest as a subprocess (blocking).
os.system("rm -rf ../jupyter-py/output")
os.system(execution_command)
stock_list = glob.glob("../ib-data/nyse-daily-tech/*.csv")
for i, file in enumerate(stock_list):
    # Strip directory and the trailing ".csv" to get the ticker symbol.
    stock_list[i] = os.path.basename(file)[:-4]
# Parse the backtest logs into a price/spread frame and a trade-action frame.
data, action_df = Decoder.get_strategy_status(output_dir)
metrics = Decoder.get_strategy_performance(output_dir)
metrics
# CODE SECTION: normalized price, figure name = normp_p
# ========== themes & appearance ============= #
STK_1_LINE_COLOR = "#053061"
STK_2_LINE_COLOR = "#67001f"
STK_1_LINE_WIDTH = 1.5
STK_2_LINE_WIDTH = 1.5
WINDOW_SIZE = 10
TITLE = "PRICE OF X vs Y"
HEIGHT = 250
SLIDER_HEIGHT = 150
WIDTH = 600
# ========== data ============= #
# use sample data from ib-data folder
# Both sources share the same date axis; 'data0'/'data1' are the two legs.
dates = np.array(data['date'], dtype=np.datetime64)
STK_1_source = ColumnDataSource(data=dict(date=dates, close=data['data0']))
STK_2_source = ColumnDataSource(data=dict(date=dates, close=data['data1']))
# ========== plot data points ============= #
# x_range is the zoom in slider setup. Pls ensure both STK_1 and STK_2 have same length, else some issue
# Initial view shows only the last WINDOW_SIZE observations.
normp = figure(plot_height=HEIGHT, plot_width=WIDTH, x_range=(dates[-WINDOW_SIZE], dates[-1]), title=TITLE, toolbar_location=None)
normp.line('date', 'close', source=STK_1_source, line_color = STK_1_LINE_COLOR, line_width = STK_1_LINE_WIDTH)
normp.line('date', 'close', source=STK_2_source, line_color = STK_2_LINE_COLOR, line_width = STK_2_LINE_WIDTH)
normp.yaxis.axis_label = 'Price'
normp.xaxis[0].formatter = DatetimeTickFormatter()
# ========== RANGE SELECT TOOL ============= #
# Miniature overview plot whose RangeTool drives the zoom of `normp` above.
select = figure(title="Drag the middle and edges of the selection box to change the range above",
                plot_height=SLIDER_HEIGHT, plot_width=WIDTH, y_range=normp.y_range,
                x_axis_type="datetime", y_axis_type=None,
                tools="", toolbar_location=None, background_fill_color="#efefef")
range_tool = RangeTool(x_range=normp.x_range)
range_tool.overlay.fill_color = "navy"
range_tool.overlay.fill_alpha = 0.2
select.line('date', 'close', source=STK_1_source, line_color = STK_1_LINE_COLOR, line_width = STK_1_LINE_WIDTH)
select.line('date', 'close', source=STK_2_source, line_color = STK_2_LINE_COLOR, line_width = STK_2_LINE_WIDTH)
select.ygrid.grid_line_color = None
select.add_tools(range_tool)
select.toolbar.active_multi = range_tool
normp_p = column(normp, select)
# Bare expression: only echoes the range object in a notebook cell (no effect).
normp.x_range
# CODE SECTION: spread plot, figure name = spread_p
import bokeh.models as bkm
# ========== themes & appearance ============= #
palette = ["#053061", "#67001f"]
LINE_WIDTH = 1.5
LINE_COLOR = palette[-1]
TITLE = "RULE BASED SPREAD TRADING"
# ========== data ============= #
# TODO: get action_source array
# TODO: map actions to colours so can map to palette[i]
# dates = np.array(data['date'], dtype=np.datetime64)
dates = np.array(data['date'], dtype=np.datetime64)
spread_source = ColumnDataSource(data=dict(date=dates, spread=data['spread']))
# action_df rows become circle glyphs marking individual trade actions.
action_source = ColumnDataSource(action_df)
# action_source['colors'] = [palette[i] x for x in action_source['actions']]
# ========== figure INTERACTION properties ============= #
TOOLS = "hover,pan,wheel_zoom,box_zoom,reset,save"
spread_p = figure(tools=TOOLS, toolbar_location="above", plot_height=HEIGHT, plot_width=WIDTH, title=TITLE)
# spread_p.background_fill_color = "#dddddd"
spread_p.xaxis.axis_label = "Backtest Period"
spread_p.yaxis.axis_label = "Spread"
# spread_p.grid.grid_line_color = "white"
# ========== plot data points ============= #
# plot the POINT coords of the ACTIONS
circles = spread_p.circle("date", "spread", size=12, source=action_source, fill_alpha=0.8)
# Hover tool restricted to the action circles; @-fields reference
# columns of action_df (trade action, legs and sizes).
circles_hover = bkm.HoverTool(renderers=[circles], tooltips = [
    ("Action", "@latest_trade_action"),
    ("Stock Bought", "@buy_stk"),
    ("Bought Amount", "@buy_amt"),
    ("Stock Sold", "@sell_stk"),
    ("Sold Amount", "@sell_amt")
])
spread_p.add_tools(circles_hover)
# plot the spread over time
spread_p.line('date', 'spread', source=spread_source, line_color = LINE_COLOR, line_width = LINE_WIDTH)
spread_p.xaxis[0].formatter = DatetimeTickFormatter()
# ========== plot label ============= #
# this part you just need to pass BUY or SELL actions
# recommend you use one colour for each action
# x = <col_name_x-axis>, y = <col_name_y-axis> // both from the source dataframe
# labels = LabelSet(x="date", y="spread", text="Prediction", y_offset=8,
#                   text_font_size="8pt", text_color="colors",
#                   source=action_source, text_align='center')
# spread_p.add_layout(labels)
# ========== RANGE SELECT TOOL ============= #
# not included for now because sample data x-axis is not datetime. PLS FIX
# select = figure(title="Drag the middle and edges of the selection box to change the range above",
#                 plot_height=SLIDER_HEIGHT,
#                 plot_width=WIDTH,
#                 y_range=spread_p.y_range,
#                 x_axis_type="datetime",
#                 y_axis_type=None,
#                 background_fill_color="#efefef")
# range_tool = RangeTool(x_range=spread_p.x_range)
# range_tool.overlay.fill_color = "navy"
# range_tool.overlay.fill_alpha = 0.2
# select.line('date', 'spread', source=spread_source, line_color = LINE_COLOR, line_width = LINE_WIDTH)
# select.ygrid.grid_line_color = None
# select.add_tools(range_tool)
# select.toolbar.active_multi = range_tool
# show(column(spread_p,select))
# CODE SECTION: portfolio value plot, figure name = pv_p
# ========== themes & appearance ============= #
LINE_COLOR = "#053061"
LINE_WIDTH = 1.5
TITLE = "PORTFOLIO VALUE OVER TIME"
# ========== data ============= #
# Reuses `dates` built in the spread section above.
pv_source = ColumnDataSource(data=dict(date=dates, portfolio_value=data['portfolio_value']))
# ========== plot data points ============= #
# x_range is the zoom in slider setup. Pls ensure both STK_1 and STK_2 have same length, else some issue
pv_p = figure(plot_height=250, plot_width=600, title=TITLE, toolbar_location=None)
pv_p.line('date', 'portfolio_value', source=pv_source, line_color = LINE_COLOR, line_width = LINE_WIDTH)
pv_p.yaxis.axis_label = 'Portfolio Value'
pv_p.xaxis[0].formatter = DatetimeTickFormatter()
# CODE SECTION: setup widgets, widgetbox name = controls_wb
WIDGET_WIDTH = 250
# ========== Select Stocks ============= #
# Both dropdowns default to the first ticker in stock_list.
select_stk_1 = Select(width = WIDGET_WIDTH, title='Select Stock 1:', value = stock_list[0], options=stock_list)
select_stk_2 = Select(width = WIDGET_WIDTH, title='Select Stock 2:', value = stock_list[0], options=stock_list)
# ========== Strategy Type ============= #
strategy_list = ['kalman', 'distance', 'cointegration']
select_strategy = Select(width = WIDGET_WIDTH, title='Select Strategy:', value = strategy_list[0], options=strategy_list)
# ========== set start/end date ============= #
# date time variables
MAX_START = date(2014, 1, 1)
MAX_END = date(2018, 12, 30)
DEFAULT_START =date(2016, 1, 1)
DEFAULT_END = date(2018, 1, 30)
STEP = 1
backtest_dates = DateRangeSlider(width = WIDGET_WIDTH, start=MAX_START, end=MAX_END, value=(DEFAULT_START, DEFAULT_END), step=STEP, title="Backtest Date Range:")
start_bt = Button(label="Backtest", button_type="success", width = WIDGET_WIDTH)
# controls = column(select_stk_1, select_stk_2, select_strategy, backtest_dates, start_bt)
# NOTE(review): `widgetbox` is removed in Bokeh >= 2.0 — confirm the Bokeh
# version pinned for this notebook (use `column` otherwise).
controls_wb = widgetbox(select_stk_1, select_stk_2, select_strategy, backtest_dates, start_bt, width=600)
# CODE SECTION: Final layout
# 2x2 grid: controls + normalized prices on top, portfolio value + spread below.
grid = gridplot([[controls_wb, normp_p], [pv_p, spread_p]], sizing_mode='fixed')
# CODE SECTION: return curdoc
curdoc().add_root(grid)
# curdoc().title = "DEMO"
show(grid)
# CODE SECTION: setup on_update functions
# this stores the set of params for backtesting
# params[0] = stk_1, params[1] = stk_2, params[2] = strategy_type,
# params[3] = start_date, params[4] = end_date
params = [0, 0, 0, 0, 0]
# ========== before backtest ============= #
# Widget callbacks: each mirrors the current widget value into the shared
# `params` list so the Backtest button can read one consistent snapshot.
def update_stk_1(attrname, old, new):
    params[0] = select_stk_1.value
def update_stk_2(attrname, old, new):
    params[1] = select_stk_2.value
def update_strategy(attrname, old, new):
    params[2] = select_strategy.value
def update_dates(attrname, old, new):
    # NOTE(review): newer Bokeh exposes the slider pair via `value`, not
    # `range` — confirm `range` exists on the DateRangeSlider version used.
    params[3] = backtest_dates.range[0]
    params[4] = backtest_dates.range[1]
select_stk_1.on_change('value', update_stk_1)
select_stk_2.on_change('value', update_stk_2)
select_strategy.on_change('value', update_strategy)
backtest_dates.on_change('range', update_dates)
# ========== backtest ============= #
def run_backtest(new):
    """Click handler for the 'Backtest' button.

    `new` is the payload Bokeh passes to ``on_click`` handlers. The original
    body contained only a comment, which is a SyntaxError in Python (a
    function body needs at least one statement); ``pass`` keeps the script
    importable until the real backtest launch is implemented.
    """
    # TODO: launch the backtest using the widget values collected in `params`.
    pass
start_bt.on_click(run_backtest)
# The lines below look like scratch/debug cells: bare expressions only echo
# their value in a notebook and have no effect when run as a script.
os.getcwd()
import sys
sys.path.insert(0, '../')
from jupyter_py.decode_logs import *
import datetime
# Bare attribute access — evaluates the function object without calling it.
datetime.datetime.fromtimestamp
```
| github_jupyter |
*Managerial Problem Solving*
# Tutorial 10 - Regression and Time Series Analysis
Toni Greif<br>
Lehrstuhl für Wirtschaftsinformatik und Informationsmanagement
SS 2019
```
# tidyverse: data wrangling + %>% pipe; TTR: moving averages (SMA/WMA);
# forecast: Holt-Winters prediction helpers.
library(tidyverse)
library(TTR)
library(forecast)
```
## Regression Analysis
Predict an economic quantity (=dependent variable) based on known and measurable influence factors (= independent
variables). This mathematical equation can be generalized as follows:
$$Y_i= b_0 + b_1 X_{1_i} + \epsilon_i$$
where, $b_0$ is the intercept and $b_1$ is the slope. Collectively, they are called regression coefficients. $\epsilon_i$ is the error term, the part of $Y_i$ the regression model is unable to explain.
The aim of this exercise is to build a simple regression model that we can use to predict Distance (dist) by establishing a statistically significant linear relationship with Speed (speed). The initial assumption of a linear relationship can be examined by means of a scatter plot.
```
# Scatter plot with a loess smoother to eyeball the dist ~ speed relationship.
options(repr.plot.width=6, repr.plot.height=4)
scatter.smooth(x=cars$speed, y=cars$dist, main="Dist ~ Speed")
```
To spot any outlier observations in the variable use boxplots. Having outliers in your predictor can drastically affect the predictions.
```
# Side-by-side boxplots to spot outliers in each variable; subtitle lists
# the outlier values reported by boxplot.stats.
options(repr.plot.width=8, repr.plot.height=4)
par(mfrow=c(1, 3))
boxplot(cars$speed, main="Speed", sub=paste("Outlier rows: ", boxplot.stats(cars$speed)$out))
boxplot(cars$dist, main="Distance", sub=paste("Outlier rows: ", boxplot.stats(cars$dist)$out))
scatter.smooth(x=cars$speed, y=cars$dist, main="Dist ~ Speed")
```
Correlation is a statistical measure that suggests the level of linear dependence between two variables, that occur in pair.
Correlation can take values between -1 to +1. If we observe for every instance where speed increases, the distance also increases along with it, then there is a high positive correlation between them and therefore the correlation between them will be closer to 1. The opposite is true for an inverse relationship, in which case, the correlation between the variables will be close to -1.
```
# Pearson correlation between speed and stopping distance (in [-1, 1]).
options(repr.plot.width=6, repr.plot.height=4)
cor(cars$speed, cars$dist)
scatter.smooth(x=cars$speed, y=cars$dist, main="Dist ~ Speed")
```
Now that we have seen the linear relationship graphically in the scatter plot and by computing the correlation, lets build the the linear model.
```
# Simple linear regression: stopping distance explained by speed.
linearMod <- lm(dist ~ speed, data=cars)
```
### Results of Regression Analysis
**Information on model quality:**
- Standard error (SE)
- Information on the deviation of the model from the data
- Pearson correlation coefficient $(R)$
- Magnitude of linear correlation $(-1 \leq R \leq 1)$
- Coefficient of determination $(R^2)$
- Characterizes the 'predictive power' of the model
**Intercept and slope of regression function (Regression coefficients)**
**Confidence intervals**
- Interval in which the true regression coefficient value lies with a probability of 95%
- If 0 is covered by the interval, the coefficient is not statistically significant
- The same information is conveyed by the coefficients’ p-values (p-value < 0.05)
```
# Coefficients, standard errors, p-values, R-squared and F-statistic.
summary(linearMod)
```
**Pr(>|t|) or p-value** is the probability that you get a t-value as high or higher than the observed value when the Null Hypothesis (the coefficient is equal to zero or that there is no relationship) is true.
- Pr(>|t|) is low, the coefficients are significant (significantly different from zero)
- Pr(>|t|) is high, the coefficients are not significant
- p-value is less than significance level (< 0.05), we can reject the null hypothesis
- t-tests that can assess only one regression coefficient at a time
- F-test can assess multiple coefficients simultaneously.
**The F-test of the overall significance** is a specific form of the F-test. It compares a model with no predictors to the model that you specify. The hypotheses for the F-test of the overall significance are as follows:
- Null hypothesis: The fit of the intercept-only model and your model are equal.
- Alternative hypothesis: The fit of the intercept-only model is significantly reduced compared to your model.
**The R-Squared value** of the new, bigger model will always be greater than that of the smaller subset as you add more variables to your model. This is because, since all the variables of the original model are also present in the super-set, their contribution to explaining the dependent variable is present there as well.
The adjusted R-Squared penalizes total value for the number of terms in your model.
### Exercise
Load the dataset “income.csv”.
```
# Load the exercise dataset and preview the first rows.
income <- read.csv("data/T09/income.csv")
income %>% head()
```
Perform a multiple linear regression. Therefore, use the income as dependent variable and all others parameters as independent variables.
```
# Multiple regression: Income against every other column ("." = all others).
fit1a <- lm(Income ~ ., data=income)
summary(fit1a)
```
After fitting the initial model, keep removing the insignificant (5%) independent variables. Which independent variables have a significant influence on the income of the state inhabitants?
```
# Manual backward elimination: refit after dropping the least significant
# predictor (p > 0.05) at each step. Each header comment names the variable
# removed relative to the previous model.
# - Life.Exp
fit1a <- lm(Income ~ Population +
Illiteracy + Murder + HS.Grad +
Frost + Area, data=income)
summary(fit1a)
# - Frost
fit1a <- lm(Income ~ Population +
Illiteracy + Murder + HS.Grad +
Area, data=income)
summary(fit1a)
# - Murder
fit1a <- lm(Income ~ Population +
Illiteracy + HS.Grad +
Area, data=income)
summary(fit1a)
# - Illiteracy
fit1a <- lm(Income ~ Population +
HS.Grad +
Area, data=income)
summary(fit1a)
# - Area
# Final model: Population and HS.Grad remain significant.
fit1a <- lm(Income ~ Population +
HS.Grad, data=income)
summary(fit1a)
```
What share of total variance in the data can be explained by our regression model?
```
# Answer: the final model explains ~43% of the variance
# (Adjusted R-squared: 0.4345, from the summary above).
```
## Time Series Analysis
<img src="images/timeSeries.png" style="width:80%; float:center">
Source: http://statmath.wu.ac.at/courses/mmwi-finmath/Aufbaukurs/handouts/handout-7-Zerlegung.pdf
The file dax.csv provides the monthly closing prices of the DAX index since November 1990. Load the csv-file.
```
# Monthly DAX closing prices since November 1990.
dax_data <- read.csv("data/T10/dax.csv")
dax_data %>% head()
```
Create a time series object. Be sure to use the correct frequency as well as the start data.
```
# Monthly time-series object: frequency 12, starting November 1990.
dax <- dax_data %>%
ts(start = c(1990, 11), frequency = 12)
dax
```
### Simple Average
```
# Baseline forecast: the overall mean, drawn as a horizontal line.
SA <- mean(dax_data$x)
SA
options(repr.plot.width=6, repr.plot.height=4)
plot(dax)
abline(h =SA, col = "orange")
```
### Moving Average
Calculate the moving average with 3, 10, 20 and 100 averaging periods. Visualize the DAX index as well as the moving averages in one plot using different colors.
```
# Simple moving averages with windows of 3/10/20/100 months; larger windows
# smooth more but lag the series further.
options(repr.plot.width=6, repr.plot.height=4)
MA3 <- SMA(dax, 3)
MA10 <- SMA(dax, 10)
MA20 <- SMA(dax, 20)
MA100 <- SMA(dax, 100)
plot(dax)
abline(h =SA, col = "orange")
lines(MA3, col = "green")
lines(MA10, col = "red")
lines(MA20, col = "blue")
lines(MA100, col ="purple")
```
### Weighted moving average
Using the weighted moving average, the sliding window values are given different weights, typically so that more recent points matter more.
```
n= 3
# NOTE(review): in TTR::WMA the weight vector is applied oldest-to-newest,
# so wts=c(0.6,0.3,0.1) would give the *most recent* point the smallest
# weight — confirm this matches the "recent points matter more" intent above.
WMA10 <- WMA(dax, n = n, wts = c(0.6,0.3,0.1))
options(repr.plot.width=6, repr.plot.height=4)
plot(dax)
lines(MA3, col = "green")
lines(WMA10, col ="orange")
```
### Single Exponential Smoothing
Here is where things get interesting. Imagine a weighted average where we consider all of the data points, while assigning exponentially smaller weights as we go back in time. For example if we started with 0.9, our weights would be (going back in time):
- $0.9^1, 0.9^2, 0.9^3, 0.9^4, 0.9^5, 0.9^6, \ldots$
- 0.9,0.81,0.729,0.6561,0.59049,0.531441,...
In some way this is very similar to the weighted average above, only the weights are decaying uniformly. The smaller the starting weight, the faster it approaches zero.
But there is a problem: weights do not add up to 1.
We are solving this with a succinct and elegant formula:
$$\hat{Y}_{t}=\alpha Y_t+(1−\alpha)\hat{Y}_{t−1}$$
For details see: https://grisha.org/blog/2016/01/29/triple-exponential-smoothing-forecasting/
Determine the components of the DAX time series (trend, seasonality, random) and visualize them.
```
# Classical decomposition into trend, seasonal and random components.
options(repr.plot.width=6, repr.plot.height=5)
dax_decompose <- decompose(dax)
plot(dax_decompose)
```
Which months are good / bad for stock investors in Germany?
```
# Monthly seasonal effects: positive months favour investors, negative ones don't.
dax_decompose$seasonal
```
### Holt-Winters model
The idea behind Holt-Winters model is exponential smoothing applied to both expected value, trend and seasonal effects.
Use the data above to predict the future stock market. Create an additive Holt-Winters model to predict the DAX.
```
# Additive Holt-Winters: exponential smoothing of level, trend and seasonality.
options(repr.plot.width=6, repr.plot.height=4)
additiveModel = HoltWinters(dax, seasonal = "additive")
plot(additiveModel)
```
Use the model to predict the index for the next 4 years and visualize it.
```
# Forecast 48 months (4 years) ahead with prediction intervals.
options(repr.plot.width=6, repr.plot.height=4)
DAXForecast = forecast(additiveModel,h=48)
plot(DAXForecast)
```
A multiple time series is returned with columns fit, lower and upper, holding the point forecasts and the lower and upper bounds of the prediction intervals, respectively.
| github_jupyter |
# What is Trend? #
The **trend** component of a time series represents a persistent, long-term change in the mean of the series. The trend is the slowest-moving part of a series, the part representing the largest time scale of importance. In a time series of product sales, an increasing trend might be the effect of a market expansion as more people become aware of the product year by year.
<figure style="padding: 1em;">
<img src="https://i.imgur.com/ZdS4ZoJ.png" width=800, alt="">
<figcaption style="textalign: center; font-style: italic"><center>Trend patterns in four time series.</center></figcaption>
</figure>
In this course, we'll focus on trends in the mean. More generally though, any persistent and slow-moving change in a series could constitute a trend -- time series commonly have trends in their variation for instance.
# Moving Average Plots #
To see what kind of trend a time series might have, we can use a **moving average plot**. To compute a moving average of a time series, we compute the average of the values within a sliding window of some defined width. Each point on the graph represents the average of all the values in the series that fall within the window on either side. The idea is to smooth out any short-term fluctuations in the series so that only long-term changes remain.
<figure style="padding: 1em;">
<img src="https://i.imgur.com/EZOXiPs.gif" width=800, alt="An animated plot showing an undulating curve slowly increasing with a moving average line developing from left to right within a window of 12 points (in red).">
<figcaption style="textalign: center; font-style: italic"><center>A moving average plot illustrating a linear trend. Each point on the curve (blue) is the average of the points (red) within a window of size 12.
</center></figcaption>
</figure>
Notice how the *Mauna Loa* series above has a repeating up and down movement year after year -- a short-term, *seasonal* change. For a change to be a part of the trend, it should occur over a longer period than any seasonal changes. To visualize a trend, therefore, we take an average over a period longer than any seasonal period in the series. For the *Mauna Loa* series, we chose a window of size 12 to smooth over the season within each year.
# Engineering Trend #
Once we've identified the shape of the trend, we can attempt to model it using a time-step feature. We've already seen how using the time dummy itself will model a linear trend:
```
target = a * time + b
```
We can fit many other kinds of trend through transformations of the time dummy. If the trend appears to be quadratic (a parabola), we just need to add the square of the time dummy to the feature set, giving us:
```
target = a * time ** 2 + b * time + c
```
Linear regression will learn the coefficients `a`, `b,` and `c`.
The trend curves in the figure below were both fit using these kinds of features and scikit-learn's `LinearRegression`:
<figure style="padding: 1em;">
<img src="https://i.imgur.com/KFYlgGm.png" width=800, alt="Above, Cars Sold in Quebec: an undulating plot gradually increasing from 1960-01 to 1968-12 with a linear trend-line superimposed. Below, Plastics Production in Australia: an undulating plot with a concave-up quadratic trend-line superimposed.">
<figcaption style="textalign: center; font-style: italic"><center><strong>Top:</strong> Series with a linear trend. <strong>Below:</strong> Series with a quadratic trend.
</center></figcaption>
</figure>
If you haven't seen the trick before, you may not have realized that linear regression can fit curves other than lines. The idea is that if you can provide curves of the appropriate shape as features, then linear regression can learn how to combine them in the way that best fits the target.
# Example - Tunnel Traffic #
In this example we'll create a trend model for the *Tunnel Traffic* dataset.
```
# Notebook setup: plotting defaults plus the Tunnel Traffic daily series.
from pathlib import Path
from warnings import simplefilter
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
simplefilter("ignore")  # ignore warnings to clean up output cells
# Set Matplotlib defaults
plt.style.use("seaborn-whitegrid")
plt.rc("figure", autolayout=True, figsize=(11, 5))
plt.rc(
    "axes",
    labelweight="bold",
    labelsize="large",
    titleweight="bold",
    titlesize=14,
    titlepad=10,
)
# Shared kwargs reused by the raw-data plots later in the lesson.
plot_params = dict(
    color="0.75",
    style=".-",
    markeredgecolor="0.25",
    markerfacecolor="0.25",
    legend=False,
)
%config InlineBackend.figure_format = 'retina'
# Load Tunnel Traffic dataset
data_dir = Path("../input/ts-course-data")
tunnel = pd.read_csv(data_dir / "tunnel.csv", parse_dates=["Day"])
# PeriodIndex keeps daily observations regularly spaced for the trend features.
tunnel = tunnel.set_index("Day").to_period()
```
Let's make a moving average plot to see what kind of trend this series has. Since this series has daily observations, let's choose a window of 365 days to smooth over any short-term changes within the year.
To create a moving average, first use the `rolling` method to begin a windowed computation. Follow this by the `mean` method to compute the average over the window. As we can see, the trend of *Tunnel Traffic* appears to be about linear.
```
# 365-day centered moving average: smooths out sub-annual (seasonal)
# variation so only the long-term trend remains.
moving_average = tunnel.rolling(
    window=365,       # 365-day window
    center=True,      # puts the average at the center of the window
    min_periods=183,  # choose about half the window size
).mean()              # compute the mean (could also do median, std, min, max, ...)
ax = tunnel.plot(style=".", color="0.5")
moving_average.plot(
    ax=ax, linewidth=3, title="Tunnel Traffic - 365-Day Moving Average", legend=False,
);
```
In Lesson 1, we engineered our time dummy in Pandas directly. From now on, however, we'll use a function from the `statsmodels` library called `DeterministicProcess`. Using this function will help us avoid some tricky failure cases that can arise with time series and linear regression. The `order` argument refers to polynomial order: `1` for linear, `2` for quadratic, `3` for cubic, and so on.
```
from statsmodels.tsa.deterministic import DeterministicProcess
# Builds the time-dummy feature matrix (const + linear trend) safely,
# avoiding collinearity pitfalls of hand-rolled time features.
dp = DeterministicProcess(
    index=tunnel.index,  # dates from the training data
    constant=True,       # dummy feature for the bias (y_intercept)
    order=1,             # the time dummy (trend); 2 would add a quadratic term
    drop=True,           # drop terms if necessary to avoid collinearity
)
# `in_sample` creates features for the dates given in the `index` argument
X = dp.in_sample()
X.head()
```
(A *deterministic process*, by the way, is a technical term for a time series that is non-random or completely *determined*, like the `const` and `trend` series are. Features derived from the time index will generally be deterministic.)
We create our trend model basically as before, though note the addition of the `fit_intercept=False` argument.
```
from sklearn.linear_model import LinearRegression
y = tunnel["NumVehicles"]  # the target
# The intercept is the same as the `const` feature from
# DeterministicProcess. LinearRegression behaves badly with duplicated
# features, so we need to be sure to exclude it here.
model = LinearRegression(fit_intercept=False)
model.fit(X, y)
# Wrap predictions in a Series aligned to the feature index for plotting.
y_pred = pd.Series(model.predict(X), index=X.index)
```
The trend discovered by our `LinearRegression` model is almost identical to the moving average plot, which suggests that a linear trend was the right decision in this case.
```
# Overlay the fitted linear trend on the raw observations.
ax = tunnel.plot(style=".", color="0.5", title="Tunnel Traffic - Linear Trend")
_ = y_pred.plot(ax=ax, linewidth=3, label="Trend")
```
To make a forecast, we apply our model to "out of sample" features. "Out of sample" refers to times outside of the observation period of the training data. Here's how we could make a 30-day forecast:
```
# Extend the deterministic features 30 days past the training index
# and project the fitted trend forward ("out of sample" forecast).
X = dp.out_of_sample(steps=30)
y_fore = pd.Series(model.predict(X), index=X.index)
y_fore.head()
```
Let's plot a portion of the series to see the trend forecast for the next 30 days:
```
# Zoom into the final months so the 30-day forecast (red, "C3") is visible.
ax = tunnel["2005-05":].plot(title="Tunnel Traffic - Linear Trend Forecast", **plot_params)
ax = y_pred["2005-05":].plot(ax=ax, linewidth=3, label="Trend")
ax = y_fore.plot(ax=ax, linewidth=3, label="Trend Forecast", color="C3")
_ = ax.legend()
```
---
The trend models we learned about in this lesson turn out to be useful for a number of reasons. Besides acting as a baseline or starting point for more sophisticated models, we can also use them as a component in a "hybrid model" with algorithms unable to learn trends (like XGBoost and random forests). We'll learn more about this technique in Lesson 5.
# Your Turn #
[**Model trend in Store Sales**](https://www.kaggle.com/kernels/fork/19616015) and understand the risks of forecasting with high-order polynomials.
---
*Have questions or comments? Visit the [course discussion forum](https://www.kaggle.com/learn/time-series/discussion) to chat with other learners.*
| github_jupyter |
# Chapter 10 - Turbo-charge your apps with advanced callback options
* Understanding State
* Creating components that control other components
* Allowing users to add dynamic components to the app
* Introducing pattern-matching callbacks
```
# Chapter setup: plotting, Dash, and data-prep imports.
import plotly
import plotly.express as px
import plotly.graph_objects as go
import dash
from dash import callback_context
import jupyter_dash as jd
from jupyter_dash import JupyterDash
# NOTE(review): dash_core_components / dash_html_components / dash_table are
# legacy packages folded into `dash` itself in Dash >= 2.0 — confirm the
# pinned Dash version before upgrading.
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Output, Input, State, ALL, ALLSMALLER, MATCH
from dash.exceptions import PreventUpdate
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
from dash_table import DataTable
import pandas as pd
import numpy as np
pd.options.display.max_columns = None
# Print each library's version, left-padded with dashes for alignment.
for p in [plotly, dash, jd, dcc, html, dbc, pd, np]:
    print(f'{p.__name__:-<30}v{p.__version__}')
poverty = pd.read_csv('../data/poverty.csv', low_memory=False)
poverty.head(1)
import time
# Demo 1: both components are Inputs, so the (artificially slow) callback
# fires on every keystroke/selection — motivating State in the next demo.
app = JupyterDash(__name__, external_stylesheets=[dbc.themes.COSMO])
app.layout = html.Div([
    html.Br(), html.Br(),
    dcc.Dropdown(id='dropdown', options=[{'label': x, 'value': x}
                                         for x in ['one', 'two', 'three']]),
    html.Br(),
    dcc.Textarea(id='textarea', cols=50, rows=5),
    html.Br(),
    html.Div(id='output'),
])
@app.callback(Output('output', 'children'),
              Input('dropdown', 'value'),
              Input('textarea', 'value'))
def display_values(dropdown_val, textarea_val):
    # Simulate an expensive computation to make the repeated firing obvious.
    time.sleep(4)
    return f'You chose "{dropdown_val}" from the dropdown, and wrote "{textarea_val}" in the textarea.'
app.run_server(mode='inline', port=8050, height=400)
# Demo 2: dropdown/textarea are State, so the callback only fires when the
# Submit button (the sole Input) is clicked.
app = JupyterDash(__name__, external_stylesheets=[dbc.themes.COSMO])
app.layout = html.Div([
    html.Br(), html.Br(),
    dcc.Dropdown(id='dropdown', options=[{'label': x, 'value': x}
                                         for x in ['one', 'two', 'three']]),
    html.Br(),
    dcc.Textarea(id='textarea', cols=50, rows=5),
    html.Br(),
    dbc.Button("Submit", id='button'),
    html.Br(), html.Br(),
    html.Div(id='output')
])
@app.callback(Output('output', 'children'),
              Input('button', 'n_clicks'),
              State('dropdown', 'value'),
              State('textarea', 'value'))
def display_values(n_clicks, dropdown_val, textarea_val):
    # n_clicks is None before the first click; skip the initial call.
    if not n_clicks:
        raise PreventUpdate
    return f'You chose "{dropdown_val}" from the dropdown, and wrote "{textarea_val}" in the textarea.'
app.run_server(mode='inline', port=8051, height=300)
# Demo 3: dynamically adding components — the callback reads its own Output's
# current children as State, appends a new Graph, and writes the list back.
app = JupyterDash(__name__, external_stylesheets=[dbc.themes.COSMO])
app.layout = html.Div([
    html.Br(),html.Br(),
    dbc.Button("Add Chart", id='button'),
    html.Div(id='output', children=[])
])
@app.callback(Output('output', 'children'),
              Input('button', 'n_clicks'),
              State('output', 'children'))
def add_new_chart(n_clicks, children):
    if not n_clicks:
        raise PreventUpdate
    new_chart = dcc.Graph(figure=px.bar(height=300, width=500, title=f"Chart {n_clicks}"))
    children.append(new_chart)
    return children
app.run_server(port=8052)
# NOTE(review): `countries` is computed here but the dropdown below rebuilds
# the same expression inline — presumably one should reuse the other.
countries = poverty[poverty['is_country']]['Country Name'].drop_duplicates().sort_values()
# Demo 4: pattern-matching callbacks. Each added chart/dropdown pair shares
# an 'index'; the MATCH callback pairs a dropdown with its own chart.
app = JupyterDash(__name__, external_stylesheets=[dbc.themes.COSMO])
app.layout = html.Div([
    html.Br(),html.Br(),
    dbc.Row([
        dbc.Col(lg=1),
        dbc.Col([
            dbc.Button("Add Chart", id='button'),
            html.Div(id='output', children=[])
        ], lg=4)
    ]),
])
@app.callback(Output('output', 'children'),
              Input('button', 'n_clicks'),
              State('output', 'children'))
def add_new_chart(n_clicks, children):
    if not n_clicks:
        raise PreventUpdate
    # Dict ids ({'type': ..., 'index': ...}) are what MATCH patterns match on.
    new_chart = dcc.Graph(id={'type': 'chart', 'index': n_clicks},
                          figure=px.bar(height=300, width=500,
                                        title=f"Chart {n_clicks}"))
    new_dropdown = dcc.Dropdown(id={'type': 'dropdown', 'index': n_clicks},
                                options=[{'label': c, 'value': c}
                                         for c in poverty[poverty['is_country']]['Country Name'].drop_duplicates().sort_values()])
    children.append(html.Div([
        new_chart, new_dropdown
    ]))
    return children
# MATCH ties each dropdown's value to the chart with the same 'index'.
@app.callback(Output({'type': 'chart', 'index': MATCH}, 'figure'),
              Input({'type': 'dropdown', 'index': MATCH}, 'value'))
def create_population_chart(country):
    if not country:
        raise PreventUpdate
    df = poverty[poverty['Country Name']==country]
    fig = px.line(df, x='year', y='Population, total', title=f'Population of {country}')
    return fig
app.run_server(port=8053)
app = JupyterDash(__name__, external_stylesheets=[dbc.themes.COSMO])
app.layout = html.Div([
html.Br(),
html.Div(id='feedback'),
dbc.Label("Create your own dropdown, add options one per line:"),
dbc.Textarea(id='text', cols=40, rows=5),
html.Br(),
dbc.Button("Set options", id='button'),
html.Br(),
dcc.Dropdown(id='dropdown'),
dcc.Graph(id='chart')
])
# Build the dropdown's options from user-entered text and show a success Alert.
# The Label above the textarea promises "add options one per line", so the
# text is split on line breaks, not on arbitrary whitespace.
@app.callback(Output('dropdown', 'options'),
              Output('feedback', 'children'),
              Input('button', 'n_clicks'),
              State('text', 'value'))
def set_dropdown_options(n_clicks, options):
    """Turn each non-empty line of the textarea into one dropdown option.

    Returns a (options, feedback) pair: the option dicts for the dropdown
    and a dismissable success Alert listing what was added.
    """
    # Skip the initial automatic call, and do nothing while the textarea is
    # still untouched (its value is None then, which .split would crash on).
    if not n_clicks or not options:
        raise PreventUpdate
    # splitlines (not split) so multi-word options such as "New York" stay
    # intact; strip surrounding whitespace and drop blank lines.
    text = [line.strip() for line in options.splitlines() if line.strip()]
    message = dbc.Alert(f"Success! you added the options: {', '.join(text)}",
                        color='success',
                        dismissable=True)
    options = [{'label': t, 'value': t} for t in text]
    return options, message
# Redraw the population chart whenever a value is picked from the dropdown
# whose options were set by the callback above.
@app.callback(Output('chart', 'figure'),
              Input('dropdown', 'value'))
def create_population_chart(country_code):
    """Plot total population over time for the selected country code."""
    # Nothing selected yet -> keep the current (empty) figure.
    if not country_code:
        raise PreventUpdate
    df = poverty[poverty['Country Code']==country_code]
    return px.line(df, x='year', y='Population, total', title=f"Population of {country_code}")
app.run_server(height=1500, port=8054)
poverty['Country Code'].unique()
app = JupyterDash(__name__, external_stylesheets=[dbc.themes.COSMO])
df = poverty[poverty['is_country']]
import time
app.layout = html.Div([
html.Br(),
dbc.Row([
dbc.Col(lg=1),
dbc.Col([
dbc.Label('Select the year:'),
dcc.Slider(id='year_cluster_slider',
min=1974, max=2018, step=1, included=False,
value=2018,
marks={year: str(year)
for year in range(1974, 2019, 5)})
], lg=6, md=12),
dbc.Col([
dbc.Label('Select the number of clusters:'),
dcc.Slider(id='ncluster_cluster_slider',
min=2, max=15, step=1, included=False,
value=4,
marks={n: str(n) for n in range(2, 16)}),
], lg=4, md=12)
]),
html.Br(),
dbc.Row([
dbc.Col(lg=1),
dbc.Col([
dbc.Label('Select Indicators:'),
dcc.Dropdown(id='cluster_indicator_dropdown',optionHeight=40,
multi=True,
value=['GINI index (World Bank estimate)'],
options=[{'label': indicator, 'value': indicator}
for indicator in poverty.columns[3:54]]),
], lg=6),
dbc.Col([
dbc.Label(''),html.Br(),
dbc.Button("Submit", id='clustering_submit_button'),
]),
]),
dcc.Loading([
dcc.Graph(id='clustered_map_chart')
])
], style={'backgroundColor': '#E5ECF6'})
# On Submit: cluster countries by the chosen indicators for the chosen year
# and render the cluster assignment as a choropleth world map.
@app.callback(Output('clustered_map_chart', 'figure'),
              Input('clustering_submit_button', 'n_clicks'),
              State('year_cluster_slider', 'value'),
              State('ncluster_cluster_slider', 'value'),
              State('cluster_indicator_dropdown', 'value'))
def clustered_map(n_clicks, year, n_clusters, indicators):
    """Impute -> scale -> KMeans the selected indicators, map the clusters.

    Pipeline per submit: mean-impute missing values, standardize each
    indicator, fit KMeans with the requested cluster count, then color
    each country on the map by its cluster label.
    """
    # No indicators selected -> nothing to cluster.
    if not indicators:
        raise PreventUpdate
    imp = SimpleImputer(missing_values=np.nan, strategy='mean')
    scaler = StandardScaler()
    # NOTE(review): KMeans is unseeded, so cluster labels/assignments can
    # differ between submits for the same inputs — confirm this is intended.
    kmeans = KMeans(n_clusters=n_clusters)
    # Country rows only, for the selected year; keep name/year for display.
    df = poverty[poverty['is_country'] & poverty['year'].eq(year)][indicators + ['Country Name', 'year']]
    data = df[indicators]
    # If any selected column (or name/year) is entirely NaN for this year,
    # mean imputation has nothing to work with — bail out with a message.
    if df.isna().all().any():
        return px.scatter(title='No available data for the selected combination of year/indicators.')
    data_no_na = imp.fit_transform(data)
    scaled_data = scaler.fit_transform(data_no_na)
    kmeans.fit(scaled_data)
    # Labels are stringified so plotly treats the cluster id as a discrete
    # (categorical) color, not a continuous scale.
    fig = px.choropleth(df,
                        locations='Country Name',
                        locationmode='country names',
                        color=[str(x) for x in kmeans.labels_],
                        labels={'color': 'Cluster'},
                        hover_data=indicators,
                        height=650,
                        title=f'Country clusters - {year}. Number of clusters: {n_clusters}<br>Inertia: {kmeans.inertia_:,.2f}',
                        color_discrete_sequence=px.colors.qualitative.T10)
    # List the indicators used underneath the map for context.
    fig.add_annotation(x=-0.1, y=-0.15,
                       xref='paper', yref='paper',
                       text='Indicators:<br>' + "<br>".join(indicators),
                       showarrow=False)
    # Cosmetic map styling: crop to inhabited latitudes, match app background.
    fig.layout.geo.showframe = False
    fig.layout.geo.showcountries = True
    fig.layout.geo.projection.type = 'natural earth'
    fig.layout.geo.lataxis.range = [-53, 76]
    fig.layout.geo.lonaxis.range = [-137, 168]
    fig.layout.geo.landcolor = 'white'
    fig.layout.geo.bgcolor = '#E5ECF6'
    fig.layout.paper_bgcolor = '#E5ECF6'
    fig.layout.geo.countrycolor = 'gray'
    fig.layout.geo.coastlinecolor = 'gray'
    return fig
app.run_server(height=1200, debug=True, mode='inline', port=8055)
```
| github_jupyter |
# QQQ vs DIA
### Entry Threshold: | Exit Threshold: 0.50 | Max Duration:
### MA Period: 30 | MA Type: SMA | StdDev Period: 30 | Total ROI: 128.91 % | CAGR: 35.25 % | Max. DD: 26.15 %
### Sharpe Ratio: 1.926
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import math
import statsmodels
from pandas_datareader import data
import datetime
import quandl
# Analysis window for the pair; roughly 3.75 years of daily data.
start= datetime.datetime(2017, 1, 1) # The start of the year for example
end= datetime.datetime(2020, 9, 30)
# The ETF pair being tested for cointegration: Dow (DIA) vs Nasdaq-100 (QQQ).
ticker_1 = "DIA"
ticker_2 = "QQQ"
# Pull daily OHLCV history for both tickers from Yahoo Finance.
df_tickr1 = data.DataReader(name= ticker_1, data_source= "yahoo", start= start, end= end)
df_tickr2 = data.DataReader(name= ticker_2, data_source= "yahoo", start= start, end= end)
# Sanity-check the most recent rows of each download.
df_tickr1.tail()
df_tickr2.tail()
# Overlay the two adjusted-close series on one chart.
df_tickr1['Adj Close'].plot(label='{}'.format(ticker_1),figsize=(12,8))
df_tickr2['Adj Close'].plot(label='{}'.format(ticker_2))
plt.legend();
spread_ratio = df_tickr1['Adj Close']/df_tickr2['Adj Close']
def zscore(stocks):
    """Standardize *stocks*: subtract its mean and divide by its std.

    Centring uses the series' own ``.mean()`` while scaling uses numpy's
    population standard deviation (``np.std``, ddof=0), mirroring the
    original implementation exactly.
    """
    centered = stocks - stocks.mean()
    return centered / np.std(stocks)
zscore(spread_ratio).plot(figsize=(14,8))
plt.axhline(zscore(spread_ratio).mean(), color='black')
plt.axhline(1.5, c='r', ls='--')
plt.axhline(-1.5, c='g', ls='--')
plt.legend(['Spread z-score Boston Sci VS Medtronic', 'Mean', '+1.5 Sd', '-1.5 Sd']);
#1 day moving average of the price spread
# (a 1-day rolling mean is just the series itself; kept for symmetry)
spreadratio_mavg1 = spread_ratio.rolling(1).mean()
# 30 day moving average of the price spread
spreadratio_mavg30 = spread_ratio.rolling(30).mean()
# Take a rolling 30 day standard deviation
std_30ratio = spread_ratio.rolling(30).std()
# Compute the z score for each day
# (distance of today's ratio from its 30-day mean, in 30-day std units)
zscore_30_1_ratio = (spreadratio_mavg1 - spreadratio_mavg30)/std_30ratio
# Plot the rolling z-score with +/-2 sigma entry bands for the pairs trade.
zscore_30_1_ratio.plot(figsize=(12,8),label='Rolling 30 day Z score for spread ratio')
plt.axhline(0, color='black')
plt.axhline(2.00, color='red', linestyle='--')
plt.axhline(-2.00, color='red',linestyle='--')
plt.legend(['Rolling 30 day z-score spread ratio', 'Mean', '+2 Sd', '-2 Sd']);
from statsmodels.tsa.stattools import coint
y0 = df_tickr1['Adj Close']
y1 = df_tickr2['Adj Close']
t_stat_summary = statsmodels.tsa.stattools.coint(y0, y1, trend='c', method='aeg', maxlag=None, autolag='aic', return_results=True)
print('----------Co-Integration Test-------------------')
print('T-stat for the pair {} VS {}:'.format(ticker_1,ticker_2), t_stat_summary[0])
print('Alpha value for the T-stat: {}'.format(t_stat_summary[1]))
print('--------------------------------------')
print('T-stat 99%: {}'.format(t_stat_summary[2][0]))
print('T-stat 95%: {}'.format(t_stat_summary[2][1]))
print('T-stat 90%: {}'.format(t_stat_summary[2][2]))
from statsmodels.tsa.stattools import adfuller
def adf_check(time_series):
    """Run an Augmented Dickey-Fuller test on *time_series* and print a report.

    Prints the test statistic, p-value, lag count and observation count,
    followed by an interpretation that rejects the unit-root null at the
    10% significance level.
    """
    result = adfuller(time_series)
    print('\n-----------Augmented Dickey-Fuller Test: ----------\n')
    labels = ['ADF Test Statistic','p-value','#Lags Used','Number of Observations Used']
    # adfuller returns its statistics in exactly this order; pair and print.
    for label, value in zip(labels, result):
        print(f'{label} : {value}')
    # result[1] is the p-value; <= 0.1 rejects the null at the 10% level.
    if result[1] <= 0.1:
        print("\nstrong evidence against the null hypothesis, reject the null hypothesis. Data has no unit root and is stationary\n")
    else:
        print("\nWeak evidence against null hypothesis, time series has a unit root, indicating it is non-mean reverting\n ")
print("Dickey Fuller Confidence Intervals: ", adfuller(spread_ratio)[4])
adf_check(spread_ratio)
np.corrcoef(df_tickr1['Adj Close'],df_tickr2['Adj Close'])
np.cov(df_tickr1['Adj Close'],df_tickr2['Adj Close'])
```
# Differencing method of spread; GARCH volatility method of spread and final use log returns minus average log returns of spread over GARCH - to be done next
```
zscore_30_1_ratio[-20:]
spread_ratio[-20:]
#differenced spread
t_series_lag_1 = spread_ratio.diff(1)
#t_series_lag_1.plot(figsize=(12,8),label='Timeseries Lagged 1 day Z score')
#plt.axhline(0, color='red')
#plt.axhline(2.0, color='red', linestyle='--')
#plt.axhline(3.0, color='red', linestyle='-')
#plt.axhline(-3.0, color='red', linestyle='-')
#plt.axhline(-2.0, color='red',linestyle='--')
#plt.legend(['Lagged time series day z-score spread ratio', 'Mean', '2.0 Sd', '3.0 Sd']);
#1 day moving average of the price spread
spreadratio_t_series_lag_mavg_1 = t_series_lag_1.rolling(1).mean()
# Timeseries with one lag
spreadratio_t_series_lag_mavg_30 = t_series_lag_1.rolling(60).mean()
# Take a rolling 30 day standard deviation
std_dev_ratio = t_series_lag_1.rolling(30).std()
# Compute the z score for each day
zscore_t_series_lag = (spreadratio_t_series_lag_mavg_1 - spreadratio_t_series_lag_mavg_30)/std_dev_ratio
zscore_t_series_lag.plot(figsize=(12,8),label='Timeseries Lagged 1 day Z score for spread ratio')
plt.axhline(0, color='black')
plt.axhline(2.0, color='red', linestyle='--')
plt.axhline(3.0, color='red', linestyle='-')
plt.axhline(-3.0, color='red', linestyle='-')
plt.axhline(-2.0, color='red',linestyle='--')
plt.legend(['Lagged time series day z-score spread ratio', 'Mean', '2.0 Sd', '3.0 Sd']);
#zscore_t_series_lag[-20:]
from statsmodels.tsa.stattools import adfuller
t_series_dropna = t_series_lag_1.dropna()
result = adfuller(t_series_dropna)
result
from statsmodels.graphics.tsaplots import plot_acf,plot_pacf
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(211)
fig = statsmodels.graphics.tsaplots.plot_acf(t_series_dropna.iloc[-50:], ax=ax1)
ax2 = fig.add_subplot(212)
fig = statsmodels.graphics.tsaplots.plot_pacf(t_series_dropna.iloc[-50:], ax=ax2)
from statsmodels.tsa.arima_model import ARIMA
model = statsmodels.tsa.arima_model.ARIMA(t_series_dropna, order=(1,0,1))
results = model.fit()
print(results.summary())
```
# Estimating GARCH
```
from arch import arch_model
```
| github_jupyter |
```
# TODO add intro with objectives
# ## [markdown]
# Let's first load the data as we did in the previous notebook. TODO add link.
import pandas as pd
df = pd.read_csv("https://www.openml.org/data/get_csv/1595261/adult-census.csv")
# Or use the local copy:
# df = pd.read_csv('../datasets/adult-census.csv')
target_name = "class"
target = df[target_name].to_numpy()
data = df.drop(columns=[target_name, "fnlwgt"])
```
## Working with categorical variables
As we have seen in the previous section, a numerical variable is a continuous
quantity represented by a real or integer number. Those variables can be
naturally handled by machine learning algorithms that are typically composed of
a sequence of arithmetic instructions such as additions and multiplications.
By opposition, categorical variables have discrete values typically represented
by string labels taken in a finite list of possible choices. For instance, the
variable `native-country` in our dataset is a categorical variable because it
encodes the data using a finite list of possible countries (along with the `?`
marker when this information is missing):
```
data["native-country"].value_counts()
```
In the remainder of this section, we will present different strategies to
encode categorical data into numerical data which can be used by a
machine-learning algorithm.
```
data.dtypes
categorical_columns = [c for c in data.columns
if data[c].dtype.kind not in ["i", "f"]]
categorical_columns
data_categorical = data[categorical_columns]
data_categorical.head()
print(f"The datasets is composed of {data_categorical.shape[1]} features")
```
### Encoding ordinal categories
The most intuitive strategy is to encode each category with a number.
The `OrdinalEncoder` will transform the data in such manner.
```
from sklearn.preprocessing import OrdinalEncoder
encoder = OrdinalEncoder()
data_encoded = encoder.fit_transform(data_categorical)
print(f"The dataset encoded contains {data_encoded.shape[1]} features")
data_encoded[:5]
```
We can see that all categories have been encoded for each feature
independently. We can also notice that the number of features before and after
the encoding is the same.
However, one has to be careful when using this encoding strategy. Using this
integer representation can lead the downstream models to make the assumption
that the categories are ordered: 0 is smaller than 1 which is smaller than 2,
etc.
By default, `OrdinalEncoder` uses a lexicographical strategy to map string
category labels to integers. This strategy is completely arbitrary and often
meaningless. For instance suppose the dataset has a categorical variable named
"size" with categories such as "S", "M", "L", "XL". We would like the integer
representation to respect the meaning of the sizes by mapping them to increasing
integers such as 0, 1, 2, 3. However lexicographical strategy used by default
would map the labels "S", "M", "L", "XL" to 2, 1, 0, 3.
The `OrdinalEncoder` class accepts a "categories" constructor argument to pass
the correct ordering explicitly.
If a categorical variable does not carry any meaningful order information then
this encoding might be misleading to downstream statistical models and you might
consider using one-hot encoding instead (see below).
Note however that the impact of a violation of this ordering assumption is really
dependent on the downstream models (for instance linear models are much more
sensitive than models built from an ensemble of decision trees).
### Encoding nominal categories (without assuming any order)
`OneHotEncoder` is an alternative encoder that can prevent the downstream
models from making a false assumption about the ordering of categories. For a
given feature, it will create as many new columns as there are possible
categories. For a given sample, the value of the column corresponding to the
category will be set to `1` while all the columns of the other categories will
be set to `0`.
```
print(f"The dataset is composed of {data_categorical.shape[1]} features")
data_categorical.head()
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder(sparse=False)
data_encoded = encoder.fit_transform(data_categorical)
print(f"The dataset encoded contains {data_encoded.shape[1]} features")
data_encoded
```
Let's wrap this numpy array in a dataframe with informative column names as provided by the encoder object:
```
columns_encoded = encoder.get_feature_names(data_categorical.columns)
pd.DataFrame(data_encoded, columns=columns_encoded).head()
```
Look at how the workclass variable of the first 3 records has been encoded and compare this to the original string representation.
The number of features after the encoding is more than 10 times larger than in the
original data because some variables such as `occupation` and `native-country`
have many possible categories.
We can now integrate this encoder inside a machine learning pipeline as in the
case with numerical data: let's train a linear classifier on
the encoded data and check the performance of this machine learning pipeline
using cross-validation.
```
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
model = make_pipeline(
OneHotEncoder(handle_unknown='ignore'),
LogisticRegression(solver='lbfgs', max_iter=1000)
)
scores = cross_val_score(model, data_categorical, target)
print(f"The different scores obtained are: \n{scores}")
print(f"The accuracy is: {scores.mean():.3f} +/- {scores.std():.3f}")
```
As you can see, this representation of the categorical variables of the data is slightly more predictive of the revenue than the numerical variables that we used previously.
## Exercise 1:
- Try to fit a logistic regression model on categorical data transformed by
the OrdinalEncoder instead. What do you observe?
Use the dedicated notebook to do this exercise.
## Using numerical and categorical variables together
In the previous sections, we saw that we need to treat data specifically
depending of their nature (i.e. numerical or categorical).
Scikit-learn provides a `ColumnTransformer` class which will dispatch some
specific columns to a specific transformer making it easy to fit a single
predictive model on a dataset that combines both kinds of variables together
(heterogeneously typed tabular data).
We can first define the columns depending on their data type:
* **binary encoding** will be applied to categorical columns with only two
possible values (e.g. sex=male or sex=female in this example). Each binary
categorical columns will be mapped to one numerical columns with 0 or 1
values.
* **one-hot encoding** will be applied to categorical columns with more than
two possible categories. This encoding will create one additional column for
each possible categorical value.
* **numerical scaling** numerical features which will be standardized.
```
binary_encoding_columns = ['sex']
one_hot_encoding_columns = ['workclass', 'education', 'marital-status',
'occupation', 'relationship',
'race', 'native-country']
scaling_columns = ['age', 'education-num', 'hours-per-week',
'capital-gain', 'capital-loss']
```
We can now create our `ColumnTransfomer` by specifying a list of triplet
(preprocessor name, transformer, columns). Finally, we can define a pipeline
to stack this "preprocessor" with our classifier (logistic regression).
```
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer
preprocessor = ColumnTransformer([
('binary-encoder', OrdinalEncoder(), binary_encoding_columns),
('one-hot-encoder', OneHotEncoder(handle_unknown='ignore'),
one_hot_encoding_columns),
('standard-scaler', StandardScaler(), scaling_columns)
])
model = make_pipeline(
preprocessor,
LogisticRegression(solver='lbfgs', max_iter=1000)
)
```
The final model is more complex than the previous models but still follows the
same API:
- the `fit` method is called to preprocess the data then train the classifier;
- the `predict` method can make predictions on new data;
- the `score` method is used to predict on the test data and compare the
predictions to the expected test labels to compute the accuracy.
```
from sklearn.model_selection import train_test_split
data_train, data_test, target_train, target_test = train_test_split(
data, target, random_state=42
)
model.fit(data_train, target_train)
model.predict(data_test)[:5]
target_test[:5]
data_test.head()
model.score(data_test, target_test)
```
This model can also be cross-validated as usual (instead of using a single
train-test split):
```
scores = cross_val_score(model, data, target, cv=5)
print(f"The different scores obtained are: \n{scores}")
print(f"The accuracy is: {scores.mean():.3f} +- {scores.std():.3f}")
```
The compound model has a higher predictive accuracy than the
two models that used numerical and categorical variables in
isolation.
# Fitting a more powerful model
Linear models are very nice because they are usually very cheap to train,
small to deploy, fast to predict and give a good baseline.
However it is often useful to check whether more complex models such as
ensemble of decision trees can lead to higher predictive performance.
In the following we try a scalable implementation of the Gradient Boosting
Machine algorithm. For this class of models, we know that contrary to linear
models, it is useless to scale the numerical features and furthermore it is
both safe and significantly more computationally efficient to use an arbitrary
integer encoding for the categorical variable even if the ordering is
arbitrary. Therefore we adapt the preprocessing pipeline as follows:
```
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
# For each categorical column, extract the list of all possible categories
# in some arbitrary order.
categories = [data[column].unique() for column in data[categorical_columns]]
preprocessor = ColumnTransformer([
('categorical', OrdinalEncoder(categories=categories), categorical_columns),
], remainder="passthrough")
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
model.fit(data_train, target_train)
print(model.score(data_test, target_test))
```
We can observe that we get significantly higher accuracies with the Gradient
Boosting model. This is often what we observe whenever the dataset has a large
number of samples and limited number of informative features (e.g. less than
1000) with a mix of numerical and categorical variables.
This explains why Gradient Boosted Machines are very popular among data science
practitioners who work with tabular data.
## Exercise 2:
- Check that scaling the numerical features does not impact the speed or
accuracy of HistGradientBoostingClassifier
- Check that one-hot encoding the categorical variable does not improve the
accuracy of HistGradientBoostingClassifier but slows down the training.
Use the dedicated notebook to do this exercise.
| github_jupyter |
# Using Python in HPC
Brian Skjerven
Marco DeLapierre
Maciej Cytowski

## Overview
* Python: The Good and the Bad
* Best Practices for Python in HPC
* Python Libraries
* Numerical (NumPy, SciPy)
* I/O (H5Py, PyTables)
* Machine Learning (PyTorch)
* Parallel Python
* PySpark
* Other Topics (e.g., ML, CUDA)
# Jupyter Notebooks
We've built a Docker container that contains a Jupyter notebook installation, along with all of the packages required. To use it you'll need to first install [Docker](https://www.docker.com/get-started) if you haven't already.
Once you've started Docker (and it may take a minute or two to fully start up), open up a terminal and try running:
```console
docker run hello-world
```
That should start up and run a small Docker container.
We have a git repository with all of the examples and Jupyter notebooks you can clone:
```console
https://github.com/PawseySC/Using-Python-in-HPC.git
```
The final part we need is the Jupyter container. We've provided the Dockerfiles for building this image in the git repo (under `docker/hpc-jupyter`). If you want to build the image yourself you can use the following commands (replace `<your Docker ID>` with your own Docker login if you have one:
```console
cd Using-Python-in-HPC/docker/hpc-jupyter
docker build -t <your Docker ID>/hpc-jupyter .
```
That will take a while to build, so got get a cup of coffee (and best to do this before the course starts).
Alternatively, you can download the image we've built. It's fairly large (~7GB) so best to do this on a good connection and before the course starts. Simply run
```console
docker pull pawsey/hpc-jupyter:latest
```
and sit back and wait.
Once you have the Jupyter image we need to start it up. We want to start up the container in the top-level directory of the git repo:
```console
cd /path/to/Using-Python-in-HPC
docker run --name=jupyter -d -p 8888:8888 -w /home/jovyan -v `pwd`:/home/jovyan pawsey/hpc-jupyter
```
* **--name=jupyter**
Give our container a name (Docker will automatically name it if you don't)
* **-d**
Run our Jupyter container in the background (we can get our terminal back)
* **-p 8888:8888**
We'll use a web browser to access our Jupyter notebooks server, and we need to forward a port on the host machine (your laptop) to the Jupyter server running inside our container
* **-w /home/jovyan**
`-w` sets the working directory when we start our container. The user `jovyan` is the default user for Jupyter
* **-v paths**
We can mount volumes on the host system into the container. This is useful for adding data or scripts that you don't want to ship with the container. We're going to mount the current directory (with all of our notebooks and code) to `/home/jovyan`
* **pawsey/hpc-jupyter**
The image we want to use. This assumes you're downloading the Pawsey image. If you've built it yourself simply replace it with the name you gave your image.
Once you start your container you should see a long string of numbers & letters...that's just an ID for our container.
You can now open a web browser and go to **`localhost:8888`**. You should see a Jupyter login screen:

To get the login token we need to query our running container:
```console
docker logs jupyter
Executing the command: jupyter notebook
WARNING: The conda.compat module is deprecated and will be removed in a future release.
[I 00:04:13.383 NotebookApp] [nb_conda_kernels] enabled, 2 kernels found
[I 00:04:13.393 NotebookApp] Writing notebook server cookie secret to /home/jovyan/.local/share/jupyter/runtime/notebook_cookie_secret
[I 00:04:14.627 NotebookApp] Loading IPython parallel extension
[I 00:04:14.662 NotebookApp] [jupyter_nbextensions_configurator] enabled 0.4.1
[I 00:04:14.709 NotebookApp] JupyterLab extension loaded from /opt/conda/lib/python3.7/site-packages/jupyterlab
[I 00:04:14.710 NotebookApp] JupyterLab application directory is /opt/conda/share/jupyter/lab
[I 00:04:14.717 NotebookApp] [nb_conda] enabled
[I 00:04:14.718 NotebookApp] Serving notebooks from local directory: /home/joyvan
[I 00:04:14.718 NotebookApp] The Jupyter Notebook is running at:
[I 00:04:14.718 NotebookApp] http://(454f883cf4c9 or 127.0.0.1):8888/?token=9e40d10dad209943941d20c3e98c5669168f31d7e7af13a5
[I 00:04:14.718 NotebookApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).
[C 00:04:14.722 NotebookApp]
To access the notebook, open this file in a browser:
file:///home/jovyan/.local/share/jupyter/runtime/nbserver-8-open.html
Or copy and paste one of these URLs:
http://(454f883cf4c9 or 127.0.0.1):8888/?token=9e40d10dad209943941d20c3e98c5669168f31d7e7af13a5
```
The last line has the token we need (in this case, **`9e40d10dad209943941d20c3e98c5669168f31d7e7af13a`**). Copy and paste that into the top text box.
You should now be logged in to our Jupyter notebook server.

* Dynamic, interpreted, interactive
* Object-oriented
* Automatic memory management
* Range of external libraries and modules
* Extendable with C, C++, Fortran
* More on this later
* Easy to learn
* Equally easy to do “dumb” things
# Obligatory xkcd

# Space Invaders
Here's 37 lines of Python code that creates a fully functioning game
```python
import math, time
import tkinter as tk
from tkinter import Tk, Canvas, PhotoImage, Label, Button
class Game(tk.Frame):
    # Deliberately compact demo: the whole game (setup + main loop) lives in
    # __init__. Not production style — it exists to show how little Python
    # is needed for a working Space Invaders.
    def __init__(self, parent, *args, **kwargs):
        """Build the canvas, spawn the alien grid, and run the game loop."""
        tk.Frame.__init__(self, parent, *args, **kwargs)
        self.parent = parent
        parent.title("Space Invaders")
        # aliens/lasers map canvas item id -> metadata (points / speed).
        canvas, aliens, lasers = Canvas(parent, width=800, height=400, bg='black'), {}, {}
        canvas.pack()
        # Sprites loaded from GIFs expected next to the script.
        i1, i2 = PhotoImage(format = 'gif', file = "alien.gif"), PhotoImage(format = 'gif', file = "laser.gif")
        # 8 rows x 15 columns of aliens; higher rows are worth more points.
        for x, y, p in [(100+40*j, 160-20*i, 100*i) for i in range(8) for j in range(15)]:
            aliens[canvas.create_image(x, y, image = i1)] = p
        # Left-click fires: create a laser sprite at the click's x, near the bottom.
        canvas.bind('<Button-1>', lambda e: lasers.update({canvas.create_image(e.x, 390, image=i2): 10}))
        # Main loop: runs until every alien is destroyed.
        while aliens:
            try:
                # Move lasers upward; delete any that leave the top edge.
                for l in lasers:
                    canvas.move(l, 0, -5)
                    if canvas.coords(l)[1]<0:
                        canvas.delete(l); del lasers[l]
                # Sway aliens horizontally; check each for a laser overlap.
                for a in aliens:
                    canvas.move(a, 2.0*math.sin(time.time()),0)
                    p = canvas.coords(a)
                    items = canvas.find_overlapping(p[0]-5, p[1]-5, p[0]+5, p[1]+5)
                    # items[0] is the alien itself; items[1] (if any) is a hit.
                    for i in items[1:2]:
                        canvas.delete(a); del aliens[a]; canvas.delete(i); del lasers[i]
                # ~50 FPS tick; root.update() keeps the Tk window responsive.
                time.sleep(0.02); root.update()
            # NOTE: the dicts are mutated while being iterated, which raises
            # RuntimeError; this bare except is load-bearing — it swallows
            # that and simply retries on the next tick. Intentional here.
            except: pass
# Script entry point: create the Tk root window, mount the game frame so it
# fills the window, and hand control to the Tk event loop.
if __name__ == "__main__":
    root = tk.Tk()
    Game(root).pack(side="top", fill="both", expand=True)
    root.mainloop()
```

Try that in C++
(code is available in this repo)
# 1D Heat Equation

* Time-dependent
* Finite differences, Backward Euler
* Benchmark:
* Python 2.7.13 (using NumPy)
* GCC 7.2.0 (using PETSc)
* Serial test (Single node of Magnus)
* Discretisation using 10-100 million points

* Left axis is Python runtime
* Right axis is PETSc runtime
* Python failed at 100K grid points
* Memory limitation (64GB on Magnus)
* PETSc also ran on 10 million & 100 million grid points
# Python Performance
Simple fact: Python is slower than a compiled language like C or Fortran
Why?
* Python objects can be inefficient
* Compiled vs Interpreted languages
* Dynamic typing
# Interpreters
Ok...time to be pedantic
Python is technically a specification for a language that can be implemented any number of ways. When people refer to using Python, they're also talking about the **interpreter** that they use. The interpreter is a separate program that executes the source code, as opposed to a compiled language where source code is translated into instructions that your target machine can understand.
There are benefits to interpreted languages:
* Development speed (no need to compile)
* Easier to implement (compilers are hard!)
* Code distribution is easier
And disadvantages:
* Performance issues
* Security (code injection)
Most common is **CPython**, but there are other options:
* Jython (Java-based)
* IronPython (.NET Framework/C#)
* PyPY (Python written in...Python)
There are trade-offs with different interpreters; e.g. PyPy gives good performance but is beta for Python 3.
For HPC applications I'd recommend sticking with CPython (or whatever is on your particular system) and focus your efforts on other areas for performance improvement (more on that later).
```
# Simple function in Python
def test_func(n):
x = 1
for i in range(n):
x += x * i
# Time our function
%timeit test_func(10000)
```
Here's the Fortran equivalent:
```
%%file test_func.f
! Benchmark kernel intended to match the Python test_func exactly:
! x += x * i for i = 0..n-1.
subroutine test_func(n)
integer :: n, i
double precision :: x
x = 1
do i = 0,n-1
! Was "x * 1", which ignored the loop index and so benchmarked a
! different (trivial) computation than the Python version.
x = x + (x * i)
enddo
end subroutine test_func
```
We can use the `f2py` tool to compile our Fortran code and call it from Python:
```
!f2py -c test_func.f -m test_func > /dev/null
from test_func import test_func
%timeit test_func(10000)
```
# Python's Memory Layout

* NumPy array is simply a Python object pointing to a C array (which is contiguous)
<br>
* Python list points to a contiguous block of pointers
* Each points to a different Python object
* Each object is spread out in memory
# Static vs. Dynamic Typing
Python knows nothing (at runtime) about the variables in your code
* Are they integers? floats? strings
Only knows they exist as Python objects
# Integer Addition in C
```c
int a = 1;
int b = 2;
int c = a + b;
```
That corresponds to the following steps:
1. Assign `<int> 1` to `a`
<br>
2. Assign `<int> 2` to `b`
<br>
3. Call `binary_add<int,int>(a, b)`
<br>
4. Assign result to `c`
<img src="../img/c_integer.png" alt="C integer" style="width: 200px;"/>
# Integer Addition in Python
```python
a = 1
b = 2
c = a + b
```
1. Assign `1` to `a`
Set `a->PyObject_HEAD->typecode` to integer
Set `a->val=1`
<br>
2. Assign `2` to `b`
Set `b->PyObject_HEAD->typecode` to integer
Set `b->val=2`
<br>
3. Call `binary_add(a,b)`
Find typecode in `a->PyObject_HEAD`
`a` is an integer; value is `a->val`
Find typecode in `b->PyObject_HEAD`
`b` is an integer; value is `b->val`
Call `binary_add<int, int>(a->val, b->val)`
Result is `result`, and type is integer
<br>
4. Create Python object `c`
Set `c->PyObject_HEAD->typecode` to integer
Set `c->val` to `result`
<img src="../img/python_integer.png" alt="Python integer" style="width: 200px;"/>
# So....what do we do?

# Leverage the best of both C & Python
* **petsc4py**
* Python bindings for PETSc (linear algebra package written in C)
* Uses standard Python objects (e.g. NumPy arrays)
* Uses a wrapper around compiled PETSc libraries
* Compute intensive kernels now done in C libraries
* Use of NumPy arrays means that memory layout is contiguous, and arrays can be accessed by Python and C code
<br>
* Same 1-D Heat Equation
* 130 lines of Python code
* Performance?


```
%matplotlib inline
# Solves Heat equation on a periodic domain, using raw VecScatter
from __future__ import division
import sys, petsc4py
petsc4py.init(sys.argv)
from petsc4py import PETSc
from mpi4py import MPI
import numpy
import matplotlib
class Heat(object):
    """1-D heat equation on a periodic unit interval, discretized with centered
    finite differences and distributed over an MPI communicator.

    Implements the callback interface expected by PETSc's TS time-stepper in
    implicit (IFunction/IJacobian) form: F(t, u, u_t) = 0.

    NOTE(review): indentation was flattened by the notebook export; it has been
    reconstructed here -- confirm against the original petsc4py heat example.
    """

    def __init__(self,comm,N):
        # comm: MPI communicator; N: global number of grid points.
        self.comm = comm
        self.N = N # global problem size
        self.h = 1/N # grid spacing on unit interval
        self.n = N // comm.size + int(comm.rank < (N % comm.size)) # owned part of global problem
        # Exclusive prefix-sum of local sizes gives this rank's starting global
        # index; exscan is undefined on rank 0, hence the explicit fixup below.
        self.start = comm.exscan(self.n)
        if comm.rank == 0: self.start = 0
        # Global indices needed locally, including one ghost point on each
        # side; the modulo N implements the periodic wrap-around.
        gindices = numpy.arange(self.start-1, self.start+self.n+1, dtype=int) % N # periodic
        self.mat = PETSc.Mat().create(comm=comm)
        size = (self.n, self.N) # local and global sizes
        self.mat.setSizes((size,size))
        self.mat.setFromOptions()
        self.mat.setPreallocationNNZ((3,1)) # Conservative preallocation for 3 "local" columns and one non-local
        # Allow matrix insertion using local indices [0:n+2]
        lgmap = PETSc.LGMap().create(list(gindices), comm=comm)
        self.mat.setLGMap(lgmap, lgmap)
        # Global and local vectors
        self.gvec = self.mat.getVecRight()
        self.lvec = PETSc.Vec().create(comm=PETSc.COMM_SELF)
        self.lvec.setSizes(self.n+2)
        self.lvec.setUp()
        # Configure scatter from global to local
        isg = PETSc.IS().createGeneral(list(gindices), comm=comm)
        self.g2l = PETSc.Scatter().create(self.gvec, isg, self.lvec, None)
        # Scatter used to gather the full solution on rank 0 for plotting.
        self.tozero, self.zvec = PETSc.Scatter.toZero(self.gvec)
        self.history = []  # recorded (step, time, solution) snapshots
        if False: # Print some diagnostics
            print('[%d] local size %d, global size %d, starting offset %d' % (comm.rank, self.n, self.N, self.start))
            self.gvec.setArray(numpy.arange(self.start,self.start+self.n))
            self.gvec.view()
            self.g2l.scatter(self.gvec, self.lvec, PETSc.InsertMode.INSERT)
            for rank in range(comm.size):
                if rank == comm.rank:
                    print('Contents of local Vec on rank %d' % rank)
                    self.lvec.view()
                comm.barrier()

    def evalSolution(self, t, x):
        """Fill global vector x with the initial condition: a unit box
        (value 1.0) centered at coordinate 0.5, zero elsewhere."""
        assert t == 0.0, "only for t=0.0"
        coord = numpy.arange(self.start, self.start+self.n) / self.N
        x.setArray((numpy.abs(coord-0.5) < 0.1) * 1.0)

    def evalFunction(self, ts, t, x, xdot, f):
        """Implicit residual F(t, x, xdot) for TS, using ghosted local data."""
        self.g2l.scatter(x, self.lvec, PETSc.InsertMode.INSERT) # lvec is a work vector
        h = self.h
        with self.lvec as u, xdot as udot:
            f.setArray(udot*h + 2*u[1:-1]/h - u[:-2]/h - u[2:]/h) # Scale equation by volume element

    def evalJacobian(self, ts, t, x, xdot, a, A, B):
        """Assemble the shifted Jacobian a*dF/dxdot + dF/dx (tridiagonal)."""
        h = self.h
        for i in range(self.n):
            lidx = i + 1  # local index, offset past the left ghost point
            gidx = self.start + i  # NOTE(review): unused -- insertion below is by local index
            B.setValuesLocal([lidx], [lidx-1,lidx,lidx+1], [-1/h, a*h+2/h, -1/h])
        B.assemble()
        if A != B: A.assemble() # If operator is different from preconditioning matrix
        return True # same nonzero pattern

    def monitor(self, ts, i, t, x):
        """TS monitor callback: record a snapshot, thinning so that kept
        snapshots are at least 4 steps AND 1e-4 time units apart."""
        if self.history:
            lasti, lastt, lastx = self.history[-1]
            if i < lasti + 4 or t < lastt + 1e-4: return
        self.tozero.scatter(x, self.zvec, PETSc.InsertMode.INSERT)
        xx = self.zvec[:].tolist()  # zvec holds the full solution only on rank 0
        self.history.append((i, t, xx))

    def plotHistory(self):
        """Plot every recorded snapshot with matplotlib/pylab."""
        try:
            from matplotlib import pylab, rcParams
        except ImportError:
            print("matplotlib not available")
            raise SystemExit
        rcParams.update({'text.usetex':True, 'figure.figsize':(10,6)})
        #rc('figure', figsize=(600,400))
        # NOTE(review): `ts` here is the module-level TS object created by the
        # driver script below -- this method only works after the driver ran.
        pylab.title('Heat: TS \\texttt{%s}' % ts.getType())
        x = numpy.arange(self.N) / self.N
        for i,t,u in self.history:
            pylab.plot(x, u, label='step=%d t=%8.2g'%(i,t))
        pylab.xlabel('$x$')
        pylab.ylabel('$u$')
        pylab.legend(loc='upper right')
        #pylab.savefig('heat-history.png')
        pylab.show()
# --- Driver: configure the TS integrator and solve the Heat problem. ---
OptDB = PETSc.Options()
# Problem size defaults to 10000 grid points; override with -n <N>.
ode = Heat(MPI.COMM_WORLD, OptDB.getInt('n',10000))
x = ode.gvec.duplicate()  # solution vector
f = ode.gvec.duplicate()  # NOTE(review): appears unused -- TS manages residual storage
ts = PETSc.TS().create(comm=ode.comm)
ts.setType(ts.Type.ROSW) # Rosenbrock-W. ARKIMEX is a nonlinearly implicit alternative.
ts.setIFunction(ode.evalFunction, ode.gvec)
ts.setIJacobian(ode.evalJacobian, ode.mat)
ts.setMonitor(ode.monitor)
ts.setTime(0.0)
ts.setTimeStep(ode.h**2)  # initial step of order h^2
ts.setMaxTime(1)
ts.setMaxSteps(100)
ts.setMaxSNESFailures(-1) # allow an unlimited number of failures (step will be rejected and retried)
snes = ts.getSNES() # Nonlinear solver
snes.setTolerances(max_it=10) # Stop nonlinear solve after 10 iterations (TS will retry with shorter step)
ksp = snes.getKSP() # Linear solver
ksp.setType(ksp.Type.CG) # Conjugate gradients
pc = ksp.getPC() # Preconditioner
if False: # Configure algebraic multigrid, could use run-time options instead
    pc.setType(pc.Type.GAMG) # PETSc's native AMG implementation, mostly based on smoothed aggregation
    OptDB['mg_coarse_pc_type'] = 'svd' # more specific multigrid options
    OptDB['mg_levels_pc_type'] = 'sor'
ts.setFromOptions() # Apply run-time options, e.g. -ts_adapt_monitor -ts_type arkimex -snes_converged_reason
ode.evalSolution(0.0, x)
ts.solve(x)
if ode.comm.rank == 0:
    print('steps %d (%d rejected, %d SNES fails), nonlinear its %d, linear its %d'
          % (ts.getStepNumber(), ts.getStepRejections(), ts.getSNESFailures(),
             ts.getSNESIterations(), ts.getKSPIterations()))
    # NOTE(review): rank-0 guard for the plot reconstructed from flattened
    # indentation; the gathered history is only meaningful on rank 0.
    ode.plotHistory()
```
# Moral of the story
* Need a balance between development effort and computational performance
<br>
* Python is an excellent "glue" or framework to use for your application
<br>
* Need to use a compiled language like C or Fortran for the computational kernels
* But I don't know Fortran?
* Chances are there's already a module for you...
# Python Tools

| github_jupyter |
```
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from sparse_shift.plotting import plot_dag
import pickle
```
## DAGs in triangle MEC
```
# Load the dictionary of candidate DAG adjacency matrices and restrict it to
# the Markov equivalence class of the triangle (DAGs with exactly 3 edges).
with open("./dag_dict_all_triangles.pkl", "rb") as f:
    dag_dict = pickle.load(f)
# Restrict to MEC
dag_dict = {
    key: dag for key, dag in dag_dict.items() if np.sum(dag) == 3
}
true_dag = 'DAG-1'  # name of the ground-truth DAG in this experiment
true_parents = dag_dict[true_dag]
# Draw each candidate, highlighting edges that differ from the ground truth.
for key, dag in dag_dict.items():
    plot_dag(
        dag,
        highlight_edges=dag-true_parents,
        title=key)
# CDNOD p-values; the CSV carries a two-level column header (DAG, variable).
# NOTE(review): the ', ' delimiter forces the slower pure-python parser.
df = pd.read_csv('./cdnod_triangle_pvalues.csv', header=[0, 1], delimiter=', ', engine='python')
df.head(5)
# For each DAG, build a human-readable mechanism label per variable,
# e.g. 'X1' -> 'PA=(1,2)' listing the indices of X1's parents in that DAG.
dag2mech = {
    dag: {
        f'X{i+1}': 'PA=(' + ','.join(
            list(np.where(pars == 1)[0].astype(str))
        ) + ')'
        for i, pars in enumerate(parents)
    }
    for dag, parents in dag_dict.items()
}
# Estimate per-(variable, mechanism) test power: the fraction of runs whose
# p-value falls below the significance threshold alpha.
alpha = 0.05
dfs = []
for dag_key in dag2mech.keys():
    for variable, mech in dag2mech[dag_key].items():
        # BUG FIX: the original did `power_df = df['Params']` without .copy().
        # Assigning through .loc on that selection mutates (or warns about
        # mutating, SettingWithCopyWarning) data shared with `df`, so the
        # frames appended to `dfs` could all end up carrying the *last*
        # iteration's variable/mechanism/test_power values.  An explicit copy
        # makes each appended frame independent.
        power_df = df['Params'].copy()
        power_df.loc[:, 'variable'] = variable
        power_df.loc[:, 'mechanism'] = mech
        # Rejection indicator at level alpha (1 = mechanism change detected).
        power_df.loc[:, 'test_power'] = (df[dag_key][variable] <= alpha).astype(int)
        dfs.append(power_df)
power_df = pd.concat(dfs, axis=0)
# Mean rejection rate vs. sample size, faceted by variable and target set.
g = sns.relplot(
    data=power_df.groupby(
        ['intervention_targets', 'n_samples', 'variable', 'mechanism']
    ).mean().reset_index(),
    col='variable',
    row='intervention_targets',
    x = 'n_samples',
    y = 'test_power',
    hue='mechanism',
    kind='line',
    height=3, aspect=1.5,
    # legend=False,
)
plt.show()
# For every (n_samples, intervention_targets) group, count how many mechanisms
# change (p <= alpha, Bonferroni-corrected over the 3 variables) under each
# candidate DAG, pooling rows into progressively larger environment counts.
alpha = 0.05 / 3
df_mat = []
for key, idx in df['Params'].groupby(['n_samples', 'intervention_targets']).indices.items():
    for dag_key in dag_dict.keys():
        # Per-run number of rejected (changed) mechanisms for this DAG.
        n_changes = (df[dag_key].iloc[idx] <= alpha).sum(1)
        # Split the runs into equal subgroups of n_env rows (divisors only).
        for n_env in [i for i in range(1, n_changes.shape[0]+1) if (n_changes.shape[0] % i) == 0]:
            for i, subgroup in enumerate(np.split(n_changes, int(n_changes.shape[0] / n_env))):
                # 2*n_env: presumably each pooled row is a pair of
                # environments -- TODO confirm against the experiment setup.
                df_mat.append(
                    list(key) + [i, 2*n_env, dag_key, np.sum(subgroup), np.mean(subgroup)]
                )
metric_df = pd.DataFrame(df_mat, columns=[
    'n_samples', 'intervention_targets', 'rep', 'n_environments', 'dag',
    'n_changes', 'mean_changes',
])
# Wide table: one column of mean change counts per candidate DAG.
changes_df = pd.pivot_table(metric_df, index=[
    'n_samples', 'intervention_targets',
    'n_environments', 'rep'],
    values='mean_changes',
    columns='dag').reset_index()
dags = metric_df['dag'].unique()
# Sparsest-shift model-selection diagnostics: the "minimal set" is the set of
# DAGs attaining the minimum mean number of changes in each row.
changes_df['mean_min_changes'] = changes_df[dags].min(1)
changes_df['min_set_size'] = np.sum([changes_df[dag] == changes_df['mean_min_changes'] for dag in dags], 0)
changes_df['true_in_min_set'] = changes_df[true_dag] == changes_df['mean_min_changes']
# One row of three diagnostic line plots per intervention-target setting.
for targets in changes_df['intervention_targets'].unique():
    print(targets)
    sub_df = changes_df[changes_df['intervention_targets'] == targets]
    fig, axes = plt.subplots(1, 3, figsize=(15, 5))
    sns.lineplot(
        data=sub_df.groupby(['n_samples', 'n_environments', 'rep']).mean().reset_index(),
        x='n_samples', y='true_in_min_set', hue='n_environments', ci=None,
        ax=axes[0] , palette='flare')
    axes[0].set_title('True DAG in minimal set')
    sns.lineplot(
        data=sub_df.groupby(['n_samples', 'n_environments', 'rep']).mean().reset_index(),
        x='n_samples', y='min_set_size', hue='n_environments', ci=None,
        ax=axes[1], palette='flare'
    )
    axes[1].set_title('Minimal set size')
    sns.lineplot(
        data=sub_df.groupby(['n_samples', 'n_environments', 'rep']).mean().reset_index(),
        x='n_samples', y='mean_min_changes', hue='n_environments', ci=None,
        ax=axes[2], palette='flare'
    )
    axes[2].set_title('Mean minimal number of changes')
    plt.show()
# Bivariate CDNOD results: for each parameter setting, show the distribution
# of the number of changed mechanisms per candidate DAG as stacked bars.
df = pd.read_csv('./bivariate_cdnod_pvalues.csv', header=[0, 1], delimiter=', ', engine='python')
alpha = 0.05 / 3  # Bonferroni correction over the 3 variables
for params_index in df['Params']['params_index'].unique():
    sub_df = df[df['Params']['params_index'] == params_index]
    targets = sub_df['Params', 'intervention_targets'].iloc[0]
    print('Targets: ', targets)
    # changes[d, r] = number of mechanisms rejected for DAG d in run r.
    changes = np.asarray([
        np.sum(sub_df[key].to_numpy() < alpha, axis=1)
        for key in dag_dict.keys()
    ])
    # Fraction of runs with exactly 0, 1, 2, 3 changes, per DAG.
    counts = [
        np.mean(changes == val, axis=1)
        for val in [0, 1, 2, 3]
    ]
    labels = list(dag_dict.keys())
    # NOTE(review): here the ground truth is 'DAG-23', unlike 'DAG-1' above.
    labels[np.where(np.asarray(labels) == 'DAG-23')[0][0]] = 'True DAG'
    fig, ax = plt.subplots()
    bottom = np.zeros(counts[0].shape[0])
    for i in range(len(counts)):
        ax.bar(labels, counts[i], bottom=bottom,
               label=f'{i} changes')
        bottom += counts[i]
    ax.set_ylabel('Fraction of simulations')
    plt.xticks(rotation=90, ha="center")
    ax.set_title(f'Changes in DAGs across intervention: {targets}')
    ax.legend(bbox_to_anchor=(1.01, 1.02))
    plt.show()
# Same stacked-bar analysis for the ICP change counts.
# NOTE(review): this loop duplicates the one above verbatim except for the
# input file -- a candidate for extraction into a helper function.  Left
# as-is because the trailing `changes.shape` display relies on the loop
# variable leaking to module scope.
df = pd.read_csv('./icp_triangle_changes.csv', header=[0, 1], delimiter=', ', engine='python')
alpha = 0.05 / 3
for params_index in df['Params']['params_index'].unique():
    sub_df = df[df['Params']['params_index'] == params_index]
    targets = sub_df['Params', 'intervention_targets'].iloc[0]
    print('Targets: ', targets)
    changes = np.asarray([
        np.sum(sub_df[key].to_numpy() < alpha, axis=1)
        for key in dag_dict.keys()
    ])
    counts = [
        np.mean(changes == val, axis=1)
        for val in [0, 1, 2, 3]
    ]
    labels = list(dag_dict.keys())
    labels[np.where(np.asarray(labels) == 'DAG-23')[0][0]] = 'True DAG'
    fig, ax = plt.subplots()
    bottom = np.zeros(counts[0].shape[0])
    for i in range(len(counts)):
        ax.bar(labels, counts[i], bottom=bottom,
               label=f'{i} changes')
        bottom += counts[i]
    ax.set_ylabel('Fraction of simulations')
    plt.xticks(rotation=90, ha="center")
    ax.set_title(f'Changes in DAGs across intervention: {targets}')
    ax.legend(bbox_to_anchor=(1.01, 1.02))
    plt.show()
# Notebook echo: shape of the last computed `changes` array.
changes.shape
```
| github_jupyter |
```
# Selenium setup: launch a local Chrome via the bundled chromedriver binary
# and open the site used by the XPath examples in the cells below.
import selenium
from time import sleep
from selenium.webdriver.common.action_chains import ActionChains
from selenium import webdriver
driver=webdriver.Chrome('./chromedriver')
driver.get("https://tw.voicetube.com/")
```
https://www.itread01.com/content/1504789095.html
https://stackoverflow.com/questions/35606708/what-is-the-difference-between-and-in-xpath
Absolute vs relative XPaths (/ vs .)
/ introduces an absolute location path, starting at the root of the document.
. introduces a relative location path, starting at the context node.
Named element vs any element (ename vs *)
/ename selects an ename root element
./ename selects all ename child elements of the current node.
/* selects the root element, regardless of name.
./* or * selects all child elements of the context node, regardless of name.
descendant-or-self axis (//*)
//ename selects all ename elements in a document.
.//ename selects all ename elements at or beneath the context node.
//* selects all elements in a document, regardless of name.
.//* selects all elements, regardless of name, at or beneath the context node.
With these concepts in mind, here are answers to your specific questions...
.//*[@id='Passwd'] means to select all elements at or beneath the current context node that have an id attribute value equal to 'Passwd'.
//child::input[@type='password'] can be simplified to //input[@type='password'] and means to select all input elements in the document that have an
type attribute value equal to 'password'.\
## 統計 xxx 節點為 xx
```
# Select the parent (..) of an element that has exactly two <input> children.
ele=driver.find_element_by_xpath('//*[count(input)=2]/..')
print(ele.tag_name)
# NOTE(review): duplicate of the query above -- presumably left from re-running the cell.
ele=driver.find_element_by_xpath('//*[count(input)=2]/..')
print(ele.tag_name)
# Select an element that has exactly one <input> child.
ele=driver.find_element_by_xpath('//*[count(input)=1]')
print(ele.tag_name)
```
## 找tag為xxx的元素
```
# Match an element by its tag name via local-name() (namespace-agnostic).
ele=driver.find_element_by_xpath('//*[local-name()="input"]')
print(ele.tag_name)
ele.get_attribute('name')  # notebook echo of the element's name attribute
```
## 找開頭為xxx的元素
```
# Match the first element whose tag name starts with "i" (e.g. <input>, <iframe>).
ele=driver.find_element_by_xpath('//*[starts-with(local-name(),"i")]')
print(ele.tag_name)
ele.get_attribute('name')  # notebook echo of the element's name attribute
```
## 找包含為xxx的元素
```
# Match elements whose tag name contains "i"; [last()] takes the final match.
ele=driver.find_element_by_xpath('//*[contains(local-name(),"i")]')
print(ele.tag_name)
ele=driver.find_element_by_xpath('//*[contains(local-name(),"i")][last()]')
print(ele.tag_name)
ele.get_attribute('name')  # notebook echo of the element's name attribute
```
## 找長度為xxx的元素
```
# Match the first element whose tag name is exactly 6 characters long.
ele=driver.find_element_by_xpath('//*[string-length(local-name())=6]')
print(ele.tag_name)
ele.get_attribute('name')  # notebook echo of the element's name attribute
```
## 多個路徑查找
```
# Union (|) of two XPath expressions: first match among //title and the last <input>.
ele=driver.find_element_by_xpath('//title | //input[last()]')
print(ele.tag_name)
ele.get_attribute('name')  # notebook echo of the element's name attribute
```
| github_jupyter |

# Chapter 5: Introduction to NumPy
<h2>Chapter Outline<span class="tocSkip"></span></h2>
<hr>
<div class="toc"><ul class="toc-item"><li><span><a href="#1.-Introduction-to-NumPy" data-toc-modified-id="1.-Introduction-to-NumPy-1">1. Introduction to NumPy</a></span></li><li><span><a href="#2.-NumPy-Arrays" data-toc-modified-id="2.-NumPy-Arrays-2">2. NumPy Arrays</a></span></li><li><span><a href="#3.-Array-Operations-and-Broadcasting" data-toc-modified-id="3.-Array-Operations-and-Broadcasting-3">3. Array Operations and Broadcasting</a></span></li><li><span><a href="#4.-Indexing-and-slicing" data-toc-modified-id="4.-Indexing-and-slicing-4">4. Indexing and slicing</a></span></li><li><span><a href="#5.-More-Useful-NumPy-Functions" data-toc-modified-id="5.-More-Useful-NumPy-Functions-5">5. More Useful NumPy Functions</a></span></li></ul></div>
## Chapter Learning Objectives
<hr>
- Use NumPy to create arrays with built-in functions including `np.array()`, `np.arange()`, `np.linspace()` and `np.full()`, `np.zeros()`, `np.ones()`
- Be able to access values from a NumPy array by numeric indexing and slicing and boolean indexing
- Perform mathematical operations on and with arrays.
- Explain what broadcasting is and how to use it.
- Reshape arrays by adding/removing/reshaping axes with `.reshape()`, `np.newaxis()`, `.ravel()`, `.flatten()`
- Understand how to use built-in NumPy functions like `np.sum()`, `np.mean()`, `np.log()` as stand alone functions or as methods of numpy arrays (when available)
## 1. Introduction to NumPy
<hr>

NumPy stands for "Numerical Python" and it is the standard Python library used for working with arrays (i.e., vectors & matrices), linear algebra, and other numerical computations. NumPy is written in C, making NumPy arrays faster and more memory efficient than Python lists or arrays, read more: ([link 1](https://www.datadiscuss.com/proof-that-numpy-is-much-faster-than-normal-python-array/), [link 2](https://www.jessicayung.com/numpy-arrays-memory-and-strides/), [link 3](https://www.labri.fr/perso/nrougier/from-python-to-numpy/)).
NumPy can be installed using `conda` (if not already):
```
conda install numpy
```
## 2. NumPy Arrays
<hr>
### What are Arrays?
Arrays are "n-dimensional" data structures that can contain all the basic Python data types, e.g., floats, integers, strings etc, but work best with numeric data. NumPy arrays ("ndarrays") are homogenous, which means that items in the array should be of the same type. ndarrays are also compatible with numpy's vast collection of in-built functions!

Source: [Medium.com](https://medium.com/hackernoon/10-machine-learning-data-science-and-deep-learning-courses-for-programmers-7edc56078cde)
Usually we import numpy with the alias `np` (to avoid having to type out n-u-m-p-y every time we want to use it):
```
import numpy as np
```
A numpy array is sort of like a list:
```
my_list = [1, 2, 3, 4, 5]
my_list
my_array = np.array([1, 2, 3, 4, 5])
my_array
```
But it has the type `ndarray`:
```
type(my_array)
```
Unlike a list, arrays can only hold a single type (usually numbers):
```
my_list = [1, "hi"]
my_list
my_array = np.array((1, "hi"))
my_array
```
Above: NumPy converted the integer `1` into the string `'1'`!
### Creating arrays
ndarrays are typically created using two main methods:
1. From existing data (usually lists or tuples) using `np.array()`, like we saw above; or,
2. Using built-in functions such as `np.arange()`, `np.linspace()`, `np.zeros()`, etc.
```
my_list = [1, 2, 3]
np.array(my_list)
```
Just like you can have "multi-dimensional lists" (by nesting lists in lists), you can have multi-dimensional arrays (indicated by double square brackets `[[ ]]`):
```
list_2d = [[1, 2], [3, 4], [5, 6]]
list_2d
array_2d = np.array(list_2d)
array_2d
```
You'll probably use the built-in numpy array creators quite often. Here are some common ones (hint - don't forget to check the docstrings for help with these functions, if you're in Jupyter, remember the `shift + tab` shortcut):
```
np.arange(1, 5) # from 1 inclusive to 5 exclusive
np.arange(0, 11, 2) # step by 2 from 1 to 11
np.linspace(0, 10, 5) # 5 equally spaced points between 0 and 10
np.ones((2, 2)) # an array of ones with size 2 x 2
np.zeros((2, 3)) # an array of zeros with size 2 x 3
np.full((3, 3), 3.14) # an array of the number 3.14 with size 3 x 3
np.full((3, 3, 3), 3.14) # an array of the number 3.14 with size 3 x 3 x 3
np.random.rand(5, 2) # random numbers uniformly distributed from 0 to 1 with size 5 x 2
```
There are many useful attributes/methods that can be called off numpy arrays:
```
print(dir(np.ndarray))
x = np.random.rand(5, 2)
x
x.transpose()
x.mean()
x.astype(int)
```
### Array Shapes
As you just saw above, arrays can be of any dimension, shape and size you desire. In fact, there are three main array attributes you need to know to work out the characteristics of an array:
- `.ndim`: the number of dimensions of an array
- `.shape`: the number of elements in each dimension (like calling `len()` on each dimension)
- `.size`: the total number of elements in an array (i.e., the product of `.shape`)
```
array_1d = np.ones(3)
print(f"Dimensions: {array_1d.ndim}")
print(f" Shape: {array_1d.shape}")
print(f" Size: {array_1d.size}")
```
Let's turn that print action into a function and try out some other arrays:
```
def print_array(x):
    """Print a short summary of array *x* (ndim, shape, size) followed by a
    blank line and the array's contents."""
    summary = (
        f"Dimensions: {x.ndim}",
        f" Shape: {x.shape}",
        f" Size: {x.size}",
        "",
    )
    for line in summary:
        print(line)
    print(x)
array_2d = np.ones((3, 2))
print_array(array_2d)
array_4d = np.ones((1, 2, 3, 4))
print_array(array_4d)
```
After 3 dimensions, printing arrays starts getting pretty messy. As you can see above, the number of square brackets (`[ ]`) in the printed output indicate how many dimensions there are: for example, above, the output starts with 4 square brackets `[[[[` indicative of a 4D array.
### 1-d Arrays
One of the most confusing things about numpy is 1-d arrays (vectors) can have 3 possible shapes!
```
x = np.ones(5)
print_array(x)
y = np.ones((1, 5))
print_array(y)
z = np.ones((5, 1))
print_array(z)
```
We can use `np.array_equal()` to determine if two arrays have the same shape and elements:
```
np.array_equal(x, x)
np.array_equal(x, y)
np.array_equal(x, z)
np.array_equal(y, z)
```
The shape of your 1-d arrays can actually have big implications on your mathematical operations!
```
print(f"x: {x}")
print(f"y: {y}")
print(f"z: {z}")
x + y # makes sense
y + z # wait, what?
```
What happened in the cell above is "broadcasting" and we'll discuss it below.
## 3. Array Operations and Broadcasting
<hr>
### Elementwise operations
Elementwise operations refer to operations applied to each element of an array or between the paired elements of two arrays.
```
x = np.ones(4)
x
y = x + 1
y
x - y
x == y
x * y
x ** y
x / y
np.array_equal(x, y)
```
### Broadcasting
ndarrays with different sizes cannot be directly used in arithmetic operations:
```
a = np.ones((2, 2))
b = np.ones((3, 3))
a + b
```
`Broadcasting` describes how NumPy treats arrays with different shapes during arithmetic operations. The idea is to wrangle data so that operations can occur element-wise.
Let's see an example. Say I sell pies on my weekends. I sell 3 types of pies at different prices, and I sold the following number of each pie last weekend. I want to know how much money I made per pie type per day.

```
cost = np.array([20, 15, 25])
print("Pie cost:")
print(cost)
sales = np.array([[2, 3, 1], [6, 3, 3], [5, 3, 5]])
print("\nPie sales (#):")
print(sales)
```
How can we multiply these two arrays together? We could use a loop:

```
total = np.zeros((3, 3)) # initialize an array of 0's
for col in range(sales.shape[1]):
total[:, col] = sales[:, col] * cost
total
```
Or we could make them the same size, and multiply corresponding elements "elementwise":

```
cost = np.repeat(cost, 3).reshape((3, 3))
cost
cost * sales
```
Congratulations! You just broadcasted! Broadcasting is just Numpy essentially doing the `np.repeat()` for you under the hood:
```
cost = np.array([20, 15, 25]).reshape(3, 1)
print(f" cost shape: {cost.shape}")
sales = np.array([[2, 3, 1], [6, 3, 3], [5, 3, 5]])
print(f"sales shape: {sales.shape}")
sales * cost
```
In NumPy the smaller array is “broadcast” across the larger array so that they have compatible shapes:

Source: [Python Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/) by Jake VanderPlas (2016)
Why should you care about broadcasting? Well, it's cleaner and faster than looping and it also affects the array shapes resulting from arithmetic operations. Below, we can time how long it takes to loop vs broadcast:
```
cost = np.array([20, 15, 25]).reshape(3, 1)
sales = np.array([[2, 3, 1],
[6, 3, 3],
[5, 3, 5]])
total = np.zeros((3, 3))
time_loop = %timeit -q -o -r 3 for col in range(sales.shape[1]): total[:, col] = sales[:, col] * np.squeeze(cost)
time_vec = %timeit -q -o -r 3 cost * sales
print(f"Broadcasting is {time_loop.average / time_vec.average:.2f}x faster than looping here.")
```
Of course, not all arrays are compatible! NumPy compares arrays element-wise. It starts with the trailing dimensions, and works its way forward. Dimensions are compatible if:
- **they are equal**, or
- **one of them is 1**.
Use the code below to test out array compatibitlity:
```
a = np.ones((3, 2))
b = np.ones((3, 2, 1))
print(f"The shape of a is: {a.shape}")
print(f"The shape of b is: {b.shape}")
print("")
try:
print(f"The shape of a + b is: {(a + b).shape}")
except:
print(f"ERROR: arrays are NOT broadcast compatible!")
```
### Reshaping Arrays
There are 3 key reshaping methods I want you to know about for reshaping numpy arrays:
- `.reshape()`
- `np.newaxis`
- `.ravel()`/`.flatten()`
```
x = np.full((4, 3), 3.14)
x
```
You'll reshape arrays fairly often and the `.reshape()` method is pretty intuitive:
```
x.reshape(6, 2)
x.reshape(2, -1) # using -1 will calculate the dimension for you (if possible)
a = np.ones(3)
print_array(a)
b = np.ones((3, 2))
print_array(b)
```
If I want to add these two arrays I won't be able to because their dimensions are not compatible:
```
a + b
```
Sometimes you'll want to add dimensions to an array for broadcasting purposes like this. We can do that with `np.newaxis` (note that `None` is an alias for `np.newaxis`). We can add a dimension to `a` to make the arrays compatible:
```
print_array(a[:, np.newaxis]) # same as a[:, None]
a[:, np.newaxis] + b
```
Finally, sometimes you'll want to "flatten" arrays to a single dimension using `.ravel()` or `.flatten()`. `.flatten()` used to return a copy and `.ravel()` a view/reference but now they both return a copy so I can't think of an important reason to use one over the other 🤷♂️
```
x
print_array(x.flatten())
print_array(x.ravel())
```
## 4. Indexing and slicing
<hr>
Concepts of indexing should be pretty familiar by now. Indexing arrays is similar to indexing lists but there are just more dimensions.
### Numeric Indexing
```
x = np.arange(10)
x
x[3]
x[2:]
x[:4]
x[2:5]
x[2:3]
x[-1]
x[-2]
x[5:0:-1]
```
For 2D arrays:
```
x = np.random.randint(10, size=(4, 6))
x
x[3, 4] # do this
x[3][4] # i do not like this as much
x[3]
len(x) # generally, just confusing
x.shape
x[:, 2] # column number 2
x[2:, :3]
x.T
x
x[1, 1] = 555555
x
z = np.zeros(5)
z
z[0] = 5
z
```
### Boolean Indexing
```
x = np.random.rand(10)
x
x + 1
x_thresh = x > 0.5
x_thresh
x[x_thresh] = 0.5 # set all elements > 0.5 to be equal to 0.5
x
x = np.random.rand(10)
x
x[x > 0.5] = 0.5
x
```
## 5. More Useful NumPy Functions
Numpy has many built-in functions for mathematical operations, really it has almost every numerical operation you might want to do in its library. I'm not going to explore the whole library here, but as an example of some of the available functions, consider working out the hypotenuse of a triangle with sides 3m and 4m:

```
sides = np.array([3, 4])
```
There are several ways we could solve this problem. We could directly use Pythagoras's Theorem:
$$c = \sqrt{a^2+b^2}$$
```
np.sqrt(np.sum([np.power(sides[0], 2), np.power(sides[1], 2)]))
```
We can leverage the fact that we're dealing with a numpy array and apply a "vectorized" operation (more on that in a bit) to the whole vector at one time:
```
(sides ** 2).sum() ** 0.5
```
Or we can simply use a numpy built-in function (if it exists):
```
np.linalg.norm(sides) # you'll learn more about norms in 573
np.hypot(*sides)
```
### Vectorization
Broadly speaking, "vectorization" in NumPy refers to the use of optimized C code to perform an operation. Long-story-short, because numpy arrays are homogenous (contain the same dtype), we don't need to check that we can perform an operation on elements of a sequence before we do the operation which results in a huge speed-up. You can kind of think of this concept as NumPy being able to perform an operation on the whole array at the same time rather than one-by-one (this is not actually the case, a super-efficient C loop is still running under the hood, but that's an irrelevant detail). You can read more about vectorization [here](https://www.pythonlikeyoumeanit.com/Module3_IntroducingNumpy/VectorizedOperations.html) but all you need to know is that most operations in NumPy are vectorized, so just try to do things at an "array-level" rather than an "element-level", e.g.:
```
# DONT DO THIS
array = np.array(range(5))
for i, element in enumerate(array):
array[i] = element ** 2
array
# DO THIS
array = np.array(range(5))
array **= 2
```
Let's do a quick timing experiment:
```
# loop method
array = np.array(range(5))
time_loop = %timeit -q -o -r 3 for i, element in enumerate(array): array[i] = element ** 2
# vectorized method
array = np.array(range(5))
time_vec = %timeit -q -o -r 3 array ** 2
print(f"Vectorized operation is {time_loop.average / time_vec.average:.2f}x faster than looping here.")
```
| github_jupyter |
##### Copyright 2021 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# TFX Keras Component Tutorial
***A Component-by-Component Introduction to TensorFlow Extended (TFX)***
Note: We recommend running this tutorial in a Colab notebook, with no setup required! Just click "Run in Google Colab".
<div class="devsite-table-wrapper"><table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://www.tensorflow.org/tfx/tutorials/tfx/components_keras">
<img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/tfx/blob/master/docs/tutorials/tfx/components_keras.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png">Run in Google Colab</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/tfx/tree/master/docs/tutorials/tfx/components_keras.ipynb">
<img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">View source on GitHub</a></td>
<td><a target="_blank" href="https://storage.googleapis.com/tensorflow_docs/tfx/docs/tutorials/tfx/components_keras.ipynb">
<img width=32px src="https://www.tensorflow.org/images/download_logo_32px.png">Download notebook</a></td>
</table></div>
This Colab-based tutorial will interactively walk through each built-in component of TensorFlow Extended (TFX).
It covers every step in an end-to-end machine learning pipeline, from data ingestion to pushing a model to serving.
When you're done, the contents of this notebook can be automatically exported as TFX pipeline source code, which you can orchestrate with Apache Airflow and Apache Beam.
Note: This notebook demonstrates the use of native Keras models in TFX pipelines. **TFX only supports the TensorFlow 2 version of Keras**.
## Background
This notebook demonstrates how to use TFX in a Jupyter/Colab environment. Here, we walk through the Chicago Taxi example in an interactive notebook.
Working in an interactive notebook is a useful way to become familiar with the structure of a TFX pipeline. It's also useful when doing development of your own pipelines as a lightweight development environment, but you should be aware that there are differences in the way interactive notebooks are orchestrated, and how they access metadata artifacts.
### Orchestration
In a production deployment of TFX, you will use an orchestrator such as Apache Airflow, Kubeflow Pipelines, or Apache Beam to orchestrate a pre-defined pipeline graph of TFX components. In an interactive notebook, the notebook itself is the orchestrator, running each TFX component as you execute the notebook cells.
### Metadata
In a production deployment of TFX, you will access metadata through the ML Metadata (MLMD) API. MLMD stores metadata properties in a database such as MySQL or SQLite, and stores the metadata payloads in a persistent store such as on your filesystem. In an interactive notebook, both properties and payloads are stored in an ephemeral SQLite database in the `/tmp` directory on the Jupyter notebook or Colab server.
## Setup
First, we install and import the necessary packages, set up paths, and download data.
### Upgrade Pip
To avoid upgrading Pip in a system when running locally, check to make sure that we're running in Colab. Local systems can of course be upgraded separately.
```
try:
import colab
!pip install --upgrade pip
except:
pass
```
### Install TFX
**Note: In Google Colab, because of package updates, the first time you run this cell you must restart the runtime (Runtime > Restart runtime ...).**
```
!pip install -U tfx
```
## Did you restart the runtime?
If you are using Google Colab, the first time that you run the cell above, you must restart the runtime (Runtime > Restart runtime ...). This is because of the way that Colab loads packages.
### Import packages
We import necessary packages, including standard TFX component classes.
```
import os
import pprint
import tempfile
import urllib
import absl
import tensorflow as tf
import tensorflow_model_analysis as tfma
tf.get_logger().propagate = False
pp = pprint.PrettyPrinter()
from tfx import v1 as tfx
from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext
%load_ext tfx.orchestration.experimental.interactive.notebook_extensions.skip
```
Let's check the library versions.
```
print('TensorFlow version: {}'.format(tf.__version__))
print('TFX version: {}'.format(tfx.__version__))
```
### Set up pipeline paths
```
# This is the root directory for your TFX pip package installation.
_tfx_root = tfx.__path__[0]
# This is the directory containing the TFX Chicago Taxi Pipeline example.
_taxi_root = os.path.join(_tfx_root, 'examples/chicago_taxi_pipeline')
# This is the path where your model will be pushed for serving.
_serving_model_dir = os.path.join(
tempfile.mkdtemp(), 'serving_model/taxi_simple')
# Set up logging.
absl.logging.set_verbosity(absl.logging.INFO)
```
### Download example data
We download the example dataset for use in our TFX pipeline.
The dataset we're using is the [Taxi Trips dataset](https://data.cityofchicago.org/Transportation/Taxi-Trips/wrvz-psew) released by the City of Chicago. The columns in this dataset are:
<table>
<tr><td>pickup_community_area</td><td>fare</td><td>trip_start_month</td></tr>
<tr><td>trip_start_hour</td><td>trip_start_day</td><td>trip_start_timestamp</td></tr>
<tr><td>pickup_latitude</td><td>pickup_longitude</td><td>dropoff_latitude</td></tr>
<tr><td>dropoff_longitude</td><td>trip_miles</td><td>pickup_census_tract</td></tr>
<tr><td>dropoff_census_tract</td><td>payment_type</td><td>company</td></tr>
<tr><td>trip_seconds</td><td>dropoff_community_area</td><td>tips</td></tr>
</table>
With this dataset, we will build a model that predicts the `tips` of a trip.
```
_data_root = tempfile.mkdtemp(prefix='tfx-data')
DATA_PATH = 'https://raw.githubusercontent.com/tensorflow/tfx/master/tfx/examples/chicago_taxi_pipeline/data/simple/data.csv'
_data_filepath = os.path.join(_data_root, "data.csv")
urllib.request.urlretrieve(DATA_PATH, _data_filepath)
```
Take a quick look at the CSV file.
```
!head {_data_filepath}
```
*Disclaimer: This site provides applications using data that has been modified for use from its original source, www.cityofchicago.org, the official website of the City of Chicago. The City of Chicago makes no claims as to the content, accuracy, timeliness, or completeness of any of the data provided at this site. The data provided at this site is subject to change at any time. It is understood that the data provided at this site is being used at one’s own risk.*
### Create the InteractiveContext
Last, we create an InteractiveContext, which will allow us to run TFX components interactively in this notebook.
```
# Here, we create an InteractiveContext using default parameters. This will
# use a temporary directory with an ephemeral ML Metadata database instance.
# To use your own pipeline root or database, the optional properties
# `pipeline_root` and `metadata_connection_config` may be passed to
# InteractiveContext. Calls to InteractiveContext are no-ops outside of the
# notebook.
context = InteractiveContext()
```
## Run TFX components interactively
In the cells that follow, we create TFX components one-by-one, run each of them, and visualize their output artifacts.
### ExampleGen
The `ExampleGen` component is usually at the start of a TFX pipeline. It will:
1. Split data into training and evaluation sets (by default, 2/3 training + 1/3 eval)
2. Convert data into the `tf.Example` format (learn more [here](https://www.tensorflow.org/tutorials/load_data/tfrecord))
3. Copy data into the `_tfx_root` directory for other components to access
`ExampleGen` takes as input the path to your data source. In our case, this is the `_data_root` path that contains the downloaded CSV.
Note: In this notebook, we can instantiate components one-by-one and run them with `InteractiveContext.run()`. By contrast, in a production setting, we would specify all the components upfront in a `Pipeline` to pass to the orchestrator (see the [Building a TFX Pipeline Guide](https://www.tensorflow.org/tfx/guide/build_tfx_pipeline)).
```
example_gen = tfx.components.CsvExampleGen(input_base=_data_root)
context.run(example_gen)
```
Let's examine the output artifacts of `ExampleGen`. This component produces two artifacts, training examples and evaluation examples:
```
artifact = example_gen.outputs['examples'].get()[0]
print(artifact.split_names, artifact.uri)
```
We can also take a look at the first three training examples:
```
# Get the URI of the output artifact representing the training examples, which is a directory
train_uri = os.path.join(example_gen.outputs['examples'].get()[0].uri, 'Split-train')
# Get the list of files in this directory (all compressed TFRecord files)
tfrecord_filenames = [os.path.join(train_uri, name)
for name in os.listdir(train_uri)]
# Create a `TFRecordDataset` to read these files
dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
# Iterate over the first 3 records and decode them.
for tfrecord in dataset.take(3):
serialized_example = tfrecord.numpy()
example = tf.train.Example()
example.ParseFromString(serialized_example)
pp.pprint(example)
```
Now that `ExampleGen` has finished ingesting the data, the next step is data analysis.
### StatisticsGen
The `StatisticsGen` component computes statistics over your dataset for data analysis, as well as for use in downstream components. It uses the [TensorFlow Data Validation](https://www.tensorflow.org/tfx/data_validation/get_started) library.
`StatisticsGen` takes as input the dataset we just ingested using `ExampleGen`.
```
statistics_gen = tfx.components.StatisticsGen(
examples=example_gen.outputs['examples'])
context.run(statistics_gen)
```
After `StatisticsGen` finishes running, we can visualize the outputted statistics. Try playing with the different plots!
```
context.show(statistics_gen.outputs['statistics'])
```
### SchemaGen
The `SchemaGen` component generates a schema based on your data statistics. (A schema defines the expected bounds, types, and properties of the features in your dataset.) It also uses the [TensorFlow Data Validation](https://www.tensorflow.org/tfx/data_validation/get_started) library.
Note: The generated schema is best-effort and only tries to infer basic properties of the data. It is expected that you review and modify it as needed.
`SchemaGen` will take as input the statistics that we generated with `StatisticsGen`, looking at the training split by default.
```
schema_gen = tfx.components.SchemaGen(
statistics=statistics_gen.outputs['statistics'],
infer_feature_shape=False)
context.run(schema_gen)
```
After `SchemaGen` finishes running, we can visualize the generated schema as a table.
```
context.show(schema_gen.outputs['schema'])
```
Each feature in your dataset shows up as a row in the schema table, alongside its properties. The schema also captures all the values that a categorical feature takes on, denoted as its domain.
To learn more about schemas, see [the SchemaGen documentation](https://www.tensorflow.org/tfx/guide/schemagen).
### ExampleValidator
The `ExampleValidator` component detects anomalies in your data, based on the expectations defined by the schema. It also uses the [TensorFlow Data Validation](https://www.tensorflow.org/tfx/data_validation/get_started) library.
`ExampleValidator` will take as input the statistics from `StatisticsGen`, and the schema from `SchemaGen`.
```
example_validator = tfx.components.ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=schema_gen.outputs['schema'])
context.run(example_validator)
```
After `ExampleValidator` finishes running, we can visualize the anomalies as a table.
```
context.show(example_validator.outputs['anomalies'])
```
In the anomalies table, we can see that there are no anomalies. This is what we'd expect, since this is the first dataset that we've analyzed and the schema is tailored to it. You should review this schema -- anything unexpected means an anomaly in the data. Once reviewed, the schema can be used to guard future data, and anomalies produced here can be used to debug model performance, understand how your data evolves over time, and identify data errors.
### Transform
The `Transform` component performs feature engineering for both training and serving. It uses the [TensorFlow Transform](https://www.tensorflow.org/tfx/transform/get_started) library.
`Transform` will take as input the data from `ExampleGen`, the schema from `SchemaGen`, as well as a module that contains user-defined Transform code.
Let's see an example of user-defined Transform code below (for an introduction to the TensorFlow Transform APIs, [see the tutorial](https://www.tensorflow.org/tfx/tutorials/transform/simple)). First, we define a few constants for feature engineering:
Note: The `%%writefile` cell magic will save the contents of the cell as a `.py` file on disk. This allows the `Transform` component to load your code as a module.
```
_taxi_constants_module_file = 'taxi_constants.py'
%%writefile {_taxi_constants_module_file}
# Categorical features are assumed to each have a maximum value in the dataset.
MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]
CATEGORICAL_FEATURE_KEYS = [
'trip_start_hour', 'trip_start_day', 'trip_start_month',
'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',
'dropoff_community_area'
]
DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']
# Number of buckets used by tf.transform for encoding each feature.
FEATURE_BUCKET_COUNT = 10
BUCKET_FEATURE_KEYS = [
'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',
'dropoff_longitude'
]
# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform
VOCAB_SIZE = 1000
# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
OOV_SIZE = 10
VOCAB_FEATURE_KEYS = [
'payment_type',
'company',
]
# Keys
LABEL_KEY = 'tips'
FARE_KEY = 'fare'
```
Next, we write a `preprocessing_fn` that takes in raw data as input, and returns transformed features that our model can train on:
```
_taxi_transform_module_file = 'taxi_transform.py'
%%writefile {_taxi_transform_module_file}
import tensorflow as tf
import tensorflow_transform as tft
import taxi_constants
_DENSE_FLOAT_FEATURE_KEYS = taxi_constants.DENSE_FLOAT_FEATURE_KEYS
_VOCAB_FEATURE_KEYS = taxi_constants.VOCAB_FEATURE_KEYS
_VOCAB_SIZE = taxi_constants.VOCAB_SIZE
_OOV_SIZE = taxi_constants.OOV_SIZE
_FEATURE_BUCKET_COUNT = taxi_constants.FEATURE_BUCKET_COUNT
_BUCKET_FEATURE_KEYS = taxi_constants.BUCKET_FEATURE_KEYS
_CATEGORICAL_FEATURE_KEYS = taxi_constants.CATEGORICAL_FEATURE_KEYS
_FARE_KEY = taxi_constants.FARE_KEY
_LABEL_KEY = taxi_constants.LABEL_KEY
def preprocessing_fn(inputs):
  """tf.transform preprocessing callback.

  Args:
    inputs: map from feature keys to raw, not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  outputs = {}

  # Dense float features: impute missing values, then standardize to z-scores.
  for dense_key in _DENSE_FLOAT_FEATURE_KEYS:
    dense_values = _fill_in_missing(inputs[dense_key])
    outputs[dense_key] = tft.scale_to_z_score(dense_values)

  # String features: map to integer ids via a learned vocabulary, with
  # out-of-vocabulary values hashed into _OOV_SIZE extra buckets.
  for vocab_key in _VOCAB_FEATURE_KEYS:
    vocab_values = _fill_in_missing(inputs[vocab_key])
    outputs[vocab_key] = tft.compute_and_apply_vocabulary(
        vocab_values, top_k=_VOCAB_SIZE, num_oov_buckets=_OOV_SIZE)

  # Continuous features that are better handled as quantile buckets.
  for bucket_key in _BUCKET_FEATURE_KEYS:
    bucket_values = _fill_in_missing(inputs[bucket_key])
    outputs[bucket_key] = tft.bucketize(bucket_values, _FEATURE_BUCKET_COUNT)

  # Small-cardinality integer features pass through with only imputation.
  for cat_key in _CATEGORICAL_FEATURE_KEYS:
    outputs[cat_key] = _fill_in_missing(inputs[cat_key])

  # Label: 1 when the tip exceeded 20% of the fare, 0 otherwise.
  # Rows with a NaN fare are labeled 0.
  fare = _fill_in_missing(inputs[_FARE_KEY])
  tip = _fill_in_missing(inputs[_LABEL_KEY])
  was_big_tipper = tf.greater(tip, tf.multiply(fare, tf.constant(0.2)))
  outputs[_LABEL_KEY] = tf.where(
      tf.math.is_nan(fare),
      tf.cast(tf.zeros_like(fare), tf.int64),
      tf.cast(was_big_tipper, tf.int64))

  return outputs
def _fill_in_missing(x):
  """Densify a rank-2 `SparseTensor` into a rank-1 dense tensor.

  Missing entries are filled with '' for string tensors and 0 otherwise.
  Inputs that are already dense are returned unchanged.

  Args:
    x: A `SparseTensor` of rank 2 whose dense shape has size at most 1 in
      the second dimension, or an already-dense tensor.

  Returns:
    A rank 1 tensor where missing values of `x` have been filled in.
  """
  if not isinstance(x, tf.sparse.SparseTensor):
    return x

  fill_value = '' if x.dtype == tf.string else 0
  # Force the second dimension to exactly 1 so the squeeze below is valid.
  widened = tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1])
  dense = tf.sparse.to_dense(widened, fill_value)
  return tf.squeeze(dense, axis=1)
```
Now, we pass in this feature engineering code to the `Transform` component and run it to transform your data.
```
transform = tfx.components.Transform(
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
module_file=os.path.abspath(_taxi_transform_module_file))
context.run(transform)
```
Let's examine the output artifacts of `Transform`. This component produces two types of outputs:
* `transform_graph` is the graph that can perform the preprocessing operations (this graph will be included in the serving and evaluation models).
* `transformed_examples` represents the preprocessed training and evaluation data.
```
transform.outputs
```
Take a peek at the `transform_graph` artifact. It points to a directory containing three subdirectories.
```
train_uri = transform.outputs['transform_graph'].get()[0].uri
os.listdir(train_uri)
```
The `transformed_metadata` subdirectory contains the schema of the preprocessed data. The `transform_fn` subdirectory contains the actual preprocessing graph. The `metadata` subdirectory contains the schema of the original data.
We can also take a look at the first three transformed examples:
```
# Get the URI of the output artifact representing the transformed examples, which is a directory
train_uri = os.path.join(transform.outputs['transformed_examples'].get()[0].uri, 'Split-train')
# Get the list of files in this directory (all compressed TFRecord files)
tfrecord_filenames = [os.path.join(train_uri, name)
for name in os.listdir(train_uri)]
# Create a `TFRecordDataset` to read these files
dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
# Iterate over the first 3 records and decode them.
for tfrecord in dataset.take(3):
serialized_example = tfrecord.numpy()
example = tf.train.Example()
example.ParseFromString(serialized_example)
pp.pprint(example)
```
After the `Transform` component has transformed your data into features, the next step is to train a model.
### Trainer
The `Trainer` component will train a model that you define in TensorFlow. The default Trainer supports the Estimator API; to use the Keras API, you need to specify the [Generic Trainer](https://github.com/tensorflow/community/blob/master/rfcs/20200117-tfx-generic-trainer.md) by setting `custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor)` in the Trainer's constructor.
`Trainer` takes as input the schema from `SchemaGen`, the transformed data and graph from `Transform`, training parameters, as well as a module that contains user-defined model code.
Let's see an example of user-defined model code below (for an introduction to the TensorFlow Keras APIs, [see the tutorial](https://www.tensorflow.org/guide/keras)):
```
_taxi_trainer_module_file = 'taxi_trainer.py'
%%writefile {_taxi_trainer_module_file}
from typing import List, Text
import os
import absl
import datetime
import tensorflow as tf
import tensorflow_transform as tft
from tfx import v1 as tfx
from tfx_bsl.public import tfxio
import taxi_constants
_DENSE_FLOAT_FEATURE_KEYS = taxi_constants.DENSE_FLOAT_FEATURE_KEYS
_VOCAB_FEATURE_KEYS = taxi_constants.VOCAB_FEATURE_KEYS
_VOCAB_SIZE = taxi_constants.VOCAB_SIZE
_OOV_SIZE = taxi_constants.OOV_SIZE
_FEATURE_BUCKET_COUNT = taxi_constants.FEATURE_BUCKET_COUNT
_BUCKET_FEATURE_KEYS = taxi_constants.BUCKET_FEATURE_KEYS
_CATEGORICAL_FEATURE_KEYS = taxi_constants.CATEGORICAL_FEATURE_KEYS
_MAX_CATEGORICAL_FEATURE_VALUES = taxi_constants.MAX_CATEGORICAL_FEATURE_VALUES
_LABEL_KEY = taxi_constants.LABEL_KEY
def _get_serve_tf_examples_fn(model, tf_transform_output):
  """Returns a function that parses a serialized tf.Example and applies TFT."""
  # Attach the TFT layer to the model so it is tracked and exported with it.
  model.tft_layer = tf_transform_output.transform_features_layer()

  @tf.function
  def serve_tf_examples_fn(serialized_tf_examples):
    """Serving signature: serialized tf.Examples in, model predictions out."""
    raw_spec = tf_transform_output.raw_feature_spec()
    # The label is never present at serving time.
    raw_spec.pop(_LABEL_KEY)
    raw_features = tf.io.parse_example(serialized_tf_examples, raw_spec)
    return model(model.tft_layer(raw_features))

  return serve_tf_examples_fn
def _input_fn(file_pattern: List[Text],
              data_accessor: tfx.components.DataAccessor,
              tf_transform_output: tft.TFTransformOutput,
              batch_size: int = 200) -> tf.data.Dataset:
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    data_accessor: DataAccessor for converting input to RecordBatch.
    tf_transform_output: A TFTransformOutput.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch

  Returns:
    A dataset that contains (features, indices) tuple where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  # Batch the records and split out the (transformed) label column.
  dataset_options = tfxio.TensorFlowDatasetOptions(
      batch_size=batch_size, label_key=_LABEL_KEY)
  return data_accessor.tf_dataset_factory(
      file_pattern, dataset_options,
      tf_transform_output.transformed_metadata.schema)
def _build_keras_model(hidden_units: List[int] = None) -> tf.keras.Model:
  """Creates a DNN Keras model for classifying taxi data.

  Args:
    hidden_units: [int], the layer sizes of the DNN (input layer first).
      Falls back to [100, 70, 50, 25] when None.

  Returns:
    A keras Model.
  """
  # Deep branch: standardized dense floats feed straight into the DNN.
  deep_columns = [
      tf.feature_column.numeric_column(key, shape=())
      for key in _DENSE_FLOAT_FEATURE_KEYS
  ]

  # Wide branch: each discrete feature becomes an identity column sized to
  # its id space (vocab + OOV buckets, quantile buckets, or per-feature max).
  id_columns = [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)
      for key in _VOCAB_FEATURE_KEYS
  ]
  id_columns += [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)
      for key in _BUCKET_FEATURE_KEYS
  ]
  id_columns += [
      tf.feature_column.categorical_column_with_identity(  # pylint: disable=g-complex-comprehension
          key, num_buckets=num_buckets, default_value=0)
      for key, num_buckets in zip(_CATEGORICAL_FEATURE_KEYS,
                                  _MAX_CATEGORICAL_FEATURE_VALUES)
  ]
  # Identity columns are wrapped as indicator (one-hot) columns for the
  # linear part of the model.
  wide_columns = [
      tf.feature_column.indicator_column(column) for column in id_columns
  ]

  # TODO(b/139668410) replace with premade wide_and_deep keras model
  return _wide_and_deep_classifier(
      wide_columns=wide_columns,
      deep_columns=deep_columns,
      dnn_hidden_units=hidden_units or [100, 70, 50, 25])
def _wide_and_deep_classifier(wide_columns, deep_columns, dnn_hidden_units):
  """Build a simple keras wide and deep model.

  Args:
    wide_columns: Feature columns wrapped in indicator_column for wide (linear)
      part of the model.
    deep_columns: Feature columns for deep part of the model.
    dnn_hidden_units: [int], the layer sizes of the hidden DNN.

  Returns:
    A Wide and Deep Keras model
  """
  # Following values are hard coded for simplicity in this example,
  # however preferably they should be passed in as hparams.

  # Keras needs the feature definitions at compile time.
  # TODO(b/139081439): Automate generation of input layers from FeatureColumn.
  input_layers = {
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype=tf.float32)
      for colname in _DENSE_FLOAT_FEATURE_KEYS
  }
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in _VOCAB_FEATURE_KEYS
  })
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in _BUCKET_FEATURE_KEYS
  })
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in _CATEGORICAL_FEATURE_KEYS
  })

  # TODO(b/161952382): Replace with Keras preprocessing layers.
  deep = tf.keras.layers.DenseFeatures(deep_columns)(input_layers)
  for numnodes in dnn_hidden_units:
    deep = tf.keras.layers.Dense(numnodes)(deep)
  wide = tf.keras.layers.DenseFeatures(wide_columns)(input_layers)

  # Single-logit head: BinaryCrossentropy(from_logits=True) below expects raw
  # logits, so there is deliberately no activation here.
  output = tf.keras.layers.Dense(1)(
      tf.keras.layers.concatenate([deep, wide]))

  model = tf.keras.Model(input_layers, output)
  model.compile(
      loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
      # `lr` is a deprecated alias (removed in newer Keras releases);
      # `learning_rate` is the supported argument name.
      optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
      metrics=[tf.keras.metrics.BinaryAccuracy()])
  model.summary(print_fn=absl.logging.info)
  return model
# TFX Trainer will call this function.
def run_fn(fn_args: tfx.components.FnArgs):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  # DNN size schedule: first hidden layer has 100 nodes, each subsequent
  # layer shrinks by a factor of 0.7, never dropping below 2 nodes.
  base_layer_size = 100
  layer_count = 4
  decay = 0.7
  layer_sizes = [
      max(2, int(base_layer_size * decay**depth))
      for depth in range(layer_count)
  ]

  transform_output = tft.TFTransformOutput(fn_args.transform_output)

  train_dataset = _input_fn(fn_args.train_files, fn_args.data_accessor,
                            transform_output, 40)
  eval_dataset = _input_fn(fn_args.eval_files, fn_args.data_accessor,
                           transform_output, 40)

  model = _build_keras_model(hidden_units=layer_sizes)

  # Log per-batch metrics so the training curves can be viewed in TensorBoard.
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir=fn_args.model_run_dir, update_freq='batch')

  model.fit(
      train_dataset,
      steps_per_epoch=fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps,
      callbacks=[tensorboard_callback])

  # Export with a serving signature that accepts serialized tf.Examples and
  # applies the Transform graph before prediction.
  signatures = {
      'serving_default':
          _get_serve_tf_examples_fn(
              model, transform_output).get_concrete_function(
                  tf.TensorSpec(shape=[None], dtype=tf.string,
                                name='examples')),
  }
  model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
```
Now, we pass in this model code to the `Trainer` component and run it to train the model.
```
trainer = tfx.components.Trainer(
module_file=os.path.abspath(_taxi_trainer_module_file),
examples=transform.outputs['transformed_examples'],
transform_graph=transform.outputs['transform_graph'],
schema=schema_gen.outputs['schema'],
train_args=tfx.proto.TrainArgs(num_steps=10000),
eval_args=tfx.proto.EvalArgs(num_steps=5000))
context.run(trainer)
```
#### Analyze Training with TensorBoard
Take a peek at the trainer artifact. It points to a directory containing the model subdirectories.
```
model_artifact_dir = trainer.outputs['model'].get()[0].uri
pp.pprint(os.listdir(model_artifact_dir))
model_dir = os.path.join(model_artifact_dir, 'Format-Serving')
pp.pprint(os.listdir(model_dir))
```
Optionally, we can connect TensorBoard to the Trainer to analyze our model's training curves.
```
model_run_artifact_dir = trainer.outputs['model_run'].get()[0].uri
%load_ext tensorboard
%tensorboard --logdir {model_run_artifact_dir}
```
### Evaluator
The `Evaluator` component computes model performance metrics over the evaluation set. It uses the [TensorFlow Model Analysis](https://www.tensorflow.org/tfx/model_analysis/get_started) library. The `Evaluator` can also optionally validate that a newly trained model is better than the previous model. This is useful in a production pipeline setting where you may automatically train and validate a model every day. In this notebook, we only train one model, so the `Evaluator` automatically will label the model as "good".
`Evaluator` will take as input the data from `ExampleGen`, the trained model from `Trainer`, and slicing configuration. The slicing configuration allows you to slice your metrics on feature values (e.g. how does your model perform on taxi trips that start at 8am versus 8pm?). See an example of this configuration below:
```
eval_config = tfma.EvalConfig(
model_specs=[
# This assumes a serving model with signature 'serving_default'. If
# using estimator based EvalSavedModel, add signature_name: 'eval' and
# remove the label_key.
tfma.ModelSpec(
signature_name='serving_default',
label_key='tips'
)
],
metrics_specs=[
tfma.MetricsSpec(
# The metrics added here are in addition to those saved with the
# model (assuming either a keras model or EvalSavedModel is used).
# Any metrics added into the saved model (for example using
# model.compile(..., metrics=[...]), etc) will be computed
# automatically.
# To add validation thresholds for metrics saved with the model,
# add them keyed by metric name to the thresholds map.
metrics=[
tfma.MetricConfig(class_name='ExampleCount'),
tfma.MetricConfig(class_name='BinaryAccuracy',
threshold=tfma.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={'value': 0.5}),
# Change threshold will be ignored if there is no
# baseline model resolved from MLMD (first run).
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={'value': -1e-10})))
]
)
],
slicing_specs=[
# An empty slice spec means the overall slice, i.e. the whole dataset.
tfma.SlicingSpec(),
# Data can be sliced along a feature column. In this case, data is
# sliced along feature column trip_start_hour.
tfma.SlicingSpec(feature_keys=['trip_start_hour'])
])
```
Next, we give this configuration to `Evaluator` and run it.
```
# Use TFMA to compute evaluation statistics over features of a model and
# validate them against a baseline.

# The model resolver is only required if performing model validation in addition
# to evaluation. In this case we validate against the latest blessed model. If
# no model has been blessed before (as in this case), the evaluator will make our
# candidate the first blessed model.
model_resolver = tfx.dsl.Resolver(
    strategy_class=tfx.dsl.experimental.LatestBlessedModelStrategy,
    model=tfx.dsl.Channel(type=tfx.types.standard_artifacts.Model),
    model_blessing=tfx.dsl.Channel(
        type=tfx.types.standard_artifacts.ModelBlessing)).with_id(
            'latest_blessed_model_resolver')
context.run(model_resolver)

# Evaluate the freshly trained model against the resolved baseline using the
# slicing and threshold configuration defined in `eval_config` above.
evaluator = tfx.components.Evaluator(
    examples=example_gen.outputs['examples'],
    model=trainer.outputs['model'],
    baseline_model=model_resolver.outputs['model'],
    eval_config=eval_config)
context.run(evaluator)
```
Now let's examine the output artifacts of `Evaluator`.
```
evaluator.outputs
```
Using the `evaluation` output we can show the default visualization of global metrics on the entire evaluation set.
```
context.show(evaluator.outputs['evaluation'])
```
To see the visualization for sliced evaluation metrics, we can directly call the TensorFlow Model Analysis library.
```
import tensorflow_model_analysis as tfma
# Get the TFMA output result path and load the result.
PATH_TO_RESULT = evaluator.outputs['evaluation'].get()[0].uri
tfma_result = tfma.load_eval_result(PATH_TO_RESULT)
# Show data sliced along feature column trip_start_hour.
tfma.view.render_slicing_metrics(
tfma_result, slicing_column='trip_start_hour')
```
This visualization shows the same metrics, but computed at every feature value of `trip_start_hour` instead of on the entire evaluation set.
TensorFlow Model Analysis supports many other visualizations, such as Fairness Indicators and plotting a time series of model performance. To learn more, see [the tutorial](https://www.tensorflow.org/tfx/tutorials/model_analysis/tfma_basic).
Since we added thresholds to our config, validation output is also available. The presence of a `blessing` artifact indicates that our model passed validation. Since this is the first validation being performed, the candidate is automatically blessed.
```
blessing_uri = evaluator.outputs['blessing'].get()[0].uri
!ls -l {blessing_uri}
```
Now we can also verify the success by loading the validation result record:
```
PATH_TO_RESULT = evaluator.outputs['evaluation'].get()[0].uri
print(tfma.load_validation_result(PATH_TO_RESULT))
```
### Pusher
The `Pusher` component is usually at the end of a TFX pipeline. It checks whether a model has passed validation, and if so, exports the model to `_serving_model_dir`.
```
pusher = tfx.components.Pusher(
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=tfx.proto.PushDestination(
filesystem=tfx.proto.PushDestination.Filesystem(
base_directory=_serving_model_dir)))
context.run(pusher)
```
Let's examine the output artifacts of `Pusher`.
```
pusher.outputs
```
In particular, the Pusher will export your model in the SavedModel format, which looks like this:
```
push_uri = pusher.outputs['pushed_model'].get()[0].uri
model = tf.saved_model.load(push_uri)
for item in model.signatures.items():
pp.pprint(item)
```
We've finished our tour of built-in TFX components!
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Mandelbrot set
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r1/tutorials/non-ml/mandelbrot.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r1/tutorials/non-ml/mandelbrot.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
Visualizing the [Mandelbrot set](https://en.wikipedia.org/wiki/Mandelbrot_set) doesn't have anything to do with machine learning, but it makes for a fun example of how one can use TensorFlow for general mathematics. This is actually a pretty naive implementation of the visualization, but it makes the point. (We may end up providing a more elaborate implementation down the line to produce more truly beautiful images.)
## Basic setup
You'll need a few imports to get started.
```
from __future__ import absolute_import, division, print_function, unicode_literals
# Import libraries for simulation
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import tensorflow.compat.v1 as tf
import numpy as np
# Imports for visualization
import PIL.Image
from io import BytesIO
from IPython.display import clear_output, Image, display
```
Now you'll define a function to actually display the image once you have iteration counts.
```
def DisplayFractal(a, fmt='jpeg'):
    """Render an array of per-pixel iteration counts as a colourful
    fractal image and show it inline in the notebook."""
    # Map counts onto a cyclic colour wheel with a period of 20 iterations.
    phase = (6.28 * a / 20.0).reshape(list(a.shape) + [1])
    rgb = np.concatenate([10 + 20 * np.cos(phase),
                          30 + 50 * np.sin(phase),
                          155 - 80 * np.cos(phase)], 2)
    # Points that never diverged (count == max) are painted black.
    rgb[a == a.max()] = 0
    pixels = np.uint8(np.clip(rgb, 0, 255))
    buf = BytesIO()
    PIL.Image.fromarray(pixels).save(buf, fmt)
    display(Image(data=buf.getvalue()))
```
# Session and variable initialization
For playing around like this, an interactive session is often used, but a regular session would work as well.
```
# TF1-style interactive session: lets .run()/.eval() below work without
# passing a session argument explicitly.
sess = tf.InteractiveSession()
```
It's handy that you can freely mix NumPy and TensorFlow.
```
# Use NumPy to create a 2D array of complex numbers
# Grid covers real axis [-2, 1) and imaginary axis [-1.3, 1.3) in steps of
# 0.005 — Z holds one complex starting point c per pixel.
Y, X = np.mgrid[-1.3:1.3:0.005, -2:1:0.005]
Z = X+1j*Y
```
Now you define and initialize TensorFlow tensors.
```
# xs holds the fixed per-pixel constants c; zs is the mutable iterate
# (seeded at c) and ns accumulates each pixel's iteration count.
xs = tf.constant(Z.astype(np.complex64))
zs = tf.Variable(xs)
ns = tf.Variable(tf.zeros_like(xs, tf.float32))
```
TensorFlow requires that you explicitly initialize variables before using them.
```
# TF1 requires explicit variable initialization before .run()/.eval().
tf.global_variables_initializer().run()
```
# Defining and running the computation
Now you specify more of the computation...
```
# Compute the new values of z: z^2 + x
zs_ = zs*zs + xs
# Have we diverged with this new value?
not_diverged = tf.abs(zs_) < 4
# Operation to update the zs and the iteration count.
#
# Note: We keep computing zs after they diverge! This
# is very wasteful! There are better, if a little
# less simple, ways to do this.
#
# `step` groups both updates into one op: advance every z one Mandelbrot
# iteration and bump ns only for points still inside |z| < 4.
step = tf.group(
    zs.assign(zs_),
    ns.assign_add(tf.cast(not_diverged, tf.float32))
)
```
... and run it for a couple hundred steps
```
# 200 Mandelbrot iterations; each .run() advances every pixel one step.
for i in range(200): step.run()
```
Let's see what you've got.
```
# Render the per-pixel escape counts as a colour image.
DisplayFractal(ns.eval())
```
Not bad!
| github_jupyter |
# Finn's original architecture (with ReLU by default)
## DNA model
| Epoch | Loss |
|-------|------------|
| 0 | 0.00885869 |
| 1 | 0.00435413 |
| 2 | 0.00305164 |
| 3 | 0.00242613 |
| 4 | 0.0022955 |
| 5 | 0.00239393 |
| 6 | 0.00235859 |
| 7 | 0.00217946 |
| 8 | 0.00212445 |
| 9 | 0.00261186 |
| 10 | 0.00179996 |
| 11 | 0.00207464 |
| 12 | 0.00266546 |
| 13 | 0.0020954 |
| 14 | 0.0018548 |
| 15 | 0.00192524 |
| 16 | 0.0021378 |
| 17 | 0.00173302 |
| 18 | 0.00217417 |
| 19 | 0.00203546 |
| 20 | 0.00244354 |
| 21 | 0.00195195 |
| 22 | 0.002178 |
| 23 | 0.00172652 |
| 24 | 0.00152009 |
| 25 | 0.0018982 |
| 26 | 0.0021023 |
| 27 | 0.00209651 |
| 28 | 0.00174374 |
| 29 | 0.00183438 |
| 30 | 0.00209399 |
## STP Model
| Epoch | Loss |
|-------|------------|
| 0 | 0.160026 |
| 1 | 0.163783 |
| 2 | 0.0813082 |
| 3 | 0.0431279 |
| 4 | 0.0255204 |
| 5 | 0.0157737 |
| 6 | 0.0104776 |
| 7 | 0.00837719 |
| 8 | 0.00575892 |
| 9 | 0.00472899 |
| 10 | 0.0038674 |
| 11 | 0.00316728 |
| 12 | 0.00318339 |
| 13 | 0.00281497 |
| 14 | 0.00257577 |
| 15 | 0.00261247 |
| 16 | 0.00334366 |
| 17 | 0.00215146 |
| 18 | 0.00241919 |
| 19 | 0.00301727 |
| 20 | 0.00247901 |
| 21 | 0.00256165 |
| 22 | 0.00222308 |
# CDNA Model
| Epoch | Loss |
|-------|------------|
| 0 | 0.0157513 |
| 1 | 0.00734751 |
| 2 | 0.00380376 |
| 3 | 0.00280797 |
| 4 | 0.00220003 |
| 5 | 0.00229634 |
| 6 | 0.0021313 |
| 7 | 0.00219271 |
| 8 | 0.00230658 |
| 9 | 0.00201319 |
| 10 | 0.00209318 |
| 11 | 0.00226459 |
| 12 | 0.00201937 |
| 13 | 0.00200107 |
| 14 | 0.00181302 |
| 15 | 0.00218307 |
| 16 | 0.00174426 |
| 17 | 0.00147213 |
| 18 | 0.00186553 |
| 19 | 0.0019825 |
| 20 | 0.00220671 |
# Layers activation
The layers activation were generated at epoch 50 for the four convolutions and the three deconvolutions:

# Finn's original architecture (without ReLU)
## DNA Model
| Epoch | Loss |
|-------|------------|
| 0 | 0.00841984 |
| 1 | 0.0031973 |
| 2 | 0.00216421 |
| 3 | 0.00214872 |
| 4 | 0.00167969 |
| 5 | 0.00169004 |
| 6 | 0.00189414 |
| 7 | 0.00192797 |
| 8 | 0.00173829 |
| 9 | 0.00150361 |
| 10 | 0.00218793 |
| 11 | 0.00186441 |
| 12 | 0.00172768 |
| 13 | 0.00181104 |
| 14 | 0.00205145 |
| 15 | 0.00207403 |
| 16 | 0.00199683 |
| 17 | 0.00194295 |
| 18 | 0.00222396 |
| 19 | 0.00202953 |
| 20 | 0.0023129 |
## CDNA Model
| Epoch | Loss |
|-------|------------|
| 0 | 0.0152586 |
| 1 | 0.00721063 |
| 2 | 0.00273944 |
| 3 | 0.00176518 |
| 4 | 0.00226083 |
| 5 | 0.00203949 |
| 6 | 0.00194076 |
| 7 | 0.00178836 |
| 8 | 0.0023125 |
| 9 | 0.00232345 |
| 10 | 0.00183922 |
| 11 | 0.00182784 |
| 12 | 0.00229121 |
| 13 | 0.00195413 |
| 14 | 0.00184336 |
| 15 | 0.00206291 |
| 16 | 0.00188789 |
| 17 | 0.0024183 |
| 18 | 0.00234721 |
| 19 | 0.00204554 |
| 20 | 0.00162369 |
## STP Model
| Epoch | Loss |
|-------|------------|
| 0 | 0.174032 |
| 1 | 0.144849 |
| 2 | 0.0617461 |
| 3 | 0.0286316 |
| 4 | 0.0138448 |
| 5 | 0.00981063 |
| 6 | 0.00871292 |
| 7 | 0.00855063 |
| 8 | 0.00879612 |
| 9 | 0.0070808 |
| 10 | 0.00722346 |
| 11 | 0.0071705 |
| 12 | 0.0063866 |
| 13 | 0.0062518 |
| 14 | 0.00600738 |
| 15 | 0.00571284 |
| 16 | 0.00499165 |
| 17 | 0.00478733 |
| 18 | 0.00449591 |
| 19 | 0.00435347 |
| 20 | 0.00405586 |
| github_jupyter |
<a href="https://colab.research.google.com/github/tcardlab/optimus_bind_sample/blob/develop/notebooks/3_0_TJC_Cleaning_Code_While_No_Testing.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
**Open in Colab, gets cut off on github**
I'm reviewing and cleaning up SKEMPItoPandas and found 2 quirks.
- Skempi_df['Temperature'][6665:6669]=''
for whatever reason, these blanks did not default to 298.0 and are nan
- Unfortunately, using df.fillna to patch those 4 values introduces changes. All values of temperature test as equal, but in the calculation the results are different. I am uncertain which output you would consider correct.
**Original values vs df.fillna(298)**
```
ddgMedian
expected (tmp was nan in original):
6665 4G0N_A_B [3.2996884744981614 vs 2.9170490380241167]
6666 1C1Y_A_B [nan vs 2.619152014020517]
6667 1LFD_A_B [3.8958503047001924 vs 3.3114853574091185]
6668 1LFD_A_B [nan vs -0.21121814832220487]
unexpected(same pdb #'s):
6472 1LFD_A_B [3.8958503047001924 vs 3.3114853574091185] 310.0
6493 4G0N_A_B [3.2996884744981614 vs 2.9170490380241167] 308.0
```
is it possible that changing nan to default affected the median calculation
```
df.groupby(...)['ddG'].transform('median')
```
Thus creating two unexpected outputs with similar pdb#'s?
The worst-case scenario is we use df.dropna for temperature too. I have tested it and it produces consistent results.
```
import pandas as pd
import numpy as np
import re
# Download SKEMPI v2; the two pure-python alternatives are kept commented.
link = 'https://life.bsc.es/pid/skempi2/database/download/skempi_v2.csv'
'''Proper python retreival'''
#from urllib.request import urlretrieve
#csv_path, _ = urlretrieve(link,f'skempi_v2.0.csv')
'''Direct import to Pandas'''
#data = pd.read_csv(link, sep=';')
#print(data)
'''OS get'''
# IPython shell magic — only works inside a notebook/IPython session.
!wget $link -O skempi_v2.0.csv #-O to rename
```
#original
```
def SKEMPItoPandas(SKEMPI_loc):
    '''
    Purpose:
    1. Loads SKEMPI CSV file.
    2. Calculates ddG
    3. For multiple measurements, keeps the median value
    4. Eliminates entries with mutations on both sides of the interface
    Input:
    SKEMPI_loc : Location of SKEMPI CSV file
    Output:
    SKEMPI_df : Pandas dataframe
    '''
    # fix this
    pd.options.mode.chained_assignment = None # default='warn'
    # Constants
    R = 1.9872036e-3 # Ideal Gas Constant in kcal
    SKEMPI_df = pd.read_csv(SKEMPI_loc, sep=';')
    # Convert non numeric temperature comments to numeric values. Default is 298K
    # NOTE(review): the `or 298` below is dead code — .group(0) of a successful
    # match is never an empty string, and a digit-free value makes re.search
    # return None so .group raises AttributeError. NaN temperatures (rows
    # 6665-6668) also pass through untouched; this is the bug discussed in the
    # comparison cells later in this notebook.
    ConvertTemp = lambda x: int(re.search(r'\d+', x).group(0) or 298)
    BadTemps = SKEMPI_df.Temperature.str.isnumeric() == 0
    SKEMPI_df['Temperature'].loc[BadTemps] = SKEMPI_df['Temperature'].loc[BadTemps].map(ConvertTemp)
    SKEMPI_df['Temperature'] = pd.to_numeric(SKEMPI_df['Temperature'], errors='coerce')
    # Drop missing values
    #SKEMPI_df.dropna(subset=['Temperature'], inplace=True)
    SKEMPI_df.dropna(subset=['Affinity_wt_parsed'], inplace=True)
    SKEMPI_df.dropna(subset=['Affinity_mut_parsed'], inplace=True)
    # Calculate free energies
    # dG = -R*T*ln(K) in kcal/mol; ddG is wild-type minus mutant.
    SKEMPI_df['dgWT'] = -R*SKEMPI_df['Temperature']*np.log(SKEMPI_df['Affinity_wt_parsed'])
    SKEMPI_df['dgMut'] = -R*SKEMPI_df['Temperature']*np.log(SKEMPI_df['Affinity_mut_parsed'])
    SKEMPI_df['ddG'] = SKEMPI_df['dgWT']-SKEMPI_df['dgMut']
    # Create a key for unique mutations based on PDB and
    SKEMPI_df['MutKey'] = SKEMPI_df['#Pdb']+'_'+SKEMPI_df['Mutation(s)_PDB']
    # Replace multiple measurements of the same mutation with the group mean
    # May consider grouping by experimental method as well
    SKEMPI_df['ddgMedian'] = SKEMPI_df.groupby('MutKey')['ddG'].transform('median')
    SKEMPI_df = SKEMPI_df.drop_duplicates(subset=['MutKey', 'Temperature'], keep='first', inplace=False)
    # Flag multiple mutations in the same protein
    SKEMPI_df['NumMutations'] = SKEMPI_df['Mutation(s)_PDB'].str.count(',')+1
    # Extract Chains and remove cross chain mutations. Chain is the second position in the mutation code
    SKEMPI_df['Prot1Chain'] = SKEMPI_df['#Pdb'].str.split('_').str[1]
    SKEMPI_df['Prot2Chain'] = SKEMPI_df['#Pdb'].str.split('_').str[2]
    SKEMPI_df['MutSplit'] = SKEMPI_df['Mutation(s)_PDB'].str.split(',')
    def ChainCheck(df):
        # Returns True when a row's mutations hit chains on both sides of
        # the interface, False otherwise.
        if df['NumMutations'] == 1:
            CrossChain = False
            return CrossChain
        else:
            Chain = df['MutSplit'][0][1]
            if Chain in df['Prot1Chain']:
                ChainSet = df['Prot1Chain']
            elif Chain in df['Prot2Chain']:
                ChainSet = df['Prot2Chain']
            # NOTE(review): if Chain matches neither side, ChainSet is left
            # unbound and the loop below raises NameError.
            for i in range(len(df['MutSplit'])):
                Chain = df['MutSplit'][i][1]
                if Chain in ChainSet:
                    CrossChain = False
                else:
                    CrossChain = True
                    break
            return CrossChain
    SKEMPI_df['CrossChain'] = SKEMPI_df.apply(ChainCheck, axis=1)
    SKEMPI_SingleSided = SKEMPI_df[SKEMPI_df.CrossChain == False]
    NumProteins = SKEMPI_SingleSided['#Pdb'].nunique()
    NumMutations = SKEMPI_SingleSided['#Pdb'].count()
    print("There are %s unique single sided mutations in %s proteins" % (NumMutations, NumProteins))
    return SKEMPI_SingleSided
og_output = SKEMPItoPandas('skempi_v2.0.csv')
```
#Changed
##v1.0
```
def ChainCheck(df):
    """Return True when a row's mutations touch chains on both sides of
    the protein-protein interface, False when all are on one side.

    df : a row (Series-like) with 'NumMutations', 'MutSplit',
         'Prot1Chain' and 'Prot2Chain' fields. The chain letter is the
         second character of each mutation code (e.g. 'DA33A' -> 'A').
    """
    # A single mutation can never span the interface.
    if df['NumMutations'] == 1:
        return False
    first_chain = df['MutSplit'][0][1]
    if first_chain in df['Prot1Chain']:
        chain_set = df['Prot1Chain']
    elif first_chain in df['Prot2Chain']:
        chain_set = df['Prot2Chain']
    else:
        # Bug fix: the original left ChainSet unbound here, raising
        # NameError. An unrecognised chain is safest treated as cross-chain.
        return True
    # Cross-chain iff any mutation's chain falls outside the first one's side.
    return any(mut[1] not in chain_set for mut in df['MutSplit'])
def gibbsEq(Kd, tmp):
    """Free energy of binding, ΔG = -R*T*ln(Kd), in kcal/mol."""
    gas_const_kcal = 1.9872036e-3  # ideal gas constant, kcal/(mol*K)
    return -gas_const_kcal * tmp * np.log(Kd)  # np.log is the natural log
def SKEMPItoPandas1(SKEMPI_loc):
    '''
    Purpose:
    1. Loads SKEMPI CSV file.
    2. Calculates ddG
    3. For multiple measurements, keeps the median value
    4. Eliminates entries with mutations on both sides of the interface
    Input:
    SKEMPI_loc : Location of SKEMPI CSV file
    Output:
    SKEMPI_df : Pandas dataframe
    '''
    SKEMPI_df = pd.read_csv(SKEMPI_loc, sep=';')
    # Convert non numeric temperature comments to numeric values.
    # Default is 298K
    # str.extract pulls the first run of digits; coercion turns the rest
    # into NaN, which fillna then defaults to 298 K (the fix for the
    # nan-temperature rows 6665-6668 discussed above).
    SKEMPI_df['Temperature'] = SKEMPI_df['Temperature'].str.extract(r'(\d+)')
    SKEMPI_df['Temperature'] = pd.to_numeric(SKEMPI_df['Temperature'],
                                             errors='coerce')
    SKEMPI_df['Temperature'].fillna(298, inplace=True)
    # Drop missing values
    #SKEMPI_df.dropna(subset=['Temperature'], inplace=True)
    SKEMPI_df.dropna(subset=['Affinity_wt_parsed'], inplace=True)
    SKEMPI_df.dropna(subset=['Affinity_mut_parsed'], inplace=True)
    # Calculate free energies
    SKEMPI_df['dgWT'] = gibbsEq(SKEMPI_df['Affinity_wt_parsed'],
                                SKEMPI_df['Temperature'])
    SKEMPI_df['dgMut'] = gibbsEq(SKEMPI_df['Affinity_mut_parsed'],
                                 SKEMPI_df['Temperature'])
    SKEMPI_df['ddG'] = SKEMPI_df['dgWT']-SKEMPI_df['dgMut']
    # Create a key for unique mutations based on PDB and
    SKEMPI_df['MutKey'] = SKEMPI_df['#Pdb']+'_'+SKEMPI_df['Mutation(s)_PDB']
    # Replace multiple measurements of the same mutation with the group mean
    # May consider grouping by experimental method as well
    SKEMPI_df['ddgMedian'] = SKEMPI_df.groupby('MutKey')['ddG'].transform('median')
    SKEMPI_df = SKEMPI_df.drop_duplicates(subset=['MutKey', 'Temperature'],
                                          keep='first', inplace=False)
    # Flag multiple mutations in the same protein
    SKEMPI_df['MutSplit'] = SKEMPI_df['Mutation(s)_PDB'].str.split(',')
    SKEMPI_df['NumMutations'] = SKEMPI_df['MutSplit'].apply(len)
    # Extract Chains and remove cross chain mutations.
    # Chain is the second position in the mutation code
    SKEMPI_df['Prot1Chain'] = SKEMPI_df['#Pdb'].str.split('_').str[1]
    SKEMPI_df['Prot2Chain'] = SKEMPI_df['#Pdb'].str.split('_').str[2]
    SKEMPI_df['CrossChain'] = SKEMPI_df.apply(ChainCheck, axis=1)
    SKEMPI_SingleSided = SKEMPI_df[SKEMPI_df.CrossChain == False]
    NumProteins = SKEMPI_SingleSided['#Pdb'].nunique()
    NumMutations = SKEMPI_SingleSided['#Pdb'].count()
    print("There are %s unique single sided mutations in %s proteins" % (NumMutations, NumProteins))
    return SKEMPI_SingleSided
new_output = SKEMPItoPandas1('skempi_v2.0.csv')
new_output
```
##v1.1 as class?
###Base Case
https://www.kaggle.com/vinceniko/custom-pandas-subclass
```
import pandas as pd
class Pandas_Subclass(pd.DataFrame):
    """
    A way to create a Pandas subclass which initializes from another Pandas object without passing it into the class constructor.
    Allows complete overwriting of parent class constructor.
    Allows custom methods to be added onto Pandas objects (which can be created within the constructer itself).
    Ie. pass in a file_path to the class constructor which then calls pd.read_csv within __init__ which then assigns the returned DataFrame to self.
    Params:
    file_path (str): file_path passed into pd.read_csv().
    """
    def __init__(self, file_path):
        # Read the csv first, then hand the resulting DataFrame to the
        # parent constructor.
        super().__init__(pd.read_csv(file_path)) # initialize subclass from DataFrame instance
        # self.__dict__.update(pd.read_csv(file_path).__dict__) # the unpythonic way to do it
    def custom_method(self):
        # Demonstrates that methods added here see `self` as the subclass.
        print(self) # returns .csv as Dataframe
        print(type(self)) # returns <class '__main__.Pandas_Subclass'>
if __name__ == '__main__':
    df = Pandas_Subclass('../input/winemag-data_first150k.csv')
    df.custom_method()
```
###Initial Attempt
```
def ChainCheck(df):
    """Return True when a row's mutations touch chains on both sides of
    the interface, False when all are on one side.

    The chain letter is the second character of each mutation code
    (e.g. 'DA33A' -> 'A'); a row is cross-chain when any mutation's
    chain lies outside the side the first mutation belongs to.
    """
    if df['NumMutations'] == 1:
        # A single mutation can never span the interface.
        return False
    first_chain = df['MutSplit'][0][1]
    if first_chain in df['Prot1Chain']:
        chain_set = df['Prot1Chain']
    elif first_chain in df['Prot2Chain']:
        chain_set = df['Prot2Chain']
    else:
        # Bug fix: the original left ChainSet unbound here (NameError);
        # an unrecognised chain is safest treated as cross-chain.
        return True
    return any(mut[1] not in chain_set for mut in df['MutSplit'])
class MutantDataSet(pd.DataFrame):
    '''
    Subclassed Pandsas DataFrame
    Not sure what to think yet....
    '''
    #WHY cant I get this to work?
    def __init__(self, data, sep=',', index=None, columns=None, dtype=None,
                 copy=True,):
        '''Initialize subclass from DataFrame instance.'''
        #from csv
        # A string is treated as a csv path; anything else is passed to the
        # DataFrame constructor unchanged.
        if type(data)==str:
            data=pd.read_csv(data, sep=sep)
        super(MutantDataSet, self).__init__(data=data,
                                            index=index,
                                            columns=columns,
                                            dtype=dtype,
                                            copy=copy)
    # def __init__(self, data, index=None, columns=None, dtype=str,
    #              copy=True, sep=';'):
    #     super(MutantDataSet, self).__init__(data=pd.read_csv(data, sep=sep),
    #                                         index=index,
    #                                         columns=columns,
    #                                         dtype=dtype,
    #                                         copy=copy)
    def Mutations(self, row):
        '''Returns dictionary of mutation identifiers.'''
        keys = ['initAA', 'chain', 'loc', 'mutAA'] # code key
        mut_codes = self.loc[row]['Mutation(s)_cleaned'].split(',')
        # Split each code into (char|digit-run) tokens, then transpose so
        # each key maps to a tuple with one entry per mutation code.
        unzip_code = zip(*[re.findall('(\d+|.)', mut) for mut in mut_codes])
        mut_dct = dict(zip(keys, unzip_code))
        return mut_dct
    def to_numeric(self, keys):
        '''
        converts column of single or list of keys to numeric values
        '''
        self[keys] = self[keys].apply(pd.to_numeric, errors='coerce')
        return self[keys]
    def to_numeric2(self, keys):
        '''
        converts column of single or list of keys to numeric values
        not as good
        '''
        keys = [keys] if type(keys)==str else keys
        for k in keys:
            self[k] = pd.to_numeric(self[k], errors='coerce')
        return self
    def gibbsEq(self, Kd_key, tmp_key='Temperature'):
        # ΔG = -R*T*ln(Kd), in kcal/mol.
        R = 1.9872036e-3 # Ideal Gas Constant in kcal
        ΔG = -R * self[tmp_key] * np.log(self[Kd_key]) #log is ln in np
        return ΔG
    def ddG(self, wild, mutant, tmp_key='Temperature'):
        # Adds dgWT/dgMut/ddG columns in place and returns self for chaining.
        self['dgWT'] = self.gibbsEq(wild, tmp_key)
        self['dgMut'] = self.gibbsEq(mutant, tmp_key)
        self['ddG'] = self['dgWT']-self['dgMut']
        return self
    def grouped_avg(self, group_keys, avg_key):
        '''
        DANGEROUS! not sure if median value will be returned to correct indecies
        '''
        # transform('median') broadcasts the group median back onto each
        # row's original index.
        averaged = self.groupby(group_keys)[avg_key].transform('median')
        return averaged
    def find_cross_chains(self):
        # Adds Prot1Chain/Prot2Chain columns, then applies ChainCheck per row.
        self['Prot1Chain'] = self['#Pdb'].str.split('_').str[1]
        self['Prot2Chain'] = self['#Pdb'].str.split('_').str[2]
        crossChain = self.apply(ChainCheck, axis=1)
        return crossChain
    @property
    def _constructor(self):
        # Ensures pandas operations return MutantDataSet, not DataFrame.
        return MutantDataSet # Class Name
'''
1) clean each dataset to create consistant MutantDataSet's
1a) store indeviduals in ~/data/intermediate
2) combine into uniform MutantDataSet
2a) store in ~/data/final
'''
#1 – clean skempi
def clean_Skempi(path):
    # Loads SKEMPI, normalizes temperatures, computes ddG, collapses repeated
    # measurements to the group median, and drops cross-chain mutations.
    #initialize class
    skempi = MutantDataSet(path, sep=';') # not working atm...
    #skempi_df = pd.read_csv(path, sep=';')
    #skempi = MutantDataSet(skempi_df)
    # Convert non-numeric temperature comments to numeric values. Default is 298K
    skempi['Temperature'] = skempi['Temperature'].str.extract(r'(\d+)')
    skempi['Temperature'] = skempi.to_numeric('Temperature')
    skempi['Temperature'].fillna(298, inplace=True) #6665-6668 blank
    # Calculate free energies
    dropna_lst = ['Affinity_wt_parsed','Affinity_mut_parsed']
    skempi.dropna(subset=dropna_lst, inplace=True)
    skempi = skempi.ddG('Affinity_wt_parsed', 'Affinity_mut_parsed')
    #Average and duplicate ddG/tmp values
    # NOTE(review): this overwrites the raw 'ddG' column with the group median.
    group_keys = ['#Pdb', 'Mutation(s)_PDB']
    skempi['ddG'] = skempi.grouped_avg(group_keys, 'ddG')
    skempi = skempi.drop_duplicates(subset=[*group_keys,'Temperature'],
                                    keep='first', inplace=False)
    # Flag multiple mutations in the same protein
    skempi['MutSplit'] = skempi['Mutation(s)_PDB'].str.split(',')
    skempi['NumMutations'] = skempi['MutSplit'].apply(len)
    # Extract Chains and remove cross chain mutations.
    skempi['CrossChain'] = skempi.find_cross_chains()
    SKEMPI_SingleSided = skempi[skempi.CrossChain == False]
    return SKEMPI_SingleSided
# Bug fix: `path` was never defined here (the os.path lines were commented
# out), so clean_Skempi(path) raised NameError. Pass the csv path directly;
# pandas resolves a relative path without os.path.abspath.
skempi_final = clean_Skempi('skempi_v2.0.csv')
NumProteins = skempi_final['#Pdb'].nunique()
NumMutations = skempi_final['#Pdb'].count()
print("There are %s unique single sided mutations in %s proteins" %
      (NumMutations, NumProteins))
#1a – store skempi in ~/data/intermediate
#1 – clean Other
#other = MutantDataSet('other.csv')
#1a – store Other in ~/data/intermediate
#2 – combine
#2a – store in ~/data/final
```
####tests(ignore)
```
#skempi = MutantDataSet('skempi_v2.0.csv', sep=';')
#skempi.to_numeric()
skempi_df = pd.read_csv('skempi_v2.0.csv', sep=';')
skempi = MutantDataSet(skempi_df)
'''To numeric test'''
#test1=skempi
#skempi=skempi.to_numeric("Temperature")
#test1=skempi.to_numeric2("Temperature")
#print(skempi.equals(test1))
'''Drop multiple at the same time test.'''
# NOTE(review): this is an alias, not a copy — skempi and test2 are the same
# object, so the equals() comparison below is trivially True. Use
# skempi.copy() for a meaningful test.
test2=skempi
skempi.dropna(subset=['Affinity_wt_parsed'], inplace=True)
skempi.dropna(subset=['Affinity_mut_parsed'], inplace=True)
test2.dropna(subset=['Affinity_wt_parsed','Affinity_mut_parsed'], inplace=True)
print(skempi.equals(test2))
'''Calculate free energies'''
#skempi['dgWT'] = gibbsEq(skempi, 'Affinity_wt_parsed', 'Temperature')
#skempi['dgMut'] = skempi.gibbsEq('Affinity_mut_parsed', 'Temperature')
#SKEMPI_df['dgWT'] = gibbsEq(SKEMPI_df['Affinity_wt_parsed'], SKEMPI_df['Temperature'])
```
###Final
For unknown reasons,
```skempi['Temperature'].fillna(value=298, inplace=True)```
introduces new changes outside of the nan values.
it may be reasonable to add **Temperature** to the **dropna_lst**
```
def ChainCheck(df):
    """Return True when a row's mutations touch chains on both sides of
    the interface, False when all are on one side.

    Should be a MutantDataSet method eventually; kept module-level so
    DataFrame.apply(ChainCheck, axis=1) works as-is.
    """
    if df['NumMutations'] == 1:
        # A single mutation can never span the interface.
        return False
    first_chain = df['MutSplit'][0][1]  # chain letter, e.g. 'DA33A' -> 'A'
    if first_chain in df['Prot1Chain']:
        chain_set = df['Prot1Chain']
    elif first_chain in df['Prot2Chain']:
        chain_set = df['Prot2Chain']
    else:
        # Bug fix: the original left ChainSet unbound here (NameError);
        # an unrecognised chain is safest treated as cross-chain.
        return True
    return any(mut[1] not in chain_set for mut in df['MutSplit'])
class MutantDataSet(pd.DataFrame):
    '''
    Subclassed Pandsas DataFrame
    Not sure what to think yet....
    '''
    def __init__(self, data, sep=',', index=None, columns=None, dtype=None,
                 copy=True,):
        '''Initialize subclass from DataFrame instance or csv path.'''
        # A string is treated as a csv path; anything else goes straight to
        # the DataFrame constructor.
        if type(data)==str:
            data=pd.read_csv(data, sep=sep)
        super(MutantDataSet, self).__init__(data=data,
                                            index=index,
                                            columns=columns,
                                            dtype=dtype,
                                            copy=copy)
    def Mutations(self, row):
        '''Returns dictionary of mutation identifiers.'''
        keys = ['initAA', 'chain', 'loc', 'mutAA'] # code key
        mut_codes = self.loc[row]['Mutation(s)_cleaned'].split(',')
        # Tokenize each code into (digit-run|char) pieces, then transpose so
        # each key maps to one tuple entry per mutation code.
        unzip_code = zip(*[re.findall('(\d+|.)', mut) for mut in mut_codes])
        mut_dct = dict(zip(keys, unzip_code))
        return mut_dct
    def to_numeric(self, keys):
        '''
        converts column of single or list of keys to numeric values
        '''
        self[keys] = self[keys].apply(pd.to_numeric, errors='coerce')
        return self[keys]
    def gibbsEq(self, Kd_key, tmp_key='Temperature'):
        # ΔG = -R*T*ln(Kd), in kcal/mol.
        R = 1.9872036e-3 # Ideal Gas Constant in kcal
        ΔG = -R * self[tmp_key] * np.log(self[Kd_key]) #log is ln in np
        return ΔG
    def solve_ddG(self, wild, mutant, tmp_key='Temperature'):
        # Adds dgWT/dgMut/ddG columns in place; returns self for chaining.
        self['dgWT'] = self.gibbsEq(wild, tmp_key)
        self['dgMut'] = self.gibbsEq(mutant, tmp_key)
        self['ddG'] = self['dgWT']-self['dgMut']
        return self
    def grouped_avg(self, group_keys, avg_key):
        '''
        rename to grouped_med...
        '''
        # transform('median') broadcasts the group median back onto each
        # row's original index.
        averaged = self.groupby(group_keys)[avg_key].transform('median')
        return averaged # returns series
    def find_cross_chains(self):
        # Adds Prot1Chain/Prot2Chain columns, then applies ChainCheck per row.
        self['Prot1Chain'] = self['#Pdb'].str.split('_').str[1]
        self['Prot2Chain'] = self['#Pdb'].str.split('_').str[2]
        crossChain = self.apply(ChainCheck, axis=1)
        return crossChain
    @property
    def _constructor(self):
        # Ensures pandas operations return MutantDataSet, not DataFrame.
        return MutantDataSet # Class Name
'''
1) clean each dataset to create consistant MutantDataSet's
1a) store indeviduals in ~/data/intermediate
2) combine into uniform MutantDataSet
2a) store in ~/data/final
'''
#1 – clean skempi
def clean_Skempi(path):
    # Loads SKEMPI, normalizes temperatures, computes ddG, stores the
    # per-mutation median in 'ddgMedian', and drops cross-chain mutations.
    # Initialize class
    skempi = MutantDataSet(path, sep=';')
    # Convert non-numeric temperature comments to numeric values. Default is 298K
    skempi['Temperature'] = skempi['Temperature'].str.extract(r'(\d+)')
    skempi['Temperature'] = skempi.to_numeric('Temperature')
    skempi['Temperature'].fillna(value=298, inplace=True) #6665-6668 blank ### TOGGLE ME ###
    # Calculate free energies
    dropna_lst = ['Affinity_wt_parsed','Affinity_mut_parsed'] #, 'Temperature']
    skempi.dropna(subset=dropna_lst, inplace=True)
    skempi = skempi.solve_ddG('Affinity_wt_parsed', 'Affinity_mut_parsed')
    # Median and duplicate ddG/tmp values
    group_keys = ['#Pdb', 'Mutation(s)_PDB']
    skempi['ddgMedian'] = skempi.groupby(group_keys)['ddG'].transform('median')
    #skempi['ddgMedian'] = skempi.grouped_avg(group_keys, 'ddG')
    skempi = skempi.drop_duplicates(subset=[*group_keys,'Temperature'],
                                    keep='first', inplace=False)
    # Flag multiple mutations in the same protein
    skempi['MutSplit'] = skempi['Mutation(s)_PDB'].str.split(',')
    skempi['NumMutations'] = skempi['MutSplit'].apply(len)
    # Extract Chains and remove cross chain mutations.
    skempi['CrossChain'] = skempi.find_cross_chains()
    SKEMPI_SingleSided = skempi[skempi.CrossChain == False]
    return SKEMPI_SingleSided
skempi_final = clean_Skempi('skempi_v2.0.csv')
NumProteins = skempi_final['#Pdb'].nunique()
NumMutations = skempi_final['#Pdb'].count()
print("There are %s unique single sided mutations in %s proteins" %
      (NumMutations, NumProteins))
#1a – store skempi in ~/data/intermediate
##skempi_final.to_csv('~/data/intermediate')
#1 – clean Other
#other = MutantDataSet('other.csv')
#1a – store Other in ~/data/intermediate
#2 – combine
#2a – store in ~/data/final
#skempi_final[['#Pdb', 'Mutation(s)_PDB', 'Mutation(s)_cleaned']]
#skempi_final.get(['#Pdb', 'Mutation(s)_PDB', 'Mutation(s)_cleaned'])
#skempi_final
#skempi_final['ddG']
# Attribute access returns the 'ddG' column here (displayed by the notebook).
skempi_final.ddG
```
##Alternative
An alternative is to keep the mut-class separate from the initial dataframe, then transfer only the required columns.
I changed my mind. It's better to just index a list of cols from the output.
#Final Comparison
Only difference was the recently discovered bug. All checks out!
(their index is different due to deletions from the original dataframe)
```
# Compare the three implementations (OG, v1.0, v1.1) on the same columns.
keylst = ['#Pdb',
          'Mutation(s)_PDB',
          'Mutation(s)_cleaned',
          'iMutation_Location(s)',
          'Affinity_mut_parsed',
          'Affinity_wt_parsed',
          'Reference',
          'Protein 1',
          'Protein 2',
          'Temperature',
          'dgWT',
          'dgMut',
          'ddG',
          'ddgMedian',
          'MutSplit',
          'NumMutations',
          'Prot1Chain',
          'Prot2Chain',
          'CrossChain']
path = 'skempi_v2.0.csv'
print('Print Test')
#OG
OG = SKEMPItoPandas(path).get(keylst)
#v1.0
v1 = SKEMPItoPandas1(path).get(keylst)
#v1.1Final
v2 = clean_Skempi(path).get(keylst)
NumProteins = v2['#Pdb'].nunique()
NumMutations = v2['#Pdb'].count()
print("There are %s unique single sided mutations in %s proteins\n" %
      (NumMutations, NumProteins))
print('Equivalency Test')
print('\tToggle df[tmp].fillna(298) in v1.1 to switch equivalence with OG & v1')
print('OG==v1?\n\t',OG.equals(v1), '– failure due to nan temp bug')
print('OG==v1.1?\n\t',OG.equals(v2), '– no mutKey, no nan tmp')
print('v1==v1.1?\n\t',v1.equals(v2), '– may fail as mutkey DNE in v1.1\n')
print('Test index equivalence')
print(OG.index.equals(v1.index))
print(OG.index.equals(v2.index))
print(v1.index.equals(v2.index))
print()
print('Find Differences')
# Row-by-row diff of the median ddG across the three versions.
for i in OG.index.values:
    a0, a1, a2 = OG['ddgMedian'][i], v1['ddgMedian'][i], v2['ddgMedian'][i]
    if a0!=a1 or a0!=a2: # or a1!=a2:
        print(i, a0, a1, a2)
        print('\t', OG['#Pdb'][i], v1['#Pdb'][i], v2['#Pdb'][i])
        print('\t', OG.Temperature[i], v1.Temperature[i], v2.Temperature[i])
#x=OG
#print('\t', f"-R*{x['Temperature'][i]}*np.log({x['Affinity_wt_parsed'][i]})")
#print('\t', f"-R*{x['Temperature'][i]}*np.log({x['Affinity_mut_parsed'][i]})")
# Hand-check one ddG at the default 298 K.
R = 1.9872036e-3
a = -R*298.0*np.log(4.4e-09)
b = -R*298.0*np.log(3e-11)
# Bare expression: the notebook displays a-b as the cell output.
a-b
# Per-column, per-item comparison of the original (init) vs v1.1 (compare).
init = OG
compare = v2
keys = ['#Pdb',
        'Mutation(s)_PDB',
        'Mutation(s)_cleaned',
        'iMutation_Location(s)',
        'Affinity_mut_parsed',
        'Affinity_wt_parsed',
        'Reference',
        'Protein 1',
        'Protein 2',
        'Temperature',
        'dgWT',
        'dgMut',
        'ddG',
        'ddgMedian',
        'MutSplit',
        'NumMutations',
        'Prot1Chain',
        'Prot2Chain',
        'CrossChain']
for col in keys: #list(init) #nan has weird bool behavior, ignoring bad columns
    print(col)
    try:
        # Best-effort diff: NaN compares unequal to itself and some cells hold
        # lists, so comparison errors are skipped per-column.
        for i, val1, val2 in zip(init.index, init[col], compare[col]):
            if val1 != val2:
                print(i, val1, val2, init['#Pdb'][i], compare['#Pdb'][i])
    except (KeyError, TypeError, ValueError):
        # Bug fix: was a bare `except: pass`, which also swallowed
        # KeyboardInterrupt/SystemExit and hid genuine bugs.
        pass
```
#Work
##Temp formatting
```
#initialize dataframes
SKEMPI_df = pd.read_csv('skempi_v2.0.csv', sep=';')
test = SKEMPI_df.copy()
print('following entry has nan tmp')
print(np.array(SKEMPI_df.iloc[[6665]]), '\n')
'''Origional method'''
# Convert non numeric temperature comments to numeric values. Default is 298K
ConvertTemp = lambda x: int(re.search(r'\d+', x)[0] or 298)
# NaN is not a string, so str.isnumeric yields NaN (not True) for it — the
# mask below leaves NaN rows unmapped, which is the original bug.
BadTemps = SKEMPI_df.Temperature.str.isnumeric() == False
print("nan val tests 'False'? map not applied, thus unaltered")
print(BadTemps.iloc[[6665]], '\n')
SKEMPI_df['Temperature'].loc[BadTemps] = SKEMPI_df['Temperature'].loc[BadTemps].map(ConvertTemp)
#SKEMPI_df['Temperature'] = SKEMPI_df['Temperature'].apply(ConvertTemp)
SKEMPI_df['Temperature'] = pd.to_numeric(SKEMPI_df['Temperature'], errors='coerce')
'''
New Method:
-likely a tad slower as regex is applied to all rather than binry mapping
-no error
-handled nan issue
'''
test['Temperature'] = test['Temperature'].str.extract(r'(\d+)')
test['Temperature'] = pd.to_numeric(test['Temperature'], errors='coerce')
test['Temperature'].fillna(298, inplace=True)
# NOTE(review): `og` is not defined anywhere in this cell — this line raises
# NameError; presumably `test` (or an earlier snapshot) was intended.
SKEMPI_df.equals(og)
'''both versions are equal but include nan'''
#print(SKEMPI_df['Temperature'].isnull().values.any())
#print(SKEMPI_df[SKEMPI_df['Temperature'].isnull()]) #['Temperature'])
#print(np.array(SKEMPI_df.iloc[[6665]]))
for init, new in zip(SKEMPI_df['Temperature'], test['Temperature']):
    if init!=new:
        print(init, type(init),':' ,new, type(new))
'''strange, i cant reproduce the issue'''
# Minimal repro attempt with empty-string temperatures.
df = pd.DataFrame('', index=[0,1,2,3], columns=['A']) #str(np.nan)
print('init empty data\n', df)
baddies=df["A"].str.isnumeric() == False
print('\nfind non-numeric', baddies, sep='\n')
print('convert temps', df['A'].loc[baddies].map(ConvertTemp))
print(pd.to_numeric(df["A"], errors='coerce'))
test['Temperature'][6663:6670]
```
##other
```
# Sanity check: comma count + 1 should equal the split length for every row;
# also track the longest single mutation code and where it occurs.
SKEMPI_df['NumMutations'] = SKEMPI_df['Mutation(s)_PDB'].str.count(',')+1
largest = [0,0]  # [longest code length, row index]
for i,str_lst in enumerate(SKEMPI_df['Mutation(s)_PDB']):
    lst=str_lst.split(',')
    split_len = len(lst)
    camma_len = SKEMPI_df['NumMutations'][i]
    if split_len != camma_len:
        # Mismatch would mean the comma heuristic disagrees with split().
        print(i,lst, SKEMPI_df['NumMutations'][i], SKEMPI_df['Mutation(s)_PDB'][i], str_lst)
        pass
    longest = max(len(el) for el in lst)
    if longest>largest[0]:
        largest=[longest,i]
print(largest)
print(SKEMPI_df.loc[largest[1]])
```
##why didnt this work???
The OG version may have an error. When grouping + averaging is commented out in my version, the two test as equivalent.
```
#skempi['ddG'] = skempi.groupby(group_keys)['ddG'].transform('median')
skempi = skempi.drop_duplicates(subset=['#Pdb', 'Mutation(s)_PDB', 'Temperature'], keep='first', inplace=False)
```
EDIT: I didn't realize OG's result was getting stored to ddgMedian. I was comparing against the value from before the averaging, hence commenting out my averaging step tested as equal.
**testing ddgMedian to my overwritten ddG passes. Both are correct.**
```
def ChainCheck(df):
    """Return True when a row's mutations touch chains on both sides of
    the interface, False when all are on one side."""
    if df['NumMutations'] == 1:
        # A single mutation can never span the interface.
        return False
    first_chain = df['MutSplit'][0][1]  # chain letter, e.g. 'DA33A' -> 'A'
    if first_chain in df['Prot1Chain']:
        chain_set = df['Prot1Chain']
    elif first_chain in df['Prot2Chain']:
        chain_set = df['Prot2Chain']
    else:
        # Bug fix: the original left ChainSet unbound here (NameError);
        # an unrecognised chain is safest treated as cross-chain.
        return True
    return any(mut[1] not in chain_set for mut in df['MutSplit'])
def gibbsEq(Kd, tmp):
    """Free energy of binding, ΔG = -R*T*ln(Kd), in kcal/mol."""
    gas_const_kcal = 1.9872036e-3  # ideal gas constant, kcal/(mol*K)
    return -gas_const_kcal * tmp * np.log(Kd)  # np.log is the natural log
#v1.0 function exploded
'''
Purpose:
1. Loads SKEMPI CSV file.
2. Calculates ddG
3. For multiple measurements, keeps the median value
4. Eliminates entries with mutations on both sides of the interface
Input:
SKEMPI_loc : Location of SKEMPI CSV file
Output:
SKEMPI_df : Pandas dataframe
'''
SKEMPI_df = pd.read_csv('skempi_v2.0.csv', sep=';')
# Convert non numeric temperature comments to numeric values.
# Default is 298K
SKEMPI_df['Temperature'] = SKEMPI_df['Temperature'].str.extract(r'(\d+)')
SKEMPI_df['Temperature'] = pd.to_numeric(SKEMPI_df['Temperature'],
                                         errors='coerce')
SKEMPI_df['Temperature'].fillna(298, inplace=True)
# Drop missing values
SKEMPI_df.dropna(subset=['Affinity_wt_parsed'], inplace=True)
SKEMPI_df.dropna(subset=['Affinity_mut_parsed'], inplace=True)
# Calculate free energies
SKEMPI_df['dgWT'] = gibbsEq(SKEMPI_df['Affinity_wt_parsed'],
                            SKEMPI_df['Temperature'])
SKEMPI_df['dgMut'] = gibbsEq(SKEMPI_df['Affinity_mut_parsed'],
                             SKEMPI_df['Temperature'])
SKEMPI_df['ddG'] = SKEMPI_df['dgWT']-SKEMPI_df['dgMut']
################################################################################
# initialize diplucate
# NOTE(review): this is an alias, not a copy — skempi and SKEMPI_df are the
# same object, so writes through either name are shared. Use .copy() for a
# true duplicate (the drop_duplicates calls below do return new objects).
skempi=SKEMPI_df
# OG version: median stored in a new 'ddgMedian' column.
SKEMPI_df['MutKey'] = SKEMPI_df['#Pdb']+'_'+SKEMPI_df['Mutation(s)_PDB']
SKEMPI_df['ddgMedian'] = SKEMPI_df.groupby('MutKey')['ddG'].transform('median')
SKEMPI_df = SKEMPI_df.drop_duplicates(subset=['MutKey', 'Temperature'],
                                      keep='first', inplace=False)
def grouped_avg(df, group_keys, avg_key):
    '''
    DANGEROUS! not sure if median value will be returned to correct indecies
    '''
    # transform('median') broadcasts the group median back onto each row's
    # original index.
    averaged = df.groupby(group_keys)[avg_key].transform('median')
    return averaged # returns series
#my version: median overwrites 'ddG' in place.
group_keys = ['#Pdb', 'Mutation(s)_PDB']
## explicit version
#skempi['ddG'] = skempi.groupby(group_keys)['ddG'].transform('median')
#skempi = skempi.drop_duplicates(subset=['#Pdb', 'Mutation(s)_PDB', 'Temperature'], keep='first', inplace=False)
## condensed version
skempi['ddG'] = grouped_avg(skempi, group_keys, 'ddG')
skempi = skempi.drop_duplicates(subset=[*group_keys, 'Temperature'], keep='first', inplace=False)
print('Find Differences')
for i, v in enumerate(zip(SKEMPI_df['ddgMedian'], skempi.ddG)): #'ddgMedian' #'ddG'
    if v[0] != v[1]:
        print(i, v[0], v[1])
def ChainCheck(df):
    """Return True when a record's mutations span both sides of the interface.

    Expects a row-like mapping with:
        NumMutations : int, number of point mutations in the record
        MutSplit     : sequence of mutation strings; character 1 of each
                       is the chain identifier
        Prot1Chain / Prot2Chain : chain identifiers of the two partners

    A single mutation can never cross the interface.  For multiple
    mutations, the chain of the first mutation selects the reference side;
    any later mutation on a different side marks the record cross-chain.

    Raises:
        ValueError : if the first mutation's chain matches neither partner.
            (The original code left ChainSet unbound in this case and
            crashed with an UnboundLocalError instead.)
    """
    if df['NumMutations'] == 1:
        return False
    first_chain = df['MutSplit'][0][1]
    if first_chain in df['Prot1Chain']:
        chain_set = df['Prot1Chain']
    elif first_chain in df['Prot2Chain']:
        chain_set = df['Prot2Chain']
    else:
        raise ValueError(
            'Chain %r not found in either interface partner' % first_chain)
    cross_chain = False
    for mutation in df['MutSplit']:
        if mutation[1] not in chain_set:
            cross_chain = True
            break
    return cross_chain
def gibbsEq(Kd, tmp):
    """Convert a dissociation constant to a Gibbs free energy (kcal/mol).

    dG = -R * T * ln(Kd), with R in kcal/(mol*K).  np.log is the natural
    logarithm and broadcasts, so Kd/tmp may be scalars or pandas Series.
    """
    gas_constant = 1.9872036e-3  # ideal gas constant, kcal / (mol * K)
    return -gas_constant * tmp * np.log(Kd)
#v1.0 function exploded
'''
Purpose:
1. Loads SKEMPI CSV file.
2. Calculates ddG
3. For multiple measurements, keeps the median value
4. Eliminates entries with mutations on both sides of the interface
Input:
SKEMPI_loc : Location of SKEMPI CSV file
Output:
SKEMPI_df : Pandas dataframe
'''
SKEMPI_df = pd.read_csv('skempi_v2.0.csv', sep=';')
# Convert non numeric temperature comments to numeric values.
# Default is 298K
SKEMPI_df['Temperature'] = SKEMPI_df['Temperature'].str.extract(r'(\d+)')
SKEMPI_df['Temperature'] = pd.to_numeric(SKEMPI_df['Temperature'],
errors='coerce')
SKEMPI_df['Temperature'].fillna(298, inplace=True)
# Drop missing values
SKEMPI_df.dropna(subset=['Affinity_wt_parsed'], inplace=True)
SKEMPI_df.dropna(subset=['Affinity_mut_parsed'], inplace=True)
################################################################################
# initialize duplicate working copy
skempi=SKEMPI_df
# OG version
R = 1.9872036e-3 # Ideal Gas Constant in kcal
SKEMPI_df['dgWT'] = -R*SKEMPI_df['Temperature']*np.log(SKEMPI_df['Affinity_wt_parsed'])
SKEMPI_df['dgMut'] = -R*SKEMPI_df['Temperature']*np.log(SKEMPI_df['Affinity_mut_parsed'])
SKEMPI_df['ddG'] = SKEMPI_df['dgWT']-SKEMPI_df['dgMut']
skempi['dgWT'] = gibbsEq(SKEMPI_df['Affinity_wt_parsed'],
SKEMPI_df['Temperature'])
skempi['dgMut'] = gibbsEq(SKEMPI_df['Affinity_mut_parsed'],
SKEMPI_df['Temperature'])
skempi['ddG'] = SKEMPI_df['dgWT']-SKEMPI_df['dgMut']
print('Find Differences')
for i, v in enumerate(zip(SKEMPI_df.ddG, skempi.ddG)):
if v[0] != v[1]:
print(i, v[0], v[1])
skempi = MutantDataSet('skempi_v2.0.csv', sep=';')
######### new version #######
# Convert non-numeric temperature comments to numeric values. Default is 298K
skempi['Temperature'] = skempi['Temperature'].str.extract(r'(\d+)')
skempi['Temperature'] = skempi.to_numeric('Temperature')
skempi['Temperature'].fillna(298, inplace=True) #6665-6668 blank
# Drop missing values
skempi.dropna(subset=['Affinity_wt_parsed'], inplace=True)
skempi.dropna(subset=['Affinity_mut_parsed'], inplace=True)
skempi['dgWT'] = -R*skempi['Temperature']*np.log(skempi['Affinity_wt_parsed'])
skempi['dgMut'] = -R*skempi['Temperature']*np.log(skempi['Affinity_mut_parsed'])
skempi['ddG'] = skempi['dgWT']-skempi['dgMut']
######### og version #######
og_skempi = MutantDataSet('skempi_v2.0.csv', sep=';')
# Convert non-numeric temperature comments to numeric values. Default is 298K
og_skempi['Temperature'] = og_skempi['Temperature'].str.extract(r'(\d+)')
og_skempi['Temperature'] = og_skempi.to_numeric('Temperature')
#og_skempi['Temperature'].fillna(298, inplace=True) #6665-6668 blank
# Drop missing values
og_skempi.dropna(subset=['Affinity_wt_parsed'], inplace=True)
og_skempi.dropna(subset=['Affinity_mut_parsed'], inplace=True)
og_skempi['dgWT'] = -R*og_skempi['Temperature']*np.log(og_skempi['Affinity_wt_parsed'])
og_skempi['dgMut'] = -R*og_skempi['Temperature']*np.log(og_skempi['Affinity_mut_parsed'])
og_skempi['ddG'] = og_skempi['dgWT']-og_skempi['dgMut']
print('Find Differences')
for i, v in enumerate(zip(og_skempi.ddG, skempi.ddG)):
if v[0]!=v[1]:
try:
print(i, v[0], v[1])
except:
pass
```
| github_jupyter |
```
%matplotlib inline
import os
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import unicodedata
from os import path
matplotlib.style.use('ggplot')
pylab.rcParams['figure.figsize'] = 18, 10 # that's default image size for this interactive session
experiments = [
"mfl",
"bow_logreg",
"bopos_logreg",
"pos_logreg",
"wordvec_mlp_2_0",
"wordvecpos_mlp_2_0"
]
experiments_names = [
"Baseline",
"Bag-of-Words w/Logistic Regression",
"Bag-of-PoS w/Logistic Regression",
"BoW with PoS w/Logistic Regression",
"Word Embeddings w/Multilayer Perceptron",
"Word Embeddings with PoS w/Multilayer Perceptron"
]
directory = "../resources/results/results_supervised_sensem/"
lemmas_file = "../resources/sensem/lemmas"
lemmas_amount = 215
with open(lemmas_file, "r") as f:
lemmas = unicodedata.normalize("NFC", f.read().decode("utf-8")).strip().split()
accuracies = pd.DataFrame({e: np.zeros(lemmas_amount, dtype=np.float) for e in experiments})
most_common_precision = pd.DataFrame({e: np.zeros(lemmas_amount, dtype=np.float) for e in experiments})
less_common_recall = pd.DataFrame({e: np.zeros(lemmas_amount, dtype=np.float) for e in experiments})
for lidx, lemma in enumerate(lemmas):
lidx = "{:03}".format(lidx)
if not path.isdir(path.join(directory, lidx)):
continue
for experiment in experiments:
accuracy_file = path.join(directory, lidx, experiment, "accuracy")
mcp_file = path.join(directory, lidx, experiment, "most_common_precision")
lcr_file = path.join(directory, lidx, experiment, "less_common_recall")
with open(accuracy_file, "r") as f:
accuracies[experiment][int(lidx)] = np.mean([float(acc.strip()) for acc in f.readlines()])
with open(mcp_file, "r") as f:
most_common_precision[experiment][int(lidx)] = np.mean([float(mcp.strip()) for mcp in f.readlines()])
with open(lcr_file, "r") as f:
less_common_recall[experiment][int(lidx)] = np.mean([float(lcr.strip()) for lcr in f.readlines()])
accuracies.to_csv("accuracies_sensem_supervised.csv")
most_common_precision.to_csv("mcp_sensem_supervised.csv")
less_common_recall.to_csv("lcr_sensem_supervised.csv")
accuracies_boxplot = accuracies.plot(kind='box', rot=5, patch_artist=True)
x = accuracies_boxplot.set_xticklabels(experiments_names)
x = accuracies_boxplot.set_xlabel("Experiment")
x = accuracies_boxplot.set_ylim((-0.01, 1.01))
x = accuracies_boxplot.set_ylabel("Accuracy")
x = accuracies_boxplot.set_yticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
x = accuracies_boxplot.set_yticklabels([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
```
| github_jupyter |
## Image网 Submission `128x128`
This contains a submission for the Image网 leaderboard in the `128x128` category.
In this notebook we:
1. Train on 1 pretext task:
- Train a network to do image inpainting on Image网's `/train`, `/unsup` and `/val` images.
2. Train on 4 downstream tasks:
- We load the pretext weights and train for `5` epochs.
- We load the pretext weights and train for `20` epochs.
- We load the pretext weights and train for `80` epochs.
- We load the pretext weights and train for `200` epochs.
Our leaderboard submissions are the accuracies we get on each of the downstream tasks.
```
import json
import torch
import numpy as np
from functools import partial
from fastai2.basics import *
from fastai2.vision.all import *
torch.cuda.set_device(3)
# Chosen parameters
lr=2e-2
sqrmom=0.99
mom=0.95
beta=0.
eps=1e-4
bs=64
sa=1
m = xresnet34
act_fn = Mish
pool = MaxPool
nc=20
source = untar_data(URLs.IMAGEWANG_160)
len(get_image_files(source/'unsup')), len(get_image_files(source/'train')), len(get_image_files(source/'val'))
# Use the Ranger optimizer
opt_func = partial(ranger, mom=mom, sqr_mom=sqrmom, eps=eps, beta=beta)
m_part = partial(m, c_out=nc, act_cls=torch.nn.ReLU, sa=sa, pool=pool)
model_meta[m_part] = model_meta[xresnet34]
save_name = 'imagewang_contrast_kornia_160ep'
```
## Pretext Task: Contrastive Learning
```
#export
from pytorch_metric_learning import losses
class XentLoss(losses.NTXentLoss):
    """NT-Xent loss taking two embedding batches instead of (embeddings, labels).

    The two batches are stacked along dim 0 and rows i and i + batch_size
    receive the same label, so each (output1[i], output2[i]) pair is
    treated as a positive pair by the parent loss.
    """
    def forward(self, output1, output2):
        paired = torch.cat((output1, output2), dim=0)
        pair_ids = torch.arange(output1.shape[0]).repeat(2)
        return super().forward(paired, pair_ids, None)
class ContrastCallback(Callback):
    """fastai Callback implementing SimCLR-style contrastive pretraining.

    Each batch is augmented twice (target / positive pipelines) and the two
    views are concatenated, so row i and row i + batch_size form a positive
    pair.  For the duration of the fit the learner's loss function is
    swapped for NT-Xent and metrics are disabled; both are restored in
    after_fit.
    """
    run_before = Recorder

    def __init__(self, size=256, aug_targ=None, aug_pos=None, temperature=0.1):
        # Fall back to the default augmentation pipeline when none is given.
        self.aug_targ = ifnone(aug_targ, get_aug_pipe(size))
        self.aug_pos = ifnone(aug_pos, get_aug_pipe(size))
        self.temperature = temperature

    def update_size(self, size):
        # Propagate a new crop size to both augmentation pipelines.
        pipe_update_size(self.aug_targ, size)
        pipe_update_size(self.aug_pos, size)

    def begin_fit(self):
        # Stash the learner's loss/metrics so after_fit can restore them.
        self.old_lf = self.learn.loss_func
        self.old_met = self.learn.metrics
        self.learn.metrics = []
        self.learn.loss_func = losses.NTXentLoss(self.temperature)

    def after_fit(self):
        # BUG FIX: the original assigned to `self.learn.loss_fun` (typo),
        # so the learner's original loss function was never restored.
        self.learn.loss_func = self.old_lf
        self.learn.metrics = self.old_met

    def begin_batch(self):
        xb, = self.learn.xb
        xb_targ = self.aug_targ(xb)
        xb_pos = self.aug_pos(xb)
        # Stack the two augmented views; labels pair row i with
        # row i + batch_size (trailing commas keep xb/yb as tuples).
        self.learn.xb = torch.cat((xb_targ, xb_pos), dim=0),
        self.learn.yb = torch.arange(xb_targ.shape[0]).repeat(2),
#export
def pipe_update_size(pipe, size):
    """Set the output size on every RandomResizedCropGPU transform in *pipe*."""
    crops = (t for t in pipe.fs if isinstance(t, RandomResizedCropGPU))
    for crop in crops:
        crop.size = size
def get_dbunch(size, bs, workers=8, dogs_only=False):
    """Build Image网 DataLoaders for the contrastive pretext task.

    Picks the 160px archive when the crop size allows it, applies a light
    RandomResizedCrop per item, and uses a random 10% validation split
    (labels are unused by the contrastive callback).
    """
    url = URLs.IMAGEWANG_160 if size <= 160 else URLs.IMAGEWANG
    root = untar_data(url)
    folders = ['unsup', 'val'] if dogs_only else None
    files = get_image_files(root, folders=folders)
    tfms = [
        [PILImage.create, ToTensor, RandomResizedCrop(size, min_scale=0.9)],
        [parent_label, Categorize()],
    ]
    dsets = Datasets(files, tfms=tfms, splits=RandomSplitter(valid_pct=0.1)(files))
    # Augmentation happens in the ContrastCallback, so only float conversion here.
    dls = dsets.dataloaders(bs=bs, num_workers=workers, after_batch=[IntToFloatTensor])
    dls.path = root
    return dls
size = 128
bs = 256
dbunch = get_dbunch(160, bs)
len(dbunch.train.dataset)
dbunch.show_batch()
# # xb = TensorImage(torch.randn(1, 3,128,128))
# afn_tfm, lght_tfm = aug_transforms(p_lighting=1.0, max_lighting=0.8, p_affine=1.0)
# # lght_tfm.split_idx = None
# xb.allclose(afn_tfm(xb)), xb.allclose(lght_tfm(xb, split_idx=0))
import kornia
#export
def get_aug_pipe(size, stats=None, s=.7):
    """Build a kornia augmentation Pipeline for contrastive views.

    Sequence: random resized crop to (size, size), random horizontal flip,
    colour jitter with strength *s*, then normalisation with *stats*
    (imagenet stats by default).  split_idx=0 restricts it to training.
    """
    stats = ifnone(stats, imagenet_stats)
    tfms = [
        kornia.augmentation.RandomResizedCrop((size, size), scale=(0.2, 0.9), ratio=(3 / 4, 4 / 3)),
        kornia.augmentation.RandomHorizontalFlip(),
        kornia.augmentation.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s),
        Normalize.from_stats(*stats),
    ]
    pipe = Pipeline(tfms)
    pipe.split_idx = 0  # apply on the training set only
    return pipe
aug = get_aug_pipe(size)
aug2 = get_aug_pipe(size)
cbs = ContrastCallback(size=size, aug_targ=aug, aug_pos=aug2, temperature=0.1)
xb,yb = dbunch.one_batch()
nrm = Normalize.from_stats(*imagenet_stats)
xb_dec = nrm.decodes(aug(xb))
show_images([xb_dec[0], xb[0]])
ch = nn.Sequential(nn.AdaptiveAvgPool2d(1), Flatten(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, 128))
learn = cnn_learner(dbunch, m_part, opt_func=opt_func,
metrics=[], loss_func=CrossEntropyLossFlat(), cbs=cbs, pretrained=False,
config={'custom_head':ch}
).to_fp16()
learn.unfreeze()
learn.fit_flat_cos(160, 5e-2, wd=1e-2, pct_start=0.5)
torch.save(learn.model[0].state_dict(), f'{save_name}.pth')
# learn.save(save_name)
```
## Downstream Task: Image Classification
```
def get_dbunch(size, bs, workers=8, dogs_only=False):
    """Build Image网 DataLoaders for the downstream classification task.

    With dogs_only=True, training is restricted to the dog categories that
    appear under /val; otherwise the standard train/val grandparent split
    is used.
    """
    url = URLs.IMAGEWANG_160 if size <= 160 else URLs.IMAGEWANG
    root = untar_data(url)

    if dogs_only:
        dog_categories = [f.name for f in (root/'val').ls()]
        train_files = get_image_files(root/'train', folders=dog_categories)
        valid_files = get_image_files(root/'val')
        files = train_files + valid_files
        n_train = len(train_files)
        splits = [range(n_train), range(n_train, n_train + len(valid_files))]
    else:
        files = get_image_files(root)
        splits = GrandparentSplitter(valid_name='val')(files)

    item_aug = [RandomResizedCrop(size, min_scale=0.35), FlipItem(0.5)]
    tfms = [[PILImage.create, ToTensor, *item_aug],
            [parent_label, Categorize()]]
    dsets = Datasets(files, tfms=tfms, splits=splits)

    batch_tfms = [IntToFloatTensor, Normalize.from_stats(*imagenet_stats)]
    dls = dsets.dataloaders(bs=bs, num_workers=workers, after_batch=batch_tfms)
    dls.path = root
    return dls
def do_train(size=128, bs=64, lr=1e-2, epochs=5, runs=5, dogs_only=False, save_name=None):
    """Train the downstream classifier *runs* times on the same dataloaders.

    When *save_name* is given, the network body (model[0]) is initialised
    from the pretext weights saved under '{save_name}.pth'; the custom
    head is always freshly initialised.
    """
    dbunch = get_dbunch(size, bs, dogs_only=dogs_only)
    for run in range(runs):
        print(f'Run: {run}')
        head = nn.Sequential(nn.AdaptiveAvgPool2d(1), Flatten(), nn.Linear(512, 20))
        learn = cnn_learner(dbunch, m_part, opt_func=opt_func, normalize=False,
                            metrics=[accuracy, top_k_accuracy],
                            loss_func=LabelSmoothingCrossEntropy(),
                            pretrained=False,
                            config={'custom_head': head})
        if save_name is not None:
            # Load pretext-task weights into the body only.
            learn.model[0].load_state_dict(torch.load(f'{save_name}.pth'))
        learn.unfreeze()
        learn.fit_flat_cos(epochs, lr, wd=1e-2)
```
### 5 Epochs
```
epochs = 5
runs = 5
do_train(epochs=epochs, runs=runs, lr=2e-2, dogs_only=False, save_name=save_name)
```
### 20 Epochs
```
# LATEST
do_train(epochs=epochs, runs=runs, lr=3e-2, dogs_only=False, save_name=save_name)
# LATEST
do_train(epochs=epochs, runs=runs, lr=2e-2, dogs_only=False, save_name=save_name)
epochs = 20
runs = 1
# LATEST
do_train(epochs=epochs, runs=runs, lr=1e-2, dogs_only=False, save_name=save_name)
```
## 80 epochs
```
epochs = 80
runs = 1
do_train(epochs=epochs, runs=runs, lr=2e-2, dogs_only=False, save_name=save_name)
do_train(epochs=epochs, runs=runs, dogs_only=False, save_name=save_name)
```
Accuracy: **62.18%**
### 200 epochs
```
epochs = 200
runs = 1
do_train(epochs=epochs, runs=runs, dogs_only=False, save_name=save_name)
```
Accuracy: **62.03%**
| github_jupyter |
# Environment: Python 3.5
```
from ast import literal_eval
from os import listdir
from os.path import isfile, join
from scipy.sparse import csr_matrix, load_npz, save_npz
from tqdm import tqdm
from sklearn.preprocessing import normalize
import seaborn as sns
import datetime
import json
import numpy as np
import pandas as pd
import time
import yaml
import scipy.sparse as sparse
from ast import literal_eval
# For Python2 this have to be done
# from __future__ import division
import gzip
import string
import itertools
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import sent_tokenize
from keras.preprocessing.text import Tokenizer
ITEM_ID = 'business_id'
USER_ID = 'user_id'
RATING = 'rating'
BINARY_RATING = 'Binary'
REVIEW_TEXT = 'review_text'
TIMESTAMP = 'review_date'
# TIMESTAMP = 'timestamp'
DATA_PATH = '../../data/yelp/'
DATA_NAME = 'toronto_reviews.csv'
# Load Original Data
dataset_name = 'yelp'
df = pd.read_csv('../../data/'+ dataset_name +'/toronto_reviews.csv')
# df_train = pd.read_csv('../../data/'+ dataset_name +'/Train.csv')
# df_valid = pd.read_csv('../../data/'+ dataset_name +'/Valid.csv')
# df_test = pd.read_csv('../../data/'+ dataset_name +'/Test.csv')
# key_phrase = pd.read_csv('../data/'+ dataset_name +'/KeyPhrases.csv')
df.head(2)
```
### Sushi on bloor
```
np.where(df.business_id == '6n_MDeYxU1ihB38be9TkVA')
```
### Spicy Mafia
```
np.where(df.business_id == 'l_uAw0K2lkOsyVJATcnwsA')
df.loc[539160]
df['review_text'][539159]
```
# Filter
```
df[ITEM_ID].nunique()
df[USER_ID].nunique()
len(df)
def filter_dataset(df, threshold=3, popularity=True, filter_by_review_count=True,
                   user_review_threshold=10, item_review_threshold=10,
                   num_user=None, num_item=None, user_ratio=0.25, item_ratio=0.2, postive_negative = 1):
    """Binarize ratings and filter the review dataframe down to a denser subset.

    Filtering strategy (selected by the flags):
      * popularity and filter_by_review_count: keep users/items with at least
        `user_review_threshold` / `item_review_threshold` reviews.
      * popularity only: keep the first `num_item` items and `num_user` users
        in review-count order (value_counts sorts most-frequent first).
      * neither: random sample of `item_ratio` items / `user_ratio` users.
        NOTE(review): np.random.choice defaults to replace=True here, so the
        sampled index arrays can contain duplicates — confirm if intended.

    NOTE(review): mutates the caller's dataframe by adding the BINARY_RATING
    column before taking a filtered copy.  `postive_negative` (sic) selects
    which binary class to keep (1 = positive reviews).
    """
    # Binarize rating: 1 when rating exceeds `threshold`, else 0
    df[BINARY_RATING] = (df[RATING] > threshold)*1
    # Filter dataset only based on positive ratings
    df = df[df[BINARY_RATING] == postive_negative]
    print("The total number of users is {}".format(df[USER_ID].nunique()))
    print("The total number of items is {} \n".format(df[ITEM_ID].nunique()))
    # Per-item review counts, most-reviewed first
    values = df[ITEM_ID].value_counts().keys().tolist()
    counts = df[ITEM_ID].value_counts().tolist()
    item_df = pd.DataFrame.from_dict({ITEM_ID: values, "count": counts})
    # Per-user review counts, most-active first
    values = df[USER_ID].value_counts().keys().tolist()
    counts = df[USER_ID].value_counts().tolist()
    user_df = pd.DataFrame.from_dict({USER_ID: values, "count": counts})
    if popularity:
        print("Filter dataset by popularity. \n")
        if filter_by_review_count:
            print("Filter dataset by review count. \n")
            # Keep items/users meeting the minimum review-count thresholds
            filtered_item_df = item_df[item_df["count"] >= item_review_threshold]
            filtered_item_id = filtered_item_df[ITEM_ID].values
            filtered_user_df = user_df[user_df["count"] >= user_review_threshold]
            filtered_user_id = filtered_user_df[USER_ID].values
        else:
            print("Filter dataset by user and item number. \n")
            # Rows are sorted by count, so this keeps the top-N most popular
            filtered_item_id = item_df[ITEM_ID].unique()[:num_item]
            filtered_user_id = user_df[USER_ID].unique()[:num_user]
    else:
        print("Filter dataset by sampling. \n")
        np.random.seed(8292)  # fixed seed for reproducible sampling
        filtered_item_id = np.take(item_df[ITEM_ID].unique(),
                                   indices=np.random.choice(len(item_df), int(item_ratio*len(item_df))))
        filtered_user_id = np.take(user_df[USER_ID].unique(),
                                   indices=np.random.choice(len(user_df), int(user_ratio*len(user_df))))
    # Keep reviews whose user AND item both survived the filters
    df = df.loc[(df[USER_ID].isin(filtered_user_id)) & (df[ITEM_ID].isin(filtered_item_id))]
    # df = df.loc[(df[ITEM_ID].isin(filtered_item_id))]
    print("Number of User: {}".format(df[USER_ID].nunique()))
    print("Number of Item: {}".format(df[ITEM_ID].nunique()))
    return df
```
# Filter the dataset by popularity then by number of users and items
```
filtered_df = filter_dataset(df, threshold=3, popularity=True, filter_by_review_count=True,
user_review_threshold=5, item_review_threshold=5,
num_user=15000, num_item=1000, user_ratio=None, item_ratio=None, postive_negative = 1)
len(filtered_df)
```
# Analyze filtered dataset
```
filtered_df = filtered_df.reset_index(drop = True)
values = filtered_df[USER_ID].value_counts().keys().tolist()
counts = filtered_df[USER_ID].value_counts().tolist()
user_df = pd.DataFrame.from_dict({USER_ID: values, "count": counts})
values = filtered_df[ITEM_ID].value_counts().keys().tolist()
counts = filtered_df[ITEM_ID].value_counts().tolist()
item_df = pd.DataFrame.from_dict({ITEM_ID: values, "count": counts})
item_df.tail()
item_df["count"].mean()
user_df[user_df["count"] >= 20]
users = user_df[user_df["count"] >= 20][USER_ID].values
filtered_df = filtered_df.loc[filtered_df[USER_ID].isin(users)].reset_index(drop=True)
# pos_df
# Export the filtered dataset
# pos_df.to_csv(DATA_PATH+DATA_NAME+".csv", header=False)
items = filtered_df[ITEM_ID].values
```
# Get the Final DF with reviews
```
cur_df = df.loc[(df[USER_ID].isin(users)) & (df[ITEM_ID].isin(items))].reset_index(drop=True)
# cur_df
cur_df[USER_ID].nunique()
cur_df[ITEM_ID].nunique()
```
# Split dataset
```
# No split is done for now
```
# Pre-process Reviews
```
def preprocess(text):
    """Normalize one raw review into a list of cleaned, lemmatized tokens.

    Pipeline: punctuation/separator cleanup -> tokenize -> lowercase ->
    strip punctuation -> keep alphabetic tokens -> drop stopwords (keeping
    'off' so 'off white' survives) -> lemmatize -> domain-specific spelling
    and synonym replacements -> re-tokenize and repeat the cleanup so the
    replacement strings themselves get normalized.

    The replacements operate on a space-padded joined string so whole-word
    patterns like ' biscuit ' do not match inside other words; their order
    matters (e.g. 'off white' -> 'offwhite' must run after 'off' is spared
    from the stopword list).
    """
    # text = text.replace('.',' ').replace('/',' ').replace('quot;', ' ').replace('amp;', '').replace('-', ' ')
    # Replace sentence/word separators with spaces before tokenizing
    text = text.replace('.',' ').replace('/t',' ').replace('\t',' ').replace('/',' ').replace('-',' ')
    # Tokenize
    text = nltk.word_tokenize(text)
    # Lowercase
    text = [w.lower() for w in text]
    # Remove Punctuation
    table = str.maketrans('', '', string.punctuation)
    text = [w.translate(table) for w in text]
    # Remove tokens that are not alphabetic
    text = [w for w in text if w.isalpha()]
    # Remove Stopwords
    # Get english stopwords
    en_stopwords = set(stopwords.words('english'))
    en_stopwords.remove('off')  # keep 'off' so phrases like 'off white' survive
    text = [w for w in text if w not in en_stopwords]
    # Lemmatizing
    lemmatizer = WordNetLemmatizer()
    text = [lemmatizer.lemmatize(w) for w in text]
    # Re-join with padding spaces so the substring replacements below
    # match whole words only
    text = " " + " ".join(str(x) for x in text) + " "
    text = text.replace('whitish', 'white')
    text = text.replace('bisquity', ' biscuit ')
    text = text.replace('carmel', ' caramel ')
    text = text.replace('flower', ' floral ')
    text = text.replace('piny', ' pine ')
    text = text.replace('off white', 'offwhite')
    text = text.replace('goden', 'gold')
    text = text.replace('yello', 'yellow')
    text = text.replace('reddish', ' red ')
    text = text.replace('favorite', 'favourite ')
    # Reset to token: second cleanup pass normalizes the replacement strings
    text = nltk.word_tokenize(text)
    table = str.maketrans('', '', string.punctuation)
    text = [w.translate(table) for w in text]
    text = [w for w in text if w.isalpha()]
    # en_stopwords = set(stopwords.words('english'))
    text = [w for w in text if w not in en_stopwords]
    lemmatizer = WordNetLemmatizer()
    text = [lemmatizer.lemmatize(w) for w in text]
    return text
cur_df["review"] = cur_df[REVIEW_TEXT].apply(preprocess)
cur_df["conca_review"] = cur_df["review"].apply(lambda x: " " + " ".join(str(x) for x in x) + " ")
cur_df['review_text'][10]
```
cur_df.to_pickle('../../data/yelp/cur_df.pkl')
```
# Load Data
cur_df = pd.read_csv('../../data/yelp/Data.csv', index_col=0, encoding='latin-1')
```
# Index words since no further changes will be made on the words
```
tokenizer = Tokenizer()
token_list = cur_df["review"].tolist()
tokenizer.fit_on_texts(token_list)
df_word_index = pd.DataFrame(list(tokenizer.word_index.items()), columns=['word','index'])
from nltk.probability import FreqDist
fdist1 = FreqDist(token_flatten_list)
# Keyphrases are chosen manually from top-1000
fdist1.most_common(250)
category_key = ['chinese', 'fast', 'thai', 'bar', 'fry', 'fried', 'dessert', 'dinner', 'lunch', 'soup',
'mexico', 'italian','mexican','vietnamese','buffet','takeout','casual','pub','bakery','indian','classic',
'modern','french','asian','birthday', 'vegetarian', 'downtown', 'bbq','japanese','breakfast','seafood',
'brunch']
food_key = ['taco', 'curry', 'potato', 'crispy', 'shrimp', 'bread', 'chocolate', 'ramen', 'pizza', 'beer', 'sandwich', 'cake',
'sushi', 'egg', 'fish', 'coffee', 'burger', 'cheese', 'salad', 'pork', 'beef', 'tea', 'noodle',
'meat', 'chicken', 'dim sum', 'cocktail', 'ice cream','squid','tempura','tapioca','donut','olive',
'espresso','octopus','croissant','banana','cookie','honey','cone','scallop','congee',
'skewer','miso','lettuce','pop','strawberry','apple','avocado','juice','booth','calamari','kimchi','patty',
'sesame','tart','four','crepe','tuna','wrap','lemon','vegan','coconut','corn','poutine','toast','belly','bubble',
'oyster','cocktail', 'cheesecake', 'fruit', 'sausage','latte','matcha','pancake','duck','tofu','sashimi',
'lamb','mango','bacon','tomato','lobster','wine','rib','waffle','bun','wing','dumpling','bean','steak','salmon',
'pasta','milk','fried chicken','milk tea','green tea','bubble tea','pork belly','spring roll','fried rice',
'pork bone soup']
seasoning=['sugar','oil','soy','leaf','spice','butter','ginger','pepper','peanut','garlic']
infrastruture_key = [ 'parking', 'store','shopping','nail','theatre','movie','washroom',
'window','station','chair', 'markham','plaza','market', 'mall']
# or we call this comment
service_key = ['quick', 'clean', 'busy', 'fresh', 'friendly','convenient','refill','soggy','greeted','bright','crowded','overpriced',
'cheaper','immediately','dog','quiet','efficient','spacious','pleasant','fair','complaint','disappointing','fancy',
'comfortable', 'dark','cozy','helpful','tax','nicely','honestly', 'pricey','yummy','music','chip','attentive',
'reasonable','wait']
taste_key = ['traditional', 'spicy','flavorful','fluffy','smooth','frozen','sweetness','mayo','gravy','healthy','rare',
'refreshing','crunchy','chili','crust','stick','steamed','greasy','dip','gelato','salt','stuffed','topped','smoked',
'roasted','seasoned','chewy','pot','solid','sour', 'baked', 'juicy','creamy','deep fried']
bigram_key = ['ice cream', 'come back', 'go back', 'fried chicken', 'deep fried', 'milk tea', 'green tea', 'bubble tea',
'pork belly', 'pad thai', 'spring roll']
from_pmi = ['lactose intolerant', 'dietary restriction', 'gong cha', 'general tao', 'wild boar', 'financial district',
'pale ale', 'public transit', 'balsamic vinegar', 'uber eats', 'alcoholic beverage', 'grand opening', 'north york',
'english muffin', 'accept debit']
# food_quality_pos = []
# food_quality_neg = []
food_quality = ['good dessert','try dessert','dessert','good texture','flavour texture', 'good meat','quality meat',]
service = ['fast service','service excellent','friendly service', 'attentive service', 'excellent service','great service',
'amazing service', 'great customer service', 'fast service', 'good service', 'impressed service',
'busy service', 'slow service', 'bad service', 'disappointed service', 'poor service', 'terrible service',
'violating term service' ]
price = ['good price', 'great price', 'regular price', 'reasonable price','decent price', 'cheap'
,'high price','pricy', 'expensive']
ambiance = ['good vibe','ambiance', ]
location = []
other = ['quick lunch',]
key = category_key + food_key + infrastruture_key + service_key + taste_key + from_pmi # + pos_key + neg_key
len(category_key) + len(food_key) + len(infrastruture_key) + len(service_key) + len(taste_key) + len(from_pmi) # + len(neg_key) + len(pos_key)
```
# Define Bigram Right Type (ADJ/NN)
```
#function to filter for ADJ/NN bigrams
def filter_type(ngram):
    """Return True when *ngram*'s POS pattern is adjective(+preposition)+noun.

    Accepted patterns (by n-gram length):
        2 words: JJ* NN*
        3 words: JJ* IN/TO NN*
        4 words: JJ* IN/TO JJ* NN*
    Rejects n-grams containing '-pron-' or the bare token 't', whitespace
    words, and any other length.

    BUG FIX: the original fell through to the 4-word branch for every
    length other than 2 or 3 and indexed tags[3], raising IndexError on
    1-grams; it also silently matched only the first four tags of longer
    n-grams.  Both now return False explicitly.
    """
    if '-pron-' in ngram or 't' in ngram:
        return False
    for word in ngram:
        if word.isspace():
            return False
    acceptable_types = ('JJ', 'JJR', 'JJS')
    ins = ('IN', 'TO')
    second_type = ('NN', 'NNS', 'NNP', 'NNPS')
    pos = [tag for _, tag in nltk.pos_tag(ngram)]
    if len(pos) == 2:
        return pos[0] in acceptable_types and pos[1] in second_type
    if len(pos) == 3:
        return pos[0] in acceptable_types and pos[1] in ins and pos[2] in second_type
    if len(pos) == 4:
        return (pos[0] in acceptable_types and pos[1] in ins
                and pos[2] in acceptable_types and pos[3] in second_type)
    # Any other length cannot match an ADJ/NN pattern.
    return False
```
# Bi-gram
```
bigrams = nltk.collocations.BigramAssocMeasures()
tokens = itertools.chain.from_iterable(token_list)
bigramFinder = nltk.collocations.BigramCollocationFinder.from_words(tokens)
bigramFinder.apply_freq_filter(100)
bigram_freq = bigramFinder.ngram_fd.items()
bigramFreqTable = pd.DataFrame(list(bigram_freq),
columns=['ngram','freq']).sort_values(by='freq', ascending=False)
neg_key = ['bad place',
'okay nothing',
'decent place', 'ok nothing',
'decent food', 'second chance',
'terrible service',
'mediocre food', 'decent service',
'eye contact', 'sub par',
'slow service', 'high hope',
'dry side', 'bit bland',
'separate bill', 'high price',
'empty table', 'poor service',
'room temperature', 'little bland',
'good dish', 'bad taste',
'averag price', 'asian legend',
'quick meal', 'good overall',
'bad service', 'salty side',
'high side', 'swiss chalet',
'plus side',
'extra star', 'wow factor',
'long wait time', 'bad day',
'dim sum place', 'bit pricey',
'instant noodle', 'chicken piece',
'good location', 'small portion',
'beef noodle', 'good place',
'much sauce', 'decent portion',
'good nothing', 'deer garden',
'la carnita']
bigramFreqTable[bigramFreqTable["ngram"].str.contains('carnita', regex=False)][:250]['ngram'].values
for i in bigramFreqTable[:100]['ngram']:
print (i)
bigram_key = ['ice cream', 'come back', 'go back', 'fried chicken', 'deep fried', 'milk tea', 'green tea', 'bubble tea',
'pork belly', 'pad thai', 'spring roll', 'fried rice']
bigramFreqTable[bigramFreqTable["ngram"].str.contains('dim', regex=False)][:250]['ngram'].values
bigramFreqTable[bigramFreqTable["ngram"] == ("dim", "sum")]
```
# Bi-gram PMI
```
bigramFinder.apply_freq_filter(100)
bigramPMITable = pd.DataFrame(list(bigramFinder.score_ngrams(bigrams.pmi)),
columns=['bigram','PMI']).sort_values(by='PMI', ascending=False)
bigramPMITable
bigramPMITable = bigramPMITable[bigramPMITable.bigram.map(lambda x: filter_type(x))]
bigramPMITable
bigram_freq_pmi = pd.merge(bigramFreqTable, bigramPMITable, how='right', left_on='ngram', right_on='bigram').sort_values("PMI", ascending=False)
bigram_freq_pmi.head(50)
```
# Trigram
```
trigrams = nltk.collocations.TrigramAssocMeasures()
tokens = itertools.chain.from_iterable(token_list)
trigramFinder = nltk.collocations.TrigramCollocationFinder.from_words(tokens)
trigram_freq = trigramFinder.ngram_fd.items()
trigramFreqTable = pd.DataFrame(list(trigram_freq),
columns=['ngram','freq']).sort_values(by='freq', ascending=False)
trigramFreqTable = trigramFreqTable[trigramFreqTable["freq"] >= 10]
trigramFreqTable
trigram_key = ['pork bone soup' ]
```
# Keyphrases
```
df_keyphrases = pd.DataFrame.from_items([("Phrases", key)])
keyphrases = df_keyphrases['Phrases'].tolist()
df_keyphrases
from nltk.util import ngrams
def return_keyphrase_index(text):
    """Return indices (into the module-level ``keyphrases`` list) of every
    keyphrase that occurs as a substring of *text*.

    Uses enumerate instead of ``keyphrases.index(key)``, which performed a
    linear scan per matched key (O(k^2) over the keyphrase list).  Result
    is identical as long as ``keyphrases`` has no duplicate entries.
    """
    return [i for i, key in enumerate(keyphrases) if key in text]
cur_df["keyVector"] = cur_df["conca_review"].apply(return_keyphrase_index)
cur_df['keyphrases_indices_length'] = cur_df['keyVector'].str.len()
cur_df.head()
cur_df['keyVector']
print(cur_df['keyphrases_indices_length'].sum())
print(len(cur_df[cur_df['keyphrases_indices_length'] > 0])/len(cur_df))
print(cur_df['keyphrases_indices_length'].sum() / len(df))
print(cur_df['keyphrases_indices_length'].sum())
print(len(cur_df[cur_df['keyphrases_indices_length'] > 0])/len(cur_df))
print(cur_df['keyphrases_indices_length'].sum() / len(df))
cur_df['keyphrases_indices_length'].mean()
import matplotlib.pyplot as plt
%matplotlib inline
cur_df.hist(column='keyphrases_indices_length')
key_vector_index_list = [item for sublist in cur_df["keyVector"].values for item in sublist]
unique, counts = np.unique(key_vector_index_list, return_counts=True)
[(x, count) for count, x in sorted(zip(counts,keyphrases), key=lambda pair: pair[0], reverse=True)]
cur_df['UserIndex'] = cur_df[USER_ID].astype('category').cat.rename_categories(range(0, cur_df[USER_ID].nunique()))
cur_df['ItemIndex'] = cur_df[ITEM_ID].astype('category').cat.rename_categories(range(0, cur_df[ITEM_ID].nunique()))
cur_df['Binary'] = (cur_df[RATING] > 4)*1
df_user_name = cur_df[['UserIndex',USER_ID]]
df_item_name = cur_df[['ItemIndex',ITEM_ID]]
total_review = len(cur_df)
print("Number of User: {0}".format(df_user_name['UserIndex'].nunique()))
print("Number of Item: {0}".format(df_item_name['ItemIndex'].nunique()))
print("Number of Positive Review: {0}".format(sum(cur_df['Binary'].tolist())))
print("Number of key Phrase: {0}".format(len(df_keyphrases)))
print("Number of reviews: {0}".format(total_review))
print ('positive/all review ratio is around: ', round(36955/157038*100), "%")
cur_df['rating'].hist()
```
# Save processed data
```
dataPath = '../../data/yelp/'
cur_df.to_csv(dataPath+'Data.csv')
df_word_index.to_csv(dataPath+'WordIndex.csv')
df_keyphrases.to_csv(dataPath+'KeyPhrases.csv')
df_user_name.to_csv(dataPath+'UserIndex.csv')
df_item_name.to_csv(dataPath+'ItemIndex.csv')
cur_df = pd.read_csv('../../data/yelp/Data.csv', index_col=0, encoding='latin-1')
len(cur_df)
```
# Data Sparsity
```
len(cur_df)
cur_df[BINARY_RATING] = (cur_df[RATING] >= 4)*1
len(cur_df[cur_df[BINARY_RATING] == 1])
cur_df['UserIndex'].nunique()
cur_df['ItemIndex'].nunique()
36955/(2343*7456)
print("Number of Positive Review: {0}".format(sum(cur_df[BINARY_RATING].tolist())))
coverage_ratio = 1-len(cur_df[cur_df['keyphrases_indices_length'] == 0])/len(cur_df)
print('The current keyphrase set covers {}% reviews'.format(coverage_ratio * 100))
print(cur_df['keyphrases_indices_length'].mean())
print('A review can have at most {} keyphrases in the current keyphrase set'.format(cur_df['keyphrases_indices_length'].max()))
import matplotlib.pyplot as plt
%matplotlib inline
fig1 = plt.figure(figsize=(4, 4), dpi=100)
ax1 = fig1.add_subplot(111)
cur_df.hist(column='keyphrases_indices_length', ax=ax1)
ax1.set_title("Keyphrase converage for yelp Toronto")
plt.xlabel('Number of keyphrases in each review')
plt.ylabel('Number of reviews')
# legend = plt.legend(loc='upper right', shadow=True,prop={'size':10})
plt.tight_layout()
plt.savefig('../figs/keyphrase converage for yelp Toronto _334keyphrase')
```
| github_jupyter |
```
% matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
import pandas
import torch, torch.utils.data, torchvision
import PIL
import os.path
import time
# import skimage, skimage.io
import time
import copy
from my_utils import *
MEAN = [0.485, 0.456, 0.406] # expected by pretrained resnet18
STD = [0.229, 0.224, 0.225] # expected by pretrained resnet18
# load the data and add a column for occurrences
df = pandas.read_csv('./data/train.csv')
grouped = df.groupby('Id')
df['occurrences'] = grouped.Id.transform('count')
# define transformations with data augmentation.
# These are the same transformations that were used for the toy_model
transforms_augm = torchvision.transforms.Compose([
torchvision.transforms.RandomRotation(degrees=20.0), # Data augmentation
torchvision.transforms.RandomGrayscale(), # Data augmentation
# torchvision.transforms.Resize((224,int(224.0*16.0/9.0))),
torchvision.transforms.Resize((224,224)),
torchvision.transforms.ToTensor(), # Expected by pretrained neural network
torchvision.transforms.Normalize(MEAN, STD) # Expected by pretrained neural network
])
# Load the full dataset. Using a random subset as the validation set is difficult because
# many of the categories appear only a single time. I could try to use cross-validation instead,
# but for the moment I am just going to ignore the validation set.
full_data = WhaleDataset(df,'./data/',transform=transforms_augm)
full_dataloader = {'train': torch.utils.data.DataLoader(full_data,\
batch_size=256,\
num_workers=4,\
shuffle=True,\
sampler=None)}
# load the full_model
full_model = torch.load('full_model.pt')
# define the device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
full_model.to(device);
# Choose the loss function (criterion) and optimizer.
# I make the same choice as for the toy_model.
criterion = torch.nn.CrossEntropyLoss()
# Note I am only passing the parameters from the final layer to the optimizer.
# Chances are that only the final layer is optimized, but there is some extra
# overhead because I did not declare that the other layers don't need gradients.
full_optimizer = torch.optim.Adam(full_model.fc.parameters(),lr=0.001)
# train the full model
full_model, loss_vals, acc_vals = train_with_restart(
full_model,full_dataloader,criterion,full_optimizer,device,\
use_val=False,num_epochs=30,T_max=15)
torch.save(full_model,'full_model_all_layers_retrained_gpu.pt')
plt.figure()
plt.plot(range(1,31),loss_vals['train'],'-k')
plt.xlabel('Epoch')
plt.ylabel('Training Loss')
# Let's reload the full model and train only the last layer
full_model_fc = torch.load('full_model.pt')
# freeze all layers except the last layer
for param in full_model_fc.parameters():
param.requires_grad = False
for param in full_model_fc.fc.parameters():
param.requires_grad = True
# define the device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
full_model_fc.to(device);
# Choose the loss function (criterion) and optimizer.
# I make the same choice as for the toy_model.
criterion = torch.nn.CrossEntropyLoss()
fc_optimizer = torch.optim.Adam(full_model_fc.fc.parameters(),lr=0.001)
# train the full model
full_model_fc, loss_vals_2, acc_vals_2 = train_with_restart(
full_model_fc,full_dataloader,criterion,fc_optimizer,device,\
use_val=False,num_epochs=30,T_max=15)
torch.save(full_model,'full_model_gpu.pt')
plt.figure()
plt.plot(range(1,31),loss_vals['train'],'-k')
plt.xlabel('Epoch')
plt.ylabel('Training Loss')
```
| github_jupyter |
```
clean_up=True # removes gams-related files in work-folder if true
%run StdPackages.ipynb
os.chdir(py['main'])
import global_settings,ReadData,ShockFunction,Production,Household,GE,Invest,Trade,Government,diagnostics
from DataBase_wheels import small_updates
from gmspython import gmspython_i
os.chdir(curr)
data_folder = os.getcwd()+'\\Data\\IO'
gams_folder = os.getcwd()+'\\gamsmodels\\GE'
```
# Set up DCGE model from saved model components
*The current general equilibrium model is a small open economy that features exogenous long run interest-, inflation-, and growth rates. These settings are defined in the global settings:*
In *Example1.ipynb* the different modules of the model were collected and calibrated to a partial equilibrium scenario. Here, we draw on these modules to set up an integrated model. Note that this allows us to flexibly add/remove certain modules, and re-run the model. This can be relevant in terms of investigating policy scenarios, but also for debugging purposes and to help with numerical complications.
The setup for the model is outlined in *Example1.ipynb*. Here we focus on loading the modules and running the integrated model (in general equilibrium). One difference is the general equilibrium module; the conditions, exogenous/endogenous variables needed here draws on the other modules.
## **1: Load modules**
Load modules:
```
modules = {'p': Production.pr_dynamic(pickle_path=gams_folder+'\\gmspython_p'),
'HH': Household.hh_dynamic(pickle_path=gams_folder+'\\gmspython_HH'),
'inv': Invest.inv_dynamic(pickle_path=gams_folder+'\\gmspython_inv'),
'itory': Invest.itoryD(pickle_path=gams_folder+'\\gmspython_itory'),
'trade': Trade.trade_dynamic(pickle_path=gams_folder+'\\gmspython_trade'),
'G': Government.g_dynamic(pickle_path=gams_folder+'\\gmspython_G')}
```
Load data:
```
GE_data = DataBase.GPM_database(pickle_path=gams_folder+'\\GE_data')
```
## **2: Initialize integrated model**
Initialize *gmspython_i* model and add modules:
```
gm_i = gmspython_i(work_folder=work_folder,database_kw = {'name': 'db_ex1'},**{'name':'ex1'})
[gm_i.add_module(m) for n,m in modules.items()];
```
The integrated model *gm_i* adopts the namespaces from other modules, merges the databases into one, and keeps the modules in the *modules* attribute:
```
gm_i.modules
```
Finally, we define the equilibrium module from the other modules. The GE version 'v1' is suited for the case with demand/supply being defined over sectors and goods:
```
gm = GE.GE_v1(work_folder=work_folder,**{'data_folder': gams_folder,'name':'GE_module'})
```
We initialize the module from the integrated model. The module attempts to figure out which quantities/prices need to be endogenized in order to make the model square. There are some cases, however, where we have to adjust manually. One example is if the supply of a good is exogenously given (as labor is in our simple household model). To adjust for this we add the option that the subset 'qS_endo' (which specifies the supply quantities to be endogenized) should not be included. Specifically, we ask not to include the subset 'exo' in the household module:
```
gm_i.get('exo',module='HH')
```
We initialize the module using these settings, and end with applying the 'write' method (writes the relevant gams code for the module, but does not run anything, as opposed to the write_and_run method) as well as adding this module to the integrated model:
```
ctree_kwargs = {'qS_endo': {'not': gm_i.g('exo',module='HH')}}
gm.init_from_model_i(gm_i,ctree_kwargs=ctree_kwargs)
gm.write()
gm.setstate('DC')
gm_i.add_module(gm)
```
## **3: Run and calibrate**
*Compute the value of the disequilibrium:*
```
# Aggregate supply by (t, n): restricts 'qS' to the intersection of the
# d_qS / n_equi / txE subsets (via the project's rctree_pd condition tree —
# presumably a pandas-indexed selection; verify against DataBase_wheels) and
# sums over the remaining index levels.
def s(db):
    return db['qS'].rctree_pd({'and': [db['d_qS'],db['n_equi'],db['txE']]}).groupby(['t','n']).sum()
# Aggregate demand by (t, n): same construction as s(db) but for 'qD'.
def d(db):
    return db['qD'].rctree_pd({'and': [db['d_qD'],db['n_equi'],db['txE']]}).groupby(['t','n']).sum()
# Disequilibrium value: supply minus demand per (t, n), with NaNs dropped and
# the last index level pivoted into columns for easy inspection.
def diseq(db):
    return (s(db)-d(db)).dropna().unstack()
```
Now that the integrated model *gm_i* has all the relevant modules, we can start by running the baseline model, after merging the 'settings'. Note that we add the option *write=False*, as the integrated model does not need to write any of the gams code: All of it has been written and added via the separate modules:
```
gm_i.merge_settings()
# gm_i.write_and_run(write=False)
```
The result is defined in the model instances (w. default name = 'baseline' if nothing else is supplied):
```
# gm_i.model_instances['baseline'].__dict__
```
We can now calibrate the model by updating the state of the model to 'DC', reset settings,
```
gm_i.setstate('DC')
```
*Time-specific moments:*
```
GE_t = DataBase.GPM_database()
for var in GE_data.variables_flat:
GE_t[var] = DataBase_wheels.repeat_variable_windex(GE_data.get(var),gm_i.get('t0'))
```
*Keep exogenous part:*
```
GE_t = gm_i.slice_exo(GE_t,copy=False)
```
*Calibrate sneakily:*
```
gm_i.initialize_variables()
gm_i.setstate('DC')
kwargs_write ={'end': DB2Gams.run_text(g_exo=gm_i.exo_groups.keys(),g_endo=gm_i.endo_groups.keys(),blocks=gm_i.model.settings.get_conf('blocks'),name=gm_i.model.settings.get_conf('name'))}
gm_i.setstate('B')
gm_i.write_and_run(name='dc',kwargs_write=kwargs_write,write=False,add_checkpoint='dc')
shock_db,kwargs_shock = ShockFunction.sneaky_db(gm_i.model_instances['dc'].out_db,GE_t)
gm_i.model_instances['dc'].solve_sneakily(from_cp=True,cp_init=gm_i.checkpoints['dc'],shock_db=shock_db,kwargs_shock=kwargs_shock,model_name=gm_i.model.settings.conf['DC']['name'])
```
| github_jupyter |
# Operation on Qubits
## basic 1qubit operations
Here we start with the basics of quantum computing. If you haven't installed the blueqat SDK, please install it first.
```
!pip install blueqat
```
## Step1: Prepare basic circuit
To calculate on the quantum computer, we just make a circuit.
Let's import main component of blueqat and initialize the circuit first.
```
from blueqat import Circuit
#initialization of circuit
Circuit()
```
## Step2: Prepare quantum logic gate
Next we put some quantum logic gate on the circuit
Let's put quantum logic gate connecting with chain method after the initialized circuit. The .x[0] shows we apply Xgate on the 0th qubit.
```
Circuit().x[0]
```
## Step3: Measurement and run
To get the result of this circuit, we have to measure the result of the circuit. Let's put the measurement gate as .m[:] (: shows we apply measurement gate to all of qubits). And .run() with the number of shots we can get the final result.
```
Circuit().x[0].m[:].run(shots=1)
```
Now we get the result '1' once. The circuit starts from 0, and applying the X gate flips the qubit's bit 0 <-> 1, so you finally get 1.
## Advanced: State vector
For more advanced use of the circuit, we can get the state amplitude (probability distribution) from the state vector. We can get the state vector directly from the simulator with just .run() the circuit without measurement.
```
Circuit().h[0].run()
```
This is the state vector.
## basic 2qubits operations
Here we learn about basic 2qubits operation, by using 2qubits operation we can make much more complicated calculation on quantum logic circuit.
## Step1: Initialize, create circuit and run
The basic preparation is the same as the 1qubit operation. Let's put CX gate this time.
```
Circuit().cx[0,1].m[:].run(shots=1)
```
Now we get the result of '00' once. 00 means that 2qubits are both 0.
## About CX gate
CX quantum logic gate is so called CNOT gate. This time we have 2qubits to use.
One is called "controlled gate" and another is "target gate". Only when the controlled gate is '1' the target gate flips. If the controlled gate is '0' nothing happens.
This time we have both qubits initialized to 0 so nothing happen after the CX gate applied.
## CX gate again
Let's see if the controlled gate is 1. Applying .x[0] gate to the 0th qubits and after we apply cx gate to 0th and 1st qubit.
```
Circuit().x[0].cx[0,1].m[:].run(shots=1)
```
Now we have result of '11'. First the circuit flips the 0th qubit with .x[0] gate and then the .cx[0,1] gate flips 1st qubit 0 to 1 because the controlled gate on 0th qubit is 1. Finally we get both qubit as 1.
## Advanced topic: initialized circuit with number of qubits
Usually the circuit automatically specify the number of qubits used in the circuit, we can specify the number of qubit first and then make the circuit.
```
Circuit(3).x[0].cx[0,1].m[:].run(shots=1)
```
Just put the number of qubits in Circuit(). This time 3 qubits are prepared and just two of them are used.
| github_jupyter |
```
!pip install git+https://github.com/desi-bgs/bgs-cmxsv.git --upgrade --user
import numpy as np
import astropy.table as atable
import matplotlib.pyplot as plt
from bgs_sv import sv1
```
# read single exposures from Blanc reduction
Mike Wilson is currently running redrock outputs for single exposures
```
exps = sv1.blanc_nexp1_exposures()
# lets only keep the nightly exposures where we have a corresponding
# deep exposure that we can use as the truth table
deep_exp = sv1.blanc_deep_exposures()
keep = np.zeros(len(exps)).astype(bool)
for i in range(len(exps)):
if exps[i]['TILEID'] in deep_exp['TILEID']:
keep[i] = True
print('%i exposures with corresponding deep exposures' % np.sum(keep))
exps[keep]
```
# read redrock output for nightly combined exposures
```
zbest_deep = sv1.get_zbest(80614, 'deep', targetclass='brightfaint')
zbest_exp = sv1.get_zbest(80614, 20201218, expid=68685, targetclass='brightfaint')
zbest_deep[:5]
zbest_exp[:5]
```
# calculate $z$ success rate using the deep exposures as the truth table
```
def zsuccess_redrock_criteria(zbest):
    ''' Redshift success criteria based only on redrock output.

    Returns a boolean (or boolean array) that is True where all of the
    following hold: no redrock warning flags, a confident fit
    (DELTACHI2 > 40), a non-stellar spectral type, a redshift inside the
    rough BGS limit (0 < z < 0.6), and a small redshift uncertainty.
    '''
    z = zbest['Z']
    return (
        (zbest['ZWARN'] == 0)                   # no redrock warning flags
        & (zbest['DELTACHI2'] > 40.)            # confident delta-chi2
        & (zbest['SPECTYPE'] != "STAR")         # only galaxy spectra
        & (z > 0.0) & (z < 0.6)                 # rough BGS redshift limit
        & (zbest['ZERR'] < (0.0005 * (1. + z))) # small redshift error
    )
def zsuccess_truth_criteria(zbest, z_true, silent=True):
    ''' Redshift success criteria based on redrock output *and* agreement
    with the "true" redshift `z_true` (here taken from the deep exposures).

    On top of the redrock-only cuts (no warnings, DELTACHI2 > 40,
    non-stellar type, 0 < z < 0.6, small ZERR), the measured redshift must
    agree with the truth to |dz| / (1 + z_true) < 0.003.
    '''
    z = zbest['Z']
    redrock_ok = (
        (zbest['ZWARN'] == 0)
        & (zbest['DELTACHI2'] > 40.)
        & (zbest['SPECTYPE'] != "STAR")           # only galaxy spectra
        & (z > 0.0) & (z < 0.6)                   # rough BGS redshift limit
        & (zbest['ZERR'] < (0.0005 * (1. + z)))
    )
    # agreement with the truth-table redshift
    truth_ok = (np.abs(z_true - z) / (1. + z_true)) < 0.003
    zsuccess = redrock_ok & truth_ok
    if not silent:
        print('%i of %i pass the redshift success criteria' % (np.sum(zsuccess), len(zsuccess)))
        print('%.2f redshift success rate' % (np.sum(zsuccess)/len(zsuccess)))
    return zsuccess
def zsuccess_rate(prop, zsuccess_cond, range=None, nbins=20, bin_min=2):
    ''' Measure the redshift success rate as a function of property `prop`.

    :params prop:
        array of properties (i.e. Legacy r-band magnitude)
    :params zsuccess_cond:
        boolean array indicating redshift success
    :params range: (default: None)
        range of the `prop` passed to np.histogram
    :params nbins: (default: 20)
        number of bins to divide `prop` by
    :params bin_min: (default: 2)
        bins with `bin_min` objects or fewer are excluded
    :return wmean:
        weighted mean of `prop` in the kept bins
    :return rate:
        redshift success rate in the kept bins
    :return e_rate:
        simple poisson error on the success rate
    '''
    counts, edges = np.histogram(prop, bins=nbins, range=range)
    weighted, _ = np.histogram(prop, bins=edges, weights=prop)
    successes, _ = np.histogram(prop[zsuccess_cond], bins=edges)

    # exclude under-populated bins
    keep = counts > bin_min
    counts = counts[keep]
    weighted = weighted[keep]
    successes = successes[keep]

    wmean = weighted / counts  # weighted mean of prop per bin
    # the (counts == 0) term guards against division by zero; it is only
    # reachable when bin_min < 0
    denom = counts.astype('float') + (counts == 0)
    rate = successes.astype('float') / denom
    e_rate = np.sqrt(rate * (1 - rate)) / np.sqrt(denom)
    return wmean, rate, e_rate
# only use targets where we have accurate redshifts in the deep exposure
_zbest_deep = sv1.get_zbest(80614, 'deep', targetclass='brightfaint')
zs_deep = zsuccess_redrock_criteria(_zbest_deep)
zbest_deep = _zbest_deep[zs_deep]['TARGETID', 'Z']
zbest_deep.rename_column('Z', 'Z_TRUE') # deep exposure redshift is the "true redshift"
_zbest_exp = sv1.get_zbest(80614, 20201218, expid=68685, targetclass='brightfaint')
zbest_exp = atable.join(zbest_deep, _zbest_exp, keys='TARGETID', join_type='left')
# calculate redshift successs rate
zs_exp = zsuccess_truth_criteria(zbest_exp, zbest_exp['Z_TRUE'])
r_mag = 22.5 - 2.5 * np.log10(zbest_exp['FLUX_R'])
r_mid, zs, zs_err = zsuccess_rate(r_mag, zs_exp)
fig = plt.figure(figsize=(8,6))
sub = fig.add_subplot(111)
sub.errorbar(r_mid, zs, yerr=zs_err, fmt='.C0')
sub.plot(r_mid, zs)
sub.set_xlabel('$r$ mag', fontsize=25)
sub.set_ylabel('$z$ success rate', fontsize=25)
```
# redshift success for the single exposures for all tiles with deep exposure
```
#for tileid in [80612, 80613, 80614, 80616, 80617, 80618, 80619]:
zss_tiles, iexps_tiles = [], []
for tileid in deep_exp['TILEID']:
# only use targets where we have accurate redshifts in the deep exposure
try:
_zbest_deep = sv1.get_zbest(tileid, 'deep', targetclass='brightfaint');
except:
continue
zs_deep = zsuccess_redrock_criteria(_zbest_deep)
zbest_deep = _zbest_deep[zs_deep]['TARGETID', 'Z']
zbest_deep.rename_column('Z', 'Z_TRUE') # deep exposure redshift is the "true redshift"
if len(zbest_deep) < 200: continue
zss, iexps = [], []
nights = np.unique(exps['NIGHT'][exps['TILEID'] == tileid])
for i, night in enumerate(nights):
expids = np.unique(exps['EXPID'][(exps['TILEID'] == tileid) & (exps['NIGHT'] == night)])
for ii, expid in enumerate(expids):
# only look at TRANSP > 0.9 with GFA SKY MAG < 20.075
iexp = np.where(exps['EXPID'] == expid)[0]
if exps['GFA_TRANSPARENCY_MED'][iexp] <= 0.9:# or exps['GFA_SKY_MAG_AB_MED'][isexp] > 20.075:
continue
try:
_zbest_exp = sv1.get_zbest(tileid, night, expid=expid, targetclass='brightfaint');
except:
continue
zbest_exp = atable.join(zbest_deep, _zbest_exp, keys='TARGETID', join_type='inner')
# calculate redshift successs rate
zs_exp = zsuccess_truth_criteria(zbest_exp, zbest_exp['Z_TRUE'])
r_mag = 22.5 - 2.5 * np.log10(zbest_exp['FLUX_R'])
zss.append(zsuccess_rate(r_mag, zs_exp, range=[17.5, 20.5]))
iexps.append(iexp)
if len(zss) > 0:
zss_tiles.append(zss)
iexps_tiles.append(iexps)
fig = plt.figure(figsize=(30,15))
for i, zss, iexps in zip(range(len(iexps_tiles)), zss_tiles, iexps_tiles):
sub = fig.add_subplot(2,4,i+1)
nexp = 0
sub.plot([17.5, 20.5], [1., 1.], c='k', ls='--')
sub.vlines(19.5, 0.6, 1.0, color='k', linestyle=':')
for zs, iexp in zip(zss, iexps):
r_mid, zs, zs_err = zs
sub.errorbar(r_mid, zs, yerr=zs_err, fmt='.C%i' % nexp)
sub.plot(r_mid, zs, c='C%i' % nexp,
label=r'TEXP=%.fs, SKY MAG=%.2f' % #\nMOON ILL=%.1f,SEP=%.f,ALT=%.f,AIRMASS=%.1f' %
(exps['EXPTIME'][iexp],
exps['GFA_SKY_MAG_AB_MED'][iexp]))#, exps['GFA_MOON_ILLUMINATION_MED'][iexp], exps['GFA_MOON_SEP_DEG_MED'][iexp], 90.-exps['GFA_MOON_ZD_DEG_MED'][iexp], exps['GFA_AIRMASS_MED'][iexp]))
nexp += 1
sub.legend(loc='lower left', fontsize=15)
sub.text(0.95, 0.95, str(exps['TILEID'][iexp][0]), transform=sub.transAxes, ha='right', va='top', fontsize=25)
sub.set_xlim(17.5, 20.5)
sub.set_ylim(0.6, 1.1)
bkgd = fig.add_subplot(111, frameon=False)
bkgd.set_xlabel('$r$ mag', fontsize=30)
bkgd.set_ylabel('$z$ success rate', fontsize=30)
bkgd.set_title('single exposure $z$ success rates (deep exp. truth) $\Delta \chi^2 > 40$', fontsize=40)
bkgd.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
```
| github_jupyter |
```
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel
import pandas as pd
import random
import checklist
from checklist.editor import Editor
from checklist.expect import Expect
from checklist.pred_wrapper import PredictorWrapper
from checklist.test_types import MFT
from typing import List
import warnings
warnings.filterwarnings('ignore')
# Initialize random seed
# Remove this code to experiment with random samples
random.seed(123)
torch.manual_seed(456)
```
# MFTs: Introduction
In this notebook, we will create Minimum Functionality Tests (MFTs) for a generative language model. MFTs test one specific function of a language model. They are analogous to unit tests in traditional software engineering.
## Setup generative model
Before we can test anything, we need to set up our language model. We will use the HuggingFace transformers library to load a GPT2 model.
First, we create a tokenizer. The tokenizer is responsible for splitting strings into individual words, then converting those words into vectors of numbers that our model can understand.
```
# Load pretrained model tokenizer
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
# Demonstrate what the tokenizer does
tokenizer.encode("Wherefore art thou Romeo?")
```
Our tokenizer has turned the human-readable text into a list of numbers that the model understands. Next, let's load the GPT2 model.
```
# Load pretrained model (weights)
model = GPT2LMHeadModel.from_pretrained("gpt2", pad_token_id=tokenizer.eos_token_id)
device = 'cuda'
model.eval()
model.to(device)
"Model loaded"
```
Generating text with the model requires a bit of work. Let's write a function `generate_sentences` to handle the text generation.
`generate_sentences` has 1 parameter, `prompts`, which is a list of strings. A prompt is a string that the model will use as a starting point for generating new text. It gives the model context about what kind of text should be generated.
`generate_sentences` will output a list of generated text responses for each prompt.
```
def generate_sentences(prompts: List[str]) -> List[str]:
    """Generate one text continuation per prompt.

    Each prompt is tokenized, fed through the module-level GPT2 ``model``,
    and the decoded output is returned with the prompt prefix stripped, so
    that only the newly generated text remains.
    """
    generation_kwargs = dict(
        do_sample=True,             # sample rather than greedy decode
        min_length=10,
        max_length=50,
        num_beams=1,                # no beam search
        temperature=1.0,
        no_repeat_ngram_size=2,     # forbid repeating any bigram
        early_stopping=False,
        output_scores=True,
        return_dict_in_generate=True,
    )
    completions = []
    for prompt in prompts:
        # return_tensors="pt" gives a PyTorch tensor; move it to the model's device
        input_ids = tokenizer.encode(prompt, return_tensors='pt').to(device)
        output = model.generate(input_ids, **generation_kwargs)
        decoded = tokenizer.decode(output.sequences[0], skip_special_tokens=True)
        # keep only the continuation, not the echoed prompt
        completions.append(decoded[len(prompt):])
    return completions
generate_sentences(["Wherefore art thou Romeo?"])
```
Now that everything is ready, we can write our first MFT.
## MFT - Language prompt
For this MFT, we will expect the model to create a reasonable continuation of a prompt. The model will be prompted with strings like "The most commonly spoken language in {country} is " where {country} is a placeholder for a country such as Spain.
We need to create a rule to determine if the model passes our test. The criteria for passing or failing the test is entirely user defined. We will consider this MFT to pass if the model's output contains any language name. This will demonstrate that the model understands the general context of the prompt. The mentioned language doesn't have to be accurate - for example, "In Spain the most commonly spoken language is Indonesian" would pass our test, because Indonesian is a language. The language may also be located anywhere in the output - for example, "In Spain the most commonly spoken language is not easy to learn. Spanish has many complicated conjugations." would also pass our test.
In a later section of this notebook, there is another version of this MFT that is stricter, requiring the correct language to be mentioned in the response.
### Handwritten MFT
First, we will write the MFT by hand. Then, we'll use Checklist's MFT class to demonstrate how Checklist helps us create the MFT much more quickly.
#### Generate prompts from template
We will use Checklist's Editor class to quickly create the prompts. For a detailed explanation of generating data, see the "1. Generating data" tutorial notebook.
```
editor = Editor()
# Note: remove the country parameter to generate prompts with random countries
prompt_strs = editor.template("The most commonly spoken language in {country} is", country = ["United States", "France", "Guatemala", "Mongolia", "Japan"])
prompt_strs.data
```
#### Language CSV
We need a list of languages to check if the model's output contains a language. To save some time, we will read language names from a CSV file. The data comes from standard ISO Language Codes https://datahub.io/core/language-codes
```
import urllib.request
urllib.request.urlretrieve('https://datahub.io/core/language-codes/r/language-codes.csv', 'language-codes.csv')
lang_codes_csv = pd.read_csv('language-codes.csv')
lang_codes_csv
```
#### Run the MFT
Now we're ready to create the MFT. We will create 3 Pandas dataframes, one each for prompts, responses, and results. Then, we will loop over the prompts, send each prompt to the model, and determine if it passes or fails the test. Each prompt and its test result will be recorded in the dataframes.
```
prompts = pd.DataFrame({"id": [], "prompt": []})
responses = pd.DataFrame({"id": [], "response": []})
results = pd.DataFrame({"id": [], "p/f": []})
langs = lang_codes_csv["English"].tolist()
model_responses = generate_sentences(prompt_strs.data)
for (i, response) in enumerate(model_responses):
pf = 'fail'
# Check if any language from the CSV data is in the generated string
for l in langs:
if l in response:
pf = 'pass'
break
prompts = prompts.append({"id": i, "prompt": prompt_strs.data[i]}, ignore_index=True)
responses = responses.append({"id": i, "response": response}, ignore_index=True)
results = results.append({"id": i, "p/f": pf}, ignore_index=True)
```
#### Show test results
Now let's look at the results of our test.
```
pd.set_option("max_colwidth", 250)
prompts
responses
results
```
We can merge all the dataframes to make the results easier to read.
```
merged = pd.merge(responses, results, on="id")
merged = pd.merge(prompts, merged, on="id")
merged
```
Finally, let's display the failing tests.
```
merged.loc[merged['p/f'] == 'fail']
```
### Test with Checklist
Next, let's try running the MFT with Checklist. We will no longer need to keep track of results in Pandas dataframes, since Checklist will track the results for us.
#### Create the expectation function
In order to determine if an example passes or fails the test, Checklist uses an expectation function. An expectation function is a function that receives the example, then returns true if the example passes the test, or false if the example fails.
```
def response_contains_language(x, pred, conf, label=None, meta=None):
    """Expectation: pass iff the model output mentions any known language.

    Checks the prediction `pred` against the module-level `langs` list
    using substring matching.
    """
    return any(language in pred for language in langs)
```
We will wrap this function with `Expect.single`, which causes the expectation function to be called for each example. In other cases, you might want to have an expectation function that checks multiple examples simultaneously. See the tutorial notebook "3. Test types, expectation functions, running tests" for detailed information about expectation functions.
```
contains_language_expect_fn = Expect.single(response_contains_language)
```
Now we can feed our prompts and expectation function into the MFT constructor.
```
test = MFT(**prompt_strs, name='Language in response', description='The response contains a language.', expect=contains_language_expect_fn)
```
In order to run the test, Checklist also needs a function that generates the model's predictions for the inputs. The function receives all inputs (prompts) as a list, and must return the results in a tuple `(model_predictions, confidences)`, where `model_predictions` is a list of all the predictions, and `confidences` is a list of the model's scores for those predictions.
We will not be using confidences in this test. Checklist provides a wrapper function `PredictorWrapper.wrap_predict()` that outputs a tuple with a confidence score of 1 for any prediction. We can use it to wrap `generate_sentences` so the predictions will have a confidence score as needed.
```
wrapped_generator = PredictorWrapper.wrap_predict(generate_sentences)
wrapped_generator(["The most commonly spoken language in Brazil is "])
```
Now we're ready to run the test. The first argument to the `test.run()` function is the generator function we just created. We will also set the optional parameter `overwrite=True` so the test can be re-run without an error. If overwrite=False, then Checklist will reject subsequent test runs to prevent us from accidentally overwriting your test results.
```
test.run(wrapped_generator, overwrite=True)
```
To see the results, we can use the `summary` function.
```
def format_example(x, pred, conf, label=None, meta=None):
    """Render a Checklist example as a prompt/completion pair for display."""
    return f'Prompt: {x}\nCompletion: {pred}'
test.summary(format_example_fn = format_example)
```
Test results can also be explored visually by using the `visual_summary` function.
```
test.visual_summary()
```
## MFT - Language prompt with accurate response
Let's make our test a little stricter to better understand the model's behavior. We will now require the model to respond with the correct language instead of any language in general. By using the `meta=True` argument for `editor.template()`, the country associated with the prompt will be will be stored in the `country_prompts` object.
```
country_prompts = editor.template("The most commonly spoken language in {country} is ", country = ["United States", "France", "Guatemala", "Mongolia", "Japan"], meta=True)
correct_responses = {
"United States": "English",
"France": "French",
"Guatemala": "Spanish",
"Mongolia": "Mongolian",
"Japan": "Japanese"
}
```
The country metadata can be accessed with `country_prompts.meta`.
```
country_prompts.meta
```
### Handwritten Test
```
prompts = pd.DataFrame({"id": [], "prompt": []})
responses = pd.DataFrame({"id": [], "response": []})
test_results = pd.DataFrame({"id": [], "p/f": []})
model_responses = generate_sentences(country_prompts.data)
for (i, response) in enumerate(model_responses):
pf = 'fail'
country = country_prompts.meta[i]["country"]
# Check if the correct language is in the response
language = correct_responses[country]
if language in response:
pf = 'pass'
prompts = prompts.append({"id": i, "prompt": country_prompts.data[i]}, ignore_index=True)
responses = responses.append({"id": i, "response": response}, ignore_index=True)
test_results = test_results.append({"id": i, "p/f": pf}, ignore_index=True)
```
#### Show test results
Let's look at our test results. The first dataframe contains the prompts given to the model.
```
prompts
```
The next dataframe shows the model's response to the prompt (not including the prompt itself)
```
responses
```
The final dataframe shows the pass/fail status of the test
```
test_results
```
### Testing with Checklist
Now let's run the test with Checklist. All we need is a new expectation function. The rest of the process is the same as before.
```
def response_contains_correct_language(x, pred, conf, label=None, meta=None):
    """Expectation: pass iff `pred` mentions the correct language for the
    country stored in the example metadata (looked up in the module-level
    `correct_responses` mapping)."""
    expected = correct_responses[meta['country']]
    return expected in pred
correct_language_expect_fn = Expect.single(response_contains_correct_language)
test = MFT(**country_prompts, name='Correct language in response', description='The response contains the correct language for the country in the prompt.', expect=correct_language_expect_fn)
test.run(wrapped_generator, overwrite=True)
test.summary(format_example_fn = format_example)
test.visual_summary()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/unicamp-dl/IA025_2022S1/blob/main/ex01/Alexander_Valle/t1_IA025_1s22_Alexande_Valle.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Esté um notebook Colab contendo exercícios de programação em python, numpy e pytorch.
## Coloque seu nome
```
print('Meu nome é: Rolan Alexander Valle Rey Sánchez , RA 230254')
```
# Parte 1:
##Exercícios de Processamento de Dados
Nesta parte pode-se usar as bibliotecas nativas do python como a `collections`, `re` e `random`. Também pode-se usar o NumPy.
##Exercício 1.1
Crie um dicionário com os `k` itens mais frequentes de uma lista.
Por exemplo, dada a lista de itens `L=['a', 'a', 'd', 'b', 'd', 'c', 'e', 'a', 'b', 'e', 'e', 'a']` e `k=2`, o resultado deve ser um dicionário cuja chave é o item e o valor é a sua frequência: {'a': 4, 'e': 3}
```
L = ['f', 'a', 'a', 'd', 'b', 'd', 'c', 'e', 'a', 'b', 'e', 'e', 'a', 'd']
le=list(set(L))# list the unique elemets of L
freqlist=[int(sum([1 for l in L if e==l])) for e in le]# get the frequency of elemets of le
#le,freqlist
data=[(e,freq) for e,freq in zip(le,freqlist) ]# generate al list of pairs
# sort by frequense (reverse)
data.sort(key=lambda data: data[1], reverse=True)
#data
k=2
dicionario=dict(p for p in data[0:k])
dicionario
def top_k(L, k):
    """Return a dict mapping the k most frequent items of L to their counts.

    Parameters:
        L: iterable of hashable items.
        k: number of top items to keep.

    Uses collections.Counter, which counts in a single O(len(L)) pass.
    The original recounted the entire list once per distinct element,
    i.e. O(len(L) * n_unique) — prohibitively slow for 10M items.
    """
    return dict(Counter(L).most_common(k))
```
Mostre que sua implementação está correta usando uma entrada com poucos itens:
```
# Correctness check on a small input: the 3 most frequent items of L.
L = ['f', 'a', 'a', 'd', 'b', 'd', 'c', 'e', 'a', 'b', 'e', 'e', 'a', 'd']
k = 3
resultado = top_k(L=L, k=k)
print(f'resultado: {resultado}')
```
Mostre que sua implementação é eficiente usando uma entrada com 10M de itens:
```
# Performance check: 10M random letters; %%timeit (cell magic) measures top_k.
import random
L = random.choices('abcdefghijklmnopqrstuvwxyz', k=10_000_000)
k = 10000
%%timeit
resultado = top_k(L=L, k=k)
```
## Exercício 1.2
Em processamento de linguagem natural, é comum convertemos as palavras de um texto para uma lista de identificadores dessas palavras. Dado o dicionário `V` abaixo onde as chaves são palavras e os valores são seus respectivos identificadores, converta o texto `D` para uma lista de identificadores.
Palavras que não existem no dicionário deverão ser convertidas para o identificador do token `unknown`.
O código deve ser insensível a maiúsculas (case-insensitive).
Se atente que pontuações (vírgulas, ponto final, etc) também são consideradas palavras.
```
# Exploratory version: map each word of D to its id in V
# (unknown words -> -1); packaged as tokens_to_ids below.
V = {'eu': 1, 'de': 2, 'gosto': 3, 'comer': 4, '.': 5, 'unknown': -1}
D = 'Eu gosto de comer pizza.'
lcD=D.lower()# convert the text to lower case
lcD=lcD. replace(".", " .")# insert a space before '.' so it splits off as a token
lw=lcD.split(' ')# break the text into a list of words
lw
lwords=[w for w,i in V.items()]#list of words in the dictionary
lwords.remove('unknown')
lwords
[V[w] if w in lwords else -1 for w in lw]
def tokens_to_ids(text, vocabulary):
    """Convert `text` to a list of token ids using `vocabulary`.

    Case-insensitive; a final '.' is split off as its own token.
    Words absent from `vocabulary` map to the id of its 'unknown' entry.

    Fixes vs. the original:
    - unknown words now map to vocabulary['unknown'] instead of a
      hardcoded -1 (same result for the example V, but correct for any
      vocabulary);
    - dict .get() gives O(1) lookup per word — the original scanned a
      list of all vocabulary words for every token.
    """
    unknown_id = vocabulary['unknown']
    # lowercase, then put a space before '.' so it splits as a token
    words = text.lower().replace(".", " .").split(' ')
    return [vocabulary.get(w, unknown_id) for w in words]
```
Mostre que sua implementação esta correta com um exemplo pequeno:
---
```
# Small correctness check; expected output: [1, 3, 2, 4, -1, 5].
V = {'eu': 1, 'de': 2, 'gosto': 3, 'comer': 4, '.': 5, 'unknown': -1}
D = 'Eu gosto de comer pizza.'
print(tokens_to_ids(D, V))
```
Mostre que sua implementação é eficiente com um exemplo grande:
```
# Performance check on 1M repetitions of the sentence (%%timeit cell magic).
V = {'eu': 1, 'de': 2, 'gosto': 3, 'comer': 4, '.': 5, 'unknown': -1}
D = ' '.join(1_000_000 * ['Eu gosto de comer pizza.'])
%%timeit
resultado = tokens_to_ids(D, V)
```
## Exercício 1.3
Em aprendizado profundo é comum termos que lidar com arquivos muito grandes.
Dado um arquivo de texto onde cada item é separado por `\n`, escreva um programa que amostre `k` itens desse arquivo aleatoriamente.
Nota 1: Assuma amostragem de uma distribuição uniforme, ou seja, todos os itens têm a mesma probabilidade de amostragem.
Nota 2: Assuma que o arquivo não cabe em memória.
Nota 3: Utilize apenas bibliotecas nativas do python.
```
# Exploration for the sampling exercise: split an in-memory string into
# lines and draw random indices with numpy. NOTE(review): this draws
# *with* replacement and keeps everything in memory — the exercise asks
# for native libraries and a file that does not fit in memory; see the
# sample() function defined later.
import numpy as np
data = """a,b,c
d,e,f
g,h,i
j,k,l
d,e,f
g,h,i
j,k,l
d,e,f
g,h,i
j,k,l
d,e,f
g,h,i
j,k,l"""
data
data.splitlines()
data
li=data.split('\n')# list of items
li
ld=len(li)
ld
k=5
# np.random.uniform(1, ld+1, size=k)//1
positionitem=np.random.randint(ld, size=100)
positionitem
import matplotlib.pyplot as plt
plt.hist(positionitem, bins =1)
plt.show()
set(positionitem)
```
## Exercício 1.3
Em aprendizado profundo é comum termos que lidar com arquivos muito grandes.
Dado um arquivo de texto onde cada item é separado por `\n`, escreva um programa que amostre `k` itens desse arquivo aleatoriamente.
Nota 1: Assuma amostragem de uma distribuição uniforme, ou seja, todos os itens têm a mesma probabilidade de amostragem.
Nota 2: Assuma que o arquivo não cabe em memória.
Nota 3: Utilize apenas bibliotecas nativas do python.
```
def sample(path: str, k: int):
    """Uniformly sample k lines from the text file at `path`.

    Uses reservoir sampling (Algorithm R): a single pass over the file
    with O(k) memory, so the file never has to fit in RAM, and every
    line is kept with equal probability k/n. Only native libraries are
    used, as the exercise requires.

    Fixes vs. the original, which (a) never opened the file — it split
    the *path string* itself on '\n'; (b) used numpy (non-native); and
    (c) sampled with replacement.

    Returns a list of at most k lines (fewer if the file has < k lines).
    """
    import random

    reservoir = []
    with open(path) as fin:
        for i, line in enumerate(fin):
            item = line.rstrip('\n')
            if i < k:
                # fill the reservoir with the first k lines
                reservoir.append(item)
            else:
                # replace a random reservoir slot with probability k/(i+1)
                j = random.randint(0, i)
                if j < k:
                    reservoir[j] = item
    return reservoir
```
Mostre que sua implementação está correta com um exemplo pequeno:
```
# Correctness check: write 100 lines to a small file and sample 10 of them.
filename = 'small.txt'
total_size = 100
n_samples = 10
with open(filename, 'w') as fout:
    fout.write('\n'.join(f'line {i}' for i in range(total_size)))
samples = sample(path=filename, k=n_samples)
print(samples)
print(len(samples) == n_samples)
```
Mostre que sua implementação é eficiente com um exemplo grande:
```
# Performance check: 1M-line file, 10k samples (%%timeit cell magic).
filename = 'large.txt'
total_size = 1_000_000
n_samples = 10000
with open(filename, 'w') as fout:
    fout.write('\n'.join(f'line {i}' for i in range(total_size)))
%%timeit
samples = sample(path=filename, k=n_samples)
assert len(samples) == n_samples
```
# Parte 2:
##Exercícios de Numpy
Nesta parte deve-se usar apenas a biblioteca NumPy. Aqui não se pode usar o PyTorch.
##Exercício 2.1
Quantos operações de ponto flutuante (flops) de soma e de multiplicação tem a multiplicação matricial $AB$, sendo que a matriz $A$ tem tamanho $m \times n$ e a matriz $B$ tem tamanho $n \times p$?
Resposta:
- número de somas: $n*m*p$
- número de multiplicações: $n*m*p$
## Exercício 2.2
Em programação matricial, não se faz o loop em cada elemento da matriz,
mas sim, utiliza-se operações matriciais.
Dada a matriz `A` abaixo, calcule a média dos valores de cada linha sem utilizar laços explícitos.
Utilize apenas a biblioteca numpy.
```
# Row means of A (4x6) without explicit loops: multiply A by a column of
# ones (summing each row), then divide by the number of columns.
import numpy as np
A = np.arange(24).reshape(4, 6)
print(A)
nrow=len(A)
ncol=len(A[0])
nrow,ncol
c=np.ones(ncol).reshape(ncol,1)
c
medias=np.matmul(A,c)/ncol
medias.T
# Escreva sua solução aqui.
list(medias.T[0])
```
## Exercício 2.3
Seja a matriz $C$ que é a normalização da matriz $A$:
$$ C(i,j) = \frac{A(i,j) - A_{min}}{A_{max} - A_{min}} $$
Normalizar a matriz `A` do exercício acima de forma que seus valores fiquem entre 0 e 1.
```
# Min-max normalize A to [0, 1] using its global minimum and maximum.
a=A.flatten()
amin=min(a)
amax=max(a)
amin,amax
C=(A-amin)/(amax-amin)
C
# Escreva sua solução aqui.
C
def nomarry(AA):
    """Min-max normalize the numpy array AA to the range [0, 1].

    Uses the ndarray's own .min()/.max() (vectorized, no copy) instead
    of flattening to a copy and scanning it with Python's builtin
    min/max, which iterate element by element.

    NOTE: a constant array (max == min) divides by zero, exactly as the
    original did — callers must avoid that input.
    """
    lo = AA.min()
    hi = AA.max()
    return (AA - lo) / (hi - lo)
```
## Exercício 2.4
Modificar o exercício anterior de forma que os valores de cada *coluna* da matriz `A` sejam normalizados entre 0 e 1 independentemente dos valores das outras colunas.
```
# Normalize each *column* of A independently: transpose so columns become
# rows, normalize each with nomarry, then transpose back.
A
# Escreva sua solução aqui.
At=A.T
At
np.array([nomarry(col) for col in At]).T
```
## Exercício 2.5
Modificar o exercício anterior de forma que os valores de cada *linha* da matriz `A` sejam normalizados entre 0 e 1 independentemente dos valores das outras linhas.
```
# Normalize each *row* of A independently to [0, 1].
np.min(A)
np.array([nomarry(row) for row in A])
# Escreva sua solução aqui.
```
## Exercício 2.6
A [função softmax](https://en.wikipedia.org/wiki/Softmax_function) é bastante usada em apredizado de máquina para converter uma lista de números para uma distribuição de probabilidade, isto é, os números ficarão normalizados entre zero e um e sua soma será igual à um.
Implemente a função softmax com suporte para batches, ou seja, o softmax deve ser aplicado a cada linha da matriz. Deve-se usar apenas a biblioteca numpy. Se atente que a exponenciação gera estouro de representação quando os números da entrada são muito grandes. Tente corrigir isto.
```
import numpy as np
def softmax(AA):
    '''
    Apply the softmax function to each row of matrix `AA`.

    Input:
        `AA` is an M x N matrix, where M is the number of examples to be
        processed independently and N is the size of each example.
    Output:
        An M x N matrix where every row sums to one.

    Numerically stable and vectorized: subtracting each row's maximum
    before exponentiating prevents overflow (softmax is invariant to a
    per-row constant shift). The original recomputed sum(np.exp(B)) for
    every element — O(N^2) per row — and still evaluated np.exp on the
    raw values, triggering overflow warnings for large inputs.
    '''
    AA = np.asarray(AA, dtype=float)
    shifted = AA - AA.max(axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / exps.sum(axis=1, keepdims=True)
import numpy as np
def softmax(AA):
    '''
    Apply the softmax function to each row of matrix `AA`.

    Input:
        `AA` is an M x N matrix, where M is the number of examples to be
        processed independently and N is the size of each example.
    Output:
        An M x N matrix where every row sums to one.

    The original nested comprehension computed sum(np.exp(B - e)) anew
    for every element e, i.e. O(N^2) work per row. This version performs
    the same numerically-stable computation (shift by the row maximum,
    exponentiate, normalize) vectorized in O(N) per row.
    '''
    AA = np.asarray(AA, dtype=float)
    shifted = AA - AA.max(axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / exps.sum(axis=1, keepdims=True)
```
Mostre que sua implementação está correta usando uma matriz pequena como entrada:
```
# Small check including a large value (1000) that would overflow a naive softmax.
A = np.array([[0.5, -1, 1000],
[-2, 0, 0.5]])
softmax(A)
```
O código a seguir verifica se sua implementação do softmax está correta.
- A soma de cada linha de A deve ser 1;
- Os valores devem estar entre 0 e 1
```
# Sanity check: every row of softmax(A) sums to 1 and all entries lie in [0, 1].
np.allclose(softmax(A).sum(axis=1), 1) and softmax(A).min() >= 0 and softmax(A).max() <= 1
def softmax(AA):
    '''
    Apply the softmax function to each row of the M x N matrix `AA`.

    Each of the M rows is an example processed independently; the result
    has the same shape and every row sums to one.
    '''
    # Shift every row by its maximum before exponentiating: softmax is
    # invariant to a per-row constant shift, and the shift keeps np.exp
    # from overflowing on large inputs.
    row_max = AA.max(axis=1, keepdims=True)
    exp_shifted = np.exp(AA - row_max)
    # Normalize each row by its own sum of exponentials.
    return exp_shifted / exp_shifted.sum(axis=1, keepdims=True)
```
Mostre que sua implementação é eficiente usando uma matriz grande como entrada:
```
# Performance check on a large random matrix (%%timeit cell magic),
# followed by the validity check on the result.
A = np.random.uniform(low=-10, high=10, size=(128, 100_000))
%%timeit
softmax(A)
SM = softmax(A)
np.allclose(SM.sum(axis=1), 1) and SM.min() >= 0 and SM.max() <= 1
```
## Exercício 2.7
A codificação one-hot é usada para codificar entradas categóricas. É uma codificação onde apenas um bit é 1 e os demais são zero, conforme a tabela a seguir.
| Decimal | Binary | One-hot
| ------- | ------ | -------
| 0 | 000 | 1 0 0 0 0 0 0 0
| 1 | 001 | 0 1 0 0 0 0 0 0
| 2 | 010 | 0 0 1 0 0 0 0 0
| 3 | 011 | 0 0 0 1 0 0 0 0
| 4 | 100 | 0 0 0 0 1 0 0 0
| 5 | 101 | 0 0 0 0 0 1 0 0
| 6 | 110 | 0 0 0 0 0 0 1 0
| 7 | 111 | 0 0 0 0 0 0 0 1
Implemente a função one_hot(y, n_classes) que codifique o vetor de inteiros y que possuem valores entre 0 e n_classes-1.
```
"""
def one_hots(y, n_classes):
# Escreva seu código aqui.
lc=list(range(N_CLASSES))#class list
sp= ''
OHL=[sp.join(['1' if c1==c2 else '0' for c1 in lc ]) for c2 in lc]
dec2oh=dict( (d,oh) for d,oh in zip(lc,OHL))# decimal to one hot
return [[i,dec2oh[i]] for i in y]
"""
def one_hot(y, n_classes):
    """One-hot encode the integer vector `y`.

    Each entry of `y` (an integer in 0..n_classes-1) is returned as a
    pair [value, one_hot_list], preserving the original output format.

    Bug fix: the original read the *global* N_CLASSES instead of its
    n_classes parameter, so it silently produced wrong-width encodings
    (or crashed) whenever the two differed.
    """
    lc = list(range(n_classes))
    # one row of the identity matrix per class value
    rows = [[1 if c1 == c2 else 0 for c1 in lc] for c2 in lc]
    dec2oh = dict(zip(lc, rows))  # decimal value -> one-hot list
    return [[i, dec2oh[i]] for i in y]
import numpy as np
N_CLASSES = 9
N_SAMPLES = 10
# Fix: the np.int alias was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin int is the documented replacement.
y = (np.random.rand(N_SAMPLES) * N_CLASSES).astype(int)
print(y)
print(one_hot(y, N_CLASSES))
print(one_hot(y, N_CLASSES))
```
Mostre que sua implementação é eficiente usando uma matriz grande como entrada:
```
N_SAMPLES = 100_000
N_CLASSES = 1_000
y = (np.random.rand((N_SAMPLES)) * N_CLASSES).astype(np.int)
%%timeit
one_hot(y, N_CLASSES)
```
## Exercício 2.8
Implemente uma classe que normalize um array de pontos flutuantes `array_a` para a mesma média e desvio padrão de um outro array `array_b`, conforme exemplo abaixo:
```
# Expected usage of the Normalizer class defined below.
array_a = np.array([-1, 1.5, 0])
array_b = np.array([1.4, 0.8, 0.3, 2.5])
normalize = Normalizer(array_b)
normalized_array = normalize(array_a)
print(normalized_array) # Deve imprimir [0.3187798 2.31425165 1.11696854]
```
Mostre que seu código está correto com o exemplo abaixo:
```
import numpy as np
class Normalizer:
    """Rescale arrays to the mean and standard deviation of a reference array.

    Built from a reference array (array_b); calling the instance on
    another array returns it linearly transformed so that its mean and
    standard deviation match the reference's.
    """

    def __init__(self, arrb):
        # Target statistics taken from the reference array.
        self.meanb = np.mean(arrb)
        self.sdtb = np.std(arrb)

    def __call__(self, arr):
        """Return `arr` shifted and scaled to the reference mean/std."""
        # Use locals rather than stashing per-call values on the
        # instance: the original mutated self on every call (self.arr,
        # self.mean, self.sdt) for no benefit.
        centered = arr - np.mean(arr)
        scale = self.sdtb / np.std(arr)
        return centered * scale + self.meanb
# Check: should print [0.3187798 2.31425165 1.11696854].
import numpy as np
array_a = np.array([-1, 1.5, 0])
array_b = np.array([1.4, 0.8, 0.3, 2.5])
normalize = Normalizer(array_b)
normalized_array = normalize(array_a)
print(normalized_array)
```
# Parte 3:
##Exercícios Pytorch: Grafo Computacional e Gradientes
Nesta parte pode-se usar quaisquer bibliotecas.
Um dos principais fundamentos para que o PyTorch seja adequado para deep learning é a sua habilidade de calcular o gradiente automaticamente a partir da expressões definidas. Essa facilidade é implementada através do cálculo automático do gradiente e construção dinâmica do grafo computacional.
## Grafo computacional
Seja um exemplo simples de uma função de perda J dada pela Soma dos Erros ao Quadrado (SEQ - Sum of Squared Errors):
$$ J = \sum_i (x_i w - y_i)^2 $$
que pode ser reescrita como:
$$ \hat{y_i} = x_i w $$
$$ e_i = \hat{y_i} - y_i $$
$$ e2_i = e_i^2 $$
$$ J = \sum_i e2_i $$
As redes neurais são treinadas através da minimização de uma função de perda usando o método do gradiente descendente. Para ajustar o parâmetro $w$ precisamos calcular o gradiente $ \frac{ \partial J}{\partial w} $. Usando a
regra da cadeia podemos escrever:
$$ \frac{ \partial J}{\partial w} = \frac{ \partial J}{\partial e2_i} \frac{ \partial e2_i}{\partial e_i} \frac{ \partial e_i}{\partial \hat{y_i} } \frac{ \partial \hat{y_i}}{\partial w}$$
```
# The four steps of the SEQ loss J = sum((x*w - y)^2); this cell is
# illustrative — x, w and y are defined in later cells.
y_pred = x * w
e = y_pred - y
e2 = e**2
J = e2.sum()
```
As quatro expressões acima, para o cálculo do J podem ser representadas pelo grafo computacional visualizado a seguir: os círculos são as variáveis (tensores), os quadrados são as operações, os números em preto são os cálculos durante a execução das quatro expressões para calcular o J (forward, predict). O cálculo do gradiente, mostrado em vermelho, é calculado pela regra da cadeia, de trás para frente (backward).
<img src="https://raw.githubusercontent.com/robertoalotufo/files/master/figures/GrafoComputacional.png" width="600pt"/>
Para entender melhor o funcionamento do grafo computacional com os tensores, recomenda-se leitura em:
https://pytorch.org/docs/stable/notes/autograd.html
```
import torch
# Show the installed PyTorch version (notebook sanity check).
torch.__version__
```
**Tensor com atributo .requires_grad=True**
Quando um tensor possui o atributo `requires_grad` como verdadeiro, qualquer expressão que utilizar esse tensor irá construir um grafo computacional para permitir posteriormente, após calcular a função a ser derivada, poder usar a regra da cadeia e calcular o gradiente da função em termos dos tensores que possuem o atributo `requires_grad`.
```
# Data tensors: y = 2*x for x = 0..3; w is the trainable scalar
# (requires_grad=True makes torch build the computational graph).
y = torch.arange(0, 8, 2).float()
y
x = torch.arange(0, 4).float()
x
w = torch.ones(1, requires_grad=True)
w
```
## Cálculo automático do gradiente da função perda J
Seja a expressão: $$ J = \sum_i ((x_i w) - y_i)^2 $$
Queremos calcular a derivada de $J$ em relação a $w$.
## Forward pass
Durante a execução da expressão, o grafo computacional é criado. Compare os valores de cada parcela calculada com os valores em preto da figura ilustrativa do grafo computacional.
```
# predict (forward)
y_pred = x * w; print('y_pred =', y_pred)
# loss J: sum of squared errors
e = y_pred - y; print('e =',e)
e2 = e.pow(2) ; print('e2 =', e2)
J = e2.sum() ; print('J =', J)
```
## Backward pass
O `backward()` varre o grafo computacional a partir da variável a ele associada (raiz) e calcula o gradiente para todos os tensores que possuem o atributo `requires_grad` como verdadeiro.
Observe que os tensores que tiverem o atributo `requires_grad` serão sempre folhas no grafo computacional.
O `backward()` destroi o grafo após sua execução. Esse comportamento é padrão no PyTorch.
A título ilustrativo, se quisermos depurar os gradientes dos nós que não são folhas no grafo computacional, precisamos primeiro invocar `retain_grad()` em cada um desses nós, como a seguir. Entretanto nos exemplos reais não há necessidade de verificar o gradiente desses nós.
```
# Keep gradients of non-leaf nodes so they can be inspected after backward().
e2.retain_grad()
e.retain_grad()
y_pred.retain_grad()
```
E agora calculamos os gradientes com o `backward()`.
w.grad é o gradiente de J em relação a w.
```
# Zero any previous gradient, then backpropagate J; w.grad holds dJ/dw.
if w.grad: w.grad.zero_()
J.backward()
print(w.grad)
```
Mostramos agora os gradientes que estão grafados em vermelho no grafo computacional:
```
# Gradients of the intermediate (non-leaf) nodes, kept by retain_grad().
print(e2.grad)
print(e.grad)
print(y_pred.grad)
```
##Exercício 3.1
Calcule o mesmo gradiente ilustrado no exemplo anterior usando a regra das diferenças finitas, de acordo com a equação a seguir, utilizando um valor de $\Delta w$ bem pequeno.
$$ \frac{\partial J}{\partial w} = \frac{J(w + \Delta w) - J(w - \Delta w)}{2 \Delta w} $$
```
def J_func(w, x, y):
    """Sum-of-squared-errors loss: J = sum_i (x_i * w - y_i)**2."""
    residual = x * w - y
    return (residual ** 2).sum()
def Gradj(w, x, y):
    """Analytic gradient of J w.r.t. w: sum_i 2 * x_i * (x_i * w - y_i)."""
    return (2 * x * (x * w - y)).sum()
# Compute the gradient (exercise asks for finite differences) and compare
# with the value obtained earlier; at w=1, dJ/dw = sum(2*x*(x - y)) = -28.
x = torch.arange(0, 4).float()
y = torch.arange(0, 8, 2).float()
w = torch.ones(1)
grad = Gradj(w,x,y)#?
print('grad=', grad)
```
##Exercício 3.2
Minimizando $J$ pelo gradiente descendente
$$ w_{k+1} = w_k - \lambda \frac {\partial J}{\partial w} $$
Supondo que valor inicial ($k=0$) $w_0 = 1$, use learning rate $\lambda = 0.01$ para calcular o valor do novo $w_{20}$, ou seja, fazendo 20 atualizações de gradientes. Deve-se usar a função `J_func` criada no exercício anterior.
Confira se o valor do primeiro gradiente está de acordo com os valores já calculado acima
```
# Gradient descent: 20 updates w <- w - lr * dJ/dw with the analytic gradient.
learning_rate =lr= 0.01
iteracoes = 20
x = torch.arange(0, 4).float()
y = torch.arange(0, 8, 2).float()
w = torch.ones(1)
lossJ=[]
for i in range(iteracoes):
    print('i =', i)
    J = J_func(w, x, y)
    print('J=', J)
    lossJ.append(J.detach().numpy())# convert to a numpy array for plotting
    grad = Gradj(w,x,y)#?
    print('grad =',grad)
    w =w-lr*grad# ?
    print('i:',i,' w =', w)
iteracoes
# Plot the loss J against the iteration i
def plotlossji(it, lj):
    """
    Plot the loss J against the iteration number.

    it = number of iterations (x axis runs 1..it)
    lj = sequence of loss values, one per iteration
    """
    x = 1 + np.arange(it)
    # Bug fix: the original plotted the *global* `lossJ` instead of the
    # `lj` argument, so the parameter was silently ignored and the
    # function only worked by accident.
    y = np.array(lj)
    plt.figure(figsize=(7,5))
    plt.plot(x, y, 's-', color='blue', linewidth =2, markersize=5, label='Loss J')
    plt.xticks(np.arange(0, 22, step=1))
    plt.grid()
    plt.legend()
    plt.ylabel('loss (J)')
    plt.xlabel('iteração(i)')
    plt.title('Gráfico da loss J pela iteração i')
    plt.show()
plotlossji(iteracoes,lossJ)
```
##Exercício 3.3
Repita o exercício 2 mas usando agora o calculando o gradiente usando o método backward() do pytorch. Confira se o primeiro valor do gradiente está de acordo com os valores anteriores. Execute essa próxima célula duas vezes. Os valores devem ser iguais.
```
def GradjT(w, x, y):
    # NOTE(review): despite its name this returns the scalar c, not a
    # gradient — after c.backward() the gradient would be in w.grad.
    # Also, unlike J_func, the error term is NOT squared here
    # (b = x*w - y), so c is not the SEQ loss. This function appears
    # unused below; confirm the intended contract before relying on it.
    a=x*w
    b=a-y
    c=torch.sum(b)
    c.backward()
    return c#.grad#_fn
import torch
# Scratch demo on random tensors (these x, y, z are overwritten below).
N,D=3,4
x= torch.rand((N,D),requires_grad=True)
y= torch.rand((N,D),requires_grad=True)
z= torch.rand((N,D),requires_grad=True)
a=x*w
b=(a-y)**2
c=torch.sum(b)
c.backward()
# Gradient descent again, now with the gradient from torch's backward().
learning_rate = 0.01
lr=learning_rate
iteracoes = 20
x = torch.arange(0, 4).float()
y = torch.arange(0, 8, 2).float()
w = torch.ones(1, requires_grad=True)
lossJ=[]
"""
we use the auxiliars "a", "b" and c" to make an computational graph
to calculate the loss J in "c"
"""
for i in range(iteracoes):
    print('i =', i)
    J = J_func(w, x, y)
    print('J=', J)
    lossJ.append(J.detach().numpy())
    w.retain_grad()# line of code from Patrick de Carvalho Tavares Rezende Ferreira (thanks)
    a=x*w
    b=(a-y)**2
    c=torch.sum(b)
    c.backward()# automatic computation of the gradient of the loss J
    grad = w.grad#_fn#?
    print('grad =',grad)
    w = w-lr*grad
    print('w =', w)
# Plot the loss against the iteration
plotlossji(iteracoes,lossJ)
```
##Exercício 3.4
Quais são as restrições na escolha dos valores de $\Delta w$ no cálculo do gradiente por diferenças finitas?
* A small learning rate could achieve good performance, but a small learning rate could take too much time or fall into a local minimum.
* Although a big learning rate permits scanning a much larger part of the parameter space and makes learning faster, it may also make the model fall into a suboptimal solution.
* However, recently it was shown that moderately large learning rates could achieve higher test accuracies by Samuel L. Smith https://openreview.net/pdf?id=rq_Qr0c1Hyo
Resposta:
##Exercício 3.5
Até agora trabalhamos com $w$ contendo apenas um parâmetro. Suponha agora que $w$ seja uma matriz com $N$ parâmetros e que o custo para executar $(x_i w - y_i)^2$ seja $O(N)$.
> a) Qual é o custo computacional para fazer uma única atualização (um passo de gradiente) dos parâmetros de $w$ usando o método das diferencas finitas?
>
> b) Qual é o custo computacional para fazer uma única atualização (um passo de gradiente) dos parâmetros de $w$ usando o método do backpropagation?
Resposta (justifique):
a) from $ \frac{\partial J}{\partial w} = \frac{J(w + \Delta w) - J(w - \Delta w)}{2 \Delta w} $ , we got $ \frac{\partial J}{\partial w}= 2\sum_i (x_i(x_i w - y_i)) =2*x(x*w-y).sum() $
the cost of $x_i(x_i w - y_i)$ is $O(N)$ ,since $w$ has $N$ parameters , the cost of execution is **$N.O(N) =O(N^2)$**
b) after made the partial derivaties we get 2*x(x*w-y).sum() so the cost is **$O(N^2)$**
##Exercício 3.6
Qual o custo (entropia cruzada) esperado para um exemplo (uma amostra) no começo do treinamento de um classificador inicializado aleatoriamente?
A equação da entropia cruzada é:
$$L = - \sum_{j=0}^{K-1} y_j \log p_j, $$
Onde:
- K é o número de classes;
- $y_j=1$ se $j$ é a classe do exemplo (ground-truth), 0 caso contrário. Ou seja, $y$ é um vetor one-hot;
- $p_j$ é a probabilidade predita pelo modelo para a classe $j$.
A resposta tem que ser em função de uma ou mais das seguintes variáveis:
- K = número de classes
- B = batch size
- D = dimensão de qualquer vetor do modelo
- LR = learning rate
Resposta: we have to compute the cross-entropy term $ y_j \log p_j$ for each class and for batch size so the cost for a multiple class classifications is $O(B*K)$
Fim do notebook.
```
```
| github_jupyter |
```
from attention import AttentionLayer
import numpy as np
import pandas as pd
import re
from bs4 import BeautifulSoup
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from nltk.corpus import stopwords
from tensorflow.keras.layers import Input, LSTM, Embedding, Dense, Concatenate, TimeDistributed
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import EarlyStopping
import warnings
pd.set_option("display.max_colwidth", 200)
warnings.filterwarnings("ignore")
data=pd.read_csv("Reviews.csv",nrows=100000)
data
data.drop_duplicates(subset=['Text'],inplace=True)#dropping duplicates
data.dropna(axis=0,inplace=True)#dropping na
data.info()
contraction_mapping = {"ain't": "is not", "aren't": "are not","can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not",
"didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not",
"he'd": "he would","he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is",
"I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have","I'm": "I am", "I've": "I have", "i'd": "i would",
"i'd've": "i would have", "i'll": "i will", "i'll've": "i will have","i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would",
"it'd've": "it would have", "it'll": "it will", "it'll've": "it will have","it's": "it is", "let's": "let us", "ma'am": "madam",
"mayn't": "may not", "might've": "might have","mightn't": "might not","mightn't've": "might not have", "must've": "must have",
"mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have","o'clock": "of the clock",
"oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have",
"she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is",
"should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have","so's": "so as",
"this's": "this is","that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would",
"there'd've": "there would have", "there's": "there is", "here's": "here is","they'd": "they would", "they'd've": "they would have",
"they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have",
"wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are",
"we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are",
"what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is",
"where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have",
"why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have",
"would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all",
"y'all'd": "you all would","y'all'd've": "you all would have","y'all're": "you all are","y'all've": "you all have",
"you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have",
"you're": "you are", "you've": "you have"}
contraction_mapping
# English stopwords from NLTK (requires the stopwords corpus to be downloaded).
stop_words = set(stopwords.words('english'))
def text_cleaner(text, num):
    """Normalize a review string for modeling.

    Lowercases, strips HTML, parenthesized spans, quotes and non-letters,
    expands contractions, collapses runs of 'm', and drops one-letter
    words. When num == 0 (review bodies) stopwords are removed as well;
    when num != 0 (summaries) they are kept.
    """
    cleaned = text.lower()
    cleaned = BeautifulSoup(cleaned, "lxml").text
    cleaned = re.sub(r'\([^)]*\)', '', cleaned)          # drop parenthesized text
    cleaned = re.sub('"', '', cleaned)                   # drop double quotes
    cleaned = ' '.join([contraction_mapping[t] if t in contraction_mapping else t for t in cleaned.split(" ")])
    cleaned = re.sub(r"'s\b", "", cleaned)               # drop possessive 's
    cleaned = re.sub("[^a-zA-Z]", " ", cleaned)          # letters only
    cleaned = re.sub('[m]{2,}', 'mm', cleaned)           # collapse mmm... -> mm
    if num == 0:
        words = [w for w in cleaned.split() if w not in stop_words]
    else:
        words = cleaned.split()
    # keep only words longer than one character
    kept = [w for w in words if len(w) > 1]
    return (" ".join(kept)).strip()
# Clean the review texts (num=0: with stopword removal) and the summaries
# (num=1: stopwords kept), then drop rows that became empty.
#call the function
cleaned_text = []
for t in data['Text']:
    cleaned_text.append(text_cleaner(t,0))
cleaned_text[:5]
#call the function
cleaned_summary = []
for t in data['Summary']:
    cleaned_summary.append(text_cleaner(t,1))
cleaned_summary[:10]
data['cleaned_text']=cleaned_text
data['cleaned_summary']=cleaned_summary
data.replace('', np.nan, inplace=True)
data.dropna(axis=0,inplace=True)
# Histogram of word counts for texts and summaries, to choose length cutoffs.
import matplotlib.pyplot as plt
text_word_count = []
summary_word_count = []
# populate the lists with sentence lengths
for i in data['cleaned_text']:
    text_word_count.append(len(i.split()))
for i in data['cleaned_summary']:
    summary_word_count.append(len(i.split()))
length_df = pd.DataFrame({'text':text_word_count, 'summary':summary_word_count})
length_df.hist(bins = 30)
plt.show()
# Fraction of summaries with at most 8 words — justifies max_summary_len=8.
cnt=0
for i in data['cleaned_summary']:
    if(len(i.split())<=8):
        cnt=cnt+1
print(cnt/len(data['cleaned_summary']))
max_text_len=30
max_summary_len=8
cleaned_text =np.array(data['cleaned_text'])
cleaned_summary=np.array(data['cleaned_summary'])
short_text=[]
short_summary=[]
# Keep only text/summary pairs within the chosen length limits.
for i in range(len(cleaned_text)):
    if(len(cleaned_summary[i].split())<=max_summary_len and len(cleaned_text[i].split())<=max_text_len):
        short_text.append(cleaned_text[i])
        short_summary.append(cleaned_summary[i])
df=pd.DataFrame({'text':short_text,'summary':short_summary})
# Wrap every summary in start (sostok) / end (eostok) tokens for the decoder.
df['summary'] = df['summary'].apply(lambda x : 'sostok '+ x + ' eostok')
# Hold out 10% of the pairs for validation (fixed seed for reproducibility).
from sklearn.model_selection import train_test_split
x_tr,x_val,y_tr,y_val=train_test_split(np.array(df['text']),np.array(df['summary']),test_size=0.1,random_state=0,shuffle=True)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
#prepare a tokenizer for reviews on training data
x_tokenizer = Tokenizer()
x_tokenizer.fit_on_texts(list(x_tr))
# Count vocabulary words that occur fewer than `thresh` times ("rare")
# and the share of total occurrences they cover.
thresh=4
cnt=0
tot_cnt=0
freq=0
tot_freq=0
for key,value in x_tokenizer.word_counts.items():
    tot_cnt=tot_cnt+1
    tot_freq=tot_freq+value
    if(value<thresh):
        cnt=cnt+1
        freq=freq+value
print("% of rare words in vocabulary:",(cnt/tot_cnt)*100)
print("Total Coverage of rare words:",(freq/tot_freq)*100)
#prepare a tokenizer for reviews on training data
# Refit keeping only the (tot_cnt - cnt) non-rare words.
x_tokenizer = Tokenizer(num_words=tot_cnt-cnt)
x_tokenizer.fit_on_texts(list(x_tr))
#convert text sequences into integer sequences
x_tr_seq = x_tokenizer.texts_to_sequences(x_tr)
x_val_seq = x_tokenizer.texts_to_sequences(x_val)
#padding zero upto maximum length
x_tr = pad_sequences(x_tr_seq, maxlen=max_text_len, padding='post')
x_val = pad_sequences(x_val_seq, maxlen=max_text_len, padding='post')
#size of vocabulary ( +1 for padding token)
x_voc = x_tokenizer.num_words + 1
x_voc
#prepare a tokenizer for reviews on training data
y_tokenizer = Tokenizer()
y_tokenizer.fit_on_texts(list(y_tr))
# Same rare-word analysis for the summary (target) vocabulary.
thresh=6
cnt=0
tot_cnt=0
freq=0
tot_freq=0
for key,value in y_tokenizer.word_counts.items():
    tot_cnt=tot_cnt+1
    tot_freq=tot_freq+value
    if(value<thresh):
        cnt=cnt+1
        freq=freq+value
print("% of rare words in vocabulary:",(cnt/tot_cnt)*100)
print("Total Coverage of rare words:",(freq/tot_freq)*100)
#prepare a tokenizer for reviews on training data
y_tokenizer = Tokenizer(num_words=tot_cnt-cnt)
y_tokenizer.fit_on_texts(list(y_tr))
#convert text sequences into integer sequences
y_tr_seq = y_tokenizer.texts_to_sequences(y_tr)
y_val_seq = y_tokenizer.texts_to_sequences(y_val)
#padding zero upto maximum length
y_tr = pad_sequences(y_tr_seq, maxlen=max_summary_len, padding='post')
y_val = pad_sequences(y_val_seq, maxlen=max_summary_len, padding='post')
#size of vocabulary
y_voc = y_tokenizer.num_words +1
# Sanity check: the 'sostok' count should equal the number of training summaries.
y_tokenizer.word_counts['sostok'],len(y_tr)
def _indices_with_only_start_end(seqs):
    """Indices of padded sequences with exactly 2 non-zero tokens,
    i.e. summaries that contain only the sostok/eostok markers."""
    return [i for i, seq in enumerate(seqs) if np.count_nonzero(seq) == 2]

# Drop effectively-empty summaries (and their texts) from both splits.
# The original wrote the same counting loop twice; the helper removes
# the duplication without changing which rows are deleted.
ind = _indices_with_only_start_end(y_tr)
y_tr = np.delete(y_tr, ind, axis=0)
x_tr = np.delete(x_tr, ind, axis=0)

ind = _indices_with_only_start_end(y_val)
y_val = np.delete(y_val, ind, axis=0)
x_val = np.delete(x_val, ind, axis=0)
# Seq2seq summarizer: 3 stacked LSTM encoder layers + LSTM decoder with
# attention, trained with teacher forcing.
from keras import backend as K
K.clear_session()
latent_dim = 300
embedding_dim=100
# Encoder
encoder_inputs = Input(shape=(max_text_len,))
#embedding layer
enc_emb = Embedding(x_voc, embedding_dim,trainable=True)(encoder_inputs)
#encoder lstm 1
encoder_lstm1 = LSTM(latent_dim,return_sequences=True,return_state=True,dropout=0.4,recurrent_dropout=0.4)
encoder_output1, state_h1, state_c1 = encoder_lstm1(enc_emb)
#encoder lstm 2
encoder_lstm2 = LSTM(latent_dim,return_sequences=True,return_state=True,dropout=0.4,recurrent_dropout=0.4)
encoder_output2, state_h2, state_c2 = encoder_lstm2(encoder_output1)
#encoder lstm 3
encoder_lstm3=LSTM(latent_dim, return_state=True, return_sequences=True,dropout=0.4,recurrent_dropout=0.4)
encoder_outputs, state_h, state_c= encoder_lstm3(encoder_output2)
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None,))
#embedding layer
dec_emb_layer = Embedding(y_voc, embedding_dim,trainable=True)
dec_emb = dec_emb_layer(decoder_inputs)
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True,dropout=0.4,recurrent_dropout=0.2)
decoder_outputs,decoder_fwd_state, decoder_back_state = decoder_lstm(dec_emb,initial_state=[state_h, state_c])
# Attention layer
attn_layer = AttentionLayer(name='attention_layer')
attn_out, attn_states = attn_layer([encoder_outputs, decoder_outputs])
# Concat attention input and decoder LSTM output
decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([decoder_outputs, attn_out])
#dense layer
decoder_dense = TimeDistributed(Dense(y_voc, activation='softmax'))
decoder_outputs = decoder_dense(decoder_concat_input)
# Define the model
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.summary()
model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy')
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1,patience=2)
# Teacher forcing: decoder input is the summary without its last token,
# target is the summary shifted one step left (without 'sostok').
history=model.fit([x_tr,y_tr[:,:-1]], y_tr.reshape(y_tr.shape[0],y_tr.shape[1], 1)[:,1:] ,epochs=50,callbacks=[es],batch_size=128, validation_data=([x_val,y_val[:,:-1]], y_val.reshape(y_val.shape[0],y_val.shape[1], 1)[:,1:]))
from matplotlib import pyplot
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.show()
reverse_target_word_index=y_tokenizer.index_word
reverse_source_word_index=x_tokenizer.index_word
target_word_index=y_tokenizer.word_index
# Encode the input sequence to get the feature vector
encoder_model = Model(inputs=encoder_inputs,outputs=[encoder_outputs, state_h, state_c])
# Decoder setup
# Below tensors will hold the states of the previous time step
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_hidden_state_input = Input(shape=(max_text_len,latent_dim))
# Get the embeddings of the decoder sequence
dec_emb2= dec_emb_layer(decoder_inputs)
# To predict the next word in the sequence, set the initial states to the states from the previous time step
decoder_outputs2, state_h2, state_c2 = decoder_lstm(dec_emb2, initial_state=[decoder_state_input_h, decoder_state_input_c])
#attention inference
attn_out_inf, attn_states_inf = attn_layer([decoder_hidden_state_input, decoder_outputs2])
decoder_inf_concat = Concatenate(axis=-1, name='concat')([decoder_outputs2, attn_out_inf])
# A dense softmax layer to generate prob dist. over the target vocabulary
decoder_outputs2 = decoder_dense(decoder_inf_concat)
# Final decoder model
decoder_model = Model(
[decoder_inputs] + [decoder_hidden_state_input,decoder_state_input_h, decoder_state_input_c],
[decoder_outputs2] + [state_h2, state_c2])
def decode_sequence(input_seq):
    """Greedy-decode a summary for one encoded input sequence.

    Feeds the decoder one token at a time, starting from 'sostok', until it
    emits 'eostok' or the summary reaches max_summary_len - 1 words.
    """
    # Encode the input once; h/c carry the decoder state between steps.
    enc_out, h, c = encoder_model.predict(input_seq)
    step_input = np.zeros((1, 1))
    step_input[0, 0] = target_word_index['sostok']
    words = []
    while True:
        probs, h, c = decoder_model.predict([step_input] + [enc_out, h, c])
        # Greedy choice: most probable token at the last timestep
        token_id = np.argmax(probs[0, -1, :])
        token = reverse_target_word_index[token_id]
        if token != 'eostok':
            words.append(token)
        # Stop on the end token or when the summary is long enough
        if token == 'eostok' or len(words) >= (max_summary_len - 1):
            break
        # Next decoder input is the token we just produced
        step_input = np.zeros((1, 1))
        step_input[0, 0] = token_id
    # Original implementation prefixed every word with a space; keep that.
    return ''.join(' ' + w for w in words)
def seq2summary(input_seq):
    """Convert a target-side id sequence to text, skipping pad/start/stop ids.

    Each kept word is followed by a single space (so the result has a
    trailing space), matching the notebook's original formatting.
    """
    skip = (0, target_word_index['sostok'], target_word_index['eostok'])
    return ''.join(reverse_target_word_index[i] + ' '
                   for i in input_seq if i not in skip)
def seq2text(input_seq):
    """Convert a source-side id sequence back to text, skipping padding (id 0).

    Each word is followed by a single space, matching the original output.
    """
    return ''.join(reverse_source_word_index[i] + ' '
                   for i in input_seq if i != 0)
# Spot-check the model: print the source review, the reference summary, and
# the model's generated summary for the first 100 training examples.
for i in range(0,100):
    print("Review:",seq2text(x_tr[i]))
    print("Original summary:",seq2summary(y_tr[i]))
    print("Predicted summary:",decode_sequence(x_tr[i].reshape(1,max_text_len)))
    print("\n")
```
| github_jupyter |
# Double-checking FiveThirtyEight's 2016 Primary Predictions
Here I look at the [predictions that FiveThirtyEight made](https://projects.fivethirtyeight.com/election-2016/primary-forecast/) about the 2016 Presidential Primaries.
## Loading the data
Load the data about their predictions and the actual outcomes into `pandas` dataframes:
```
# Load the dataframes from disk.
# index_col=[0,1] builds a two-level index — presumably (state, candidate);
# verify against the CSV headers.
import pandas as pd
dem = pd.read_csv("./2016_dem_primary_dataframe.csv", index_col=[0,1])
gop = pd.read_csv("./2016_gop_primary_dataframe.csv", index_col=[0,1])
# We only care about races where there was a prediction made
dem = dem.dropna()
gop = gop.dropna()
```
## Looking at the data
Let's look at the results for Iowa for the Democrats, just to see what is in the table.
```
# `.ix` was removed from pandas; `.loc` performs the same label-based lookup.
dem.loc[["Iowa"]]
```
The 80% confidence intervals are given by the "`80% Lower Bound`" and "`80% Upper Bound`" columns. The actual result of the election is given in the "`Result`" column.
Here is the data for Iowa for the Republicans:
```
# `.ix` was removed from pandas; `.loc` performs the same label-based lookup.
gop.loc[["Iowa"]]
```
## Checking the intervals
Now I'll add a set of columns that tells us if the prediction was good or not, that is, if the actual result was within the 80% confidence interval or not:
```
import numpy as np
def add_good_predicitons(df, name="Prediction Good"):
    """Flag each race's prediction as good / too low / too high, in place.

    Adds three boolean columns to *df*:
      - *name*: the result fell inside the 80% interval
        (fix: `name` was previously ignored and "Prediction Good" hard-coded)
      - "Prediction Low": the prediction was too low (result above upper bound)
      - "Prediction High": the prediction was too high (result below lower bound)

    The boolean comparisons are assigned directly — the original
    `np.where(cond, True, False)` was a redundant identity.
    """
    cond = (df['80% Lower Bound'] <= df['Result']) & (df["Result"] <= df["80% Upper Bound"])
    df[name] = cond
    df["Prediction Low"] = df["80% Upper Bound"] < df["Result"]
    df["Prediction High"] = df["Result"] < df["80% Lower Bound"]
# Annotate both parties' tables with the good/low/high flags, then preview Iowa.
add_good_predicitons(dem)
add_good_predicitons(gop)
# `.ix` was removed from pandas; `.loc` performs the same label-based lookup.
dem.loc[["Iowa"]]
# Compute how the results match the predictions
def get_low_right_high(df):
    """Return (low, right, high): the fraction of predictions that were too
    low, correct, and too high, based on the boolean flag columns."""
    def fraction(column):
        flags = df[column]
        return float(flags.sum()) / flags.count()

    return (fraction("Prediction Low"),
            fraction("Prediction Good"),
            fraction("Prediction High"))
# Fraction of races under-, correctly-, and over-predicted, per party.
dem_low, dem_right, dem_high = get_low_right_high(dem)
gop_low, gop_right, gop_high = get_low_right_high(gop)
# NOTE(review): Python 2 print statements — this notebook predates Python 3.
print "Party Under Right Over Total"
print "------------|-------------------------------"
print "Democrats | {:.2%}, {:.2%}, {:.2%}, {:.2%}".format(dem_low, dem_right, dem_high, sum((dem_low, dem_right, dem_high)))
print "Republicans | {:.2%}, {:.2%}, {:.2%}, {:.2%}".format(gop_low, gop_right, gop_high, sum((gop_low, gop_right, gop_high)))
```
## Plotting the Results
We can make a plot of the actual voting results by scaling the vote share so that the low edge of the confidence interval is +1, the high edge is -1, and the midpoint is 0. Then if a candidate's vote share is within the predicted range their result will be between -1 and 1. If the prediction was 45% to 55% and the candidate actually got 60%, that would show up at -2 on the plot. The minus sign indicates that the prediction was too low.
```
def add_scaled_result(df):
    """Add a "Scaled Result" column: result in half-interval units, in place.

    0 means the result hit the interval midpoint; -1/+1 sit on the upper/lower
    bound. The sign is flipped so a positive value marks an over-prediction
    (prediction too high) and a negative value an under-prediction.
    """
    lower = df["80% Lower Bound"]
    upper = df["80% Upper Bound"]
    half_width = (upper - lower) / 2.
    midpoint = lower + half_width
    df["Scaled Result"] = (midpoint - df["Result"]) / half_width
add_scaled_result(dem)
add_scaled_result(gop)
# Render plots inline as SVG
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
import matplotlib.pyplot as plt
# Figure dimensions in inches
width = 10
height = 6
plt.figure(figsize=(width, height))
# Shared bins and axis limits so the two party histograms are comparable
bins = [float(i)/10. for i in range(-44, 46, 2)]
ylim = [0, 17]
xlim = [min(bins), -min(bins)]
ax1 = plt.subplot2grid((1,2),(0,0))
ax2 = plt.subplot2grid((1,2),(0,1))
dem["Scaled Result"].plot(kind="hist", color='b', ax=ax1, ylim=ylim, xlim=xlim, bins=bins)
gop["Scaled Result"].plot(kind="hist", color='r', ax=ax2, ylim=ylim, xlim=xlim, bins=bins)
# Mark the 80% confidence interval edges at +/-1 (scaled units)
color="black"
linestyle="dotted"
ax1.axvline(-1, color=color, linestyle=linestyle)
ax1.axvline(+1, color=color, linestyle=linestyle)
ax2.axvline(-1, color=color, linestyle=linestyle)
ax2.axvline(+1, color=color, linestyle=linestyle)
ax1.set_title("Democrats")
ax2.set_title("Republicans")
# Hide the duplicate y axis and butt the two panels together
ax2.yaxis.set_visible(False)
plt.subplots_adjust(wspace=0)
plt.savefig("/tmp/538_scaled_results.png", bbox_inches='tight')
plt.savefig("/tmp/538_scaled_results.svg", bbox_inches='tight')
plt.show()
```
## A (Rough) Estimate of Uncertainties
When I read FiveThirtyEight's plots, I only ever pick a whole number (I certainly am not accurate enough to get better precision than that). I estimate that if I say a number is "34%", then it is just as likely to be 33 or 35. To estimate what effect this has, I randomly adjust the prediction bounds (up 1, down 1, or leaving it alone with equal probability) and see how the predictions fare. The number I report below is the mean of these trials, and the uncertainties represent two standard deviations.
```
import random
import numpy as np
def ugly_simulation_hack(df, sigma=1, iterations=10000, std_to_return=2):
sim_good = []
sim_low = []
sim_high = []
# Run many simulations
for _ in xrange(iterations):
pred_good = []
pred_low = []
pred_high = []
# Check every prediction and perturb them
for ((state, candidate), low, high, result, _, _, _, _) in df.itertuples():
# Perturb the bounds I read from 538's plots assuming a Gaussian
# distribution around the value
#new_low = random.gauss(low, sigma)
#new_high = random.gauss(high, sigma)
new_low = random.randint(low-sigma, low+sigma)
new_high = random.randint(high-sigma, high+sigma)
# Check if the perturbed prediction is good or not
pred_good.append(new_low <= result <= new_high)
pred_low.append(new_high < result)
pred_high.append(result < new_low)
# Calculate the number correct accounting for the perturbations
sim_good.append(sum(pred_good)/float(len(pred_good)))
sim_low.append(sum(pred_low)/float(len(pred_low)))
sim_high.append(sum(pred_high)/float(len(pred_high)))
# Calculate outcome of the simulation
good_mean = np.mean(sim_good)
good_std = np.std(sim_good)
low_mean = np.mean(sim_low)
low_std = np.std(sim_low)
high_mean = np.mean(sim_high)
high_std = np.std(sim_high)
return (
good_mean,
good_std * std_to_return,
low_mean,
low_std * std_to_return,
high_mean,
high_std * std_to_return,
)
# Run the perturbation study for both parties
gop_good, gop_good_std, gop_low, gop_low_std, gop_high, gop_high_std = ugly_simulation_hack(gop)
dem_good, dem_good_std, dem_low, dem_low_std, dem_high, dem_high_std = ugly_simulation_hack(dem)
# NOTE(review): Python 2 print statements — this notebook predates Python 3.
print "Party Under Right Over"
print "------------|---------------------------------------------"
print "Democrats | {:.1%} +- {:.1%} {:.1%} +- {:.1%} {:.1%} +- {:.1%}".format(dem_low, dem_low_std, dem_good, dem_good_std, dem_high, dem_high_std)
print "Republicans | {:.1%} +- {:.1%} {:.1%} +- {:.1%} {:.1%} +- {:.1%}".format(gop_low, gop_low_std, gop_good, gop_good_std, gop_high, gop_high_std)
```
| github_jupyter |
This example shows how to use a `SpectralMixtureKernel` module on an `ExactGP` model. This module is designed for
- When you want to use exact inference (e.g. for regression)
- When you want to use a more sophisticated kernel than RBF
The function to be modeled is $\sin(2\pi x)$.
The Spectral Mixture (SM) kernel was invented and discussed in this paper:
https://arxiv.org/pdf/1302.4245.pdf
```
import math
import torch
import gpytorch
from matplotlib import pyplot as plt
%matplotlib inline
%load_ext autoreload
%autoreload 2
# NOTE(review): this notebook targets a very old gpytorch/PyTorch API
# (Variable, gpytorch.random_variables) and will not run on modern versions.
from torch.autograd import Variable
# Training data points are located every 0.075 along 0 to 0.75 inclusive
train_x = Variable(torch.linspace(0, 0.75, 11))
# True function is sin(2*pi*x); Gaussian noise N(0, 0.04) is added
train_y = Variable(torch.sin(train_x.data * (2 * math.pi)) + torch.randn(train_x.size()) * 0.2)
from torch import optim
from gpytorch.kernels import RBFKernel, SpectralMixtureKernel
from gpytorch.means import ConstantMean
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.random_variables import GaussianRandomVariable
# Here we see an example of using the spectral mixture kernel as described here:
# https://arxiv.org/pdf/1302.4245.pdf
class SpectralMixtureGPModel(gpytorch.models.ExactGP):
    """Exact GP with a constant mean and a 3-component spectral mixture kernel."""

    def __init__(self, train_x, train_y, likelihood):
        super(SpectralMixtureGPModel, self).__init__(train_x, train_y, likelihood)
        # Constant mean, learned within [-1, 1]
        self.mean_module = ConstantMean(constant_bounds=(-1, 1))
        # Spectral mixture kernel: the frequency spectrum is a mix of 3 Gaussians
        self.covar_module = SpectralMixtureKernel(n_mixtures=3)

    def forward(self, x):
        """Return the GP prior at inputs x as a Gaussian random variable."""
        return GaussianRandomVariable(self.mean_module(x), self.covar_module(x))
# Initialize the likelihood. We use a Gaussian for regression to get predictive
# mean and variance, and learn the noise parameter.
likelihood = GaussianLikelihood(log_noise_bounds=(-5, 5))
# Use the likelihood to initialize the model
model = SpectralMixtureGPModel(train_x.data, train_y.data, likelihood)
# Put model and likelihood into training mode to find hyperparameters
model.train()
likelihood.train()
# Use the adam optimizer
optimizer = torch.optim.Adam([
    {'params': model.parameters()},  # Includes GaussianLikelihood parameters
], lr=0.1)
# "Loss" for GPs - the (negated) marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
training_iter = 50
for i in range(training_iter):
    # Zero previously backpropped gradients
    optimizer.zero_grad()
    # Make prediction
    output = model(train_x)
    # Calc loss and backprop
    loss = -mll(output, train_y)
    loss.backward()
    # NOTE(review): loss.data[0] is pre-0.4 PyTorch; modern code uses loss.item()
    print('Iter %d/%d - Loss: %.3f' % (i + 1, training_iter, loss.data[0]))
    optimizer.step()
# Set into eval mode for posterior prediction
model.eval()
likelihood.eval()
# Initialize figure
f, observed_ax = plt.subplots(1, 1, figsize=(4, 3))
# Test points every 0.1 between 0 and 5
# (note this is over 6 times the length of the region with training points)
test_x = Variable(torch.linspace(0, 5, 51))
# Make predictions (predictive distribution through the likelihood)
observed_pred = likelihood(model(test_x))
# Define plotting function
def ax_plot(ax, rand_var, title):
    """Draw training points, predictive mean, and confidence band on *ax*.

    Reads the notebook globals train_x/train_y/test_x; *rand_var* is the
    predictive distribution to visualize.
    """
    lo, hi = rand_var.confidence_region()
    xs = test_x.data.numpy()
    # Observed training data as black stars
    ax.plot(train_x.data.numpy(), train_y.data.numpy(), 'k*')
    # Predictive mean as a blue line
    ax.plot(xs, rand_var.mean().data.numpy(), 'b')
    # Shaded confidence region between the lower and upper bounds
    ax.fill_between(xs, lo.data.numpy(), hi.data.numpy(), alpha=0.5)
    # Fixed y range, legend, and panel title
    ax.set_ylim([-3, 3])
    ax.legend(['Observed Data', 'Mean', 'Confidence'])
    ax.set_title(title)
# Plot the predictive distribution (through the likelihood) on the single axis
ax_plot(observed_ax, observed_pred, 'Observed Values (Likelihood)')
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from imblearn.over_sampling import SMOTE
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, precision_score, recall_score, f1_score
from google.colab import drive
# Mount Google Drive to read the preprocessed dataset (Colab-only)
drive.mount('/content/drive', force_remount=True)
path = "/content/drive/Shareddrives/CIS 520/final project/hotel_bookings_processed.csv"
data = pd.read_csv(path)
data
## Initialize X, y and split dataset — `is_canceled` is the label
df = data.copy()
y = df['is_canceled']
X = df.drop(['is_canceled'], axis=1)
# 80/20 split with a fixed seed for reproducibility
X_train, X_test, y_train, y_test = \
    train_test_split(X, y, test_size=0.20, random_state=42)
## Rebalance the training classes by SMOTE oversampling
# NOTE(review): fit_sample was renamed fit_resample in imbalanced-learn >= 0.4
X_train_balanced, y_train_balanced = SMOTE().fit_sample(X_train, y_train)
```
# Ensemble by Soft Voting
We select the most accurate sklearn models (see the "Basic Models Experiment" notebooks) and combine them into a new soft-voting multi-classifier estimator.
```
from sklearn.ensemble import VotingClassifier
from xgboost import XGBClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
def softvote(X_train, X_test, y_train, y_test):
    """Train a soft-voting ensemble (DT + MLP + RF) and return test accuracy.

    Fix: the three base estimators were previously fitted individually before
    `eclf.fit`, but VotingClassifier clones and refits its estimators itself,
    so those extra fits tripled the training cost without affecting the
    ensemble. Only the ensemble is fitted now.

    Returns the accuracy on (X_test, y_test), rounded to 4 decimals.
    """
    clf1 = DecisionTreeClassifier(max_depth=12)
    # clf1 = XGBClassifier()
    clf2 = MLPClassifier(alpha=0.001, hidden_layer_sizes=(100, 100),
                         solver='adam', activation='logistic')
    clf3 = RandomForestClassifier(max_features='sqrt',
                                  min_samples_split=2,
                                  n_estimators=500)
    # Weight the stronger learners (NN, RF) twice as heavily as the tree
    eclf = VotingClassifier(estimators=[('DT', clf1), ('NN', clf2), ('RF', clf3)],
                            voting='soft', weights=[1, 2, 2])
    eclf = eclf.fit(X_train, y_train)
    y_pred = eclf.predict(X_test)
    return round(accuracy_score(y_test, y_pred), 4)
# Train the ensemble on the (unbalanced) split and report test accuracy
accu = softvote(X_train, X_test, y_train, y_test)
print(accu)
```
# DeepFM (Feature Subset)
```
!pip install torchfm
import torch
import torch.nn as nn
import torch.optim as optim
from torchfm.layer import FactorizationMachine, FeaturesEmbedding, FeaturesLinear, MultiLayerPerceptron
import os
# Pin training to GPU #1 (must be set before CUDA initializes)
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
class DeepFM(nn.Module):
    """DeepFM adapted for multi-class output.

    Combines a linear term, a factorization-machine term, and an MLP over the
    concatenated field embeddings, then maps the fused representation to
    `num_classes` logits with a final linear layer.
    """

    def __init__(self, field_dims, embed_dim, mlp_dims, dropout, num_classes):
        super().__init__()
        self.linear = FeaturesLinear(field_dims)
        self.fm = FactorizationMachine(reduce_sum=True)
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        # Flattened size of all field embeddings, fed to the MLP
        self.embed_output_dim = len(field_dims) * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout,
                                        output_layer=False)
        self.last_linear = nn.Linear(mlp_dims[-1], num_classes)

    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        """
        emb = self.embedding(x)
        flat = emb.view(-1, self.embed_output_dim)
        # The (batch, 1) linear and FM terms broadcast against the
        # (batch, mlp_dims[-1]) MLP output before the ReLU.
        fused = torch.relu(self.linear(x) + self.fm(emb) + self.mlp(flat))
        return self.last_linear(fused)
# Per-feature cardinalities of the encoded columns; must sum to the total
# column count of X_train (checked by the assert below).
field_dims = [1, 4, 2, 3, 1, 1, 2, 2, 1, 1, 1, 1, 5, 126, 6, 4, 10, 11, 3, 4, 2, 1, 1, 1]
assert sum(np.array(field_dims)) == X_train.shape[1]
# DeepFM: embedding dim 4, three hidden layers of 32, dropout 0.3, 4 classes
model = DeepFM(field_dims, 4, [32, 32, 32], 0.3, 4)
model = model.to("cuda:0")
epochs = 100
lr = 5e-3
optimizer = optim.Adam(model.parameters(), lr=lr)
# Class-balanced loss weights to counter label imbalance.
# NOTE(review): compute_class_weight (sklearn) and shuffle are not imported in
# this chunk — they must come from an earlier cell, else this raises NameError.
weight = compute_class_weight(class_weight="balanced", classes=np.array([0, 1, 2, 3]), y=np.squeeze(y_train))
loss = nn.CrossEntropyLoss(weight=torch.tensor(weight).to("cuda:0").float())
batch_size =1024
valid_interval = 1  # validate every epoch
train_loss = []
f1 = []
max_f1 = -1
max_pred = None
for epoch in range(epochs):
    # Reshuffle the training set each epoch
    X_train, y_train = shuffle(X_train, y_train)
    model.train()
    tmp_loss = []
    # Mini-batch SGD pass over the training set
    for index in range(0, X_train.shape[0], batch_size):
        X = X_train[index:index + batch_size]
        y = y_train[index:index + batch_size]
        X = torch.tensor(X).to("cuda:0")
        y = torch.squeeze(torch.tensor(y).to("cuda:0"))
        pred = model(X)
        loss_v = loss(pred, y)
        loss_v.backward()
        optimizer.step()
        model.zero_grad()
        tmp_loss.append(loss_v.item())
    print("epoch %d: train loss %.2f" % (epoch + 1, np.mean(tmp_loss)))
    train_loss.append(np.mean(tmp_loss))
    tmp_loss = []
    if (epoch + 1) % valid_interval == 0:
        # Validation pass: collect argmax class predictions over the test set
        preds = []
        model.eval()
        with torch.no_grad():
            for index in range(0, X_test.shape[0], batch_size):
                X = X_test[index:index + batch_size]
                y = y_test[index:index + batch_size]
                X = torch.tensor(X).to("cuda:0")
                y = torch.squeeze(torch.tensor(y).to("cuda:0"))
                pred = model(X)
                loss_v = loss(pred, y)
                tmp_loss.append(loss_v.item())
                pred = torch.argmax(pred, 1)
                preds.extend(pred.cpu().numpy().tolist())
        tmp_f1 = f1_score(y_test, preds,average='weighted')
        f1.append(tmp_f1)
        # Track the predictions from the best epoch by weighted F1
        if max_f1 < tmp_f1:
            max_f1 = tmp_f1
            max_pred = preds
        print("epoch %d: f1 %.2f" % (epoch + 1, tmp_f1))
# Confusion matrix for the best epoch's predictions
matrix = confusion_matrix(y_test, max_pred, labels=[0, 1, 2, 3])
df_cm = pd.DataFrame(matrix, columns=np.unique(y_test), index = np.unique(y_test))
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'
plt.figure(figsize = (10,7))
sns.set(font_scale=1.4)
sns.heatmap(df_cm, cmap="Blues", annot=True,annot_kws={"size": 16})
plt.show()
# Training-loss and validation-F1 curves over epochs
sns.lineplot(x=list(range(1, epochs + 1)), y=train_loss)
plt.xlabel("epochs")
plt.ylabel("training loss")
plt.show()
sns.lineplot(x=list(range(1, epochs + 1)), y=f1)
plt.xlabel("epochs")
plt.ylabel("f1 on validation")
plt.show()
# Final weighted-average metrics for the best-epoch predictions
print ("precision score", precision_score(y_test, max_pred,average='weighted'))
print ("recall score", recall_score(y_test, max_pred,average='weighted'))
print ("f1 score", f1_score(y_test, max_pred,average='weighted'))
```
# DeepFM (Formal)
```
!pip install deepctr[cpu]
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.model_selection import train_test_split
from deepctr.models import DeepFM
from deepctr.feature_column import SparseFeat, DenseFeat,get_feature_names
```
To utilize deepfm on the dataset, we re-process the raw data via the tools provided by deepctr package here
```
path = "/content/drive/Shareddrives/CIS 520/final project/hotel_bookings.csv"
data = pd.read_csv(path)
# Fill missing countries with a catch-all category
data['country'].fillna('others', inplace=True)
# Drop the hotel-type column and reservation_status (leaks the label)
data = data.drop(['hotel', 'reservation_status'], axis=1)
for col in ['agent', 'company', 'children']: data[col].fillna(0, inplace=True)
## Numericalize `reservation_status_date` into day-of-week / day-of-year
col = 'reservation_status_date'
data['day_of_week'] = pd.DatetimeIndex(data[col]).dayofweek
data['day_of_year'] = pd.DatetimeIndex(data[col]).dayofyear
data.drop([col], axis=1, inplace=True)
data
## Check NaN counts and percentages per column
print(pd.DataFrame({'#NaN': data.isnull().sum(),
                    '%NaN': round(data.isnull().mean() * 100, 2)}))
# Split columns by dtype: object -> sparse (categorical), numeric -> dense.
# NOTE(review): np.object / np.integer dtype comparisons are deprecated in
# modern NumPy/pandas; select_dtypes is the current idiom.
sparse_features = data.loc[:, data.dtypes==np.object].columns.tolist()
dense_features_1 = data.loc[:, data.dtypes==np.integer].columns.tolist()
dense_features_2 = data.loc[:, data.dtypes==np.float64].columns.tolist()
dense_features = dense_features_1 + dense_features_2
assert data.shape[1] == len(sparse_features) + len(dense_features)
# Label-encode categoricals; min-max scale dense features to [0, 1]
for feat in sparse_features:
    lbe = LabelEncoder()
    data[feat] = lbe.fit_transform(data[feat])
mms = MinMaxScaler(feature_range=(0,1))
data[dense_features] = mms.fit_transform(data[dense_features])
# deepctr feature columns: hashed sparse embeddings (dim 4) + scalar dense.
# NOTE(review): vocabulary_size=1e6 is a float — deepctr expects an int here;
# confirm against the installed deepctr version.
sparse_feature_columns = [SparseFeat(feat,
                                     vocabulary_size=1e6,
                                     embedding_dim=4,
                                     use_hash=True)
                          for i,feat in enumerate(sparse_features)]
#The dimension can be set according to data
dense_feature_columns = [DenseFeat(feat, 1) for feat in dense_features]
# varlen_feature_columns = [VarLenSparseFeat('genres',
#                                            maxlen=max_len,
#                                            vocabulary_size=len(key2index)+1,
#                                            embedding_dim=4,
#                                            combiner='mean',
#                                            weight_name=None)]
```
Generate feature columns
```
# Both the wide (linear) and deep parts see all sparse + dense features
dnn_feature_columns = sparse_feature_columns + dense_feature_columns
linear_feature_columns = sparse_feature_columns + dense_feature_columns
feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)
```
Training set and test set
```
## Initialize X, y and split the re-processed dataset
y = data['is_canceled']
X = data.drop(['is_canceled'], axis=1)
X_train, X_test, y_train, y_test = \
    train_test_split(X, y, test_size=0.20, random_state=42)
## Rebalance with SMOTE
# NOTE(review): this balanced split is never used below — the DeepFM cell
# re-splits `data` itself. Also, fit_sample is fit_resample in imblearn >= 0.4.
X_train_balanced, y_train_balanced = SMOTE().fit_sample(X_train, y_train)
```
Implement DeepFM
```
linear_feature_columns
# Label column. Fix: the fit call below referenced an undefined `target`
# (NameError) — the label variable is now named `target` and used there.
# (The dict comprehensions below reuse `name` as their loop variable, which
# does not leak in Python 3, but naming the label `target` avoids confusion.)
target = 'is_canceled'
train, test = train_test_split(data, test_size=0.2)
# Build the {feature_name: column values} inputs that deepctr models expect
train_model_input = {name: train[name].values for name in feature_names}
test_model_input = {name: test[name].values for name in feature_names}
model = DeepFM(linear_feature_columns, dnn_feature_columns, task='binary')
model.compile("adam", "binary_crossentropy", metrics=['binary_crossentropy'], )
history = model.fit(train_model_input, train[target].values,
                    batch_size=256, epochs=10, verbose=2, validation_split=0.2, )
y_pred = model.predict(test_model_input, batch_size=256)
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.