code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import sys
sys.path.append('..')
from lib import *
plt.style.use('../custom.mplstyle')
# -
# overview of numbers
# Overview of cohort sizes: compare unfiltered vs. filtered metadata and
# account for samples dropped due to missing age vs. format differences.
for cohort in [1, 2]:
    meta = load_metadata_emerson(cohort=cohort, filtered=False)
    meta_f = load_metadata_emerson(cohort=cohort, filtered=True)
    noage = np.sum(meta['Age'].isna())
    print(cohort, meta.shape[0], 'filtered', meta_f.shape[0], 'noage', noage,
          'format difference', meta.shape[0] - meta_f.shape[0] - noage)

meta = load_metadata_lindau()
print('lindau', meta.shape[0])

meta = load_metadata_adaptive_all(filtered=True)
print('all', meta.shape[0],
      'Age: %g-%g' % (round(meta['Age'].min()), meta['Age'].max()))

meta = load_metadata_britanova()
# presumably non-positive age encodes cord-blood samples -- TODO confirm
mask = meta['age'] > 0
print('britanova', 'adult', meta[mask].shape[0],
      'cordblood', meta.shape[0] - meta[mask].shape[0],
      'Age: %g-%g' % (meta[mask]['age'].min(), meta['age'].max()))

# Overlayed age histograms for the different cohort definitions.
for cohort in ['all', 'both', 1, 2]:
    if cohort == 'all':
        meta = load_metadata_adaptive_all(filtered=True)
    else:
        meta = load_metadata_emerson(cohort=cohort)
    meta['Age'].hist(bins=np.arange(0, 75, 1), histtype='stepfilled', alpha=.5)

meta = load_metadata_emerson(cohort='both')
meta['CMVpos'].value_counts()

# Combined Emerson + Lindau metadata, filtered vs. unfiltered age histograms.
meta = load_metadata_emerson(cohort='both')
metaunfiltered = load_metadata_emerson(cohort='both', filtered=False)
meta_lindau = load_metadata_lindau()
metaall = pd.concat([meta, meta_lindau])
metaunfiltered['Age'].hist(bins=np.arange(0, 75, 1), histtype='stepfilled', alpha=1)
meta['Age'].hist(bins=np.arange(0, 75, 1), histtype='stepfilled', alpha=1)
# +
# Side-by-side age histograms for the Britanova and combined Emerson cohorts.
fig, axes = plt.subplots(figsize=(7, 2.7), ncols=2)
meta = load_metadata_britanova()
mask = meta['age'] > 0  # exclude cord-blood samples (non-positive age)
meta[mask]['age'].hist(bins=np.arange(0, 104, 1), histtype='stepfilled', ax=axes[0])
axes[0].set_xlim(0, 105)
axes[0].set_title('Britanova cohort (N=%g)' % len(meta[mask]))
# NOTE(review): `metaall` is built in an earlier cell -- confirm execution order.
metaall['Age'].hist(bins=np.arange(0, 75, 1), histtype='stepfilled', ax=axes[1])
axes[1].set_xlim(0, 75)
axes[1].set_title('Emerson cohort (N=%g)' % len(metaall))
for ax in axes:
    ax.set_xlabel('Age in years (binned)')
    ax.set_ylabel('Number of individuals')
    ax.grid()
label_axes(axes)
fig.tight_layout()
fig.savefig(figure_directory + 'cohortages.svg')
# -
| code/cohort_age_distribution/CohortMetadata.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="zdy2V-j7zx_U"
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Dense, Flatten, BatchNormalization, Conv2D, MaxPool2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import categorical_crossentropy
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import confusion_matrix
import itertools
import os
import shutil
import random
import glob
import matplotlib.pyplot as plt
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# %matplotlib inline
# + id="8XItXAjT0x2S"
# Organize data into train, valid, test dirs: one sub-folder per emotion
# class, each filled with a random sample of spectrogram images.
data = '/content/drive/MyDrive/Colab Notebooks/data_sound/img_all/'
os.chdir(data)
if os.path.isdir('train/alegria') is False:  # only split the data once
    # (split -> class -> number of images to move)
    # NOTE(review): 'tristeza' gets 4 test images vs. 3 for the other
    # classes -- preserved from the original; confirm this is intended.
    split_sizes = {
        'train': {'alegria': 10, 'raiva': 10, 'tristeza': 10},
        'valid': {'alegria': 4, 'raiva': 4, 'tristeza': 4},
        'test': {'alegria': 3, 'raiva': 3, 'tristeza': 4},
    }
    for split, sizes in split_sizes.items():
        for label, count in sizes.items():
            dest = os.path.join(split, label)
            os.makedirs(dest)
            # Move `count` randomly chosen images of this class into `dest`.
            for img in random.sample(glob.glob(label + '*'), count):
                shutil.move(img, dest)
os.chdir('../../')
# + id="Q08QNIyj0x4k"
train_path = data + 'train'
valid_path = data + 'valid'
test_path = data + 'test'
# + id="D_9p0bPX0x63" colab={"base_uri": "https://localhost:8080/"} outputId="3d68eb66-0fce-48d5-8330-60f3b998631b"
train_batches = ImageDataGenerator(preprocessing_function=tf.keras.applications.vgg16.preprocess_input) \
.flow_from_directory(directory=train_path, target_size=(288,432), classes=['alegria', 'raiva', 'tristeza'], batch_size=5)
valid_batches = ImageDataGenerator(preprocessing_function=tf.keras.applications.vgg16.preprocess_input) \
.flow_from_directory(directory=valid_path, target_size=(288,432), classes=['alegria', 'raiva', 'tristeza'], batch_size=5)
test_batches = ImageDataGenerator(preprocessing_function=tf.keras.applications.vgg16.preprocess_input) \
.flow_from_directory(directory=test_path, target_size=(288,432), classes=['alegria', 'raiva', 'tristeza'], batch_size=5, shuffle=False)
# + id="39WunG330x9T"
imgs, labels = next(train_batches)
# + id="42Dqb-pA0x_p"
def plotImages(images_arr):
    """Display up to 10 images from *images_arr* in a single row."""
    fig, axes = plt.subplots(1, 10, figsize=(20, 20))
    axes = axes.flatten()
    # zip stops at the shorter sequence, so at most 10 images are shown.
    for img, ax in zip(images_arr, axes):
        ax.imshow(img)
        ax.axis('off')
    plt.tight_layout()
    plt.show()
# + id="Aorg822u0yCQ" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="a4c89f1d-ddd5-458e-eba6-d95fe06800ec"
plotImages(imgs)
print(labels)
# + id="Z3q5SRw20yE0"
# Small CNN: two conv/pool stages followed by a softmax over the 3 classes.
model = Sequential([
    Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same',
           input_shape=(288, 432, 3)),
    MaxPool2D(pool_size=(2, 2), strides=2),
    Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'),
    MaxPool2D(pool_size=(2, 2), strides=2),
    Flatten(),
    Dense(units=3, activation='softmax')  # one unit per emotion class
])
# + id="hoIe_HFdNiWY"
# Model: "sequential"   <- pasted model-summary output, not executable code
# + id="salqcbZLNiZQ"
model.compile(optimizer=Adam(learning_rate=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
# + colab={"base_uri": "https://localhost:8080/"} id="igcbXur1NibW" outputId="e6100691-f0ed-4cc4-8646-9b2b3da5d66d"
model.fit(x=train_batches,
steps_per_epoch=len(train_batches),
validation_data=valid_batches,
validation_steps=len(valid_batches),
epochs=10,
verbose=3
)
# + id="-4tTt1KHNieE"
predictions = model.predict(x=test_batches, steps=len(test_batches), verbose=0)
# + colab={"base_uri": "https://localhost:8080/"} id="Fu8NMZuSNigq" outputId="20fb2447-bff8-4096-81c6-4b7df30f87c2"
np.round(predictions)
# + id="zHy1y2FMNijD"
cm = confusion_matrix(y_true=test_batches.classes, y_pred=np.argmax(predictions, axis=-1))
# + id="vz935WzlUdrU"
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    if normalize:
        # Normalize each row (true class) so it sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    # Annotate each cell; use white text on dark cells for readability.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# + colab={"base_uri": "https://localhost:8080/"} id="vEG-KNThUdvl" outputId="d65d9715-cca6-4698-ffe3-ab8d20bbb3d9"
test_batches.class_indices
# + colab={"base_uri": "https://localhost:8080/", "height": 378} id="QOeqbEFYUkKB" outputId="6f112890-50c5-4942-d4d8-04992a4ea0c2"
cm_plot_labels = ['alegria', 'raiva', 'tristeza']
plot_confusion_matrix(cm=cm, classes=cm_plot_labels, title='Confusion Matrix')
| prova02_2020_12_16_final_CNN_and_Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Additional NAICS Data Processing
# **By: <NAME> and <NAME>**
# This file is for adding additional features to the generated NAICS data from the `NAICS Code Data Generation` notebook. Feel free to add on here if there are other features you'd like to analyze in conjunction with the previously generated NAICS Salesforce dataset.
import numpy as np
import pandas as pd
import re
import json
import time
import requests
# ---
# ## Add Lat/Long Information to Data
# This script is to generate lat/long information to the collected Salesforce information via `ArcGIS`'s `batch_geocode` functionality.
# ### Credentials Helpers
# +
def get_arcgis_access_token(client_id: str, client_secret: str) -> str:
    """
    Gets a new ArcGIS access token via the OAuth2 client-credentials flow.
    """
    # Let `requests` URL-encode the credentials instead of formatting them
    # into the query string by hand (special characters would break the URL).
    token = requests.get(
        "https://www.arcgis.com/sharing/oauth2/token",
        params={
            "client_id": client_id,
            "grant_type": "client_credentials",
            "client_secret": client_secret,
            "f": "pjson",
        },
    )
    return token.json()['access_token']
def get_credentials(filename):
    """
    Load the JSON credentials stored in *filename* and return them as a dict.
    """
    with open(filename, 'r') as f:
        credentials = json.load(f)
    return credentials
def refresh_access_token(filename) -> dict:
    """
    Fetch a fresh access token, write it back into the credentials file,
    and return the updated credentials dict.

    (The original annotation said ``-> None`` but the function has always
    returned the credentials; the annotation is corrected here.)
    """
    # Getting a new access token, picking the OAuth flow by credentials file.
    credentials = get_credentials(filename)
    if filename == '../credentials/sf_credentials.txt':
        new_token = get_sf_access_token(credentials['client_id'], credentials['client_secret'], credentials['refresh_token'])
    elif filename == '../credentials/arcgis_credentials.txt':
        new_token = get_arcgis_access_token(credentials['client_id'], credentials['client_secret'])
    credentials['access_token'] = new_token
    # Open with 'w' (not 'r+'): 'r+' does not truncate, so a shorter JSON
    # payload would leave trailing bytes of the old file and corrupt it.
    with open(filename, 'w') as f:
        json.dump(credentials, f, ensure_ascii=False)
    return credentials
# -
# ### Data Extraction
# Read in generated data from previous notebook
all_data = pd.read_csv('../data/all_data.csv')
all_data.head().columns
all_data.isnull().sum()
def format_data(all_data: pd.DataFrame) -> tuple:
    """
    Format billing-address rows from `all_data` into the ArcGIS batch-geocode
    request payload.

    Returns a tuple ``(reduced_and_relabeled, spatial_data)`` where
    ``reduced_and_relabeled`` is the deduplicated address dataframe (with an
    ``OBJECTID`` column linking rows before and after the request) and
    ``spatial_data`` is a list shaped like:

    [{'attributes': {'OBJECTID': 1,
                     'Address': '25000 Avenue Stanford Suite 117',
                     'City': 'Valencia',
                     'Region': 'CA'}},
     {'attributes': {'OBJECTID': 2,
                     'Address': '1 World Way',
                     'City': 'Los Angeles',
                     'Region': 'CA'}}]
    """
    # Reduce to the three address columns, dedupe, and add an OBJECTID index.
    reduced_and_relabeled = (
        all_data[[
            'Account__r.BillingStreet',
            'Account__r.BillingCity',
            'Account__r.BillingState'
        ]]
        .rename(columns={
            'Account__r.BillingStreet': 'Address',
            'Account__r.BillingCity': 'City',
            'Account__r.BillingState': 'Region'
        })
        .drop_duplicates()
        # Getting an index column to link objects before/after the request
        .reset_index(drop=True)
        .reset_index()
        .rename(columns={'index': 'OBJECTID'})
    )
    # Finalizing dataframe attributes
    spatial_data = [
        {'attributes': val}
        for val in list(
            reduced_and_relabeled
            # Only keep alphanumerics, '.' and spaces in addresses
            # (removing most special characters).
            .assign(Address=reduced_and_relabeled.Address.apply(
                lambda address: re.sub(r"[^a-zA-Z0-9. ]", "", address)))
            .T
            .to_dict()
            .values()
        )]
    return reduced_and_relabeled, spatial_data
def chunks(l, n):
    """
    Splits list into n-sized chunks and each chunk into the following format:
    {'records': [{'attributes': {'Address': '25000 Avenue Stanford Suite 117',
                                 'City': 'Valencia',
                                 'Region': 'CA'}},
                 {'attributes': {'Address': '1 World Way',
                                 'City': 'Los Angeles',
                                 'Region': 'CA'}}]}
    """
    n = max(1, n)  # guard: a chunk size below 1 would make range() misbehave
    return [{'records': l[i:i + n]} for i in range(0, len(l), n)]
# +
# Getting geospatial data
reduced_df, spatial_data = format_data(all_data)
# Geocoding addresses in batches of 100 (limit by URI length)
spatial_data_chunks = chunks(spatial_data, 100)
# -
def generate_spatial_data(df_chunks, to_json=False):
    """
    Geocode each chunk of addresses via the ArcGIS REST API and return the
    combined list of location results.

    If *to_json* is True, the results are also dumped to
    '../data/arcgis_latlong_data.json'.
    """
    def check_error(data):
        """Return True (after refreshing the access token) if *data* holds an API error."""
        if 'error' in data:
            # Original format string had one placeholder for two arguments;
            # report both the code and the message.
            print("HTTP Request Access Token Error {}: {}".format(
                data['error']['code'], data['error']['message']))
            print("Trying to resolve issue by refreshing access token...")
            # Refreshing access token in credentials
            refresh_access_token('../credentials/arcgis_credentials.txt')
            return True
        return False

    # Getting credentials
    credentials = get_credentials('../credentials/arcgis_credentials.txt')
    # Making requests and appending locations to overall location list
    all_locations = []
    for chunk_idx in range(len(df_chunks)):
        start = time.time()
        print("Currently on chunk:", chunk_idx)
        # NOTE(review): indexes the module-level `spatial_data_chunks`
        # rather than `df_chunks` -- confirm they are the same object.
        res = requests.get(
            "https://geocode.arcgis.com/arcgis/rest/services/World/GeocodeServer/geocodeAddresses?addresses={}&token={}&f=pjson".format(
                spatial_data_chunks[chunk_idx], credentials['access_token']))
        data = res.json()
        if check_error(data):
            # Retry the whole run with the refreshed access token.
            return generate_spatial_data(df_chunks, to_json)
        all_locations.extend(data['locations'])
        finish = time.time()
        print("Request finished. Chunk took", finish - start, 'seconds.')
    # Exporting result object into JSON if desired.
    if to_json:
        with open('../data/arcgis_latlong_data.json', 'w') as json_file:
            json_file.write(json.dumps(all_locations))
    return all_locations
# Getting location data
locations = generate_spatial_data(spatial_data_chunks, to_json=True)
def add_loc_data(reduced_df, all_data, to_csv=False):
    """
    Adds location data generated by `generate_spatial_data` to the `all_data`
    dataframe and returns the merged result (`final_df`).
    """
    # Load the geocoding results written by generate_spatial_data.
    with open('../data/arcgis_latlong_data.json') as file:
        data = json.load(file)
    # Map each 'ResultID' (OBJECTID) to its coordinates; unmatched
    # addresses (Status == 'U') get NaN coordinates.
    temp_mapping = {}
    for obj in data:
        object_id, status = obj['attributes']['ResultID'], obj['attributes']['Status']
        temp_mapping[object_id] = obj['location'] if status != 'U' else {'x': np.nan, 'y': np.nan}
    # Sort by ID so the coordinate lists line up with reduced_df's rows.
    coords_mapping = dict(sorted(temp_mapping.items()))
    # Creating location mapping dataframe keyed by 'Full Address'.
    loc_mapping = reduced_df.copy(deep=True)
    loc_mapping['Latitude'] = [coord['y'] for coord in coords_mapping.values()]
    loc_mapping['Longitude'] = [coord['x'] for coord in coords_mapping.values()]
    loc_mapping['Full Address'] = loc_mapping['Address'] + ',' + loc_mapping['City'] + ',' + loc_mapping['Region']
    loc_mapping = loc_mapping[['Full Address', 'Latitude', 'Longitude']]
    # Creating `full_data` dataframe with `Full Address` w/o NaN values.
    full_data = all_data.copy(deep=True)
    full_data['Full Address'] = full_data['Account__r.BillingStreet'] + ',' + full_data['Account__r.BillingCity'] + ',' + full_data['Account__r.BillingState']
    full_data = full_data[full_data['Full Address'].notnull()]
    # Merging data and removing NaN's in lat/long. The .copy() ensures the
    # Country assignment below does not raise SettingWithCopyWarning on a view.
    full_spatial_data = full_data.merge(loc_mapping, how='inner', on='Full Address')
    final_df = full_spatial_data[full_spatial_data.Latitude.notnull() & full_spatial_data.Longitude.notnull()].copy()
    # Adding country column (just U.S. for now)
    final_df['Country'] = 'United States'
    # Exporting dataframe to CSV if desired
    if to_csv:
        final_df.to_csv('../data/data_with_latlong.csv', index=False)
    return final_df
# Generating final dataframe with location data (lat, long) attached to each awarded opportunity.
final = add_loc_data(reduced_df, all_data, to_csv=True)
final
# ---
# ## **Adding a category column instead of separate `DBE`, `MBE`, and `WBE` columns (to overlay plots at the same time on Carto).**
# Read in data
arcgis_df = pd.read_csv('../data/data_with_latlong.csv')
# Join columns together
arcgis_df['Category'] = arcgis_df[['DBE__c', 'MBE__c', 'WBE__c']].apply(
lambda x: ','.join(x.dropna().astype(str)),
axis=1
)
def find_category_name(booleans):
    """
    Translate a comma-joined string of 'True'/'False' flags
    (in DBE, MBE, WBE order) into a human-readable category name.
    """
    vals = [val == 'True' for val in booleans.split(',')]
    if all(vals):
        return 'DBE, MBE, and WBE'
    elif vals[0] and vals[1]:
        return 'DBE and MBE'
    elif vals[1] and vals[2]:
        return 'MBE and WBE'
    elif vals[0] and vals[2]:
        return 'DBE and WBE'
    elif vals[0]:
        return 'DBE'
    elif vals[1]:
        return 'MBE'
    elif vals[2]:
        return 'WBE'
    else:
        # Reaching here means no flag is set.
        return 'Not DBE, MBE, WBE'
# Find category name for each row
arcgis_df['Category'] = arcgis_df['Category'].apply(find_category_name)
# Export dataframe
arcgis_df.to_csv('../data/data_with_latlong_and_cat.csv', index=False)
| notebooks/Additional NAICS Data Processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Motivation
#
# In [Novikov et al. 2016](http://papers.nips.cc/paper/5787-tensorizing-neural-networks.pdf) they use the tensor-train representation to construct a weight matrix. However, the tensor-train constructs a high-dimensional tensor and they simply reshape it into a matrix. I thought this was interesting/weird and want to investigate.
#
# Specifically, I am interested in how parameters are shared across the constructed weight matrix. Weight tying is an important part of designing neural networks, and I am interested in the relationship between parameter tying schemes and tensor (networks) and reshaping.
#
# The motivating example would be that a convolution can be written as a parameter sharing scheme in matrix form. Constructed using a circulant, ...?!
#
# ***
#
# Secondly, when looking at the [algorithm for HOSVD](https://lirias.kuleuven.be/bitstream/123456789/72517/1/94-31.pdf) there is an unfolding (aka reshape) operation that is used to matricize the tensors so the left singular vectors of each dimension can be calculated.
# ### The tensor train format (aka MPS)
#
#
# +
import functools
import sympy as sym
sym.init_printing(use_latex='mathjax')
import numpy as np
import matplotlib.pyplot as plt
# NEED a way to visualise!
# dont want to just focus on TT format.
# what other interesting ones are there?
# Have a look at MPS and PEPS?
# something more exotic?
# +
idx = 'abcdefghijkl' # names for indexes
def construct_core(idx, n):
    """Construct a symbolic 3-tensor of shape (n, n, n) whose entries are
    named '<idx>_ijk' (e.g. 'a_012')."""
    return sym.tensor.Array([[[sym.Symbol('{}_{}{}{}'.format(idx, i, j, k))
                               for i in range(n)]
                              for j in range(n)]
                             for k in range(n)])
def construct_cores(N, n):
    """Construct N cores of size n, naming each from the module-level
    `idx` alphabet string ('a', 'b', 'c', ...)."""
    return [construct_core(idx[i], n) for i in range(N)]
def construct_tensor(cores):
    """Contract a chain of 3-tensor cores into one tensor (tensor-train style)."""
    t = cores[0]
    for i in range(len(cores) - 1):
        t = sym.tensorproduct(t, cores[i + 1])
        # NOTE(review): the original author flagged this axis pair as
        # uncertain ("not sure if this is right") -- verify (3, 4) is the
        # intended bond contraction.
        t = sym.tensorcontraction(t, (3, 4))
    return t
# +
n_cores = 2
cores = construct_cores(n_cores, 2)
t = construct_tensor(cores)
print(t.shape)
t = sym.simplify(t)
t = sym.reshape(t, [8])
# -
t
# +
# can even construct tensors where we have the same
# or more parameters than elements, but they are shared
# in interesting ways
# +
s = str(t)
print(s.count('a_000'))
print(s.count('a_001'))
# so each parameter is shared over eight spaces?
# want a general formula for this
# also there is an a_{}{}{} parameter in every element.
# kind of locality prior?
# each parameter is shared over some local set (a row or colum).
# -
# ### The various forms of tensor SVD
#
# So, what about all the reshaping funny business going on in HSVD and HOSVD?
#
#
def unfold(tensor, axis):  # aka mode, matricization, fibre
    """Reshape *tensor* into a matrix with tensor.shape[axis] rows.

    NOTE(review): this reshapes without first moving *axis* to the front,
    so for axis != 0 it is not the standard mode-n unfolding (no transpose
    is performed) -- confirm against the HOSVD definition.
    """
    return np.reshape(tensor, (tensor.shape[axis], -1))
# I do not have any good intuition for why/how taking the
# left eigenvectors of a reshaped tensor ...?
# so somehow, under reshaping, the left singular vectors are preserved?
# S and V are unnecessary (which seems rather unusual...)
#
# ***
# The way the core singular value tensor is calculated seems like cheating.
# $$
# \mathcal A = S \times_1 U_1 ... \times_n U_n \\
# S = \mathcal A \times_1 U_1^T ... \times_n U_n^T \\
# $$
# this doesn't seem right, S should be diagonal!?
#
# ***
#
# Hierarchical SVD also uses the same trick.
# Should I bother coding it?
# Seems interesting as now you have multiple core tensors and they need to be reconstructed using the right graph.
# +
class HOSVD():
    """Higher-order SVD: per-mode factor matrices plus a core tensor.

    Methods are invoked on the class itself (e.g. ``HOSVD.decompose(A)``);
    ``@staticmethod`` makes that calling convention explicit.
    """

    @staticmethod
    def decompose(tensor):
        """Return (U, S): the left singular vectors of each mode's unfolding
        and the core tensor obtained by contracting with each U transposed."""
        U = []
        # for each arm of the tensor
        for i, s in enumerate(tensor.shape):
            u, _, _ = np.linalg.svd(unfold(tensor, i))
            U.append(u)
        S = tensor
        for i, leg in enumerate(U):
            S = np.tensordot(leg.T, S, axes=[1, i])
        return U, S

    @staticmethod
    def construct(legs, core):
        """Reassemble the tensor from the factor matrices and the core."""
        c = core
        # or could outerproduct the legs first and then elementwise mul!?
        for i, leg in enumerate(legs):
            c = np.tensordot(leg, c, axes=[1, i])
        return c

    @staticmethod
    def test():
        """Round-trip check: decompose then reconstruct a random tensor."""
        A = np.random.random((5, 5, 5))
        u, s = HOSVD.decompose(A)
        B = HOSVD.construct(u, s)
        d = np.sum(np.abs(A - B))
        if d > 1e-8:
            raise ValueError('A and B are not equal. Difference = {}'.format(d))

HOSVD.test()
# -
# Ok, so that is the motivation out of the way... phew. Now lets take a closer look at reshaping.
#
# Main questions;
#
# ...
# ## Reshaping algols
#
# * Row first, outer vs inner.
# * Can just be done at read time with different striding patterns (see views in numpy?)
#
# Only real requirements are that is must have an inverse? It is consistent? It ...?
# What about more 'random' permutations on the indexes?
#
# What if we thought about it as a function? What does a reshape do?
# Have some $f: x->y$ but we change the f while preserving XXX?!? What is preserved? What are its symmetries?
def reshape(tensor):
    """Placeholder for a custom reshape implementation (not yet written);
    currently a no-op that returns None."""
    pass
# Is reshaing a linear op!?
# Does it commute, associate, distribute, ...
# Firstly, its a unary operation?! So not sure what to do with that...
#
# ### Associativity
#
# $\varrho(u) + (v + w) = (\varrho(u) + v) + w$
#
# ### Commutativity
#
# $\varrho(a) + b = b + \varrho(a)$
#
#
# $a(\mathring u + v) = \mathring{au} + av$
#
#
#
# Reshaping is a permutation of the bases?
#
#
# Reshape.
# Want;
# - some properties that I can measure!?!
# - some visualisations! (what happens when I reshape?)
# - better intuition... need a concrete example to play with
# -
#
# #### Neighborhoods
#
# Picture I already have. Neighbors and where they go to.
#
#
# #### Connectedness (the dual of neighborhoods?)
#
# What about the graph POV?
#
# #### How is reshape like a convolution?
#
# For example, this is what we do when we want to do a convolution. Construct a tensor of patches (examples, X, Y, kernel, kernel) and then reshape it into a (examples x X x Y, kernel x kernel ) matrix.
#
#
#
#
# ## Parameter sharing
#
# What is it, why do we do it, some examples.
# Can represent large(r) spaces with fewer parameters (that is the usual argument for TNs on parameter sharing...)
#
# Sharing over;
#
# * space,
# * time,
# * relations,
# * ?
#
# Nice way to build priors about invariance. (how does this related to the structure of tensor networks!?)
#
# Aka, parameter sharing schemes. If we write the reshaped, constructed tensor, and show the receptive field of original parameters.
# - are the receptive fields local, which tensor-nets/reshapings give local receptive fields?
# - ?
# -
#
# This idea is orthogonal to reshaping, reshaping is just a nice way to visualise it?
#
# $$\begin{aligned}
# &= \begin{bmatrix}
# a_{11} & a_{12} & a_{13} & a_{14} & a_{15} & a_{16} \\
# a_{21} & a_{22} & a_{23} & a_{24} & a_{25} & a_{26} \\
# a_{31} & a_{32} & a_{33} & a_{34} & a_{35} & a_{36} \\
# a_{41} & a_{42} & a_{43} & a_{44} & a_{45} & a_{46} \\
# a_{51} & a_{52} & a_{53} & a_{54} & a_{55} & a_{56} \\
# a_{61} & a_{62} & a_{63} & a_{64} & a_{65} & a_{66} \\
# \end{bmatrix} \\
# &\text{(stack by columns. reshape by first indices fastest)}\\
# &= \begin{bmatrix}
# \begin{bmatrix}
# a_{11} & a_{31} & a_{51}\\
# a_{21} & a_{41} & a_{61}\\
# \end{bmatrix} &
# \begin{bmatrix}
# a_{12} & a_{32} & a_{52}\\
# a_{22} & a_{42} & a_{62}\\
# \end{bmatrix}\\
# \begin{bmatrix}
# a_{13} & a_{33} & a_{53}\\
# a_{23} & a_{43} & a_{63}\\
# \end{bmatrix} &
# \begin{bmatrix}
# a_{14} & a_{34} & a_{54}\\
# a_{24} & a_{44} & a_{64}\\
# \end{bmatrix} \\
# \begin{bmatrix}
# a_{15} & a_{35} & a_{55}\\
# a_{25} & a_{45} & a_{65}\\
# \end{bmatrix} &
# \begin{bmatrix}
# a_{16} & a_{36} & a_{56}\\
# a_{26} & a_{46} & a_{66}\\
# \end{bmatrix} \\
# \end{bmatrix}\end{aligned}$$
#
# Distances are not preserved. Originally $a_{33}$ is one index away from
# $a_{32},a_{34},a_{23},a_{43}$. But after the reshaping, the set of
# elements that d=1 are $a_{13},a_{53},a_{43},a_{31},a_{35},a_{34}$.
# If we map these back into the original matrix, we can see that the
# ‘range’ of the indices is spreading. More are in each element's
# neighbourhood. What does this mean?
#
| tensor-networks/reshape.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Use the LLH Client & Service to produce FreeDOM LLH scans
#
# Intended to demonstrate the LLH service functionality
import tensorflow as tf
import numpy as np
import pickle
import json
import math
from matplotlib import pyplot as plt
from freedom.llh_service.llh_service import LLHService
from freedom.llh_service.llh_client import LLHClient
from multiprocessing import Process
with open('../../freedom/resources/test_data/test_events.pkl', 'rb') as f:
event = pickle.load(f)[8]
# ### llh service configuration:
service_conf = {
"req_addr": "tcp://127.0.0.1:12345",
"ctrl_addr": "tcp://127.0.0.1:12346",
"poll_timeout": 1,
"flush_period": 1,
"n_hypo_params": 8,
"n_hit_features": 9,
"n_evt_features": 2,
"batch_size" : {
"n_hypos": 250,
"n_observations": 4000
},
"send_hwm": 10000,
"recv_hwm": 10000,
"use_freeDOM_model": True,
"hitnet_file": "HitNet_logE18_May_2020-21h02/epoch_32_model.hdf5",
"chargenet_file": "ChargeNet_nChannels_22_May_2020-11h05/epoch_400_model.hdf5"
}
# +
# start the service in a separate process
def start_service(params):
    """Run an LLHService work loop in this process; blocks until the
    service is told to shut down (e.g. via the 'die' control message)."""
    with LLHService(**params) as serv:
        print('starting work loop...')
        serv.start_work_loop()
    # presumably printed after the service context has closed -- confirm
    print('done')
proc = Process(target=start_service, args=(service_conf,))
proc.start()
# -
# ### Construct the client
# The client loads its configuration directly from the service. Client construction will block until a connection to the service is established or "conf_timeout" milliseconds have elapsed.
#
# In this case, it will block until the service has finished initializing.
# %%time
client = LLHClient(
ctrl_addr=service_conf["ctrl_addr"],
conf_timeout=60000
)
# + jupyter={"source_hidden": true}
def colorbar(mappable):
    """Attach a colorbar sized to match *mappable*'s axes.

    Adapted from https://joseph-long.com/writing/colorbars/
    """
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    import matplotlib.pyplot as plt
    last_axes = plt.gca()
    ax = mappable.axes
    fig = ax.figure
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    cbar = fig.colorbar(mappable, cax=cax)
    plt.sca(last_axes)  # restore the previously-current axes
    cbar.ax.tick_params(labelsize=12)
    return cbar
# utility function to map out a 2d NLL slice
# utility function to map out a 2d NLL slice
def map_2d(inds, steps, event, base_params=event['params']):
    """Evaluate the LLH on a 2-D grid of hypotheses.

    *inds* selects which two of the 8 hypothesis parameters are scanned and
    *steps* gives the grid coordinates along each.

    NOTE(review): the default for *base_params* is evaluated once, at
    definition time, from the module-level `event` -- it will not track
    later changes to `event`; confirm this is intended.
    """
    mg = np.meshgrid(*steps)
    n_hypotheses = mg[0].size
    # param table is a table of hypotheses to evaluate:
    # each row contains a value for each of the 8 hypothesis parameters.
    param_table = np.repeat(base_params[np.newaxis, :], n_hypotheses, axis=0)
    # Overwrite the two scanned parameters with the grid coordinates.
    for ind, coord in zip(inds, mg):
        param_table[:, ind] = coord.flat
    # client.eval_llh is a synchronous (blocking) LLH evaluation;
    # asynchronous requests are also supported.
    return client.eval_llh(event['hits'], event['total_charge'], param_table).reshape(
        [step.size for step in steps[::-1]])
def plot_2d(inds, steps, true_params, ax):
    """Plot a 2-D delta-LLH map with the truth (crosshair) and MLE (star)."""
    llh_map = map_2d(inds, steps, event)
    delta = llh_map - llh_map.min()
    im = ax.pcolormesh(*steps, delta, cmap='Spectral',
                       rasterized=True, linewidth=0)
    colorbar(im)
    mle_y, mle_x = np.unravel_index(np.argmin(llh_map), llh_map.shape)
    # place crosshair at true parameters
    ax.axvline(true_params[0])
    ax.axhline(true_params[1])
    # white star at MLE point in the 2d scan
    ax.plot(*[[step[i]] for step, i in zip(steps, (mle_x, mle_y))], 'w*')
# +
true_params = event['params']
par_names = ['x', 'y', 'z', 'time',
'azimuth', 'zenith',
'cascade energy', 'track energy']
par_pairs = [(0, 1), (0, 2), (1, 2), (3, 2),
(4, 5), (4, 2), (4, 0), (6, 7)
]
gridsize=100
spacings = [np.linspace(true_params[0]-50, true_params[0]+50, 100),
np.linspace(true_params[1]-50, true_params[1]+50, 100),
np.linspace(true_params[2]-50, true_params[2]+50, 100),
np.linspace(true_params[3]-300, true_params[3]+300, 100),
np.linspace(0, 2*math.pi, 100),
np.linspace(0, math.pi, 100),
np.logspace(-1, 2, 100),
np.logspace(-1, 2, 100)
]
# +
# %%time
fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(16,8))
plt.subplots_adjust(wspace=0.5)
plt.subplots_adjust(hspace=0.25)
axiter = axes.flat
for i, ax in enumerate(axiter):
ind0, ind1 = par_pairs[i]
grid_spacing = (spacings[ind0], spacings[ind1])
plot_2d((ind0, ind1), grid_spacing, (true_params[ind0], true_params[ind1]), ax)
ax.set_xlabel(par_names[ind0], fontsize=12)
ax.set_ylabel(par_names[ind1], fontsize=12)
ax.tick_params(labelsize=12)
if par_names[ind0].endswith('energy'):
ax.set_xscale('log')
if par_names[ind1].endswith('energy'):
ax.set_yscale('log')
fig.suptitle('LLH Service', fontsize=16)
fig.savefig('llh_service_maps.pdf', bbox='tight')
# -
# time it
# NOTE: the %timeit magic below is commented out by jupytext; when executed as
# a notebook cell it defines `t`, which the print statement below relies on.
# t = %timeit -o output = map_2d((0,1), (spacings[0], spacings[1]), event)
print(f'time per eval is {t.average/gridsize**2*1e6:.2f} usec')
# kill the service
# Send the shutdown command over the service's control socket, then wait for
# the worker process (`proc`, started earlier) to exit.
import zmq
ctrl_sock = zmq.Context.instance().socket(zmq.REQ)
ctrl_sock.connect(service_conf["ctrl_addr"])
ctrl_sock.send_string("die")
proc.join()
| notebooks/atfienberg/serviceLLHScans.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Shrey-Viradiya/Pneumonia_Detection/blob/master/Training_Corona_on_Colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="6OA-fA4SFZ3Z"
# !nvidia-smi
# + [markdown] id="cYoTFfr7Ps4L"
# # Step 1: Login to your GitHub account and clone repository
# + id="5P8zJd-CPbn3"
# Login to GitHub and clone repository.
# %%shell
sudo apt install git-lfs
# cd /content/
# echo -n "Enter your github username: "
read GITUSER
# echo -n "Enter you github password (replace special characters like '@' with '%40', etc): "
read GITPASS
git clone https://$GITUSER:$GITPASS@github.com/Shrey-Viradiya/Pneumonia_Detection.git
# + [markdown] id="GhmrVFd3PySY"
# # Step 2: Add the Kaggle API key
# + id="LleC1KN_jC9k"
kaggle_username = "" ### Your kaggle Username
kaggle_api_key = "" ### Your kaggle API key
# + id="Vvy87F5Ci9Df"
# Write the credentials file expected by the Kaggle CLI.
# json.dump produces valid JSON with proper quoting/escaping, unlike the
# hand-built %-formatted string it replaces.
import json

with open("/content/kaggle.json", "w") as f:
    json.dump({"username": kaggle_username, "key": kaggle_api_key}, f)
# + [markdown] id="hKHlWoLqP3Xa"
# # Step 3: Set up the repository and train model
# + id="DtNK0eyeZLEK"
# %%shell
# cd /content/Pneumonia_Detection
source scripts/colab.sh
# + [markdown] id="2dLc9xP1MxlY"
# # Step 4: Push the new model directly on github!
# + id="hTBHLtFKIG2i"
# %%shell
# cd /content/Pneumonia_Detection
git add model_metadata/test_accuracies_YourBaseModel.npy
git add model_metadata/test_losses_YourBaseModel.npy
git add model_metadata/train_accuracies_YourBaseModel.npy
git add model_metadata/train_losses_YourBaseModel.npy
git add model_objects/ConvModel_YourBaseModel
git add model_objects/ConvModel_YourBaseModel_results.txt
git commit -m "add the new YourBaseModel model results and metadata"
git push origin master
# + id="rF7Wj-7VIQPm"
| notebooks/Training_Corona_on_Colab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Variables, Strings, and Numbers
# + [markdown] slideshow={"slide_type": "fragment"}
# In this section, you will learn to store information in variables.
#
# You will learn about two types of data: strings, which are sets of characters, and numerical data types.
# + [markdown] slideshow={"slide_type": "skip"}
# ## <a name="top"></a>Contents
# ---
# - [Variables](#variables)
# - [Example](#example)
# - [Naming rules](#naming_rules)
# - [NameError](#name_error)
# - [Exercises](#exercises_variables)
# - [Strings](#strings)
# - [Single and double quotes](#single_double_quotes)
# - [Changing case](#changing_case)
# - [Combining strings (concatenation)](#concatenation)
# - [Whitespace](#whitespace)
# - [Exercises](#exercises_strings)
# - [Numbers](#numbers)
# - [Integer operations](#integer_operations)
# - [Floating-point numbers](#floats)
# - [Exercises](#exercises_numbers)
# - [Challenges](#challenges_numbers)
# - [Comments](#comments)
# - [What makes a good comment?](#good_comments)
# - [When should you write comments?](#when_comments)
# - [Exercises](#exercises_comments)
# - [Overall Challenges](#challenges_overall)
# + [markdown] slideshow={"slide_type": "slide"}
# <a name='variables'></a>Variables
# ===
# A variable holds a value.
# + [markdown] slideshow={"slide_type": "subslide"}
# <a name='example'></a>Example
# ---
# + slideshow={"slide_type": "fragment"}
message = "Hello Python world!"
print(message)
# + [markdown] slideshow={"slide_type": "subslide"}
# A variable holds a value. You can change the value of a variable at any point.
# + slideshow={"slide_type": "fragment"}
message = "Hello Python world!"
print(message)
message = "Python is my favorite language!"
print(message)
# + [markdown] slideshow={"slide_type": "subslide"}
# <a name='naming_rules'></a>Naming rules
# ---
# - Variables can only contain letters, numbers, and underscores. Variable names can start with a letter or an underscore, but can not start with a number.
# - Spaces are not allowed in variable names, so we use underscores instead of spaces. For example, use student_name instead of "student name".
# - You cannot use [Python keywords](http://docs.python.org/3/reference/lexical_analysis.html#keywords) as variable names.
# - Variable names should be descriptive, without being too long. For example, mc_wheels is better than just "wheels", and also better than something overly long like number_of_wheels_on_a_motorcycle.
# - Be careful about using the lowercase letter l and the uppercase letter O in places where they could be confused with the numbers 1 and 0.
# + [markdown] slideshow={"slide_type": "subslide"}
# <a name='name_error'></a>NameError
# ---
# There is one common error when using variables, that you will almost certainly encounter at some point. Take a look at this code, and see if you can figure out why it causes an error.
# + slideshow={"slide_type": "fragment"}
message = "Thank you for sharing Python with the world, Guido!"
print(mesage)
# + [markdown] slideshow={"slide_type": "fragment"}
# Let's look through this error message. First, we see it is a NameError. Then we see the file that caused the error, and a green arrow shows us what line in that file caused the error. Then we get some more specific feedback, that "name 'mesage' is not defined".
# + [markdown] slideshow={"slide_type": "subslide"}
# You may have already spotted the source of the error. We spelled message two different ways. Python does not care whether we use the variable name "message" or "mesage". Python only cares that the spellings of our variable names match every time we use them.
#
# This is pretty important, because it allows us to have a variable "name" with a single name in it, and then another variable "names" with a bunch of names in it.
# + [markdown] slideshow={"slide_type": "subslide"}
# We can fix **NameErrors** by making sure all of our variable names are spelled consistently.
# + slideshow={"slide_type": "fragment"}
message = "Thank you for sharing Python with the world, Guido!"
print(message)
# + [markdown] slideshow={"slide_type": "fragment"}
# In case you didn't know [Guido](http://en.wikipedia.org/wiki/Guido_van_Rossum) [van Rossum](http://www.python.org/~guido/) created the Python language over 20 years ago, and he is considered Python's [Benevolent Dictator for Life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life). Guido still signs off on all major changes to the core Python language.
# + [markdown] slideshow={"slide_type": "slide"}
# <a name='exercises_variables'></a>Exercises
# ---
# #### Hello World - variable
#
# - Store your own version of the message "Hello World" in a variable, and print it.
#
# #### One Variable, Two Messages:
# - Store a message in a variable, and then print that message.
# - Store a new message in the same variable, and then print that new message.
# + slideshow={"slide_type": "skip"}
# Ex 2.1 : Hello World - Variable
# put your code here
# + slideshow={"slide_type": "skip"}
# Ex 2.2 : One Variable, Two Messages
# put your code here
# + [markdown] slideshow={"slide_type": "skip"}
# [top](#top)
# + [markdown] slideshow={"slide_type": "slide"}
# <a name='strings'></a>Introduction to Strings
# ===
# Strings are sets of characters. Strings are easier to understand by looking at some examples.
# + [markdown] slideshow={"slide_type": "subslide"}
# <a name='single_double_quotes'></a>Single and double quotes
# ---
# Strings are contained by either single or double quotes.
# + slideshow={"slide_type": "fragment"}
my_string = "This is a double-quoted string."
my_string = 'This is a single-quoted string.'
# + [markdown] slideshow={"slide_type": "subslide"}
# This lets us make strings that contain quotations.
# + slideshow={"slide_type": "fragment"}
quote = "<NAME> once said, \
'Any program is only as good as it is useful.'"
# -
# ### Multiline Strings
#
# In case we need to create a multiline string, there is the **triple-quote** to the rescue:
# `'''`
# +
# Demonstrate a triple-quoted multiline string; the spelling errors in the
# displayed teaching text are fixed ("confortably"/"worring"/"previsou").
multiline_string = '''This is a string where I
can comfortably write on multiple lines
without worrying about using the escape character "\\" as in
the previous example.
As you'll see, the original string formatting is preserved.
'''
print(multiline_string)
# + [markdown] slideshow={"slide_type": "subslide"}
# <a name='changing_case'></a>Changing case
# ---
# You can easily change the case of a string, to present it the way you want it to look.
# + slideshow={"slide_type": "fragment"}
first_name = 'eric'
print(first_name)
print(first_name.title())
# + [markdown] slideshow={"slide_type": "subslide"}
# It is often good to store data in lower case, and then change the case as you want to for presentation. This catches some typos. It also makes sure that 'eric', 'Eric', and 'ERIC' are not considered three different people.
#
# Some of the most common cases are lower, title, and upper.
# + slideshow={"slide_type": "fragment"}
first_name = 'eric'
print(first_name)
print(first_name.title())
print(first_name.upper())
first_name_titled = 'Eric'
print(first_name_titled.lower())
# -
# **Note**: Please notice that the original strings remain **always** unchanged
print(first_name)
print(first_name_titled)
# + [markdown] slideshow={"slide_type": "subslide"}
# You will see this syntax quite often, where a variable name is followed by a dot and then the name of an action, followed by a set of parentheses. The parentheses may be empty, or they may contain some values.
#
# variable_name.action()
#
# In this example, the word "action" is the name of a **method**.
# + [markdown] slideshow={"slide_type": "fragment"}
# A method is something that can be done to a variable.
# + [markdown] slideshow={"slide_type": "fragment"}
# The methods `lower`, `title`, and `upper` are all functions that have been written into the Python language, which do something to **strings**.
#
# Later on, you will learn to write your own methods.
# + [markdown] slideshow={"slide_type": "subslide"}
# <a name='concatenation'></a>Combining strings (concatenation)
# ---
# It is often very useful to be able to combine strings into a message or page element that we want to display. Again, this is easier to understand through an example.
# + slideshow={"slide_type": "fragment"}
first_name = 'ada'
last_name = 'lovelace'
full_name = first_name + ' ' + last_name
print(full_name.title())
# + [markdown] slideshow={"slide_type": "fragment"}
# The plus sign combines two strings into one, which is called **concatenation**.
# + [markdown] slideshow={"slide_type": "subslide"}
# You can use as many plus signs as you want in composing messages. In fact, many web pages are written as giant strings which are put together through a long series of string concatenations.
# + slideshow={"slide_type": "fragment"}
first_name = 'ada'
last_name = 'lovelace'
full_name = first_name + ' ' + last_name
message = full_name.title() + ' ' + \
"was considered the world's first computer programmer."
print(message)
# + [markdown] slideshow={"slide_type": "fragment"}
# If you don't know who Ada Lovelace is, you might want to go read what [Wikipedia](http://en.wikipedia.org/wiki/Ada_Lovelace) or the [Computer History Museum](http://www.computerhistory.org/babbage/adalovelace/) have to say about her. Her life and her work are also the inspiration for the [Ada Initiative](http://adainitiative.org/faq/about-ada-lovelace/), which supports women who are involved in technical fields.
# -
# ### Brief introduction to string formatting
# +
string_template = 'The result of the calculation of {calc} is {res}'
print("String Template: ", string_template)
print(string_template.format(calc='(3*4)+2', res=(3*4)+2))
# -
# ### There's much more than that!
#
# For further information about *String formatting*, see the official online documentation about the [`string`](https://docs.python.org/3/library/string.html) module.
# + [markdown] slideshow={"slide_type": "subslide"}
# <a name='whitespace'></a>Whitespace
# ---
# The term "whitespace" refers to characters that the computer is aware of, but are invisible to readers. The most common whitespace characters are spaces, tabs, and newlines.
#
# Spaces are easy to create, because you have been using them as long as you have been using computers. Tabs and newlines are represented by special character combinations.
#
# The two-character combination "\t" makes a tab appear in a string. Tabs can be used anywhere you like in a string.
# + slideshow={"slide_type": "fragment"}
print("Hello everyone!")
# + slideshow={"slide_type": "fragment"}
print("\tHello everyone!")
# + slideshow={"slide_type": "fragment"}
print("Hello \teveryone!")
# + [markdown] slideshow={"slide_type": "subslide"}
# The combination "\n" makes a newline appear in a string. You can use newlines anywhere you like in a string.
# + slideshow={"slide_type": "fragment"}
print("Hello everyone!")
# + slideshow={"slide_type": "fragment"}
print("\nHello everyone!")
# + slideshow={"slide_type": "fragment"}
print("Hello \neveryone!")
# + slideshow={"slide_type": "fragment"}
print("\n\n\nHello everyone!")
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Stripping whitespace
#
# Many times you will allow users to enter text into a box, and then you will read that text and use it. It is really easy for people to include extra whitespace at the beginning or end of their text. Whitespace includes spaces, tabs, and newlines.
#
# It is often a good idea to strip this whitespace from strings before you start working with them. For example, you might want to let people log in, and you probably want to treat 'eric ' as 'eric' when you are trying to see if I exist on your system.
# + [markdown] slideshow={"slide_type": "subslide"}
# You can strip whitespace from the left side, the right side, or both sides of a string.
# + slideshow={"slide_type": "fragment"}
name = ' eric '
print(name.lstrip())
print(name.rstrip())
print(name.strip())
# + [markdown] slideshow={"slide_type": "subslide"}
# It's hard to see exactly what is happening, so maybe the following will make it a little more clear:
# + slideshow={"slide_type": "fragment"}
name = ' eric '
print('-' + name.lstrip() + '-')
print('-' + name.rstrip() + '-')
print('-' + name.strip() + '-')
# + [markdown] slideshow={"slide_type": "slide"}
# <a name='exercises_strings'></a>Exercises
# ---
# #### Someone Said
# - Find a quote that you like. Store the quote in a variable, with an appropriate introduction such as "<NAME> once said, 'One of my most productive days was throwing away 1000 lines of code'". Print the quote.
#
# #### First Name Cases
# - Store your first name, in lowercase, in a variable.
# - Using that one variable, print your name in lowercase, Titlecase, and UPPERCASE.
#
# #### Full Name
# - Store your first name and last name in separate variables, and then combine them to print out your full name.
#
# #### About This Person
# - Choose a person you look up to. Store their first and last names in separate variables.
# - Use concatenation to make a sentence about this person, and store that sentence in a variable.
# - Print the sentence.
#
# #### Name Strip
# - Store your first name in a variable, but include at least two kinds of whitespace on each side of your name.
# - Print your name as it is stored.
# - Print your name with whitespace stripped from the left side, then from the right side, then from both sides.
# + slideshow={"slide_type": "skip"}
# Ex 2.3 : Someone Said
# put your code here
# + slideshow={"slide_type": "skip"}
# Ex 2.4 : First Name Cases
# put your code here
# + slideshow={"slide_type": "skip"}
# Ex 2.5 : Full Name
# put your code here
# + slideshow={"slide_type": "skip"}
# Ex 2.6 : About This Person
# put your code here
# + slideshow={"slide_type": "skip"}
# Ex 2.7 : Name Strip
# put your code here
# + [markdown] slideshow={"slide_type": "skip"}
# [top](#top)
# + [markdown] slideshow={"slide_type": "slide"}
# <a name='numbers'></a>Numbers
# ===
# Dealing with simple numerical data is fairly straightforward in Python, but there are a few things you should know about.
# + [markdown] slideshow={"slide_type": "subslide"}
# <a name='integers'></a>Integers
# ---
# You can do all of the basic operations with integers, and everything should behave as you expect. Addition and subtraction use the standard plus and minus symbols. Multiplication uses the asterisk, and division uses a forward slash. Exponents use two asterisks.
# + slideshow={"slide_type": "subslide"}
print(3+2)
# + slideshow={"slide_type": "fragment"}
print(3-2)
# + slideshow={"slide_type": "fragment"}
print(3*2)
# + slideshow={"slide_type": "fragment"}
print(3/2)
# + slideshow={"slide_type": "fragment"}
print(3**2)
# + [markdown] slideshow={"slide_type": "subslide"}
# You can use parentheses to modify the standard order of operations.
# + slideshow={"slide_type": "fragment"}
standard_order = 2+3*4
print(standard_order)
# + slideshow={"slide_type": "fragment"}
my_order = (2+3)*4
print(my_order)
# + [markdown] slideshow={"slide_type": "subslide"}
# <a name='floats'></a>Floating-Point numbers
# ---
# Floating-point numbers refer to any number with a decimal point. Most of the time, you can think of floating point numbers as decimals, and they will behave as you expect them to.
# + slideshow={"slide_type": "fragment"}
print(0.1+0.1)
# + [markdown] slideshow={"slide_type": "subslide"}
# However, sometimes you will get an answer with an unexpectedly long decimal part:
# + slideshow={"slide_type": "fragment"}
print(0.1+0.2)
# + [markdown] slideshow={"slide_type": "subslide"}
# This happens because of the way computers represent numbers internally; this has nothing to do with Python itself. Basically, we are used to working in powers of ten, where one tenth plus two tenths is just three tenths. But computers work in powers of two. So your computer has to represent 0.1 in a power of two, and then 0.2 as a power of two, and express their sum as a power of two. There is no exact representation for 0.3 in powers of two, and we see that in the answer to 0.1+0.2.
#
# Python tries to hide this kind of stuff when possible. Don't worry about it much for now; just don't be surprised by it, and know that we will learn to clean up our results a little later on.
# + [markdown] slideshow={"slide_type": "subslide"}
# You can also get the same kind of result with other operations.
# + slideshow={"slide_type": "fragment"}
print(3*0.1)
# -
# Test
3 * 0.1 == 0.3
# ## The `decimal` to the rescue
#
# The `decimal` module provides support for fast correctly-rounded decimal floating point arithmetic. It offers several advantages over the float datatype.
# For Example:
#
# * Decimal “is based on a floating-point model which was designed with people in mind, and necessarily has a paramount guiding principle – computers must provide an arithmetic that works in the same way as the arithmetic that people learn at school.” – excerpt from the decimal arithmetic specification.
#
#
# * Decimal numbers can be represented exactly. In contrast, numbers like 1.1 and 2.2 do not have exact representations in binary floating point. End users typically would not expect 1.1 + 2.2 to display as 3.3000000000000003 as it does with binary floating point.
#
#
# * The exactness carries over into arithmetic. In decimal floating point, 0.1 + 0.1 + 0.1 - 0.3 is exactly equal to zero. In binary floating point, the result is 5.5511151231257827e-017. While near to zero, the differences prevent reliable equality testing and differences can accumulate. For this reason, decimal is preferred in accounting applications which have strict equality invariants.
#
#
# * Unlike hardware based binary floating point, the decimal module has a user alterable precision (defaulting to 28 places) which can be as large as needed for a given problem
#
#
# * The module design is centered around three concepts: the decimal number, the context for arithmetic, and signals.
#
#
#
# ### See Also
#
# * [What Every Computer Scientist Should Know About Floating-Point Arithmetic](http://docs.oracle.com/cd/E19957-01/806-3568/ncg_goldberg.html)
# * [`decimal` module](https://docs.python.org/3/library/decimal.html)
#
#
# ## Example using decimal
from decimal import Decimal, getcontext
getcontext().prec = 17
# Construct the Decimal from the *string* '0.1': Decimal(0.1) would capture the
# binary float's representation error exactly, defeating the purpose of the
# module. With the string form, 3 * Decimal('0.1') is exactly 0.3.
result = 3 * Decimal('0.1')
print(type(result))
print(3 * Decimal('0.1'))
print(3 * 0.1)
# ### Note
#
# The first instruction of the previous cell is an example of **selective module import**.
#
# So far, just note that the syntax pattern of the selective import is:
#
# from module import something[,something else comma separated]
#
# See more about this in the notebook specifically devoted to this!
# + [markdown] slideshow={"slide_type": "slide"}
# <a name='exercises_numbers'></a>Exercises
# ---
# #### Arithmetic
# - Write a program that prints out the results of at least one calculation for each of the basic operations: addition, subtraction, multiplication, division, and exponents.
#
# #### Order of Operations
# - Find a calculation whose result depends on the order of operations.
# - Print the result of this calculation using the standard order of operations.
# - Use parentheses to force a nonstandard order of operations. Print the result of this calculation.
#
# #### Long Decimals
# - On paper, 0.1+0.2=0.3. But you have seen that in Python, 0.1+0.2=0.30000000000000004.
# - Find at least one other calculation that results in a long decimal like this.
# + slideshow={"slide_type": "skip"}
# Ex 2.8 : Arithmetic
a = 6
b = 5
print("a + b = ", end='')
o = a+b
print(o)
# + slideshow={"slide_type": "skip"}
# Ex 2.9 : Order of Operations
result = (3*4)+2
print('The result of the calculation of (3*4)+2', result, sep=' = ')
# + slideshow={"slide_type": "skip"}
# Ex 2.10 : Long Decimals
print(3.125 / 0.2)
# -
# + [markdown] slideshow={"slide_type": "slide"}
# <a name='challenges_numbers'></a>Challenges
# ---
# #### Neat Arithmetic
#
# - Store the results of at least 5 different calculations in separate variables. Make sure you use each operation at least once.
# - Print a series of informative statements, such as "The result of the calculation 5+7 is 12."
#
# #### Neat Order of Operations
# - Take your work for "Order of Operations" above.
# - Instead of just printing the results, print an informative summary of the results. Show each calculation that is being done and the result of that calculation. Explain how you modified the result using parentheses.
#
# #### Long Decimals - Pattern
# - On paper, 0.1+0.2=0.3. But you have seen that in Python, 0.1+0.2=0.30000000000000004.
# - Find a number of other calculations that result in a long decimal like this. Try to find a pattern in what kinds of numbers will result in long decimals.
# + slideshow={"slide_type": "skip"}
# Challenge: Neat Arithmetic
# Put your code here
# + slideshow={"slide_type": "skip"}
# Challenge: Neat Order of Operations
# Put your code here
# + slideshow={"slide_type": "skip"}
# Challenge: Long Decimals - Pattern
# Put your code here
# + [markdown] slideshow={"slide_type": "skip"}
# [top](#top)
# + [markdown] slideshow={"slide_type": "slide"}
# <a name='comments'></a>Comments
# ===
# As you begin to write more complicated code, you will have to spend more time thinking about how to code solutions to the problems you want to solve. Once you come up with an idea, you will spend a fair amount of time troubleshooting your code, and revising your overall approach.
#
# Comments allow you to write in English, within your program. In Python, any line that starts with a pound (#) symbol is ignored by the Python interpreter.
# + slideshow={"slide_type": "subslide"}
# This line is a comment.
#this
#is
#not
print("This line is not a comment, it is code.")
# + [markdown] slideshow={"slide_type": "subslide"}
# <a name='good_comments'></a>What makes a good comment?
# ---
# - It is short and to the point, but a complete thought. Most comments should be written in complete sentences.
# - It explains your thinking, so that when you return to the code later you will understand how you were approaching the problem.
# - It explains your thinking, so that others who work with your code will understand your overall approach to a problem.
# - It explains particularly difficult sections of code in detail.
# + [markdown] slideshow={"slide_type": "subslide"}
# <a name='when_comments'></a>When should you write a comment?
# ---
# - When you have to think about code before writing it.
# - When you are likely to forget later exactly how you were approaching a problem.
# - When there is more than one way to solve a problem.
# - When others are unlikely to anticipate your way of thinking about a problem.
#
# Writing good comments is one of the clear signs of a good programmer. If you have any real interest in taking programming seriously, start using comments now. You will see them throughout the examples in these notebooks.
# + [markdown] slideshow={"slide_type": "slide"}
# <a name='exercises_comments'></a>Exercises
# ---
# #### First Comments
# - Choose the longest, most difficult, or most interesting program you have written so far. Write at least one comment in your program.
#
# + slideshow={"slide_type": "skip"}
# Ex 2.10 : First Comments
# put your code here
# + [markdown] slideshow={"slide_type": "skip"}
# [top](#top)
# + [markdown] slideshow={"slide_type": "slide"}
# <a name='challenges_overall'></a>Overall Challenges
# ===
# We have learned quite a bit so far about programming, but we haven't learned enough yet for you to go create something. In the next notebook, things will get much more interesting, and there will be a longer list of overall challenges.
#
# <blank>
#
# #### What I've Learned
# - Write a program that uses everything you have learned in this notebook at least once.
# - Write comments that label each section of your program.
# - For each thing your program does, write at least one line of output that explains what your program did.
# - For example, you might have one line that stores your name with some whitespace in a variable, and a second line that strips that whitespace from your name:
# + slideshow={"slide_type": "skip"}
# Overall Challenge
# Put your code here
# + slideshow={"slide_type": "skip"}
# I learned how to strip whitespace from strings.
name = '\t\teric'
print("I can strip tabs from my name: " + name.strip())
# + [markdown] slideshow={"slide_type": "skip"}
# [top](#top)
| .ipynb_checkpoints/02 Variable Strings and Numbers-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Optimizer tweaks
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# -
#export
from exp.nb_08 import *
# ## Imagenette data
# We grab the data from the previous notebook.
# [Jump_to lesson 11 video](https://course.fast.ai/videos/?lesson=11&t=3917)
path = datasets.untar_data(datasets.URLs.IMAGENETTE_160)
# +
# Imagenette pipeline from notebook 08: RGB conversion, resize to 128,
# tensor conversion; split train/val by grandparent directory name ('val'),
# label by parent directory; batch into a databunch with 3 input channels
# and 10 classes.
tfms = [make_rgb, ResizeFixed(128), to_byte_tensor, to_float_tensor]
bs=128
il = ImageList.from_files(path, tfms=tfms)
sd = SplitData.split_by_func(il, partial(grandparent_splitter, valid_name='val'))
ll = label_by_func(sd, parent_labeler, proc_y=CategoryProcessor())
data = ll.to_databunch(bs, c_in=3, c_out=10, num_workers=4)
# -
# Then a model:
nfs = [32,64,128,256]
cbfs = [partial(AvgStatsCallback,accuracy), CudaCallback,
        partial(BatchTransformXCallback, norm_imagenette)]
# This is the baseline of training with vanilla SGD.
# lr=0.4 here; one epoch as a sanity-check baseline.
learn,run = get_learn_run(nfs, data, 0.4, conv_layer, cbs=cbfs)
run.fit(1, learn)
# ## Refining the optimizer
# In PyTorch, the base optimizer in `torch.optim` is just a dictionary that stores the hyper-parameters and references to the parameters of the model we want to train in parameter groups (different groups can have different learning rates/momentum/weight decay... which is what lets us do discriminative learning rates).
#
# It contains a method `step` that will update our parameters with the gradients and a method `zero_grad` to detach and zero the gradients of all our parameters.
#
# We build the equivalent from scratch, only ours will be more flexible. In our implementation, the step function loops over all the parameters to execute the step using stepper functions that we have to provide when initializing the optimizer.
# [Jump_to lesson 11 video](https://course.fast.ai/videos/?lesson=11&t=4074)
#
# Jeremy uses param groups in order to tweak the hyper params or add annealing, etc. They also call the parameters groups as layer groups.
#
# In a linear layer, we have a weight tensor and a bias tensor; each of these is a parameter tensor. So to optimize something, we need to know what the parameter tensors are in the model; in PyTorch, we have model.parameters().
#
# We want to be able to say that the LR for the last 2 layers in the model should be X - discriminative learning rates.
class Optimizer():
    """Minimal optimizer: parameter groups plus one hyper-parameter dict per
    group, with the update rule supplied as a list of stepper functions that
    are composed in order at each step."""
    def __init__(self, params, steppers, **defaults):
        groups = list(params)  # `params` might be a generator
        # normalize to a list of parameter lists (one inner list per group)
        if not isinstance(groups[0], list): groups = [groups]
        self.param_groups = groups
        # each group gets an independent copy of the default hypers so they
        # can later diverge (discriminative learning rates, etc.)
        self.hypers = [dict(defaults) for _ in self.param_groups]
        self.steppers = listify(steppers)

    def grad_params(self):
        """All (param, hypers) pairs whose parameter currently has a grad."""
        pairs = []
        for group, hyper in zip(self.param_groups, self.hypers):
            pairs.extend((p, hyper) for p in group if p.grad is not None)
        return pairs

    def zero_grad(self):
        """Detach and zero the gradient of every parameter that has one."""
        for p, _ in self.grad_params():
            p.grad.detach_()
            p.grad.zero_()

    def step(self):
        """Run the composed stepper functions on every parameter."""
        for p, hyper in self.grad_params():
            compose(p, self.steppers, **hyper)
# To do basic SGD, this what a step looks like:
#export
def sgd_step(p, lr, **kwargs):
    """Vanilla SGD update: p <- p - lr * p.grad.

    Returns `p` so stepper functions can be composed. Extra hyper-parameters
    arrive via **kwargs and are ignored here.
    """
    # `Tensor.add_(other, alpha=...)` is the supported signature; the old
    # positional form `add_(-lr, grad)` is deprecated in modern PyTorch.
    p.data.add_(p.grad.data, alpha=-lr)
    return p
opt_func = partial(Optimizer, steppers=[sgd_step])
# Now that we have changed the optimizer, we will need to adjust the callbacks that were using properties from the PyTorch optimizer: in particular the hyper-parameters are in the list of dictionaries `opt.hypers` (PyTorch has everything in the list of param groups).
# +
#export
class Recorder(Callback):
    """Callback that records the learning rate and loss at every training
    batch, with simple plotting helpers."""
    def begin_fit(self):
        self.lrs = []
        self.losses = []

    def after_batch(self):
        if not self.in_train: return
        # record the lr of the last param group and the detached loss
        self.lrs.append(self.opt.hypers[-1]['lr'])
        self.losses.append(self.loss.detach().cpu())

    def plot_lr(self):
        plt.plot(self.lrs)

    def plot_loss(self):
        plt.plot(self.losses)

    def plot(self, skip_last=0):
        """Plot loss against lr on a log-x scale, optionally dropping the
        final `skip_last` batches."""
        vals = [loss.item() for loss in self.losses]
        keep = len(vals) - skip_last
        plt.xscale('log')
        plt.plot(self.lrs[:keep], vals[:keep])
class ParamScheduler(Callback): # Updating ParamScheduler to use our new Optimizer
    """Callback that sets hyper-parameter `pname` on every param group each
    training batch, driven by schedule functions of progress in [0, 1]."""
    _order=1
    def __init__(self, pname, sched_funcs):
        self.pname = pname
        self.sched_funcs = listify(sched_funcs)

    def begin_batch(self):
        if not self.in_train: return
        funcs = self.sched_funcs
        # a single schedule function is shared by all param groups
        if len(funcs) == 1:
            funcs = funcs * len(self.opt.param_groups)
        progress = self.n_epochs / self.epochs
        for func, hyper in zip(funcs, self.opt.hypers):
            hyper[self.pname] = func(progress)
class LR_Find(Callback):
    """Sweep the learning rate exponentially from min_lr to max_lr over at
    most max_iter batches, cancelling training when the loss blows up."""
    _order=1
    def __init__(self, max_iter=100, min_lr=1e-6, max_lr=10):
        self.max_iter,self.min_lr,self.max_lr = max_iter,min_lr,max_lr
        # NOTE(review): best_loss is initialized here rather than in begin_fit,
        # so reusing one LR_Find instance across fits carries the previous best
        # over — confirm whether that is intended.
        self.best_loss = 1e9
    def begin_batch(self):
        if not self.in_train: return
        # geometric interpolation between min_lr and max_lr
        pos = self.n_iter/self.max_iter
        lr = self.min_lr * (self.max_lr/self.min_lr) ** pos
        # write the swept lr into every param group's hypers
        for pg in self.opt.hypers: pg['lr'] = lr
    def after_step(self):
        # stop once out of iterations or the loss exceeds 10x the best seen
        if self.n_iter>=self.max_iter or self.loss>self.best_loss*10:
            raise CancelTrainException()
        if self.loss < self.best_loss: self.best_loss = self.loss
# -
# So let's check we didn't break anything and that recorder and param scheduler work properly.
sched = combine_scheds([0.3, 0.7], [sched_cos(0.3, 0.6), sched_cos(0.6, 0.2)])
cbfs = [partial(AvgStatsCallback,accuracy),
CudaCallback, Recorder,
partial(ParamScheduler, 'lr', sched)]
learn,run = get_learn_run(nfs, data, 0.4, conv_layer, cbs=cbfs, opt_func=opt_func)
# %time run.fit(1, learn)
run.recorder.plot_loss()
run.recorder.plot_lr()
# ## Weight decay
# [Jump_to lesson 11 video](https://course.fast.ai/videos/?lesson=11&t=4623)
# By letting our model learn high parameters, it might fit all the data points in the training set with an over-complex function that has very sharp changes, which will lead to overfitting.
#
# <img src="images/overfit.png" alt="Fitting vs over-fitting" width="600">
#
# Weight decay comes from the idea of L2 regularization, which consists in adding to your loss function the sum of all the weights squared. Why do that? Because when we compute the gradients, it will add a contribution to them that will encourage the weights to be as small as possible.
# Limiting our weights from growing too much is going to hinder the training of the model, but it will yield a state where it generalizes better. Going back to the theory a little bit, weight decay (or just `wd`) is a parameter that controls that sum of squares we add to our loss:
# ``` python
# loss_with_wd = loss + (wd/2) * (weights**2).sum()
# ```
#
# In practice though, it would be very inefficient (and maybe numerically unstable) to compute that big sum and add it to the loss. If you remember a little bit of high school math, the derivative of `p**2` with respect to `p` is `2*p`. So adding that big sum to our loss is exactly the same as doing:
# ``` python
# weight.grad += wd * weight
# ```
#
# for every weight in our model, which in the case of vanilla SGD is equivalent to updating the parameters with:
# ``` python
# weight = weight - lr*(weight.grad + wd*weight)
# ```
#
# This technique is called "weight decay", as each weight is decayed by a factor `lr * wd`, as it's shown in this last formula.
#
# This only works for standard SGD, as we have seen that with momentum, RMSProp and Adam, the update has some additional formulas around the gradient. In those cases, the formula that comes from L2 regularization:
# ``` python
# weight.grad += wd * weight
# ```
# is different than weight decay
# ``` python
# new_weight = weight - lr * weight.grad - lr * wd * weight
# ```
#
# Most libraries use the first one, but as it was pointed out in [Decoupled Weight Decay Regularization](https://arxiv.org/pdf/1711.05101.pdf) by <NAME> and <NAME>, it is better to use the second one with the Adam optimizer, which is why fastai made it its default.
# Weight decay is subtracting `lr*wd*weight` from the weights. We need this function to have an attribute `_defaults` so that we are sure there is an hyper-parameter of the same name in our `Optimizer`.
#export
def weight_decay(p, lr, wd, **kwargs):
    """Decoupled weight decay: shrink `p` in place by the factor (1 - lr*wd).

    Unlike L2 regularization this touches the weights directly instead of
    the gradient. Returns `p` so steppers can be chained.
    """
    shrink = 1 - lr * wd
    p.data.mul_(shrink)
    return p
# Default used by Optimizer when the caller supplies no `wd`.
weight_decay._defaults = dict(wd=0.)
# L2 regularization is adding `wd*weight` to the gradients.
#export
def l2_reg(p, lr, wd, **kwargs):
    """L2 regularization: add wd * p to the gradient, in place.

    Unlike `weight_decay`, this modifies the gradient, so it interacts with
    momentum/Adam statistics. Returns `p` for stepper composition.
    """
    # `Tensor.add_(scalar, tensor)` is deprecated; use the keyword form.
    p.grad.data.add_(p.data, alpha=wd)
    return p
l2_reg._defaults = dict(wd=0.)
# Let's allow steppers to add to our `defaults` (which are the default values of all the hyper-parameters). This helper function adds to `dest` the key/values it finds while going through `os` and applying `f`, whenever there was no key of the same name already present.
# +
#export
def maybe_update(os, dest, f):
    """For each object in `os`, copy the key/values produced by `f(obj)`
    into `dest` -- but never overwrite a key `dest` already has."""
    for obj in os:
        extracted = f(obj)
        for key, value in extracted.items():
            if key in dest:
                continue  # existing entries win
            dest[key] = value
def get_defaults(d):
    """Return an object's `_defaults` dict, or {} when it declares none."""
    return getattr(d, '_defaults', {})
# -
# This is the same as before, we just take the default values of the steppers when none are provided in the kwargs.
#export
class Optimizer():
    """Minimal optimizer: composes `steppers` over every parameter with a grad.

    params: iterable of parameters, or a list of lists of parameters (groups)
    steppers: functions (p, **hyper) -> p, applied in order at each step
    defaults: hyper-parameter values shared by all groups; each stepper's
        `_defaults` fills in anything the caller did not supply
    """
    def __init__(self, params, steppers, **defaults):
        self.steppers = listify(steppers)
        # Pull default hyper-parameters declared by the steppers themselves.
        maybe_update(self.steppers, defaults, get_defaults)
        # might be a generator
        self.param_groups = list(params)
        # ensure params is a list of lists
        if not isinstance(self.param_groups[0], list): self.param_groups = [self.param_groups]
        # One independent hyper-parameter dict per parameter group.
        self.hypers = [{**defaults} for p in self.param_groups]
    def grad_params(self):
        # (param, hyper-dict) pairs for every parameter that has a gradient.
        return [(p,hyper) for pg,hyper in zip(self.param_groups,self.hypers)
            for p in pg if p.grad is not None]
    def zero_grad(self):
        for p,hyper in self.grad_params():
            p.grad.detach_()  # drop the computation graph before zeroing
            p.grad.zero_()
    def step(self):
        for p,hyper in self.grad_params(): compose(p, self.steppers, **hyper)
#export
sgd_opt = partial(Optimizer, steppers=[weight_decay, sgd_step]) # sgd step
learn,run = get_learn_run(nfs, data, 0.4, conv_layer, cbs=cbfs, opt_func=sgd_opt)
# Before trying to train, let's check the behavior works as intended: when we don't provide a value for `wd`, we pull the corresponding default from `weight_decay`.
model = learn.model
opt = sgd_opt(model.parameters(), lr=0.1)
test_eq(opt.hypers[0]['wd'], 0.)
test_eq(opt.hypers[0]['lr'], 0.1)
# But if we provide a value, it overrides the default.
opt = sgd_opt(model.parameters(), lr=0.1, wd=1e-4)
test_eq(opt.hypers[0]['wd'], 1e-4)
test_eq(opt.hypers[0]['lr'], 0.1)
# Now let's fit.
cbfs = [partial(AvgStatsCallback,accuracy), CudaCallback]
learn,run = get_learn_run(nfs, data, 0.3, conv_layer, cbs=cbfs, opt_func=partial(sgd_opt, wd=0.01))
run.fit(1, learn)
# This is already better than the baseline!
# ## With momentum
# [Jump_to lesson 11 video](https://course.fast.ai/videos/?lesson=11&t=4872)
# Momentum requires adding some state. We need to save the moving average of the gradients to be able to do the step and store this inside the optimizer state. To do this, we introduce statistics. Statistics are objects with two methods:
# - `init_state`, that returns the initial state (a tensor of 0. for the moving average of gradients)
# - `update`, that updates the state with the new gradient value
#
# We also read the `_defaults` values of those objects, to allow them to provide default values to hyper-parameters.
#
# DP: For momentum, we need to have the state saved.
#export
class StatefulOptimizer(Optimizer):
    """Optimizer that additionally keeps per-parameter state (e.g. moving
    averages of gradients) maintained by a list of `Stat` objects."""
    def __init__(self, params, steppers, stats=None, **defaults):
        self.stats = listify(stats)
        # Stats can contribute default hyper-parameters too (e.g. mom).
        maybe_update(self.stats, defaults, get_defaults)
        super().__init__(params, steppers, **defaults)
        self.state = {} #we need to have the state saved
    def step(self):
        for p,hyper in self.grad_params():
            if p not in self.state:
                #Create a state for p and call all the statistics to initialize it.
                self.state[p] = {}
                maybe_update(self.stats, self.state[p], lambda o: o.init_state(p))
            state = self.state[p]
            # Let every statistic refresh its piece of the state...
            for stat in self.stats: state = stat.update(p, state, **hyper)
            # ...then run the steppers with state and hypers merged as kwargs.
            compose(p, self.steppers, **state, **hyper)
            self.state[p] = state
#export
class Stat():
    """Interface for a per-parameter statistic used by StatefulOptimizer.

    Subclasses override `init_state` (build the initial state dict for a
    parameter) and `update` (refresh that state from the current gradient).
    """
    _defaults = {}  # hyper-parameter defaults contributed by the statistic
    def init_state(self, p):
        raise NotImplementedError
    def update(self, p, state, **kwargs):
        raise NotImplementedError
# Here is an example of `Stat`:
class AverageGrad(Stat):
    """Tracks an (undampened) exponential moving average of the gradients."""
    _defaults = dict(mom=0.9)
    def init_state(self, p): return {'grad_avg': torch.zeros_like(p.grad.data)}
    def update(self, p, state, mom, **kwargs):
        # grad_avg <- mom * grad_avg + grad  (no (1-mom) dampening here)
        state['grad_avg'].mul_(mom).add_(p.grad.data)
        return state
# Then we add the momentum step (instead of using the gradients to perform the step, we use the average).
#export
def momentum_step(p, lr, grad_avg, **kwargs):
p.data.add_(-lr, grad_avg)
return p
sgd_mom_opt = partial(StatefulOptimizer, steppers=[momentum_step,weight_decay],
stats=AverageGrad(), wd=0.01)
learn,run = get_learn_run(nfs, data, 0.3, conv_layer, cbs=cbfs, opt_func=sgd_mom_opt)
run.fit(1, learn)
# [Jump_to lesson 11 video](https://course.fast.ai/videos/?lesson=11&t=5115) for discussion about weight decay interaction with batch normalisation
# ### Momentum experiments
# What does momentum do to the gradients exactly? Let's do some plots to find out!
# [Jump_to lesson 11 video](https://course.fast.ai/videos/?lesson=11&t=5487)
x = torch.linspace(-4, 4, 200)
y = torch.randn(200) + 0.3
betas = [0.5, 0.7, 0.9, 0.99]
def plot_mom(f):
    """Plot the noisy data `y` (module-level) and the smoothing produced by
    update rule `f`, once per beta in `betas`.

    f(avg, beta, yi, i) -> (new_avg, value_to_plot)
    """
    _,axs = plt.subplots(2,2, figsize=(12,8))
    for beta,ax in zip(betas, axs.flatten()):
        ax.plot(y, linestyle='None', marker='.')
        avg,res = None,[]  # avg=None signals "no history yet" to f
        for i,yi in enumerate(y):
            avg,p = f(avg, beta, yi, i)
            res.append(p)
        ax.plot(res, color='red')
        ax.set_title(f'beta={beta}')
# This is the regular momentum.
def mom1(avg, beta, yi, i):
    """Plain momentum: new_avg = beta*avg + yi, seeded with the first value.

    Returns the same value twice: (new running average, value to plot).
    """
    previous = yi if avg is None else avg
    smoothed = beta * previous + yi
    return smoothed, smoothed
plot_mom(mom1)
# As we can see, with a too high value, it may go way too high with no way to change its course.
#
# Another way to smooth noisy data is to do an exponentially weighted moving average. In this case, there is a dampening of (1-beta) in front of the new value, which is less trusted than the current average. We'll define `lin_comb` (*linear combination*) to make this easier (note that in the lesson this was named `ewma`).
#export
def lin_comb(v1, v2, beta):
    """Linear combination beta*v1 + (1-beta)*v2 (an EWMA update step)."""
    return beta * v1 + (1 - beta) * v2
def mom2(avg, beta, yi, i):
    """EWMA momentum: the incoming value is dampened by (1-beta).

    Returns (new average, value to plot) -- the same value twice.
    """
    if avg is None:
        avg = yi  # seed with the first observation
    avg = lin_comb(avg, yi, beta)
    return avg, avg
plot_mom(mom2)
# We can see it gets to a zero-constant when the data is purely random. If the data has a certain shape, it will get that shape (with some delay for high beta).
y = 1 - (x/3) ** 2 + torch.randn(200) * 0.1
y[0]=0.5
plot_mom(mom2)
# Debiasing is here to correct the wrong information we may have in the very first batch. The debias term corresponds to the sum of the coefficient in our moving average. At the time step i, our average is:
#
# $\begin{align*}
# avg_{i} &= \beta\ avg_{i-1} + (1-\beta)\ v_{i} = \beta\ (\beta\ avg_{i-2} + (1-\beta)\ v_{i-1}) + (1-\beta)\ v_{i} \\
# &= \beta^{2}\ avg_{i-2} + (1-\beta)\ \beta\ v_{i-1} + (1-\beta)\ v_{i} \\
# &= \beta^{3}\ avg_{i-3} + (1-\beta)\ \beta^{2}\ v_{i-2} + (1-\beta)\ \beta\ v_{i-1} + (1-\beta)\ v_{i} \\
# &\vdots \\
# &= (1-\beta)\ \beta^{i}\ v_{0} + (1-\beta)\ \beta^{i-1}\ v_{1} + \cdots + (1-\beta)\ \beta^{2}\ v_{i-2} + (1-\beta)\ \beta\ v_{i-1} + (1-\beta)\ v_{i}
# \end{align*}$
#
# and so the sum of the coefficients is
#
# $\begin{align*}
# S &=(1-\beta)\ \beta^{i} + (1-\beta)\ \beta^{i-1} + \cdots + (1-\beta)\ \beta^{2} + (1-\beta)\ \beta + (1-\beta) \\
# &= (\beta^{i} - \beta^{i+1}) + (\beta^{i-1} - \beta^{i}) + \cdots + (\beta^{2} - \beta^{3}) + (\beta - \beta^{2}) + (1-\beta) \\
# &= 1 - \beta^{i+1}
# \end{align*}$
#
# since all the other terms cancel out each other.
#
# By dividing by this term, we make our moving average a true average (in the sense that all the coefficients we used for the average sum up to 1).
def mom3(avg, beta, yi, i):
    """Debiased EWMA: the average is seeded at 0 and the plotted value is
    divided by (1 - beta**(i+1)) -- the sum of the smoothing coefficients --
    so early steps are not biased toward zero.

    Returns (new running average, debiased value to plot).
    """
    if avg is None:
        avg = 0
    avg = beta * avg + (1 - beta) * yi  # inlined lin_comb(avg, yi, beta)
    bias_correction = 1 - beta ** (i + 1)
    return avg, avg / bias_correction
plot_mom(mom3)
# ## Adam and friends
# ADAM = (dampenend debiased momentum) / (dampened debiased RMS gradient)
#
# In Adam, we use the gradient averages but with dampening (not like in SGD with momentum), so let's add this to the `AverageGrad` class.
# [Jump_to lesson 11 video](https://course.fast.ai/videos/?lesson=11&t=5889)
#export
class AverageGrad(Stat):
    """EWMA of the gradients, with optional (1-mom) dampening (as in Adam).

    Stores `grad_avg` and the dampening factor `mom_damp` in the state so
    steppers (momentum_step, adam_step) can read them as kwargs.
    """
    _defaults = dict(mom=0.9)
    def __init__(self, dampening:bool=False): self.dampening=dampening # made dampening optional
    def init_state(self, p): return {'grad_avg': torch.zeros_like(p.grad.data)}
    def update(self, p, state, mom, **kwargs):
        state['mom_damp'] = 1-mom if self.dampening else 1.
        # Keyword form replaces the deprecated `add_(scalar, tensor)` call.
        state['grad_avg'].mul_(mom).add_(p.grad.data, alpha=state['mom_damp'])
        return state
# We also need to track the moving average of the gradients squared.
#export
class AverageSqrGrad(Stat):
    """EWMA of the squared gradients (dampened by default, as in Adam).

    Stores `sqr_avg` and the dampening factor `sqr_damp` in the state.
    """
    _defaults = dict(sqr_mom=0.99)
    def __init__(self, dampening:bool=True): self.dampening=dampening
    def init_state(self, p): return {'sqr_avg': torch.zeros_like(p.grad.data)}
    def update(self, p, state, sqr_mom, **kwargs):
        state['sqr_damp'] = 1-sqr_mom if self.dampening else 1.
        # Keyword form replaces the deprecated `addcmul_(scalar, t1, t2)` call.
        state['sqr_avg'].mul_(sqr_mom).addcmul_(p.grad.data, p.grad.data, value=state['sqr_damp'])
        return state
# We will also need the number of steps done during training for the debiasing.
#export
class StepCount(Stat):
    """Counts optimizer steps taken for a parameter (needed for debiasing)."""
    def init_state(self, p):
        return {'step': 0}
    def update(self, p, state, **kwargs):
        state['step'] = state['step'] + 1
        return state
# This helper function computes the debias term. If we use dampening, `damp = 1 - mom` and we get the same result as before. If we don't use dampening (`damp = 1`), we will need to divide by `1 - mom` because that term is missing everywhere.
#export
def debias(mom, damp, step): return damp * (1 - mom**step) / (1-mom)
# Then the Adam step is just the following:
#export
def adam_step(p, lr, mom, mom_damp, step, sqr_mom, sqr_damp, grad_avg, sqr_avg, eps, **kwargs):
    """Adam update, in place: debiased gradient EWMA divided by the debiased
    RMS of the gradients.

    grad_avg/mom_damp come from AverageGrad's state, sqr_avg/sqr_damp from
    AverageSqrGrad's, and step from StepCount's. Returns `p` for composition.
    """
    debias1 = debias(mom, mom_damp, step)
    debias2 = debias(sqr_mom, sqr_damp, step)
    # Keyword form replaces the deprecated `addcdiv_(scalar, t1, t2)` call.
    p.data.addcdiv_(grad_avg, (sqr_avg/debias2).sqrt() + eps, value=-lr / debias1)
    return p
adam_step._defaults = dict(eps=1e-5)
#export
def adam_opt(xtra_step=None, **kwargs):
return partial(StatefulOptimizer, steppers=[adam_step,weight_decay]+listify(xtra_step),
stats=[AverageGrad(dampening=True), AverageSqrGrad(), StepCount()], **kwargs)
learn,run = get_learn_run(nfs, data, 0.001, conv_layer, cbs=cbfs, opt_func=adam_opt())
run.fit(3, learn)
# ## LAMB
# [Jump_to lesson 11 video](https://course.fast.ai/videos/?lesson=11&t=6038)
# It's then super easy to implement a new optimizer. This is LAMB from a [very recent paper](https://arxiv.org/pdf/1904.00962.pdf):
#
# $\begin{align}
# g_{t}^{l} &= \nabla L(w_{t-1}^{l}, x_{t}) \\
# m_{t}^{l} &= \beta_{1} m_{t-1}^{l} + (1-\beta_{1}) g_{t}^{l} \\ EWMA
# v_{t}^{l} &= \beta_{2} v_{t-1}^{l} + (1-\beta_{2}) g_{t}^{l} \odot g_{t}^{l} \\ EWMA with grad^2
# m_{t}^{l} &= m_{t}^{l} / (1 - \beta_{1}^{t}) \\ debiasing
# v_{t}^{l} &= v_{t}^{l} / (1 - \beta_{2}^{t}) \\
# r_{1} &= \|w_{t-1}^{l}\|_{2} \\ weight decay
# s_{t}^{l} &= \frac{m_{t}^{l}}{\sqrt{v_{t}^{l} + \epsilon}} + \lambda w_{t-1}^{l} \\ ADAM Step
# r_{2} &= \| s_{t}^{l} \|_{2} \\
# \eta^{l} &= \eta * r_{1}/r_{2} \\
# w_{t}^{l} &= w_{t}^{l-1} - \eta_{l} * s_{t}^{l} \\
# \end{align}$
def lamb_step(p, lr, mom, mom_damp, step, sqr_mom, sqr_damp, grad_avg, sqr_avg, eps, wd, **kwargs):
    """LAMB update: an Adam-style step rescaled layerwise by the trust ratio
    r1/r2 (parameter RMS norm over step RMS norm), clipped at 10."""
    debias1 = debias(mom, mom_damp, step)
    debias2 = debias(sqr_mom, sqr_damp, step)
    # r1: RMS norm of the weights (the paper's ||w||)
    r1 = p.data.pow(2).mean().sqrt()
    # Adam-style direction plus an L2-style weight-decay term
    step = (grad_avg/debias1) / ((sqr_avg/debias2).sqrt()+eps) + wd*p.data
    r2 = step.pow(2).mean().sqrt()
    # NOTE(review): `add_(value, tensor)` is the deprecated positional form;
    # the trust ratio here is a 0-dim tensor, so migrating to `alpha=` needs
    # an explicit `.item()` -- confirm before changing.
    p.data.add_(-lr * min(r1/r2,10), step)
    return p
lamb_step._defaults = dict(eps=1e-6, wd=0.)
lamb = partial(StatefulOptimizer, steppers=lamb_step, stats=[AverageGrad(dampening=True), AverageSqrGrad(), StepCount()])
learn,run = get_learn_run(nfs, data, 0.003, conv_layer, cbs=cbfs, opt_func=lamb)
run.fit(3, learn)
# Other recent variants of optimizers:
# - [Large Batch Training of Convolutional Networks](https://arxiv.org/abs/1708.03888) (LARS also uses weight statistics, not just gradient statistics. Can you add that to this class?)
# - [Adafactor: Adaptive Learning Rates with Sublinear Memory Cost](https://arxiv.org/abs/1804.04235) (Adafactor combines stats over multiple sets of axes)
# - [Adaptive Gradient Methods with Dynamic Bound of Learning Rate](https://arxiv.org/abs/1902.09843)
# ## Export
# !python notebook2script.py 09_optimizers.ipynb
| nbs/dl2/09_optimizers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import tensorflow as tf
import random
import os
from beta_vae import VAE
from dsprites import DataManager
import matplotlib.pyplot as plt
import time
# # Local Independence Training on dSprites
#
# In this example, we'll test out local independence training on [dSprites](https://github.com/deepmind/dsprites-dataset), a dataset of 64x64 images that are generated based on 5 ground-truth latent factors: shape, scale, rotation, x-, and y-position.
#
# Running naive local independence training with only a few models on dSprites in input space, which is 4096-dimensional, won't work for us -- some of the pixel features in dSprites _never_ change, which means that a model can assign almost infinite importance to one of those pixels--globally--without harming training accuracy. This will make its gradients effectively "orthogonal" to another model that ignores the feature. We might be able to circumvent that problem by training a much larger set of models, but computational constraints prevent us from simultaneously training thousands of them.
#
# Instead, we will solve this problem by running local independence training in a lower dimensional _latent_ space. In particular, we'll use a [$\beta$-VAE](https://arxiv.org/abs/1804.03599) (with credit to <NAME> for providing an [implementation](https://github.com/miyosuda/disentangled_vae)). Since $\beta$-VAEs are designed to _disentangle_ latent generative factors of datapoints into separate dimensions, they bring us closer to the problem of feature selection, which we know our method can solve. We'll actually see that the $\beta$-VAE we train doesn't perfectly disentangle these factors, but we'll still be able to train an ensemble of models each sensitive to only one.
# ## Load our pretrained $\beta$-VAE
manager = DataManager()
manager.load()
sess = tf.Session()
model = VAE(
gamma=1.0,
capacity_limit=20.0,
capacity_change_duration=100000)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
checkpoint = tf.train.get_checkpoint_state('./beta_vae_checkpoints')
saver.restore(sess, checkpoint.model_checkpoint_path)
# ## Verify it can reconstruct dSprites images
# +
reconstruct_check_images = manager.get_random_images(10)
x_reconstruct = model.reconstruct(sess, reconstruct_check_images)
plt.figure(figsize=(10,2))
def imshow(i):
plt.imshow(i.reshape(64,64), interpolation='none', cmap='gray')
plt.axis('off')
for i in range(10):
plt.subplot(2,10,i+1)
imshow(reconstruct_check_images[i])
plt.subplot(2,10,i+11)
imshow(x_reconstruct[i])
plt.show()
# -
# ## Compute low-variance dimensions
#
# Although the latent representation our $\beta$-VAE learns is 10-dimensional, when that many latent factors aren't necessary to fully reconstruct inputs, $\beta$-VAEs tend to ignore many of them, creating additional "slack" features. We will want to keep track of these and only penalize cosine similarity in the non-slack feature gradients.
img = manager.get_image(shape=1, scale=2, orientation=5)
plt.figure(figsize=(1,1))
imshow(img)
plt.show()
z_mean, z_log_sigma_sq = model.transform(sess, [img])
z_sigma_sq = np.exp(z_log_sigma_sq)[0]
z_sigma_sq.round(2)
relevant_idxes = np.argwhere(z_sigma_sq.round(2) < 0.7)[:,0]
irrelevant_idxes = np.argwhere(z_sigma_sq.round(2) > 0.7)[:,0]
relevant_I = np.ones(10)
relevant_I[irrelevant_idxes] = 0
I = tf.constant(relevant_I.astype(np.float32))
# ## Perform latent traversal of low-variance dimensions
z_m = z_mean[0]
n_z = 10
foo = len(relevant_idxes)
plt.figure(figsize=(5,5))
for j, target_z_index in enumerate(relevant_idxes):
for k, ri in enumerate(relevant_idxes):
value = -3.0 + (6.0 / 9.0) * (2*k)
z_mean2 = np.zeros((1, n_z))
for i in range(n_z):
if( i == target_z_index ):
z_mean2[0][i] = value
else:
z_mean2[0][i] = z_m[i]
reconstr_img = model.generate(sess, z_mean2)
rimg = reconstr_img[0].reshape(64, 64)
plt.subplot(foo,foo,j*foo+k+1)
imshow(rimg)
plt.show()
# Here we can see that, while $x$ and $y$-position are fairly well isolated to individual dimensions, they also remain somewhat entangled with scale. Shape and rotation are fairly closely entangled with each other (and perhaps slightly with scale as well).
#
# ## Construct our training dataset
# +
# load the dataset, along with the ground-truth latent factors
dataset_zip = np.load('dsprites.npz', encoding = 'latin1')
latents_values = dataset_zip['latents_values']
medians = np.median(latents_values, axis=0) # compute latent medians
np.random.seed(0)
# shuffle the dataset
indexes = np.arange(len(latents_values))
np.random.shuffle(indexes)
# separate out 10000 images from the full dataset to use for evaluation
X_test = manager.imgs[indexes[:10000]]
Z_test = latents_values[indexes[:10000]]
# compute test sets based on scale, x, and y independently
# being greater than the median
y_test2 = (Z_test[:,2] > medians[2]).astype(int)
y_test4 = (Z_test[:,4] > medians[4]).astype(int)
y_test5 = (Z_test[:,5] > medians[5]).astype(int)
# now compute the training dataset -- take the remaining images
# and determine whether each satisfy each median condition
Z_full = latents_values[indexes[10000:]]
y2 = (Z_full[:,2] > medians[2]).astype(int) # scale
y4 = (Z_full[:,4] > medians[4]).astype(int) # x
y5 = (Z_full[:,5] > medians[5]).astype(int) # y
# introduce train/test ambiguity by limiting to cases where all
# three conditions are true or false
y_sum = y2 + y4 + y5
y0_indexes = np.argwhere(y_sum == 0)[:,0]
y1_indexes = np.argwhere(y_sum == 3)[:,0]
# construct a label-balanced training dataset
n = int(np.floor(min(len(y0_indexes), len(y1_indexes)) / 1000) * 1000)
Z_train = np.vstack([
latents_values[indexes[10000:]][y0_indexes[:n]],
latents_values[indexes[10000:]][y1_indexes[:n]]])
X_train = np.vstack([
manager.imgs[indexes[10000:]][y0_indexes[:n]],
manager.imgs[indexes[10000:]][y1_indexes[:n]]])
y_train = np.array([0]*n + [1]*n)
# shuffle again
train_shuffle = np.arange(len(y_train))
np.random.shuffle(train_shuffle)
X_train = X_train[train_shuffle]
y_train = y_train[train_shuffle]
Z_train = Z_train[train_shuffle]
# define/import helper functions to perform classification
# and local independence training
def mlp(X, prefix):
    """Two softplus hidden layers (256 units) + a 2-class linear head.
    `prefix` namespaces the variables so several MLPs can coexist."""
    L1 = tf.layers.dense( X, 256, name=prefix+'/1', activation=tf.nn.softplus)
    L2 = tf.layers.dense(L1, 256, name=prefix+'/2', activation=tf.nn.softplus)
    return tf.layers.dense(L2, 2, name=prefix+'/3', activation=None)
def xent(logits,labels):
    """Mean softmax cross-entropy over the batch."""
    return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
def accu(logits,labels):
    """Classification accuracy: fraction of argmax matches."""
    return tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits,1), tf.argmax(labels, 1)), dtype=tf.float32))
def logps(l):
    """Log-softmax: logits minus their per-row log-sum-exp."""
    return l-tf.reduce_logsumexp(l,1,keep_dims=True)
from neural_network import onehot, squared_cos_sim
def vars_for(prefix):
    """Trainable variables whose names start with `prefix`."""
    return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, prefix)
def reinit_op():
    """Build an op initializing only the variables the session hasn't
    initialized yet (so the pretrained VAE weights are left untouched)."""
    uninitialized_vars = []
    for var in tf.global_variables():
        try:
            sess.run(var)  # raises if the variable has no value yet
        except tf.errors.FailedPreconditionError:
            uninitialized_vars.append(var)
    return tf.variables_initializer(uninitialized_vars)
# -
# ## Define train operations
# +
mlp_logits = []
mlp_xents = []
mlp_grads = []
mlp_accus = []
X_symb = tf.placeholder(tf.float32, [None, 64, 64])
y_symb = tf.placeholder(tf.float32, [None, 2])
y_trainhot = onehot(y_train, 2)
s = str(time.time())
for i in range(3):
# Build MLP on top of beta-VAE latent representation (`model.z`)
mlp_i = mlp(model.z, "{}/{}".format(s,i))
lossi = xent(mlp_i, y_symb)
accui = accu(mlp_i, y_symb)
mlp_logits.append(mlp_i)
mlp_xents.append(lossi)
mlp_accus.append(accui)
# Compute symbolic representation of gradients
mlp_grads.append(tf.multiply(tf.gradients(logps(mlp_i), model.z)[0], I))
# Sum up cross-entropies
mlp_xent = tf.add_n(mlp_xents)
# Sum up pairwise cosine similarities
mlp_pair = tf.add_n([
tf.reduce_sum(squared_cos_sim(mlp_grads[i],mlp_grads[j]))
for i in range(len(mlp_grads))
for j in range(i+1, len(mlp_grads))])
# Add them to the loss
lambda_pair = 0.1
mlp_loss = mlp_xent + lambda_pair * mlp_pair
# Set up training operations
mlp_optimizer = tf.train.AdamOptimizer()
mlp_train = mlp_optimizer.minimize(mlp_loss, var_list=vars_for(str(s)))
sess.run(reinit_op())
# -
# ## Train the ensemble
#
# Note: for speed, this version of the notebook defaults to loading an existing ensemble, but you can set retrain=True to run this process over again!
# +
retrain = False # Set this to true to retrain from scratch -- might take a while, though!
if retrain:
np.set_printoptions(formatter={'float': '{: 0.3f}'.format})
ops = [mlp_train,lambda_pair*mlp_pair,mlp_xent]+mlp_accus
for i in range(5000): # this is a decent number of iterations
i1 = 256*i % len(X_train)
i2 = 256*(i+1) % len(X_train)
Xb = X_train[i1:i2].reshape(-1, 64*64)
yb = y_trainhot[i1:i2]
if i2 < i1: continue
results = sess.run(ops, feed_dict={ model.x: Xb, y_symb: yb })
if i % 10 == 0:
print('pair: {:.2f}, xent: {:.2f}, acc1: {:.2f}, acc2: {:.2f}, acc3: {:.2f}'.format(*results[1:]))
if i % 50 == 0:
print('--')
a2 = sess.run(mlp_accus, feed_dict={model.x: X_test[:100].reshape(-1,64*64), y_symb: onehot(y_test2[:100])})
a4 = sess.run(mlp_accus, feed_dict={model.x: X_test[:100].reshape(-1,64*64), y_symb: onehot(y_test4[:100])})
a5 = sess.run(mlp_accus, feed_dict={model.x: X_test[:100].reshape(-1,64*64), y_symb: onehot(y_test5[:100])})
print(np.array([a2,a4,a5]).T)
print('--')
else:
import pickle
with open('./dsprites-ensemble.pkl', 'rb') as f:
vals = pickle.load(f)
for var, val in zip(vars_for(str(s)), vals):
sess.run(var.assign(val))
# -
# ## Train another model normally (for comparison)
# +
s2 = str(time.time())
mlp1_logits = mlp(model.z, s2)
mlp1_loss = xent(mlp1_logits, y_symb)
mlp1_accu = accu(mlp1_logits, y_symb)
mlp1_vars = vars_for(s2)
mlp1_optimizer = tf.train.AdamOptimizer()
mlp1_train = mlp1_optimizer.minimize(mlp1_loss, var_list=mlp1_vars)
sess.run(reinit_op())
for i in range(200): # need many fewer iterations for convergence
i1 = 256*i % len(X_train)
i2 = 256*(i+1) % len(X_train)
if i2 < i1: continue
_, acc = sess.run([mlp1_train, mlp1_accu], feed_dict={
model.x: X_train[i1:i2].reshape(-1,64*64), y_symb:y_trainhot[i1:i2]})
# -
# ## Compute test accuracy for all models for all test sets
# +
accuracy_ops = [mlp1_accu] + mlp_accus # accuracy operations for each model
test_sets = [(X_train, y_train), (X_test, y_test2), (X_test, y_test4), (X_test, y_test5)] # sets for evaluation
indexes = [slice(100*i, 100*(i+1)) for i in range(100)] # batch indexes over each set
accuracy_grid = [[[
sess.run(op, feed_dict={
model.x: Xt[idx].reshape(-1, 64*64),
y_symb: onehot(yt[idx]) })
for op in accuracy_ops]
for Xt, yt in test_sets]
for idx in indexes]
accuracy_grid = np.mean(accuracy_grid, axis=0) # average over batches
# -
# ## Plot accuracies
#
# Reorder models for better display -- if you retrained from scratch, you might want to change this!
reordered = accuracy_grid.T[[0,2,3,1]].T
plt.imshow(reordered.T, interpolation='none',cmap='RdYlGn', vmin=0.5, vmax=1.0)
plt.yticks([0,1,2,3],['Normal', 'Diverse 1', 'Diverse 2', 'Diverse 3'], fontsize=12)
plt.xticks([0,1,2,3],['Train', 'Test 1\n(scale)', 'Test 2\n(x-pos)', 'Test 3\n(y-pos)'], fontsize=12)
for i in range(4):
for j in range(4):
plt.text(i,j,'{:.1%}'.format(reordered[i][j]), ha='center', va='center', color=('white' if reordered[i][j] > 0.9 or reordered[i][j] < 0.6 else 'black'))
cb = plt.colorbar()
cb.set_label('Accuracy', fontsize=14)
cb.set_ticks([0.5,0.6,0.7,0.8,0.9,1.0])
plt.title('dSprites Accuracy', fontsize=16, y=1.02)
plt.ylabel('Model', fontsize=14)
plt.xlabel('Dataset', fontsize=14)
plt.show()
# ## Feature visualization to explain each model
#
# Similar to input-space [feature visualization](https://distill.pub/2017/feature-visualization/), as a way of explaining a particular _model_, we'll solve an optimization problem to modify an _image_ to maximize the predicted probability of the positive class. Because we have a nice autoencoder to work with, we'll do this optimization via gradient descent in its latent space (and use its decoder to visualize the resulting series of images).
# +
def normalize(v):
    """Return `v` scaled to unit Euclidean length."""
    magnitude = np.linalg.norm(v)
    return v / magnitude
def visualize_progression(viz):
    """Show a list of images side by side in a single matplotlib row."""
    plt.figure(figsize=(len(viz),1))
    for i in range(len(viz)):
        plt.subplot(1,len(viz),i+1)
        imshow(viz[i])
    plt.show()
def feature_visualize(img, logits, iters=15, eps=0.05):
    """Gradient-ascent feature visualization in the beta-VAE latent space.

    Starting from `img`'s latent mean, repeatedly step the latent code in
    the direction increasing the positive-class logit margin, masking out
    the "slack" latent dimensions via `relevant_I`. Returns the decoded
    64x64 image for every point on the trajectory (length iters+1).
    """
    # Margin between positive and negative class logits.
    logit = logits[:,1] - logits[:,0]
    grad = tf.gradients(logit, model.z)[0]
    z_mean, z_log_sigma_sq = model.transform(sess, [img])
    zs = [z_mean]
    for i in range(iters):
        z = zs[-1]
        grad_val = sess.run(grad, feed_dict={model.z: z})
        # Fixed-size step along the normalized, masked gradient.
        z_new = z + eps*normalize(grad_val*relevant_I)
        zs.append(z_new)
    return [model.generate(sess, z).reshape(64,64) for z in zs]
# -
imgs0 = feature_visualize(img, mlp1_logits, eps=0.05, iters=80)
imgs1 = feature_visualize(img, mlp_logits[1], eps=0.05, iters=80)
imgs2 = feature_visualize(img, mlp_logits[2], eps=0.05, iters=80)
imgs3 = feature_visualize(img, mlp_logits[0], eps=0.05, iters=80)
# ### Normal
visualize_progression(imgs0[::4])
# ### Diverse 1
visualize_progression(imgs1[::4])
# ### Diverse 2
visualize_progression(imgs2[::4])
# ### Diverse 3
visualize_progression(imgs3[::4])
# +
from matplotlib.patches import ConnectionPatch
ts = {'fontweight': 800, 'fontsize': 14}
def add_arrow(ax, xyA, xyB):
    """Draw a green arrow from point xyA on `ax` to point xyB on the central
    original-image axes `ax3` (module-level, defined below)."""
    ax.add_artist(ConnectionPatch(
        xyA=xyA, coordsA="data", axesA=ax,
        xyB=xyB, coordsB="data", axesB=ax3,
        color="darkgreen", arrowstyle="<|-", mutation_scale=20, linewidth=4))
fig = plt.figure(figsize=(8,5))
ax1 = plt.subplot(3,3,3); imshow(imgs1[-1]); plt.text(69, 32, 'Diverse 1', va='center', ha='left', **ts)
ax2 = plt.subplot(3,3,4); imshow(imgs0[-1]); plt.title('Normal\nModel', **ts)
ax3 = plt.subplot(3,3,5); imshow(img); plt.title('Original\nImage', **ts)
ax4 = plt.subplot(3,3,6); imshow(imgs2[-1]); plt.text(69, 32, 'Diverse 2', va='center', ha='left', **ts)
ax5 = plt.subplot(3,3,9); imshow(imgs3[-1]); plt.text(69, 32, 'Diverse 3', va='center', ha='left', **ts)
add_arrow(ax1, (1,32), (63,1))
add_arrow(ax4, (1,32), (63,32))
add_arrow(ax5, (1,32), (63,63))
add_arrow(ax2, (63,32), (1,32))
fig.text(0.6,0.25, "Feature\nVisualization", color='darkgreen', ha='center', va='center', **ts)
plt.show()
# -
# These visualizations show that each of the three diverse models appears to be sensitive to different latent generative factors, which is what we wanted. Although this example is pretty simple, it's an important demonstration of this technique as a method for obtaining more abstract kinds of diversity in our ensembles.
#
# One important caveat, though: although we trained our classifiers on a dataset where x, y, and size were correlated with each other, we trained our _autoencoder_ on the full dataset, where they weren't. If we hadn't trained our autoencoder in this way, we might not have been able to learn a representation that would have allowed us to train the ensemble we wanted. However, there has been recent progress in learning disentangled representations that [generalize outside the training data](https://arxiv.org/abs/1802.04942) (in that case, being able to produce moustached female faces on CelebA), and even if we can't, our method is still useful in semi-supervised cases, where our training set comes from a much larger unlabeled dataset -- especially when that labeling happened with _selection bias_. In that case, we might be able to learn a good representation for our data, but not a generalizable classifier, which is exactly the setting we considered here.
| DSprites-Latent-Space.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Modeling and Simulation in Python
#
# Chapter 16
#
# Copyright 2017 <NAME>
#
# License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)
#
# +
# Configure Jupyter so figures appear in the notebook
# %matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
# %config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
# -
# ## Code from previous notebooks
def update_func(state, t, system):
    """Update the thermal transfer model.
    state: State (temp)
    t: time
    system: System object
    returns: State (temp)
    """
    unpack(system)  # modsim magic: injects r, T_env, dt, ... as locals
    T = state.T
    # Newton's law of cooling, one Euler step of size dt.
    T += -r * (T - T_env) * dt
    return State(T=T)
def run_simulation(system, update_func):
    """Runs a simulation of the system.

    Steps the model from t=0 to t_end in increments of dt, storing one
    row per time step in a TimeFrame.

    system: System object (must provide init, t_end, dt)
    update_func: function that updates state

    returns: TimeFrame of the state over time.
    Side effect: stores the final temperature in system.T_final.
    """
    # unpack injects init, t_end, dt, ... into the local namespace.
    unpack(system)
    frame = TimeFrame(columns=init.index)
    frame.row[0] = init
    ts = linrange(0, t_end, dt)
    for t in ts:
        frame.row[t+dt] = update_func(frame.row[t], t, system)
    # store the final temperature in T_final
    system.T_final = get_last_value(frame.T)
    return frame
def make_system(T_init, r, volume, t_end):
    """Makes a System object with the given parameters.

    T_init: initial temperature in degC
    r: heat transfer rate, in 1/min
    volume: volume of liquid in mL
    t_end: end time of simulation

    returns: System object
    """
    init = State(T=T_init)
    # T_final is used to store the final temperature.
    # Before the simulation runs, T_final = T_init
    T_final = T_init
    # ambient (environment) temperature, degC
    T_env = 22
    # time step, minutes
    dt = 1
    # System(locals()) captures every local name defined above
    # (init, T_init, r, volume, t_end, T_final, T_env, dt) as attributes.
    return System(locals())
# ### Using `fsolve`
#
# As a simple example, let's find the roots of this function; that is, the values of `x` that make the result 0.
def func(x):
    """Cubic polynomial with roots at x = 1, 2, and 3."""
    result = 1
    for root in (1, 2, 3):
        result = result * (x - root)
    return result
# `modsim.py` provides `fsolve`, which does some error-checking and then runs `scipy.optimize.fsolve`. The first argument is the function whose roots we want. The second argument is an initial guess.
fsolve(func, x0=0)
# Usually the root we get is the one that's closest to the initial guess.
fsolve(func, 1.9)
fsolve(func, 2.9)
# But not always.
fsolve(func, 1.5)
# We want to find the value of `r` that makes the final temperature 70, so we define an "error function" that takes `r` as a parameter and returns the difference between the final temperature and the goal.
def error_func1(r):
    """Simulation-based error: final coffee temperature minus 70 C.

    r: heat transfer rate, in 1/min

    returns: difference between final temp and 70 C
    """
    coffee = make_system(T_init=90, r=r, volume=300, t_end=30)
    # run_simulation stores the final temperature on the System object
    run_simulation(coffee, update_func)
    return coffee.T_final - 70
# With `r=0.01`, we end up a little too warm.
error_func1(r=0.01)
# The return value from `fsolve` is an array with a single element, the estimated value of `r`.
solution = fsolve(error_func1, 0.01)
r_coffee = solution[0]
r_coffee
# If we run the simulation with the estimated value of `r`, the final temperature is 70 C, as expected.
coffee = make_system(T_init=90, r=r_coffee, volume=300, t_end=30)
results = run_simulation(coffee, update_func)
coffee.T_final
# **Exercise:** When you call `fsolve`, it calls `error_func1` several times. To see how this works, add a print statement to `error_func1` and run `fsolve` again.
# **Exercise:** Repeat this process to estimate `r_milk`, given that it starts at 5 C and reaches 20 C after 15 minutes.
#
# Before you use `fsolve`, you might want to try a few values for `r_milk` and see how close you can get by trial and error. Here's an initial guess to get you started:
r_milk = 0.1
milk = make_system(T_init=5, t_end=15, r=r_milk, volume=50)
results = run_simulation(milk, update_func)
milk.T_final
# +
# Solution
def error_func2(r):
    """Simulation-based error: final milk temperature minus 20 C.

    r: heat transfer rate, in 1/min

    returns: difference between final temp and 20 C
    """
    milk = make_system(T_init=5, t_end=15, r=r, volume=50)
    # run_simulation stores the final temperature on the System object
    run_simulation(milk, update_func)
    return milk.T_final - 20
# +
# Solution
error_func2(r=0.1)
# +
# Solution
solution = fsolve(error_func2, 0.1)
r_milk = solution[0]
# +
# Solution
milk = make_system(T_init=5, t_end=15, r=r_milk, volume=50)
results = run_simulation(milk, update_func)
milk.T_final
# -
# ### Mixing liquids
# The following function takes `System` objects that represent two liquids, computes the temperature of the mixture, and returns a new `System` object that represents the mixture.
def mix(s1, s2):
    """Simulates the instantaneous mixture of two liquids.

    The mixture temperature is the volume-weighted average of the two
    final temperatures.

    s1: System representing coffee
    s2: System representing milk

    returns: System representing the mixture
    """
    # mixing systems simulated for different durations would be a bug
    assert s1.t_end == s2.t_end

    total_volume = s1.volume + s2.volume
    weighted_temp = (s1.volume * s1.T_final +
                     s2.volume * s2.T_final) / total_volume

    return make_system(T_init=weighted_temp,
                       t_end=0,
                       r=s1.r,
                       volume=total_volume)
# ## Mixing at the end
#
# First we'll see what happens if we add the milk at the end. We'll simulate the coffee and the milk separately.
coffee = make_system(T_init=90, t_end=30, r=r_coffee, volume=300)
coffee_results = run_simulation(coffee, update_func)
coffee.T_final
milk = make_system(T_init=5, t_end=30, r=r_milk, volume=50)
milk_results = run_simulation(milk, update_func)
milk.T_final
# Here's what the results look like.
# +
plot(coffee_results.T, label='coffee')
plot(milk_results.T, '--', label='milk')
decorate(xlabel='Time (minutes)',
ylabel='Temperature (C)',
loc='center left')
savefig('figs/chap07-fig01.pdf')
# -
# Here's what happens when we mix them.
mix_last = mix(coffee, milk)
mix_last.T_final
# ## Mixing immediately
#
# Next here's what we get if we add the milk immediately.
coffee = make_system(T_init=90, t_end=0, r=r_coffee, volume=300)
milk = make_system(T_init=5, t_end=0, r=r_milk, volume=50)
mix_first = mix(coffee, milk)
mix_first.t_end = 30
results = run_simulation(mix_first, update_func)
mix_first.T_final
# The following function takes `t_add`, which is the time when the milk is added, and returns the final temperature.
def run_and_mix(t_add, t_total):
    """Simulates two liquids separately, then mixes them at t_add.

    t_add: time in minutes at which the milk is added
    t_total: total time to simulate, min

    returns: final temperature of the mixture
    """
    # cool each liquid separately until t_add
    coffee = make_system(T_init=90, t_end=t_add,
                         r=r_coffee, volume=300)
    run_simulation(coffee, update_func)

    milk = make_system(T_init=5, t_end=t_add,
                       r=r_milk, volume=50)
    run_simulation(milk, update_func)

    # combine them, then let the mixture cool for the remaining time
    mixture = mix(coffee, milk)
    mixture.t_end = t_total - t_add
    run_simulation(mixture, update_func)

    return mixture.T_final
# We can try it out with a few values.
run_and_mix(t_add=0, t_total=30)
run_and_mix(t_add=15, t_total=30)
run_and_mix(t_add=30, t_total=30)
# And then sweep a range of values for `t_add`
sweep = SweepSeries()
for t_add in linspace(0, 30, 11):
sweep[t_add] = run_and_mix(t_add, 30)
# Here's what the result looks like.
# +
plot(sweep, label='final temp', color='C2')
decorate(xlabel='Time added (min)',
ylabel='Final temperature (C)')
savefig('figs/chap07-fig02.pdf')
# -
# ### Analysis
# Now we can use the analytic result to compute temperature as a function of time. The following function is similar to `run_simulation`.
def run_analysis(system):
    """Computes temperature using the analytic solution.

    Evaluates T(t) = T_env + (T_init - T_env) * exp(-r t), the
    closed-form solution of Newton's law of cooling.

    system: System object (must provide init, t_end, dt, r, T_env)

    returns: TimeFrame.
    Side effect: stores the final temperature in system.T_final.
    """
    # unpack injects the System attributes into the local namespace.
    unpack(system)
    T_init = init.T
    ts = linrange(0, t_end, dt)
    T_array = T_env + (T_init - T_env) * exp(-r * ts)
    # to be consistent with run_simulation, we have to
    # put the array into a TimeFrame
    results = TimeFrame(T_array, index=ts, columns=['T'])
    system.T_final = get_last_value(results.T)
    return results
# Here's how we run it. From the analysis (see `chap14analysis.ipynb`), we have the computed value of `r_coffee2`
r_coffee2 = 0.011610223142273859
coffee2 = make_system(T_init=90, r=r_coffee2, volume=300, t_end=30)
results = run_analysis(coffee2)
coffee2.T_final
# And we can compare to the results from simulation.
coffee = make_system(T_init=90, r=r_coffee, volume=300, t_end=30)
results = run_simulation(coffee, update_func)
coffee.T_final
# They are identical except for a small roundoff error.
coffee.T_final - coffee2.T_final
# ## Exercises
#
# **Exercise:** Suppose the coffee shop won't let me take milk in a separate container, but I keep a bottle of milk in the refrigerator at my office. In that case is it better to add the milk at the coffee shop, or wait until I get to the office?
#
# Hint: Think about the simplest way to represent the behavior of a refrigerator in this model. The change you make to test this variation of the problem should be very small!
# +
# Solution
## A refrigerator keeps the milk at a constant temperature,
## so it is like a container with r = 0.
| code/soln/chap16soln.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Introduction to Gaussian Process Models
# Gaussian process (GP) models serve as approximations of computationally expensive (time-consuming) black-box functions. To reduce the number of times the expensive function must be queried during optimization, the GP is used to guide the sampling decisions in the parameter space and only the "most promising" parameters are selected for evaluation.
# A GP model treats the function it approximates like the realization of a stochastic process:
# $m_{GP}(\theta) = \mu + Z(\theta)$,
# where $\mu$ represents the mean of the stochastic process and $Z(\theta) \sim \mathcal{N}(0,\sigma^2)$ is the deviation from the mean.
# The correlation between two random variables $Z(\theta_k)$ and $Z(\theta_l)$ is defined by a kernel, e.g., the squared exponential (also Radial basis function) kernel:
# \begin{equation}
# Corr(Z(\theta_k),Z(\theta_l)) = \exp(-\sum_{i=1}^d \gamma_i|\theta_k^{(i)}-\theta_l^{(i)}|^{q_i})
# \end{equation},
# with $\gamma_i$ determining how quickly the correlation in dimension $i$ decreases, and $q_i$ reflecting the smoothness of the function in dimension $i$
# Denoting $\mathbf{R}$ as the matrix whose $(k,l)$-th element is given as the correlation above, maximum likelihood estimation is used to determine the GP parameters $\mu$, $\sigma^2$, and $\gamma_i$. Then, at an unsampled point $\theta^{new}$, the GP prediction is \begin{equation}
# m_{\text{GP}}(\theta^{\text{new}})=\hat{\mu}+\mathbf{r}^T\mathbf{R}^{-1}(\mathbf{f}-\mathbf{1}\hat\mu),
# \end{equation}
# where $\mathbf{1}$ is a vector of ones of appropriate dimension and $\mathbf{f}$ is the vector of function values obtained so far, and
# \begin{equation}
# \boldsymbol{r}=
# \begin{bmatrix}
# Corr\left(Z(\theta^{\text{new}}), Z(\theta_1)\right)\\
# \vdots\\
# Corr\left(Z(\theta^{\text{new}}
# ), Z(\theta_n)\right)
# \end{bmatrix}.
# \end{equation}
# The corresponding mean squared error is
# \begin{equation}
# s^2(\theta^{\text{new}})=\hat{\sigma}^2\left( 1-\boldsymbol{r}^T\boldsymbol{R}^{-1}\boldsymbol{r} +\frac{(1-\boldsymbol{1}^T\boldsymbol{R}^{-1}\boldsymbol{r})^2}{\mathbf{1}^T\boldsymbol{R}^{-1}\mathbf{1}}\right)
# \end{equation}
# with
# \begin{equation}
# \hat{\mu} = \frac{\mathbf{1}^T\boldsymbol{R}^{-1}\mathbf{f}}{\mathbf{1}^T\boldsymbol{R}^{-1}\mathbf{1}}
# \end{equation}
# and
# \begin{equation}
# \hat{\sigma}^2=\frac{(\mathbf{f}-\mathbf{1}\hat{\mu})^T\boldsymbol{R}^{-1}(\mathbf{f}-\mathbf{1}\hat{\mu})}{n}.
# \end{equation}
# Python has a good implementation of GPs where you can choose different kernels.
# First, we need (input, output) data pairs. Inputs are parameters where we query the function (for simplicity, the example has an inexpensive function). From the Scikit-Learn website: https://scikit-learn.org/stable/auto_examples/gaussian_process/plot_gpr_noisy_targets.html
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel, Matern, RationalQuadratic, ExpSineSquared, WhiteKernel
from scipy.optimize import minimize
from scipy.spatial import distance
import scipy.spatial as scp
from scipy.stats import norm
from pyDOE import * #needed if Latin hypercube design is used
import warnings
warnings.filterwarnings("ignore")
def f(x):
    """The expensive black-box function we want to approximate."""
    return np.sin(x) * x
xlow = 0 #lower bound on x
xup = 10 #upper bound on x
dim = 1 #dimension of the problem
# toggle: False -> fixed hand-picked design, True -> Latin hypercube design
lhs_wanted = False
np.random.seed(420)
if not(lhs_wanted): #when not using space-filling design
    X = np.atleast_2d([1., 3., 7., 8.]).T #select some points where we evaluate the function
    # Function evaluations
    y = f(X).ravel()
# Other options for creating space filling designs is latin hypercube sampling:
if lhs_wanted:
    ninit=6 #6 initial evaluations
    init_design = lhs(dim, samples =ninit, criterion='maximin') #initial design in [0,1]^dim
    X = xlow+(xup-xlow)*init_design #scale to [xlow,xup]
    # Function evaluations
    y = f(X).ravel()
# **Exercise:** run the code with different initial samples, i.e., try lhs_wanted = False and lhs_wanted = True and compare the sampling history
# +
# Select a GP kernel (here RBF or squared exponential)
kernel = RBF()
gp = GaussianProcessRegressor(kernel=kernel, normalize_y=True,n_restarts_optimizer=9)
# Fit the GP to the input-output data
gp.fit(X, y)
# -
# Make some good-looking plots
def plot_the_gp(X, y, gp, xnew):
    """Plot the true function, the observations, and the GP prediction
    with its 95% confidence band.

    X, y: sampled inputs and corresponding function values
    gp: fitted GaussianProcessRegressor
    xnew: newest sample (empty list on the first call; when non-empty
          the most recent observation is highlighted in green)
    """
    #select a bunch of points where we want to make predictions with the GP
    x = np.atleast_2d(np.linspace(0, 10, 1000)).T
    # Make the GP prediction at the points where no evaluations were taken - also return predicted uncertainty
    y_pred, sigma = gp.predict(x, return_std=True)
    plt.figure()
    plt.plot(x, f(x), 'r:', label=r'$f(x) = x\,\sin(x)$')
    plt.plot(X, y, 'r.', markersize=10, label='Observations')
    plt.plot(x, y_pred, 'b-', label='Prediction')
    if len(xnew)>0:
        plt.plot(X[-1], y[-1], 'gs', markersize=10, label='Newest sample')
    # 95% confidence band: prediction +/- 1.96 * predicted std
    plt.fill(np.concatenate([x, x[::-1]]),
             np.concatenate([y_pred - 1.9600 * sigma,
                            (y_pred + 1.9600 * sigma)[::-1]]),
             alpha=.5, fc='b', ec='None', label='95% confidence interval')
    plt.xlabel('$x$')
    plt.ylabel('$f(x)$')
    plt.ylim(-10, 20)
    plt.legend(loc='upper left')
    plt.show()
plot_the_gp(X, y, gp, [])
# **Optional Exercise:** check out the Scikit-Learn website https://scikit-learn.org/stable/modules/gaussian_process.html#kernels-for-gaussian-processes and experiment around with different basic kernels, kernel parameters and kernel combinations, e.g.,
# - does using "kernel = RBF(10, (1e-2, 1e2))" change anything?
# - what happens when you use "kernel = Matern(length_scale=1.0, nu=1.5)"
# - try "kernel = 1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1, alpha_bounds=(1e-5, 1e15))"
# - "kernel = 1.0 * ExpSineSquared(
# length_scale=1.0,
# periodicity=3.0,
# length_scale_bounds=(0.1, 10.0),
# periodicity_bounds=(1.0, 10.0),)"
# - use a combination of kernels: "kernel = RBF()+WhiteKernel(noise_level=.001)" using different noise_levels
#
# **Exercise:** Change the inputs of the GP (i.e., the training samples) and see how the GP predictions change (use fewer or more points, use different points in [0,10], e.g., "X=np.atleast_2d(np.random.uniform(0,10,5)).T"
# Takeaway: the quality and accuracy of the GP depend highly on the training data and the kernel used
# # Adaptive Optimization with the GP
# GP models are often used in optimization algorithms. In each iteration of the optimization, a new sample point is selected by maximizing the expected improvement (EI):
# \begin{equation}
# \mathbb{E}(I)(\theta) = s(\theta)\left(v\Phi(v)+\phi(v) \right),
# \end{equation}
# where
# \begin{equation}
# v=\frac{f^{\text{best}}-m_{\text{GP}}(\theta)}{s(\theta)}
# \end{equation}
# and $\Phi$ and $\phi$ are the normal cumulative distribution and density functions, respectively, and $s(\theta)=\sqrt{s^2(\theta)}$ is the square root of the mean squared error.
#
# The function $\mathbb{E}(I)(\theta)$ can be maximized with any python optimization library. The point $\theta^{\text{new}}$ where it reaches its maximum will be the new point where $f$ is evaluated.
# define expected improvement function
def ei(x, gpr_obj, Xsamples, Ysamples): #expected improvement
    """Negative expected improvement (EI) of candidate point x.

    x: candidate point, 1-d array of length dim
    gpr_obj: fitted GaussianProcessRegressor (anything with .predict)
    Xsamples: (n, dim) array of already-evaluated points
    Ysamples: function values at Xsamples

    returns: -EI(x), so that *minimizing* this maximizes EI;
    returns 0.0 when x coincides with an already-sampled point.
    """
    x = x.reshape(1, -1)
    # threshold for when points are so close that they are considered indistinguishable
    min_dist = np.min(scp.distance.cdist(x, Xsamples))
    if min_dist < 1e-6:
        return 0.0
    mu, sigma = gpr_obj.predict(x, return_std=True)
    # best (smallest) function value observed so far
    mu_sample_opt = np.min(Ysamples)
    # In case sigma equals zero
    with np.errstate(divide='ignore'):
        Z = (mu_sample_opt - mu) / sigma
        expected_improvement = (mu_sample_opt - mu) * norm.cdf(Z) + sigma * norm.pdf(Z)
        # bug fix: the original used '==' (a no-op comparison) instead of
        # '=', so EI was never zeroed where sigma == 0
        expected_improvement[sigma == 0.0] = 0.0
    return -1. * expected_improvement #to maximize EI, you minimize the negative of it
def plot_the_ei(gpr_obj, X, Y):
    """Plot the expected improvement over [0, 10] together with the
    locations that have already been sampled.

    gpr_obj: fitted GaussianProcessRegressor
    X, Y: sampled inputs and corresponding function values
    """
    x = np.atleast_2d(np.linspace(0, 10, 1000)).T
    expimp=np.zeros(1000)
    for ii in range(1000):
        # ei returns the *negative* EI, so negate it back for plotting
        expimp[ii] = -ei(x[ii], gpr_obj, X, Y)
    plt.figure()
    plt.plot(x, expimp, 'k--', label='Expected improvement')
    plt.plot(X, np.zeros(X.shape[0]), 'rx', markersize=10, label='Observation sites')
    #plt.plot(X[-1],0, 'gs', markersize=10, label='Newest sample')
    plt.xlabel('$x$')
    plt.ylabel('$EI(x)$')
    #plt.ylim(-10, 20)
    plt.legend(loc='upper left')
    plt.show()
# do your GP iterations: maximize EI, select new point, evaluate new point, update GP, maximize EI, ....
n_GP_samples = 20 # total evaluation budget for f: stop once this many samples exist
bound_list = np.array([[xlow, xup]])
xnew=[]
while X.shape[0]< n_GP_samples:
    # refit the GP to all (input, output) pairs collected so far
    gpr_obj = GaussianProcessRegressor(kernel=kernel, random_state=0,normalize_y=True, n_restarts_optimizer=10).fit(X, y) #create the GP
    plot_the_gp(X, y, gpr_obj, xnew)
    plot_the_ei(gpr_obj, X, y)
    #compute next point by maximizing expected improvement, multi-start optimization
    xnew = []
    fnew =np.inf
    # 10 restarts because EI is multimodal; keep the best local optimum found
    for ii in range(10):
        x0 = xlow + (xup-xlow) * np.random.rand(1,dim) #random starting point for optimizing expected improvement
        res= minimize(ei,np.ravel(x0),method='SLSQP',bounds=bound_list, args=(gpr_obj, X, y))
        dist = np.min(scp.distance.cdist(np.asmatrix(res.x), X)) #make sure new point is sufficiently far away from already sampled points
        if np.min(dist)>1e-6 and res.success: #1e-3 is tunable
            x_ = np.asmatrix(res.x)
            if res.fun< fnew:
                xnew = x_
                fnew = res.fun
        else: #use random point as new point
            x_ = np.asarray(xlow) + np.asarray(xup-xlow) * np.asarray(np.random.rand(1,dim)) #random starting point
            fv= ei(x_, gpr_obj, X, y)
            if len(xnew)== 0 or fv < fnew:
                xnew = np.asmatrix(x_)
                fnew= fv
    # evaluate the expensive function at the selected point
    fval = f(np.ravel(xnew))
    #update Xsamples and Ysamples arrays
    X=np.concatenate((X, np.asmatrix(xnew)), axis = 0)
    Y_ = np.zeros(len(y)+1)
    Y_[0:len(y)]= y
    Y_[-1]=fval
    y =Y_
minID=np.argmin(y) #find index of best point
print('best point: ', X[minID])
print('best value: ', y[minID])
print('Number evaluations: ', X.shape[0])
# From the images of the expected improvement we can see that the peaks are becoming increasingly narrow, to almost looking like jump-discontinuities. This means that for an optimizer that tries to find the maximum of the expected improvement function, it becomes increasingly harder to find the optimum and sampling becomes more "random" in the space because the function is flat and EI values are the same everywhere except at the jumps.
# Takeaways:
# - GPs can be useful to guide the search during optimization
# - They shine when the number of function evaluations is severely limited
# - The expected improvement function helps to select points that are the "most promising" next evaluations
# - The expected improvement function is multimodal and becomes increasingly harder to optimize
| tutorials/optimization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
import matplotlib.pyplot as plt
import numpy as np
import pickle
import os
import os.path
import scipy,scipy.spatial
import matplotlib
matplotlib.rcParams['figure.dpi'] = 100
from data_utilities import *
# from definitions import *
# from run_train_eval_net import run_train_eval_net,run_eval_net
# -
import os
GPU = "1"
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=GPU
# +
dataset_name = 'ManyRx'
dataset_path='../../orbit_rf_dataset/data/compact_pkl_datasets/'
compact_dataset = load_compact_pkl_dataset(dataset_path,dataset_name)
tx_list = compact_dataset['tx_list']
rx_list = compact_dataset['rx_list']
equalized = 0
capture_date_list = compact_dataset['capture_date_list']
capture_date = capture_date_list[0]
n_tx = len(tx_list)
n_rx = len(rx_list)
print(n_tx,n_rx)
# -
np.random.seed(0)
n_real = 5
rx_list_real = []
for i in range(n_real):
np.random.shuffle(rx_list)
rx_list_real.append(np.copy(rx_list).tolist())
print(rx_list_real)
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import regularizers
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import *
import tensorflow.keras.backend as K
# +
def create_net():
    """Build and compile the CNN transmitter classifier.

    Input: (256, 2) IQ samples; output: softmax over n_tx transmitter
    classes (n_tx is a module-level global set from the dataset).

    returns: compiled keras Model
    """
    inputs = Input(shape=(256,2))
    # add a channel axis so Conv2D can operate on the (256, 2) input
    x = Reshape((256,2,1))(inputs)
    x = Conv2D(8,(3,2),activation='relu',padding = 'same')(x)
    x = MaxPool2D((2,1))(x)
    x = Conv2D(16,(3,2),activation='relu',padding = 'same')(x)
    x = MaxPool2D((2,1))(x)
    x = Conv2D(16,(3,2),activation='relu',padding = 'same')(x)
    x = MaxPool2D((2,2))(x)
    x = Conv2D(32,(3,1),activation='relu',padding = 'same')(x)
    x = MaxPool2D((2,1))(x)
    x = Conv2D(16,(3,1),activation='relu',padding = 'same')(x)
    #x = resnet(x,64,(3,2),'6')
    #x = MaxPool2D((2,2))(x)
    x = Flatten()(x)
    # dense head with L2 regularization and dropout before the softmax
    x = Dense(100, activation='relu', kernel_regularizer = keras.regularizers.l2(0.0001))(x)
    # x = Dropout(0.3)(x)
    x = Dense(80, activation='relu',kernel_regularizer = keras.regularizers.l2(0.0001))(x)
    x = Dropout(0.5)(x)
    x = Dense(n_tx, activation='softmax',kernel_regularizer = keras.regularizers.l2(0.0001))(x)
    ops = x
    classifier = Model(inputs,ops)
    classifier.compile(loss='categorical_crossentropy',metrics=['categorical_accuracy'],optimizer=keras.optimizers.Adam(0.0005))
    return classifier
classifier = create_net()
classifier.summary()
# -
def evaluate_test(classifier):
    """Evaluate the classifier on the held-out different-Rx test set.

    Uses the module-level globals sig_dfTest, txidNum_dfTest, tx_list,
    and n_test_samples.

    returns: (overall accuracy, class-balanced accuracy)
    """
    predictions = classifier.predict(sig_dfTest)
    predicted_labels = np.argmax(predictions, 1)
    acc = np.mean(predicted_labels == txidNum_dfTest)
    # class-balanced subset: at most n_test_samples examples per class
    per_class = [np.where(txidNum_dfTest == c)[0][:n_test_samples]
                 for c in range(len(tx_list))]
    balanced = np.concatenate(per_class)
    acc_bal = np.mean(predicted_labels[balanced] == txidNum_dfTest[balanced])
    return acc, acc_bal
n_test_rx = 5;
# +
list(range( 0,len(rx_list_real[0])-n_test_rx+1,5))
# +
TRAIN = True
# when True, (nrx, real) combinations whose weight files already exist are skipped
continue_training = True
nreal = 5
real_list = list(range(nreal))
# sweep of training-receiver counts; the last n_test_rx receivers stay held out
nrx_list = list(range( 0,len(rx_list_real[0])-n_test_rx+1,5)) # [0,len(rx_list_real[0])-1] #
patience = 5
n_epochs = 100
smTest_results = []
dfTest_results = []
dfTestBal_results = []
for real in real_list:
    # each "realization" is an independent random ordering of the receivers
    rx_list = rx_list_real[real]
    # the last n_test_rx receivers form the different-Rx test set
    rx_test_list = rx_list[-n_test_rx:]
    test_dataset = merge_compact_dataset(compact_dataset,capture_date,tx_list,rx_test_list)
    test_augset_dfRx,_,_ = prepare_dataset(test_dataset,tx_list,val_frac=0.0, test_frac=0.0)
    [sig_dfTest,txidNum_dfTest,txid_dfTest,cls_weights] = test_augset_dfRx
    # per-class counts; the smallest count sets the balanced test-set size
    cnt=np.histogram(txidNum_dfTest,bins=np.arange(len(tx_list)+1)-0.5)
    n_test_samples = int(np.min(cnt[0]))
    smTest_results_real = []
    dfTest_results_real = []
    dfTestBal_results_real = []
    for nrx in nrx_list:
        print("");print("")
        print("nrx: {} - real: {} ".format(nrx,real))
        fname_w = 'weights/d003_{:02d}_{:02d}.hd5'.format(nrx,real)
        # train on the first nrx+1 receivers of this realization
        rx_train_list= rx_list[:nrx+1]
        dataset = merge_compact_dataset(compact_dataset,capture_date,tx_list,rx_train_list)
        train_augset,val_augset,test_augset_smRx = prepare_dataset(dataset,tx_list,
                                                    val_frac=0.1, test_frac=0.1)
        [sig_train,txidNum_train,txid_train,cls_weights] = train_augset
        [sig_valid,txidNum_valid,txid_valid,_] = val_augset
        [sig_smTest,txidNum_smTest,txid_smTest,cls_weights] = test_augset_smRx
        # skip training when weights for this (nrx, real) pair already exist
        if continue_training:
            skip = os.path.isfile(fname_w)
        else:
            skip = False
        classifier = create_net()
        if TRAIN and not skip:
            filepath = 't_weights_'+GPU
            # checkpoint best val-loss weights; early-stop after `patience` stagnant epochs
            c=[ keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True),
               keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience)]
            history = classifier.fit(sig_train,txid_train,class_weight=cls_weights,
                        validation_data=(sig_valid , txid_valid),callbacks=c, epochs=n_epochs)
            classifier.load_weights(filepath)
            classifier.save_weights(fname_w,save_format="h5")
        else:
            classifier.load_weights(fname_w)
        # same-Rx accuracy via keras evaluate; diff-Rx accuracies via evaluate_test
        smTest_r = classifier.evaluate(sig_smTest,txid_smTest,verbose=0)[1]
        # dfTest_r = classifier.evaluate(sig_dfTest,txid_dfTest)[1]
        dfTest_r,dfTestBal_r = evaluate_test(classifier)
        print(smTest_r,dfTest_r)
        smTest_results_real.append(smTest_r)
        dfTest_results_real.append(dfTest_r)
        dfTestBal_results_real.append(dfTestBal_r)
        # release TF graph/session memory between runs
        K.clear_session()
    smTest_results.append(smTest_results_real)
    dfTest_results.append(dfTest_results_real)
    dfTestBal_results.append(dfTestBal_results_real)
# -
nrx_list
matplotlib.rcParams['figure.dpi'] = 100
plt.errorbar(np.array(nrx_list)+1,np.mean(smTest_results,0),np.std(smTest_results,0),capsize=4)
plt.errorbar(np.array(nrx_list)+1,np.mean(dfTest_results,0),np.std(dfTest_results,0),capsize=4)
plt.legend(['Same Rx(s)','Diff. Rx'])
plt.xlabel('N Train Rx')
plt.ylabel('Class. Accuracy')
#plt.xticks(range(0,len(nrx_list),2))
plt.grid()
print(np.mean(dfTest_results,0).tolist())
print(tx_list)
print(nrx_list)
print(real_list)
print(smTest_results)
print(dfTest_results)
print(dfTestBal_results)
print(rx_list_real)
| d003_ManyRx_nrx.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 (tensorflow)
# language: python
# name: tensorflow
# ---
# <h1>LSTM with TIEDVD for B0006.</h1>
# +
"""
Created on Mon Apr 10 09:02:14 2019
@author: khaled
"""
import sys
import numpy as np # linear algebra
from scipy.stats import randint
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv), data manipulation as in SQL
import matplotlib.pyplot as plt # this is used for the plot the graph
import seaborn as sns # used for plot interactive graph.
from sklearn.model_selection import train_test_split # to split the data into two parts
#from sklearn.cross_validation import KFold # use for cross validation
from sklearn.preprocessing import StandardScaler # for normalization
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline # pipeline making
from sklearn.model_selection import cross_val_score
from sklearn.feature_selection import SelectFromModel
from sklearn import metrics # for the check the error and accuracy of the model
from sklearn.metrics import mean_squared_error,r2_score
## for Deep-learing:
import keras
from keras.layers import Dense
from keras.models import Sequential
from keras.utils import to_categorical
from keras.optimizers import SGD
from keras.callbacks import EarlyStopping
from keras.utils import np_utils
import itertools
from keras.layers import LSTM
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers import Dropout
# -
import numpy as np # linear algebra
import pandas as pd
df=pd.read_csv("Discharge6_H1.csv")
featurs=['Batt_name','cycle','amb_temp','voltage_battery','current_battery','temp_battery','current_load','voltage_load','time','Capacity','H']
ftr=['cycle','Capacity']
df1=df[ftr]
# cycles >= 82 form the test (prediction) range
test=df[(df['cycle']>=82)]
f1=['cycle','H']
df=df[featurs]
#test=test['Capacity']
print(df1.head())
dataset=df[f1]
# split on the cycle number: < 82 train, >= 82 test
data_train=dataset[(dataset['cycle']<82)]
data_set_train=data_train.iloc[:,1:2].values
data_test=dataset[(dataset['cycle']>=82)]
data_set_test=data_test.iloc[:,1:2].values
from sklearn.preprocessing import MinMaxScaler
sc=MinMaxScaler(feature_range=(0,1))
# fit the scaler on training data only, then apply the same scaling to test data
data_set_train=sc.fit_transform(data_set_train)
data_set_test=sc.transform(data_set_test)
# +
X_train=[]
y_train=[]
# sliding window: predict the value at cycle i from the previous 10 values
for i in range(10,81):
    X_train.append(data_set_train[i-10:i,0])
    y_train.append(data_set_train[i,0])
X_train,y_train=np.array(X_train),np.array(y_train)
# reshape to (samples, timesteps, features=1) as expected by keras LSTM layers
X_train=np.reshape(X_train,(X_train.shape[0],X_train.shape[1],1))
# +
#Start LSTM
# four stacked 200-unit LSTM layers with dropout, single-unit regression head
regress=Sequential()
regress.add(LSTM(units=200, return_sequences=True, input_shape=(X_train.shape[1],1)))
regress.add(Dropout(0.3))
regress.add(LSTM(units=200, return_sequences=True))
regress.add(Dropout(0.3))
regress.add(LSTM(units=200, return_sequences=True))
regress.add(Dropout(0.3))
regress.add(LSTM(units=200))
regress.add(Dropout(0.3))
regress.add(Dense(units=1))
regress.compile(optimizer='adam',loss='mean_squared_error')
regress.fit(X_train,y_train,epochs=200,batch_size=50)
# +
##### predictions
# prepend the last 10 training values so the first test window is complete
data_total=pd.concat((data_train['H'],data_test['H']),axis=0)
inputs=data_total[len(data_total)-len(data_test)-10:].values
inputs=inputs.reshape(-1,1)
inputs=sc.transform(inputs) # same scaler as fitted on the training data
X_test=[]
# NOTE(review): upper bound 97 is hard-coded -- presumably len(inputs); confirm it matches the test range
for i in range(10,97):
    X_test.append(inputs[i-10:i,0])
X_test=np.array(X_test)
X_test=np.reshape(X_test,(X_test.shape[0],X_test.shape[1],1))
pred=regress.predict(X_test)
pred=sc.inverse_transform(pred) # back to the original scale
pred=pred[:,0]
tests=data_test.iloc[:,1:2]
# -
fitting=df[ (df['cycle']>=82)]
fitting=fitting['Capacity']
abc=np.array(fitting)
## abc == Capacity
fitting=fitting.values
#test=per
cap=np.array(pred)
li=[]
# linear fit of actual capacity against the predicted values
z=np.polyfit(cap,abc,1)
z
li=[]
# NOTE(review): the loop below uses hard-coded coefficients rather than z
# from the polyfit above -- presumably a previously computed fit; confirm they agree
for i in range(len(abc)):
    xx=(cap[i] *0.000655219062) + 0.681545173
    li.append(xx)
# +
per=test
#per['per']=pred
#per['per']=li
per['per']=li
# -
rmse = np.sqrt(mean_squared_error(per['Capacity'],per['per']))
print('Test RMSE: %.3f' % rmse)
print("R^2 = " + str(metrics.r2_score(per['Capacity'],per['per'])))
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
plot_df = df1.loc[(df1['cycle']>=1),['cycle','Capacity']]
plot_per=per.loc[per['cycle']>=82,['cycle','per']]
sns.set_style("darkgrid")
plt.figure(figsize=(15, 9))
plt.plot(plot_df['cycle'], plot_df['Capacity'], label="Actual data", color='blue')
plt.plot(plot_per['cycle'],plot_per['per'],label="Prediction data", color='red')
#Draw threshold
plt.plot([0.,168], [1.38, 1.38])
plt.ylabel('Capacity')
# make x-axis ticks legible
adf = plt.gca().get_xaxis().get_major_formatter()
plt.xlabel('cycle')
plt.title('Discharge B0006')
# +
actual=0
pred=0
Afil=0
Pfil=0
a=per['Capacity'].values
b=per['per'].values
j=0
k=0
# first test-set index where the *actual* capacity drops below the 1.38 threshold
for i in range(len(a)):
    actual=a[i]
    if actual<1.38:
        j=i
        Afil=j
        break
# first test-set index where the *predicted* capacity drops below the threshold
for i in range(len(a)):
    pred=b[i]
    if pred< 1.38:
        k=i
        Pfil=k
        break
# +81 converts the test-set index back to an absolute cycle number (test range starts at cycle 82)
print("The Actual fail at cycle number: "+ str(Afil+81))
print("The prediction fail at cycle number: "+ str(Pfil+81))
# -
RULerror=Pfil-Afil
print("The error of RUL= "+ str(RULerror)+ " Cycle(s)")
| LSTM_Paper/LTSM_Paper_P2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## `Non-Repetitive Parts Calculator` in Action!
# **Authors** <NAME> and <NAME>
#
# **Updated** August 21, 2020
# Click on the title **`Non-Repetitive` ... Action!** above and press `Shift`+`Enter` together on each cell to follow along and see all the code in action. If you're previewing this on GitHub, please [download](https://github.com/ayaanhossain/nrpcalc#NRP-Calculator-in-Action) the `nrpcalc` repository to execute this notebook locally. TOC links may not work in GitHub preview.
# This `jupyter` notebook will demonstrate the usage of the `Non-Repetitive Parts Calculator` and describe how to use its [API](https://github.com/ayaanhossain/nrpcalc/blob/master/docs/DOCS.md) to design commonly used genetic parts. Our purpose here is to demonstrate the different features of the `Non-Repetitive Parts Calculator` and to illustrate the considerations involved when designing thousands of non-repetitive genetic parts. Of course, the `Non-Repetitive Parts Calculator` may be used to design many different types of genetic parts beyond the examples presented here. The design possibilities are quite open-ended with this algorithm.
# ### Table of Contents
# * [Notebook Setup](#Notebook-Setup)
#
# * [Constraint Based Design of Genetic Parts](#Constraint-Based-Design-of-Genetic-Parts)
#
# * [Non-Repetitive σ<sup>70</sup> Promoters with CRISPRi with `Lmax=12`](#Non-Repetitive-σ70-Promoters-with-CRISPRi-with-Lmax=12)
# * [Designing the First Toolbox](#Designing-the-First-Toolbox)
# * [Designing the Second Toolbox - First Attempt](#Designing-the-Second-Toolbox---First-Attempt)
# * [Designing the Second Toolbox - Second Attempt](#Designing-the-Second-Toolbox---Second-Attempt)
#
# * [Non-Repetititve Ribosome Binding Sites with `Lmax=14`](#Non-Repetititve-Ribosome-Binding-Sites-with-Lmax=14)
#
# * [Non-Repetitive Toehold Switches with `Lmax=14`](#Non-Repetitive-Toehold-Switches-with-Lmax=14)
#
# * [Non-Repetitive Intrinsic Terminators with `Lmax=14`](#Non-Repetitive-Intrinsic-Terminators-with-Lmax=14)
#
# * [And Now, Our Watch is Ended](#And-Now,-Our-Watch-is-Ended)
#
# * [References](#References)
# ### Notebook Setup
# If you [installed](https://github.com/ayaanhossain/nrpcalc#Installation) `nrpcalc` successfully, you have everything you need to follow along.
# Let's first import `nrpcalc`.
import nrpcalc
# Hopefully, the import worked without throwing up any errors! If you face issues importing, please [open an issue](https://github.com/ayaanhossain/nrpcalc/issues). If everything worked fine, you're ready to follow along. If you do not understand a specific part of this notebook, either open an issue, or please reach the authors via Twitter or Email. We would be happy to answer your questions, and update this notebook in response to your questions, comments or concerns.
# Print the package docstring to confirm the import works and show the API overview.
print(nrpcalc.__doc__) # show docs
import time # time-keeping is important!
# ### Constraint Based Design of Genetic Parts
# Genetic parts exhibit their activity through biophysical interactions that rely on **DNA** or **RNA** sequence motifs, the presence or absence of specific RNA structures, and/or higher-order sequence or structural characteristics. For example, a constitutive σ<sup>70</sup> _E. coli_ promoter sequence will have a high transcription initiation rate when it contains a conserved $-35$ and $-10$ hexamer, separated by a $17$ base pair spacer. Likewise, a bacterial transcriptional terminator will have a high efficiency when it contains a fast-folding, stable RNA hairpin, followed by a U-rich tract.
#
# Such essential characteristics can be flexibly distilled into a set of criteria that every generated genetic part sequence must satisfy. The `Non-Repetitive Parts Calculator` `Maker Mode` accepts three types of genetic part constraints: a degenerate DNA or RNA sequence using the **IUPAC code**; an essential RNA secondary structure using _dot-parenthesis-x_ notation; and a model-based constraint that can be customized to quantify the presence of higher-order interactions or to facilitate the synthesis and construction of the genetic system by excluding sequences (e.g. restriction sites and polymeric sequences). All three constraints may be simultaneously used to specify a set of genetic part sequences with desired functionalities.
#
# As examples, **Supplementary Table 2** of our [publication](https://static-content.springer.com/esm/art%3A10.1038%2Fs41587-020-0584-2/MediaObjects/41587_2020_584_MOESM1_ESM.pdf) (see page 30) lists the design constraints and algorithm outcomes for a wide variety of genetic parts commonly used in synthetic biology, including minimal Pol II promoters, insulated ribosome binding sites, prokaryotic transcriptional terminators, and toehold RNA switches.
#
# In one sense, these genetic part constraints are explicit hypotheses that distill one’s knowledge of gene regulation and biophysics into the simplest possible computable form. In another sense, they are a type of classifier that separates the genetic part sequence space into only two categories: sequences expected to have some amount of genetic part activity versus sequences expected to have minimal to none activity.
#
# > **Note** The design constraints are not a quantitative prediction of functional activity; experimental characterization is still needed to validate designed parts. In general, it is advantageous to incorporate as much degeneracy into the constraints as possible to design larger toolboxes.
# ### Non-Repetitive σ<sup>70</sup> Promoters with CRISPRi with `Lmax=12`
# We will first design $1000$ brand new promoters for constitutive transcription in prokaryotes, divided into two toolboxes. We want the first toolbox to have $500$ strong promoters, while for the second toolbox we want to design $500$ promoters with variable strength. Additionally, we want these promoters to be CRISPRi repressible for engineering system logic (see [Reis et al. (2019)](https://www.nature.com/articles/s41587-019-0286-9)). Importantly, we will use the findings from [Larson et al. (2013)](https://www.nature.com/articles/nprot.2013.132) to design our CRISPRi.
# #### Designing the First Toolbox
# The sequence constraint for the first toolbox is defined for strong constitutive transcription (consensus $-35$ and $-10$ hexamers, and an optimal spacing of $17$ bases separating the two). Additionally, a **PAM** is embedded in the $17$-bp spacer to repress transcription initiation via CRISPRi. To enhance initiation, we will also embed a G+C-rich motif into the insulating $20$-bp upstream region of $-35$ hexamer.
# Toolbox 1 sequence constraint: strong constitutive sigma-70 promoter with
# consensus -35/-10 hexamers and a PAM embedded in the spacer for CRISPRi.
tb1_seq_constraint = ''.join([
    'S' * 5,    # G+C-rich motif in Upstream (5 Bases)
    'N' * 15,   # Remaining 15 Bases in Upstream
    'TTGACA',   # Consensus -35 Hexamer
    'N' * 6,    # First 6 Bases of 17-bp Spacer
    'CCN',      # PAM (Next 3 Bases of 17-bp Spacer)
    'N' * 8,    # Remaining 8 Bases of 17-bp Spacer
    'TATAAT',   # Consensus -10 Hexamer
    'N' * 6,    # Discriminator
])
# Let's review our sequence constraint.
# Visualize the constraint; the 20-character band printed underneath starts
# at column 35, i.e. immediately downstream of the embedded 'CCN' PAM.
print(tb1_seq_constraint)
print(' '*35 + '-'*20)
print(' '*35 + '{:^20}'.format('sgRNA Target Site'))
# For promoters, we don't really have any DNA or RNA secondary structure constraint, so it can be all dots, that is, we don't care about the secondary structure anywhere along the sequence (this will change for downstream part design, as we'll see).
# Promoters need no secondary-structure requirement: all dots means every
# base is free to pair or not.
tb1_struct_constraint = '.'*len(tb1_seq_constraint)
tb1_struct_constraint
# Once the design constraints are finalized, it is time to think about the experimental objectives as well. One possible objective might involve eliminating restriction sites from our promoters, so that we may clone them in successfully.
#
# To do that, we can define some _model functions_ to help us generate promoters that are compatible with our cloning purposes. Importantly, while we can define functions to explicitly prevent our used cutsites (say, BamHI or XbaI, specifically), it would be better to prevent the occurrence of any palindromic hexamer in our parts, which is usually a property of many restriction sites. That way, our promoters can be cloned and used in a variety of scenarios, without us having to restrict ourselves to using only the ones in which the specific restriction site motifs are absent, for a given cloning workflow.
#
# > **Note** The `Non-Repetitive Parts Calculator` can optimize two types of functions for us - a **local model function** which is applied on a genetic part concurrently with addition of each nucleotide, and a **global model function** which is applied on a genetic part when it is fully constructed. The local model function must accept a single argument called `seq` (the partial sequence under construction) and returns either `(True, None)` if an evaluation was satisfied, or `(False, index)` where `index` is a traceback location where nucleotide choices need to be altered to fulfill an objective. The global model function also must accept a single input `seq` (a fully constructed sequence), and return either `True` if an objective was met, or `False` otherwise.
#
# We will now develop a new objective function to prevent palindromic hexamers in our designed promoters to be evaluated concurrently as each new base is added to a promoter towards completion (a local model function). Our function will start evaluation when the **sixth** base (base at `index=5` or equivalently, when `len(seq)=6`) is added to a partial promoter under construction (necessary condition for evaluation), and guide the design process to steer clear of palindromic hexamers. We will also define any helper functions we need, and test our logic as we move forward.
# translation table sending each DNA base to its Watson-Crick complement
comptab = str.maketrans('ATGC', 'TACG')
# helper function
def revcomp(seq):
    """Return the reverse complement of DNA string `seq`."""
    # complementing then reversing equals reversing then complementing
    return seq.translate(comptab)[::-1]
assert revcomp('AAATTTGGGCCC') == 'GGGCCCAAATTT' # quick sanity check: revcomp works
assert revcomp('GGGAAATTTCCC') == 'GGGAAATTTCCC' # a palindromic sequence maps onto itself
# actual local model function
def prevent_cutsite(seq):
    """Local model function: forbid palindromic hexamers (the hallmark of
    most 6-cutter restriction sites) in a part under construction.

    Returns (True, None) when the trailing hexamer is acceptable (or the
    sequence is still shorter than 6 bases), else (False, traceback_index)
    where traceback_index marks the start of the offending hexamer.
    """
    # too short to contain a hexamer yet -- pass for now
    if len(seq) < 6:
        return (True, None)
    # only the newest (trailing) hexamer needs checking; earlier windows
    # were already validated as the sequence grew
    hexamer = seq[-6:]
    # palindromic = equal to its own reverse complement
    if hexamer == hexamer[::-1].translate(str.maketrans('ATGC', 'TACG')):
        return (False, len(seq) - 6) # retrace from the start of this hexamer
    return (True, None)
# The optimization done by this function is straight-forward. We check if a partial sequence under construction ends with a palindromic hexamer. If it does, then we ask `Maker` to go back $6$ bases from our current index which is at index `len(seq)-6`, and start making alternate nucleotide choices starting **at** that location. If this function returns `True` for all locations starting at the sixth base, then naturally the complete part would be devoid of palindromic hexamers. Note also that we also return `True` for partial sequences shorter than $6$ bases.
#
# `Maker` takes care of calling this function above with the addition of each base, when it is passed as the designated local model function. So, all we ever need to do inside this function is just evaluate the "current" case, i.e., evaluate the hexamer ending at the current index to ensure our optimization.
# > **Note** The traceback index should point to the location _starting at which_ nucleotide choices need to be altered. For example, in the illustrated `prevent_cutsite` function above, we need to go back $6$ bases from the current location if it is palindromic, and trace a new path through constraint space. This means, our traceback index should be `len(seq)-6=0` (the very beginning of our sequence) if our current sequence is of length $6$, and forms a palindromic hexamer.
# quick unit checks: two real cutsites trace back to index 0; a
# non-palindromic homopolymer passes the filter
assert prevent_cutsite('GGATCC') == (False, 0) # BamHI will be prevented
assert prevent_cutsite('TCTAGA') == (False, 0) # XbaI will be prevented
assert prevent_cutsite('GGGGGG') == (True, None) # non-palindromic hexamer will pass our filter
# At this point, we are ready to launch the `Non-Repetitive Parts Calculator` `Maker Mode` to design some promoters for us!
# +
# Toolbox 1 design run: 500 non-repetitive strong promoters (Lmax=12),
# with the palindromic-hexamer filter applied during construction.
# Record starting time
t0 = time.time()
# Execute Maker
toolbox1 = nrpcalc.maker(
    seed=1, # reproducible results
    seq_constr=tb1_seq_constraint, # as defined above
    struct_constr=tb1_struct_constraint, # as defined above
    Lmax=12, # as stated in our goal
    target_size=500, # as stated in our goal
    part_type='DNA', # as stated in our goal
    local_model_fn=prevent_cutsite) # as defined above
# Compute execution time
tf = time.time() - t0
# -
print('Run took {:.2f}s'.format(tf)) # Show run time
# The console output shows that all our constraints passed initial checks, and were valid. `Maker` was then apparently able to design $500$ promoters for us based on all given constraints without failure in less than a second. Let's look at some of the parts produced, and compare it with our sequence constraint.
# Spot-check the first and last designed parts against the constraint
# (the toolbox is keyed by 0-based part index).
toolbox1[0] # show the first part designed
tb1_seq_constraint # show the sequence constraint
toolbox1[499] # show the last part designed
# The construction looks good, but it's always a good idea to verify explicitly if our design objectives were met. We will define a verification function to check if our completely constructed promoters are indeed devoid of palindromic hexamers.
def final_cutsite_check(seq):
    """Global check: return True iff `seq` contains no palindromic hexamer
    (a hexamer equal to its own reverse complement)."""
    complement = str.maketrans('ATGC', 'TACG')
    # slide a 6-bp window across the full sequence
    for start in range(len(seq) - 5):
        window = seq[start:start + 6]
        # palindromic window => reject this part
        if window == window[::-1].translate(complement):
            return False
    # no palindromic hexamers anywhere: accept
    return True
# We will loop through the toolbox, and ensure
# all our parts pass the new global check
# (final_cutsite_check returns True only when no palindromic hexamer exists)
for promoter in toolbox1.values():
    assert final_cutsite_check(promoter) == True
# Every promoter has passed our verification so our design objective for the first toolbox was met. As we will see in the next section, the evaluation function `final_cutsite_check` could have been specified to `Maker` directly via `global_model_fn` parameter, which would automatically execute the evaluation on a part, after it was completely designed, and accept/reject it accordingly.
#
# The benefit of passing this check as a global model function to `Maker` is that the algorithm can adjust the number of trials it needs depending on an auto-estimated probability of evaluation failure.
# #### Designing the Second Toolbox - First Attempt
# For our second toolbox of promoters, we want to design $500$ variable strength promoters. Our sequence constraint will change accordingly.
# Toolbox 2 sequence constraint: degenerate -35 ('TTGNNN'), a weak-base
# (W = A/T) -10 region enabling 15-17 bp effective spacers, and the PAM
# retained in the spacer for CRISPRi.
tb2_seq_constraint = ''.join([
    'N' * 20,   # UPS: upstream region
    'TTGNNN',   # -35: partially degenerate hexamer
    'N' * 6,    # SPACER (first half)
    'CCN',      # PAM for CRISPRi
    'N' * 6,    # SPACER (second half)
    'WW',       # weak bases preceding the -10
    'WWWWWW',   # -10: weak-base hexamer
    'N' * 6,    # DIS: discriminator
])
# Notice, we introduced degeneracy in the $-35$ and opted for just weak bases (A/T) in place of the $-10$ hexamer. Additionally, the $-10$ is also preceded by weak bases to potentially design promoters with various spacer region lengths ranging from $15$ to $17$ bp. We still retain the PAM in spacer for CRISPRi.
tb2_seq_constraint # review the constraint
# Because things are more degenerate in our present sequence constraint, we might be interested in preventing cryptic hexamers within our promoters.
#
# This is easily done with another local model function, that identifies if a hexamer elsewhere within our promoter under construction, has fewer mismatches when compared to the consensus motifs than the ones placed (by `Maker`) at the intended `-35` and `-10` locations.
# helper function 1
def hamming(x, y): # score mismatches between two strings
    """Count positional mismatches between x and y (up to the shorter length)."""
    # zip stops at the shorter string, matching min(len(x), len(y)) semantics
    return sum(a != b for a, b in zip(x, y))
assert hamming(x='000000', y='111111') == 6 # test case 1
assert hamming(x='000111', y='111111') == 3 # test case 2
# helper function 2
def cryptic_hexamer(cx, hx, dt): # returns True if hx is a cryptic hexamer
    '''
    cx = consensus motif for either -35 or -10
    hx = current hexamer under evaluation
    dt = number of mismatches between cx and the motif actually
         placed at the intended -35 or -10 location

    A hexamer is "cryptic" when it sits strictly closer to the consensus
    than the deliberately placed motif does.
    '''
    # positional mismatch count between consensus and candidate hexamer
    mismatches = sum(a != b for a, b in zip(cx, hx))
    return mismatches < dt
assert cryptic_hexamer(cx='TTGACA', hx='AAAAAA', dt=3) == False # test case 1
assert cryptic_hexamer(cx='TTGACA', hx='TTGAGA', dt=3) == True # test case 2
# actual local model function
def prevent_cryptic_promoter(seq, c35start, c10start, eval_index=None):
    '''
    Local model function: reject promoters containing cryptic -35/-10
    hexamers, i.e. off-target hexamers strictly closer to a consensus
    motif than the motif actually placed at the intended location.

    seq - partial sequence under construction to be evaluated
    c35start - starting index of -35 hexamer (python indexing)
    c10start - starting index of -10 hexamer (python indexing)
    eval_index - the location ending at which a hexamer is to
                 be evaluated (default=None implies use the
                 last hexamer in current seq, i.e. ending at
                 len(seq))

    Returns (True, None) when the evaluated hexamer is acceptable, or
    (False, traceback_index) when it is cryptic.

    NOTE(review): the -35/-10 skip tests below compare against `end`
    (= len(seq)), not `eval_index`; when re-checking a complete sequence
    via eval_index (as the global model function does), the placed motifs
    are therefore not skipped. A placed motif compared against its own
    consensus still passes (its distance equals the cutoff, never less),
    but a placed -10 that happens to resemble the -35 consensus (or vice
    versa) can be flagged -- confirm this strictness is intended.
    '''
    c35 = 'TTGACA' # defined -35 consensus
    c10 = 'TATAAT' # defined -10 consensus
    # sequence long enough to evaluate
    if len(seq) >= 6:
        # current index?
        end = len(seq)
        # which hexamer to evaluate?
        if eval_index is None: # no eval_index provided
            eval_index = end # use the hexamer ending at current index (end)
        # otherwise use appropriate eval_index provided
        # extract current / appropriate hexamer
        hx = seq[eval_index-6:eval_index]
        # current hexamer is at the -35 or -10 location?
        # then skip evaluation for extracted hexamer
        if end == c35start+6:
            return (True, None) # skip -35 location
        if end == c10start+6:
            return (True, None) # skip -10 location
        # Case: -35 hexamer
        # extract current -35 hexamer
        # if there is one
        s35 = None
        if end > c35start+6: # a -35 motif has been placed
            s35 = seq[c35start:c35start+6]
        # set -35 hexamer cutoff
        if s35 is None: # no -35 hexamer present yet
            d35 = 3 # default distance to prevent
        else:
            d35 = hamming(s35, c35) # actual distance to prevent
        # evaluate hx for -35 hexamer
        if cryptic_hexamer(cx=c35, hx=hx, dt=d35):
            return (False, end-6) # our current hexamer is a cryptic -35;
                                  # go back 6 bases
        # Case: -10 hexamer
        # extract current -10 hexamer
        # if there is one
        s10 = None
        if end > c10start+6:
            s10 = seq[c10start:c10start+6]
        # set -10 hexamer cutoff
        if s10 is None: # no -10 hexamer present yet
            d10 = 3 # default distance to prevent
        else:
            d10 = hamming(s10, c10) # actual distance to prevent
        # evaluate hx for -10 hexamer
        if cryptic_hexamer(cx=c10, hx=hx, dt=d10):
            return (False, end-6) # our current hexamer is a cryptic -10;
                                  # go back 6 bases
        # both -35 and -10 checks passed
        return (True, None) # part is OK
    # not long enough to evaluate
    return (True, None) # part is OK .. so far
# test case 1 - a partial sequence with last 6 bases very similar to the -35 consensus
assert prevent_cryptic_promoter(seq='GGGGGGGGTTGACT', c35start=20, c10start=20+6+17) == (False, 8)
# test case 2 - a partial sequence with last 6 bases very similar to the -10 consensus
assert prevent_cryptic_promoter(seq='GGGGGGGTATAGT', c35start=20, c10start=20+6+17) == (False, 7)
# test case 3 - a partial sequence with last 6 bases dissimilar to both motifs
assert prevent_cryptic_promoter(seq='GGGGGGGGAAGATC', c35start=20, c10start=20+6+17) == (True, None)
# The above local model function `prevent_cryptic_promoter` utilizes many smaller functions in order to make its evaluation, which is fine. The only thing we need to take care of in order to use this function with `Maker` is the setting of `c35start` and `c10start` parameters, which are required for the function to work (note `eval_index` has a default value of `None` so we need not worry about it right now), given that `Maker Mode` only works with local model functions that just takes in a single input - the partial sequence under construction (`seq`).
#
# Some obvious solutions would be to hardcode all parameters apart from `seq` inside the local model function, or give them default values like we did for `eval_index`, but we don't really need to do either. Instead, we have two more options.
# The first option is to define a lambda function to wrap the above function like so.
prevent_cryptic = lambda seq: prevent_cryptic_promoter(seq=seq, c35start=20, c10start=43)
# We could then set `local_model_fn=prevent_cryptic`, and our optimization would come through. This wrapping leaves the underlying function `prevent_cryptic_promoter` free for more general use later. For example, it could be used to power an additional global model function that checks the completely constructed part for cryptic hexamers by re-evaluating the hexamers at each index starting at $5$ via the `eval_index` parameter (more on this a little later).
# The second option is to define a wrapper function explicitly.
def prevent_cryptic(seq):
    """Explicit wrapper around `prevent_cryptic_promoter` with the
    toolbox-2 motif coordinates (-35 at index 20, -10 at index 43) fixed."""
    return prevent_cryptic_promoter(seq=seq, c35start=20, c10start=43)
# Notice, that now we actually have two local model functions so far: (1) the `prevent_cryptic` function and (2) the previously used `prevent_cutsite` function used for the first toolbox. In such multi-objective design scenarios, we would have to write a meta local model function, which would run these individual local model functions along with any specific parameters. Here's an example:
# a meta local model function
def variable_promoter_local(seq):
    """Meta local model function for toolbox 2.

    Runs both local objectives in priority order and returns the first
    failing traceback, or (True, None) when every objective is met.
    """
    # objective 1: no palindromic hexamers (cutsites)
    ok, idx = prevent_cutsite(seq)
    if not ok:
        return False, idx
    # objective 2: no cryptic -35/-10 hexamers
    ok, idx = prevent_cryptic_promoter(
        seq=seq,
        c35start=20,
        c10start=43)
    if not ok:
        return False, idx
    # every objective met .. good to go!
    return (True, None)
# > **Note** It is important to be careful about string indexing when the function logic becomes moderately complex. For example, in `Python` strings are $0$-indexed, which means that the $-35$ hexamer starts after the first $20$ bases in the upstream region at index $20$ (i.e. the $21$st base belongs to $-35$). It is also important to note when a model function should be evaluated. For example, if the evaluation logic requires at least an $8$-bp sequence, parts shorter than that length should be evaluated `True` to let the sequence grow long enough for evaluation.
#
# > **Note** When there are multiple local objectives at play that evaluate properties of the partial sequence at an upstream location, it is often advantageous to return the traceback index that occurs earlier in the sequence position. For example, one thing we could do in a meta local model function is evaluate two objectives, and return `(False, min(index1, index2))` if both objective functions failed with different traceback locations, or just `(False, index1)` if only the first one failed and so on. We could also weigh the various objectives differently, and choose to return the most important traceback index first, rather than the second most important traceback index etc.
#
# > **Note** It is possible to embed external models as evaluators into the `Non-Repetitive Parts Calculator`. For example, rather than only preventing cryptic motifs as shown above, we could have also used a `scikit-learn` `Lasso` model, as described in [our publication](https://www.nature.com/articles/s41587-020-0584-2), to design promoters within specific dynamic ranges. We would load the model (unpickle) into memory, and for every promoter completely constructed by `Maker`, we would evaluate the predicted initiation rate and only accept parts that satisfied our criteria (a global model function). Alternatively, we could further identify, using the model, which of the components (hexamers, spacer GC etc.) prevented a part from being accepted, and return a traceback index accordingly (i.e. converted the global into a local model function) to explore nucleotide choices concurrently with part design.
#
# > **Note** It is always a good idea to test the individual functions called by a meta local or global model function, using simple cases. Notice, how we have `assert`-ed a few test cases for the helper functions above.
#
# > **Note** If `Maker` takes an impossible amount of time to create a single part in the presence of a model function, it is worth investigating if the model function in context gets stuck in an infinite loop or an edge case. A quick check would be to run `Maker` with all constraints except the model function. If `Maker` is able to design parts quickly in the absence of the model function, then the slow-down is naturally due to the model function itself, which should be investigated and optimized.
# Now that our meta local model function is ready, we can define a meta global model function that calls `final_cutsite_check` as well as `prevent_cryptic_promoter` like so.
# a meta global model function
def variable_promoter_global(seq):
    """Meta global model function for toolbox 2.

    Runs the post-construction checks on a fully built promoter:
    (1) no palindromic hexamers (cutsites), and (2) no cryptic -35/-10
    hexamers anywhere along the sequence.

    Returns True when every check passes, False otherwise.
    """
    # check for cutsites post construction
    if not final_cutsite_check(seq):
        return False # cutsites found!
    # check for cryptic hexamers starting at the 6th base; a hexamer ends
    # at indices 6..len(seq) inclusive, so the upper bound must be
    # len(seq)+1 -- the previous range(6, len(seq)) silently skipped the
    # final hexamer window
    for eval_index in range(6, len(seq)+1):
        # use the generalized evaluation function
        state, index = prevent_cryptic_promoter(
            seq=seq,
            c35start=20,
            c10start=43,
            eval_index=eval_index)
        # there is a cryptic hexamer ending
        # at the current location
        if state is False:
            return False
    # all checks passed!
    return True
# With all our evaluators completed, we're ready to design our second toolbox of promoters. Let's call upon `Maker` to do our bidding.
# +
# Toolbox 2, first attempt: 500 variable-strength promoters (Lmax=12),
# non-repetitive within themselves but not yet chained against toolbox 1.
# Record starting time
t0 = time.time()
# Execute Maker
toolbox2_attempt1 = nrpcalc.maker(
    seed=2, # reproducible results
    seq_constr=tb2_seq_constraint, # as defined above
    struct_constr=tb1_struct_constraint, # same as toolbox1
    Lmax=12, # as stated in our goal
    target_size=500, # as stated in our goal
    part_type='DNA', # as stated in our goal
    local_model_fn=variable_promoter_local, # as defined above
    global_model_fn=variable_promoter_global) # as defined above
# Compute execution time
tf = time.time() - t0
# -
print('Run took {:.2f}s'.format(tf)) # Show run time
# Notice, that the running time increased from less than one second for the first toolbox, to about four seconds for this present toolbox. This is because the running time of `Maker` greatly depends on the complexity of the underlying model functions. For the second toolbox, we used both local and global model functions, each of which considered two sub-objectives inside them. `Maker` was able to satisfy all of these objectives and finished in under four seconds.
#
# Let's review our newly minted toolbox!
# Spot-check the first and last variable-strength promoters.
toolbox2_attempt1[0] # first promoter in second toolbox
toolbox2_attempt1[499] # last promoter in second toolbox
# #### Designing the Second Toolbox - Second Attempt
# The reason we called the previous sub-section a **"First Attempt"** is because, the second toolbox we designed above is non-repetitive to itself, but not against `toolbox1` promoters designed apriori. To verify non-repetitiveness in construction, we can use `Finder Mode`.
# Pool both toolboxes and let Finder extract the (approximately) largest
# non-repetitive subset; fewer than 1000 survivors means cross-toolbox repeats.
# combine both toolboxes
promoters = list(toolbox1.values())
promoters.extend(toolbox2_attempt1.values())
# compute the number of non-repetitive promoters
non_repetitive_promoters = len(nrpcalc.finder(
    seq_list=promoters,
    Lmax=12))
assert non_repetitive_promoters < 1000 # we're short of our goal ... some promoters were repetitive
# As we can see, the final non-repetitive toolbox when both toolboxes are combined together has less than $1000$ parts in it, which is short of our intended goal. This is where the concept of **"background"** comes into play (check [DOCS](https://github.com/ayaanhossain/nrpcalc/blob/master/docs/DOCS.md) for `background` API details).
#
# > **Note** `Finder` is an unstable algorithm by design. What this means is that, in a scenario consisting of a list of repetitive parts, the returned non-repetitive subset is approximately the largest possible non-repetitive toolbox, but this may change slightly across multiple runs for the same set of inputs. There is no way known, generally, to be certain of what the absolutely largest toolbox size actually is (unless all parts were either repetitive or non-repetitive as in the case during verification of parts returned by `Maker`), so we retained a level of stochasticity in `Finder`. This encourages us to run `Finder` (which is pretty fast in practice) several times on a candidate toolbox of parts, and then select the largest non-repetitive toolbox returned across all runs.
# To create the second toolbox while ensuring it is non-repetitive to the first one, we will populate a temporary `background` object.
# Create an on-disk background of k-mers; parts designed against it must
# avoid every 13-mer (Lmax=12) the background contains.
bkg = nrpcalc.background(
    path='./tmp_bkg', # we store the background on disk in the 'tmp_bkg' directory on current path
    Lmax=12) # same Lmax as toolbox1
bkg # checking background path and content, we see it has zero elements
# +
# # we could add the promoters one-by-one
# for promoter in toolbox1.values():
#     bkg.add(promoter)
# or add it all in one-shot
bkg.multiadd(toolbox1.values())
# -
bkg # now, background is populated with toolbox1 k-mers
# With our `background` populated, we are now ready to design our actual second toolbox such that it is indeed non-repetitive to the first toolbox, therefore allowing both toolboxes to be used simultaneously. This is what we refer to as **toolbox chaining**. For example, you can chain a `Maker` job against a genome inserted in `background`. You can also use `background` with `Finder` jobs to incrementally enlarge a central collection of parts from multiple sources.
# +
# Toolbox 2, final attempt: same constraints as before, now chained against
# the toolbox-1 background so both toolboxes are jointly non-repetitive.
# Record starting time
t0 = time.time()
# Execute Maker
toolbox2 = nrpcalc.maker(
    seed=3, # reproducible results
    seq_constr=tb2_seq_constraint, # as defined above
    struct_constr=tb1_struct_constraint, # same as toolbox1
    Lmax=12, # as stated in our goal
    target_size=500, # as stated in our goal
    part_type='DNA', # as stated in our goal
    local_model_fn=variable_promoter_local, # as defined above
    global_model_fn=variable_promoter_global, # as defined above
    background=bkg) # as defined above
# Compute execution time
tf = time.time() - t0
# -
print('Run took {:.2f}s'.format(tf)) # Show run time
# Notice, that the running time now increased from less than four seconds in our previous attempt to slightly more than four seconds in our current attempt. This is because we introduced `background` as an additional constraint for the design job above.
#
# Let's update our `background` and use `Finder` again to verify our construction of the second toolbox.
# Chain the background forward and re-verify: all 1000 promoters should now
# be mutually non-repetitive.
bkg.multiadd(toolbox2.values()) # updated with second toolbox for building next set of parts
# recreate the list of promoters for evaluation
promoters = list(toolbox1.values()) + list(toolbox2.values())
# assess non-repetitiveness
non_repetitive_promoters = len(nrpcalc.finder(
    seq_list=promoters,
    Lmax=12))
assert non_repetitive_promoters == 1000 # no promoters missing!
# ### Non-Repetititve Ribosome Binding Sites with `Lmax=14`
# To complement our designed promoter toolboxes, we will next design a toolbox of prokaryotic ribosome binding sites (RBSs). We will primarily be using findings from [Salis et al. (2009)](https://www.nature.com/articles/nbt.1568) for designing our RBSs.
# We aim to design $1000$ _de novo_ RBS sequences that are non-repetitive to our promoter toolboxes designed in the previous sections. Our RBS sequence constraint is therefore highly degenerate, containing a $26$-bp upstream region, a $4$-bp standby site, and a $9$-bp consensus Shine-Dalgarno (SD) motif ('UAAGGAGGA') separated from the start codon ('AUG') by a near-optimal $6$-bp spacer. Importantly, the structural constraint specifies that there will be a small hairpin on the $5'$-end of designed sequences to insulate the RBS against the formation of undesired structures that might inhibit ribosome binding to the Shine-Dalgarno motif.
#
# Let's define and review our constraints.
# Toolbox 3 sequence constraint: insulated RBS with a consensus
# Shine-Dalgarno motif and a near-optimal 6-bp spacer before the start codon.
tb3_seq_constraint = ''.join([
    'N' * 26,      # Upstream Region
    'N' * 4,       # SBS (standby site)
    'UAAGGAGGA',   # SD Motif (consensus Shine-Dalgarno)
    'N' * 6,       # SPACER
    'AUG',         # START codon
])
# 5'-end insulating hairpin; the SD motif through the start codon is
# forced to stay unpaired (x)
tb3_struct_constraint = '.(((((....)))))...............xxxxxxxxxxxxxxxxxx'
print(tb3_seq_constraint)
print(tb3_struct_constraint)
# The dots (`.`) in the structure constraint implies that the bases in the sequence constraint at the corresponding locations are free to either base-pair or not when a candidate part is generated. Bases marked with parenthesis (`(` and `)`) indicate that the folded structure must contain those designated base-pairings, for example the second base must pair with the fifteenth base and so on. Bases marked with `x` are forbidden from being part of any base pairing in the secondary `RNA` structure. This _dot-parenthesis-x_ notation is inspired from the secondary structure notation used by nucleic acid structure prediction programs such as `ViennaRNA`.
# Before we design the RBS toolbox, we must note that the constraint for the RBS toolbox here includes an `Lmax` of $14$, whereas the promoters were designed with an `Lmax` of $12$ bases. This is because, there is a big $9$-bp constant Shine-Dalgarno motif in the sequence constraint which doesn't leave too many $13$-mers (recall `Lmax=12`) for constructing thousands of non-repetitive RBSs. As proof, let's try constructing the RBS toolbox with `Lmax=12`, without using any `background` and only using the fast `mfe` (minimum free energy) structure evaluation (a relaxed design scenario). We will additionally specify the `fail_count` parameter in `Maker` to terminate on $2000$ consecutive failures instead of the default value of $1000$ (deeper exploration of design space).
# +
# First RBS attempt: Lmax=12 is expected to be too restrictive around the
# constant 9-bp SD motif, so Maker should stall well short of target_size.
# Record starting time
t0 = time.time()
# Execute Maker
toolbox3_attempt1 = nrpcalc.maker(
    seed=4, # reproducible results
    seq_constr=tb3_seq_constraint, # as defined above
    struct_constr=tb3_struct_constraint, # as defined above
    Lmax=12, # as stated in our goal
    target_size=1000, # as stated in our goal
    part_type='RNA', # as stated in our goal
    struct_type='mfe', # as defined above
    fail_count=2000, # as stated in our goal
    local_model_fn=None, # as stated in our goal
    global_model_fn=None, # as stated in our goal
    background=None) # as defined above
# Record execution time
tf = time.time() - t0
# -
print('Toolbox Size reached = {}, but Target Size was 1000'.format(
    len(toolbox3_attempt1)))
print('Run took {:.2f}s'.format(tf)) # Show run time
# As we can see in the output, `Maker` first warned us that it might not be able to make $1000$ parts as specified in the `target_size`, but it ventured forth, taking approximately nine minutes to explore the design space and constructing $200+$ RBSs, before giving up.
# NOTE: this is a display trick, not a real test -- a non-empty string is
# always truthy, so the assert can never fail; the text only illustrates
# the Lmax-limiting window around the constant SD motif.
assert ('''
Only 4 bases free in the last 13 bp window
containing the complete SD motif
-------------
|||||||||****
NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNUAAGGAGGANNNNNNAUG
****|||||||||
-------------
True for the first window as well
''')
# The $9$-bp constant motif in sequence constraint leaves only $4$ degenerate bases in every $13$-bp window containing the complete SD sequence, implying at most $4^4 = 256$ possible parts for the given sequence constraint. Such _k_-mer windows which limit the overall design space and prevents the reaching of `target_size` are called **`Lmax` limiting windows**. `Maker` was able to design $219$ of the maximum possible toolbox size before failing to find suitable _k_-mers for making newer RBSs. If we wanted, we could try increasing the `jump_count` and/or `fail_count` (see [API](https://github.com/ayaanhossain/nrpcalc/blob/master/docs/DOCS.md)) to try to reach all $256$ of the possible RBSs, although the severe structure constraint might prevent selection of some _k_-mers to realize all of these sequences.
# Our goal, however, is to build $1000$ RBSs which is clearly not possible given an `Lmax` of $12$ for the specified sequence constraint. We could try introducing more degeneracy into the SD motif which might relax our constraints enough to fix the issue. But, if we don't want to alter the motif, we would have to increase our `Lmax` to expand our design space. An `Lmax` of $14$ seems reasonable, giving `Maker` $4^6 = 4096$ possible _k_-mer selection choices for all $15$-bp windows encompassing the SD motif.
# Now, that we've decided to use an `Lmax=14` for our toolbox, how do we unify our present RBS toolbox with the previously designed promoter toolboxes with `Lmax=12`, in terms of non-repetitiveness? It is a feature of the `Non-Repetitive Parts Calculator` that one can use a `background` initialized with an `Lmax` which is different from the `Lmax` specified for the design job at hand. So, for designing our RBSs, it would be legal and recommended to use `bkg` as the `background` (having `Lmax=12`), while our new RBS toolbox would be built with an `Lmax` of $14$. This would ensure that there is no $13$-mer in the designed RBSs that also exists in `bkg` (promoter _k_-mers), while also ensuring that no two RBSs in the toolbox shared a $15$-mer between each other.
#
# Alternative approaches include (1) initializing a new `background` with `Lmax=14` and inserting all previous promoters into it, followed by using the new `background` for designing RBSs, or (2) defining a new local model function that prevents every $13$-mer in the new RBSs under construction from coinciding with the previous `background` (`bkg` - containing the $13$-mers from the promoters).
#
# The first alternative solution is pretty straight-forward, and the one we'll use for designing our toolboxes in the subsequent sections (since we'd be permanently moving onto a collective `Lmax` of $14$ for the past, present and future toolboxes, and `bkg` with `Lmax=12` wouldn't be appropriate there), but for our current RBS toolbox design we'll look at the second alternative option to see an example of a model function that works with `background` objects.
# Let's look at a possible local model function for achieving the second alternative option.
def prevent_promoter_conflict(seq):
    """Local model function: reject any partial RBS whose trailing
    13-mer already exists in the promoter background ``bkg``.

    Returns the ``(state, traceback_index)`` tuple Maker expects:
    ``(True, None)`` to accept the current path, or ``(False, idx)``
    to retrace construction back to position ``idx``.
    """
    # A sequence shorter than 13 bases cannot yet contain a full 13-mer.
    if len(seq) < 13:
        return (True, None)
    # Only the newest window needs checking; every earlier 13-mer was
    # validated on a previous call.
    trailing_kmer = seq[-13:]
    if trailing_kmer in bkg:
        # Conflict with a promoter k-mer: back up 13 positions.
        return (False, len(seq) - 13)
    return (True, None)
# toolbox1 promoter fails our evaluation as expected
assert prevent_promoter_conflict(seq=toolbox1[0]) == (False, len(toolbox1[0])-13)
# toolbox2 promoter also fails our evaluation as expected
# NOTE(review): the expected traceback index uses len(toolbox2[0]) while the
# input is toolbox2[499] -- this only holds because all designed promoters
# share the same length; confirm, or use len(toolbox2[499]) for clarity.
assert prevent_promoter_conflict(seq=toolbox2[499]) == (False, len(toolbox2[0])-13)
# a poly-G 13-mer was absent in the promoters, so it is OK to be used for the RBSs
assert prevent_promoter_conflict(seq='G'*13) == (True, None)
# Our local model function is done, and our `Lmax` is revised to $14$. However, unlike the previous RBS toolbox design attempt, instead of relying on just `mfe` for structure evaluation, we'll use `mfe` + `centroid` = `both` as our `struct_type` parameter to ensure both `mfe` and `centroid` conform to the given structure constraint. This ensures that designed parts fold into a given structure with very high probability (at the cost of increased computation time).
# +
# Record starting time
t0 = time.time()
# Execute Maker
toolbox3 = nrpcalc.maker(
seed=5, # reproducible results
seq_constr=tb3_seq_constraint, # as defined above
struct_constr=tb3_struct_constraint, # as defined above
Lmax=14, # as revised from our previous attempt
target_size=1000, # as stated in our goal
part_type='RNA', # as stated in our goal
struct_type='both', # as revised from our previous attempt
local_model_fn=prevent_promoter_conflict, # as defined above
global_model_fn=None, # none required
background=None) # background conflict resolved via local model
# Compute execution time
tf = time.time() - t0
# -
print('Run took {:.2f}s'.format(tf)) # Show run time
# Compared to the previous attempt, the present attempt finishes in less than five minutes and we designed exactly $1000$ non-repetitive RBSs per our goal.
# Let's review the parts, and verify non-repetitiveness of the new toolbox against `bkg` by specifying it as a background to `Finder`. If `Finder` returns all $1000$ parts as non-repetitive, then our model function worked as intended despite us not using `bkg` as the `background` directly in our call to `Maker`.
toolbox3[0] # first RBS designed
toolbox3[999] # last RBS designed
assert len(nrpcalc.finder(
seq_list=toolbox3.values(),
Lmax=14,
verbose=False, # just assert
background=bkg)) == 1000 # our goal of 1000 brand new non-repetitive RBSs is achieved!
# ### Non-Repetitive Toehold Switches with `Lmax=14`
# We will now design $1000$ non-repetitive toehold RNA switches for programmable protein expression. Our constraints for designing these toehold switches are based on the work of [Green et al. (2014)](https://www.sciencedirect.com/science/article/pii/S0092867414012896).
# Before we embark on the design, let's delete the previous `background` (`bkg`) and initialize a new `background` with `Lmax=14`, into which we'll insert all of the previous toolboxes, so that we can use it for designing our toehold switches non-repetitive to all previously designed parts.
bkg.drop() # deletes the background from disk
chained_bkg = nrpcalc.background(
path='./chained_bkg', # a new background path
Lmax=14) # updated Lmax
chained_bkg # check new background path and content
chained_bkg.multiadd(toolbox1.values()) # add the first promoter toolbox
chained_bkg.multiadd(toolbox2.values()) # add the second promoter toolbox
chained_bkg.multiadd(toolbox3.values()) # add the third toolbox containing RBSs
chained_bkg # review background post insertion
# Our constraint for toehold switches primarily includes a hairpin loop, and contains a $30$-bp **trigger RNA** sequence upstream of an embedded $7$-bp consensus Shine-Dalgarno motif ('AGGAGGA'), separated from the start codon ('AUG') by the remaining $6$-bp stem of domain _B_, and ends with a $21$-bp linker sequence. Notably, everything upstream of the linker portion of the design has a very specific structure requirement.
#
# Let's define the sequence and structure constraints and review them.
tb4_seq_constraint = 'N'*12 + 'N'*9 + 'N'*3 + 'N'*6 + 'AGGAGGA' + 'N'*6 + 'AUG' + 'N'*9 + 'N'*21
# ----- -------------------- ------- -------------------- -----
# Domain A Domain B with Bulge SD Motif Domain B with START LINKER
tb4_struct_constraint = 'xxxxxxxxxxxx(((((((((xxx((((((xxxxxxx))))))xxx))))))))).....................'
print('{:^30}'.format('Trigger RNA Sequence'))
print('-'*30)
print(tb4_seq_constraint)
print(tb4_struct_constraint)
# Although, at first glance, the constraints look fine and degenerate enough, there is a potential pitfall waiting for us when we feed these constraints to `Maker`.
#
# Notice, that the $7$-bp SD motif ('AGGAGGA') is flanked by domain _B_ bases on either side, which creates a $15$-bp _k_-mer window with 'AGGAGGA' in the middle and four paired bases on either side. This is illustrated below.
assert ('''
15bp
---------------
7bp
4bp ||||||| 4bp
|||| ||||
NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNAGGAGGANNNNNNAUGNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN
xxxxxxxxxxxx(((((((((xxx((((((xxxxxxx))))))xxx))))))))).....................
|||| ||||
4bp ||||||| 4bp
7bp
---------------
15bp
''')
# In this $15$-mer window, the last four bases are always going to be complementary to the first four bases. So, as soon as the first four bases ('N's) are filled in by `Maker`, the fate of the last four bases are automatically determined (they will be complementary to the first four bases). The $7$-bp SD motif is a constant in our sequence constraint which leaves only the first four bases to be selected variably by `Maker`, resulting in the last four bases becoming a dynamically inserted constant for each imaginable run. Thus, instead of working with a degenerate $15$-bp window with $7$ bases fixed, we're actually working with a $15$-bp window with $7+4=11$ bases fixed. This leaves us with $4^4 = 256$ possible nucleotide combinations to fill up this window, implying, a theoretical maximum toolbox size of only $256$ toehold switches. Our goal for $1000$ non-repetitive toehold switches, even with `Lmax=14`, will not be fulfilled given how we have framed the sequence and structure constraint.
# Rather than abandoning hope, we may go back to the original paper for insights. It is clear that an RBS must be located between the two halves of domain _B_, but should this RBS just consist of a consensus $7$-bp SD motif only? The motif can potentially be "padded" on either side, and still leave us with effective RBSs. Accordingly, we will modify our sequence constraint to pad the SD motif with three 'N's on the $5'$-end, while ensuring that those bases remain unpaired via our modified structure constraint. This expands our design space by sixty four fold, making our goal of $1000$ non-repetitive toehold switches a real possibility. Let's re-define the constraints, and review them one last time.
tb4_seq_constraint = 'N'*12 + 'N'*9 + 'N'*3 + 'N'*6 + 'NNNAGGAGGA' + 'N'*6 + 'AUG' + 'N'*9 + 'N'*21
# ----- -------------------- ---------- -------------------- -----
# Domain A Domain B with Bulge Padded SD Domain B with START LINKER
tb4_struct_constraint = 'xxxxxxxxxxxx(((((((((xxx((((((xxxxxxxxxx))))))xxx))))))))).....................'
print('{:^30}'.format('Trigger RNA Sequence'))
print('-'*30)
print(tb4_seq_constraint)
print(tb4_struct_constraint)
# Because we're dealing with toehold switches, it is imperative to prevent any start codon in the stem separating the SD motif and the designated start codon, as well as any start or stop codons after the designated start codon. Time for a quick local model function.
#
# > **Note** `Maker` builds and returns either **DNA** or **RNA** strings depending on the input `part_type` specification. The `part_type` is used to ensure correct base pairing, and select the correct energy parameters for evaluating the structure constraint for the intended scenario. For example, toehold RNA switches are designed using correct parameters so that when they finally fold in their RNA state, they have the correct conformation. This also means that all local and global model functions used for optimization should evaluate **DNA** or **RNA** strings depending on the `part_type`, for the sake of correctness.
start_codon = 'AUG' # as opposed to 'ATG'
stop_codons = set(['UAG', 'UAA', 'UGA']) # all stop codons are defined
def prevent_codon(seq):
# we don't evaluate if were're at or before SD motif, or at
# the designated start codon location and the the two bases
# right after the start codon (which do not form an in-frame codon)
# case 1: at or before SD motif
if len(seq) <= 40: # pass evaluation
return (True, None)
# case 2: at the designated start codon or the two
# bases right next to it
if 47 <= len(seq) <= 49+2: # pass evaluation
return (True, None)
# actual evaluation time!
# case 1: we have entered in the stem between SD and start codon
if 41 <= len(seq) <= 46:
# extract codon candidate
cdn = seq[-3:]
# is this a start codon?
if cdn == start_codon:
return (False, len(seq)-3) # go back three places
# not a start codon
return (True, None)
# case 2: we have entered the linker region beyond the start codon
if len(seq) >= 52: # first in-frame codon after the start codon
# extract codon candidate
cdn = seq[-3:]
# is the codon candidate a stop codon?
if cdn in stop_codons:
return (False, len(seq)-3) # go back three places
# is the codon candidate a start codon?
if cdn == start_codon:
return (False, len(seq)-3)
# candidate is not a stop codon
return (True, None) # pass
assert prevent_codon(seq='A'*25) == (True, None) # short sequences pass
assert prevent_codon(seq='A'*40 + 'AUG') == (False, 40) # start codon in spacer after SD prevented
assert prevent_codon(seq='A'*46 + 'AUG') == (True, None) # start codon at designated location not evaluated
assert prevent_codon(seq='A'*50 + 'UAA') == (False, 50) # stop codons after start codon prevented
assert prevent_codon(seq='A'*50 + 'GCA') == (True, None) # other codons are fine
# Let's now build our brand new toolbox of non-repetitive toehold switches and review them!
# +
# Record starting time
t0 = time.time()
# Execute Maker
toolbox4 = nrpcalc.maker(
seed=6, # reproducible results
seq_constr=tb4_seq_constraint, # as defined above
struct_constr=tb4_struct_constraint, # as defined above
Lmax=14, # as defined above
target_size=1000, # as stated in our goal
part_type='RNA', # as stated in our goal
struct_type='both', # as stated in our goal
local_model_fn=prevent_codon, # as defined above
global_model_fn=None, # no requirement of a global check
background=chained_bkg) # as defined above
# Compute execution time
tf = time.time() - t0
# -
print('Run took {:.2f}s'.format(tf)) # Show run time
toolbox4[0] # first switch in the toolbox
toolbox4[999] # last switch in the toolbox
assert len(nrpcalc.finder(
seq_list=toolbox4.values(),
Lmax=14,
verbose=False, # just assert
background=chained_bkg)) == 1000 # job done!
# ### Non-Repetitive Intrinsic Terminators with `Lmax=14`
# For our final demonstration, we will design $1000$ non-repetitive rho-independent bacterial terminators based on the works of [Chen et al. (2013)](https://www.nature.com/articles/nmeth.2515), and [Nielsen et al. (2016)](https://science.sciencemag.org/content/352/6281/aac7341)
# Our design includes a highly degenerate sequence constraint with embedded poly-A and poly-U motifs, and a $15$-bp stem in the structure constraint. Based on the paper, the $8$-bp U-rich tract is $8$ bases downstream of the stem, and pairs with the complementary A-rich tract immediately upstream of the stem. We will not be using any model functions in this example, but we will ensure that the terminators are non-repetitive to all of the toolboxes designed above.
# Layout: 8-nt spacer | poly-A tract | strong (S = G/C) bases ... strong bases | poly-U tract | 8-nt spacer
tb5_seq_constraint = 'NNNNNNNNAAAAAAAASNSNSNNNNNNNNNNNNNNNNNNNNNNNNNNNSNSNSNNNNNNNNUUUUUUUUNNNNNNNN'
tb5_struct_constraint = '........(((((((((((((((((((((((xxxxxxx)))))))))))))))xxxxxxxx))))))))........'
# The long run of matched parentheses is the 15-bp terminator stem; the
# poly-U tract is held unpaired (x) immediately downstream of the stem.
# Note that the terminator structure constraint mandates a $15$-bp stem which implies that all designed terminators must have an internal repeat of $15$ bases, yet our desired `Lmax` is $14$. In such scenarios, we can set `internal_repeats=True`, and ask `Maker` to preserve parts with internal repeats, while still eliminating shared repeats between all pairs of parts.
#
# Let's update our `background` object with the previously designed toehold switches, and then design the terminators.
chained_bkg.multiadd(toolbox4.values()) # update background with toehold switches
# +
# Record starting time
t0 = time.time()
# Execute Maker
toolbox5 = nrpcalc.maker(
seed=7, # reproducible results
seq_constr=tb5_seq_constraint, # as defined above
struct_constr=tb5_struct_constraint, # as defined above
Lmax=14, # as defined above
internal_repeats=True, # as stated in our goal
target_size=1000, # as stated in our goal
part_type='RNA', # as stated in our goal
struct_type='both', # as stated in our goal
local_model_fn=None, # no requirement of a local check
global_model_fn=None, # no requirement of a global check
background=chained_bkg) # as defined above
# Compute execution time
tf = time.time() - t0
# -
print('Run took {:.2f}s'.format(tf)) # Show run time
# Let's review our designed terminators, and ensure that all toolboxes are non-repetitive to each other, as a final check.
toolbox5[0] # first terminator designed
toolbox5[999] # last terminator designed
all_toolboxes = [] # our final toolbox list
# insert all toolboxes designed so far into all_toolboxes
for toolbox in [toolbox1, toolbox2, toolbox3, toolbox4, toolbox5]:
    all_toolboxes.extend(toolbox.values())
# NOTE(review): the expected count of 4000 implies toolbox1 and toolbox2
# together hold 1000 parts (toolboxes 3-5 contribute 1000 each) -- confirm
# against the earlier promoter design cells.
assert len(nrpcalc.finder(
    seq_list=all_toolboxes,
    internal_repeats=True, # allow internal repeats due to terminators
    verbose=False, # just assert
    Lmax=14)) == 4000 # all toolboxes we designed may be used simultaneously
                      # without introducing any repeat longer than 14-bp
# Notice that we didn't specify `chained_bkg` as the `background` in the `Finder` job above, because it already contains $15$-mers from the previous four toolboxes, and as such would flag all parts from these toolboxes inside `all_toolboxes` as being repetitive with respect to the `background`. We will now dispense with `chained_bkg` since it has served its purpose, and conclude this section.
chained_bkg.drop() # goodbye!
# ### And Now, Our Watch is Ended
# We hope this notebook is useful to you in learning how to use the `Non-Repetitive Parts Calculator` effectively. We hope to convince you that the `Non-Repetitive Parts Calculator` can be a useful tool in your arsenal in your quest for genetic systems engineering. We had a lot of fun developing this notebook, and we hope you'll share it with your students and colleagues who might benefit from the `Non-Repetitive Parts Calculator`. Despite our thrust on clarity, if any part of this notebook remains ambiguous or unclear to you, please reach the authors, who'd be more than delighted to explain or update this notebook accordingly.
#
# We'd like to stress that the genetic parts discussed above are not the only ones that can be designed using the `Non-Repetitive Parts Calculator`. This algorithm and notebook is left to our synthetic biology colleagues everywhere to help them engineer ever-larger and stable genetic systems.
# ### References
# * <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>. (2019). Simultaneous repression of multiple bacterial genes using nonrepetitive extra-long sgRNA arrays. Nature Biotechnology, 37(11), 1294-1301.
#
#
# * <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>. (2013). CRISPR interference (CRISPRi) for sequence-specific control of gene expression. Nature Protocols, 8(11), 2180-2196.
#
#
# * <NAME>., <NAME>., and <NAME>. (2009). Automated design of synthetic ribosome binding sites to control protein expression. Nature Biotechnology, 27(10), 946-950.
#
#
# * <NAME>., <NAME>., <NAME>., and <NAME>. (2014). Toehold switches: de-novo-designed regulators of gene expression. Cell, 159(4), 925-939.
#
#
# * <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>. (2013). Characterization of 582 natural and synthetic terminators and quantification of their design constraints. Nature Methods, 10(7), 659-664.
#
#
# * <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>. (2016). Genetic circuit design automation. Science, 352(6281).
#
#
# * <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>. (2020). Automated design of thousands of nonrepetitive parts for engineering stable genetic systems. Nature Biotechnology, 1-10.
| examples/NRPCalcInAction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# set tem como objetivo manter apenas valores únicos em determinada lista.
#
# produtos = {'arroz', 'feijao', 'macarrao', 'atum', 'azeite'}
#
# Semelhante a um dicionário, um "set" é criado usando chaves, mas vale lembrar que não pode ser iniciado com zero itens, pois isso fará dele um dicionário.
# Customer CPF numbers (Brazilian taxpayer IDs); the list intentionally
# contains duplicates to demonstrate set de-duplication.
cpf_clientes = ['762.196.080-97', '263.027.380-67', '827.363.930-40', '925.413.640-91', '870.565.160-33', '892.080.930-50', '462.126.030-81', '393.462.330-10', '393.462.330-10', '393.462.330-10', '988.305.810-11', '596.125.830-05', '596.125.830-05', '990.236.770-48']
# Employee first names (not used in this cell).
funcionarios = ['Maria', 'José', 'Antônio', 'João', 'Francisco', 'Ana', 'Luiz', 'Paulo', 'Carlos', 'Manoel', 'Pedro', 'Francisca', 'Marcos', 'Raimundo', 'Sebastião', 'Antônia', 'Marcelo', 'Jorge', 'Márcia', 'Geraldo']
print(len(cpf_clientes))  # total entries, duplicates included
setado = set(cpf_clientes) # converting to a set keeps only the unique CPFs
print(len(setado))  # number of distinct CPFs
| Set.Range.For.Tuplas.Etc/Set.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pylab as plt
import torch
import swyft
# %load_ext autoreload
# %autoreload 2
import simulators
import heads
DEVICE = 'cuda:0'  # device the restored estimator is moved to
# Reference parameter point, drawn as the "truth" marker in the 1d plots below.
par0 = dict(ox=5., oy=5., a=1.5, p1=0.4, p2=1.1)
# Restore a trained NestedRatios estimator from its serialized state.
# NOTE(review): the second argument (simulator) is None here -- presumably
# not required for sampling/plotting only; confirm against the swyft API.
state_dict = torch.load("out.pt")
s = swyft.NestedRatios.from_state_dict(state_dict, None, device = DEVICE)
# Sample the marginal posteriors for the stored observation
# (100 is assumed to be the number of samples requested -- TODO confirm).
samples = s.marginals(s.obs, 100)
params = list(s.prior.params())
swyft.plot.plot1d(samples, params, figsize = (20, 4), ncol = 5, grid_interpolate = True, truth = par0)
swyft.utils.sample_diagnostics(samples)
print(samples.keys())  # inspect which marginals were produced
| dev/Development - External Sim.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table class="ee-notebook-buttons" align="left">
# <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/JavaScripts/Arrays/LinearRegression.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
# <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/JavaScripts/Arrays/LinearRegression.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
# <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/JavaScripts/Arrays/LinearRegression.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
# </table>
# ## Install Earth Engine API and geemap
# Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
# The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
#
# **Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
# +
# Installs geemap package
import subprocess
import sys

try:
    import geemap
except ImportError:
    # Install into the interpreter actually running this notebook;
    # a bare "python" on PATH may be a different environment.
    print('geemap package not installed. Installing ...')
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'geemap'])

# Checks whether this notebook is running on Google Colab
# (bug fix: the original bare `except:` also swallowed KeyboardInterrupt
# and SystemExit; only a failed import should trigger the fallback).
try:
    import google.colab
    import geemap.eefolium as geemap
except ImportError:
    import geemap

# Authenticates and initializes Earth Engine; fall back to an interactive
# authentication flow when no cached credentials exist.
import ee
try:
    ee.Initialize()
except Exception:
    ee.Authenticate()
    ee.Initialize()
# -
# ## Create an interactive map
# The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function.
Map = geemap.Map(center=[40,-100], zoom=4)
Map
# ## Add Earth Engine Python script
# +
# Add Earth Engine dataset
# Simple regression of year versus NDVI.
# Define the start date and position to get images covering Montezuma Castle,
# Arizona, from 2000-2010.
start = '2000-01-01'
end = '2010-01-01'
lng = -111.83533
lat = 34.57499
region = ee.Geometry.Point(lng, lat)
# Filter to Landsat 7 images in the given time and place, filter to a regular
# time of year to avoid seasonal affects, and for each image create the bands
# we will regress on:
# 1. A 1, so the resulting array has a column of ones to capture the offset.
# 2. Fractional year past 2000-01-01.
# 3. NDVI.
images = ee.ImageCollection('LANDSAT/LE07/C01/T1') \
.filterDate(start, end) \
.filter(ee.Filter.dayOfYear(160, 240)) \
.filterBounds(region)
def func_mrj(image):
    """Per-image band builder for the regression: returns an image with
    bands [constant 1, fractional years since `start`, NDVI]."""
    date = ee.Date(image.get('system:time_start'))
    yearOffset = date.difference(ee.Date(start), 'year')
    ndvi = image.normalizedDifference(['B4', 'B3'])
    return ee.Image(1).addBands(yearOffset).addBands(ndvi).toDouble()

# BUG FIX: in the auto-converted code, `.map(func_mrj)` was fused onto the
# function's own return statement (calling .map on an ee.Image and leaving
# the collection unmapped). As in the original JavaScript, the collection
# must be mapped here, after the function is defined.
images = images.map(func_mrj)
# Convert to an array. Give the axes names for more readable code.
array = images.toArray()
imageAxis = 0
bandAxis = 1
# Slice off the year and ndvi, and solve for the coefficients.
x = array.arraySlice(bandAxis, 0, 2)  # columns [1, yearOffset]: the design matrix
y = array.arraySlice(bandAxis, 2)  # the NDVI column: the response
fit = x.matrixSolve(y)  # per-pixel least-squares solution of x * b = y
# Get the coefficient for the year, effectively the slope of the long-term
# NDVI trend.
slope = fit.arrayGet([1, 0])
Map.setCenter(lng, lat, 12)
Map.addLayer(slope, {'min': -0.03, 'max': 0.03}, 'Slope')
# -
# ## Display Earth Engine data layers
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
| JavaScripts/Arrays/LinearRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# (C) <NAME>, Babol Noshirvani University of Technology,
# My Official Website: www.Amin-Naji.com
# My Email: <EMAIL>
# PLEASE CITE THE BELOW PAPER IF YOU USE THIS CODE
# <NAME>, <NAME>, and <NAME>, “Ensemble of CNN for Multi-Focus Image Fusion”, Information Fusion, vol. 51, pp. 201–214, 2019.
# DOI: https://doi.org/10.1016/j.inffus.2019.02.003
# -
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
import torch
import numpy as numpy
import cv2
import PIL
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import matplotlib.cm as cm
import torch.nn as nn
import torchvision.transforms as transforms
import imageio
from torch.autograd import Variable
class CNN(nn.Module):
    """Ensemble CNN for multi-focus image fusion (ECNN).

    Three input branches (grayscale patch, X-gradient, Y-gradient) are
    encoded separately; the two gradient branches are merged by channel
    concatenation before a final conv stage, and the joint feature vector
    is classified into 2 classes.

    Note: attribute names (conv1_1 ... conv5, fc1) and module creation
    order are preserved exactly so that pre-trained state_dict keys still
    match and seeded random initialization is unchanged.
    """

    @staticmethod
    def _conv_block(in_channels, out_channels, pool=True):
        # Conv -> BatchNorm -> LeakyReLU(0.1) [-> MaxPool(2)], matching
        # the layer order of the original architecture.
        layers = [
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.LeakyReLU(0.1, inplace=True),
        ]
        if pool:
            layers.append(nn.MaxPool2d(2))
        return nn.Sequential(*layers)

    def __init__(self):
        super(CNN, self).__init__()
        # Stage 1: one stem per input branch (no pooling yet).
        self.conv1_1 = self._conv_block(1, 64, pool=False)
        self.conv1_2 = self._conv_block(1, 64, pool=False)
        self.conv1_3 = self._conv_block(1, 64, pool=False)
        # Stage 2: widen to 128 channels, halve the spatial size.
        self.conv2_1 = self._conv_block(64, 128)
        self.conv2_2 = self._conv_block(64, 128)
        self.conv2_3 = self._conv_block(64, 128)
        # Stage 3: keep 128 channels, halve the spatial size again.
        self.conv3_1 = self._conv_block(128, 128)
        self.conv3_2 = self._conv_block(128, 128)
        self.conv3_3 = self._conv_block(128, 128)
        # Stage 4: head of the grayscale branch.
        self.conv4 = self._conv_block(128, 256)
        # Stage 5: head of the concatenated gradient branches.
        self.conv5 = self._conv_block(128*2, 256)
        # 256 channels * 8x4 spatial * 2 concatenated branches -> 2 logits.
        self.fc1 = nn.Linear(256*8*4*2, 2)

    def forward(self, x, y, z):
        # Grayscale branch: stages 1-4, then flatten.
        feats_x = self.conv4(self.conv3_1(self.conv2_1(self.conv1_1(x))))
        feats_x = feats_x.view(feats_x.size(0), -1)
        # Gradient branches: stages 1-3 each.
        feats_y = self.conv3_2(self.conv2_2(self.conv1_2(y)))
        feats_z = self.conv3_3(self.conv2_3(self.conv1_3(z)))
        # Merge gradients along channels, run stage 5, flatten.
        merged = self.conv5(torch.cat([feats_y, feats_z], 1))
        merged = merged.view(merged.size(0), -1)
        # Joint classification over both feature vectors.
        return self.fc1(torch.cat([feats_x, merged], 1))
model=CNN()  # fresh instance with random weights; trained weights are loaded below
model  # notebook echo: displays the layer-by-layer architecture
# +
# Path to the pre-trained ECNN weights (file name spelling kept as shipped).
model_path='./ECNN_trained_network_wights.pth'
use_gpu=torch.cuda.is_available()
if use_gpu:
    print('GPU Mode Activated')  # fixed typo in the message ("Acitavted")
    model = model.cuda()  # removed a second, redundant model.cuda() call
    # Wrap in DataParallel so the checkpoint's `module.`-prefixed keys
    # match the wrapped model's state_dict directly.
    model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
    model.load_state_dict(torch.load(model_path))
else:
    print('CPU Mode Activated')  # fixed typo in the message ("Acitavted")
    state_dict = torch.load(model_path, map_location='cpu')
    # The checkpoint was saved from a DataParallel model, so every key is
    # prefixed with `module.`; strip the prefix for the bare model.
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:]  # drop the leading 'module.' (7 characters)
        new_state_dict[name] = v
    # load params
    model.load_state_dict(new_state_dict)
# -
def to_var(x, volatile=False):
    """Move tensor `x` to the GPU when one is available and return it.

    The `volatile` flag is kept for call-site compatibility but ignored:
    `torch.autograd.Variable` and its `volatile` argument are deprecated
    since PyTorch 0.4 (Tensors and Variables were merged); wrap inference
    in `torch.no_grad()` instead.
    """
    if torch.cuda.is_available():
        x = x.cuda()
    return x
# +
# original_path1= './lytro-03-A.jpg'
# original_path2= './lytro-03-B.jpg'
# Source pair: two photographs of the same scene with different focus planes.
original_path1= './flower1.tif'
original_path2= './flower2.tif'
# +
# Per-branch preprocessing: each stacked patch pair is resized to the 64x32
# input expected by CNN.forward, converted to a tensor, and normalized with
# branch-specific statistics (presumably the training-set mean/std of the
# grayscale, X-gradient and Y-gradient channels -- TODO confirm).
tfms1 = transforms.Compose([
    transforms.Resize((64, 32)),
    transforms.ToTensor(),
    transforms.Normalize([0.45 ], [0.1])
])
tfms2 = transforms.Compose([
    transforms.Resize((64, 32)),
    transforms.ToTensor(),
    transforms.Normalize([ 0.050], [ 0.09])
])
tfms3 = transforms.Compose([
    transforms.Resize((64, 32)),
    transforms.ToTensor(),
    transforms.Normalize([0.06], [ 0.09])
])
# Load the two source images and convert them to numpy arrays.
img1_org = Image.open(original_path1)
img2_org = Image.open(original_path2)
img1_org = np.asarray(img1_org)
img2_org = np.asarray(img2_org)
height=img1_org.shape[0]
width=img2_org.shape[1]
# Side length (pixels) of the square sliding window classified by the CNN.
windows_size=32
# stride can be set as 2 or 4 or 8 based on the size of input images
# Small images are upscaled 2x for the scoring pass; larger ones are kept
# at native resolution and scanned with a finer stride.
if width>= 500 and height>=500:
    factor=1
    stride=4
else:
    factor=2
    stride=8
dim1=(width, height)  # original size (cv2 expects (width, height) order)
dim2 = (int(width*factor), int(height*factor))  # working size for scoring
img1 = cv2.resize(img1_org, dim2, interpolation = cv2.INTER_AREA)
img2 = cv2.resize(img2_org, dim2, interpolation = cv2.INTER_AREA)
# 3x3 Sobel kernel for the vertical (Y) gradient; its transpose gives X.
kernel=np.array([[-1 , -2 , -1], [0 , 0 , 0], [1 , 2, 1]])
# NOTE(review): images were loaded with PIL (RGB channel order) but are
# converted with COLOR_BGR2GRAY, which assumes BGR -- the red/blue weights
# are swapped; confirm whether this is intended.
img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img1_GY = cv2.filter2D(img1_gray,-1,kernel)
img1_GX = cv2.filter2D(img1_gray,-1,np.transpose(kernel))
img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
img2_GY = cv2.filter2D(img2_gray,-1,kernel)
img2_GX = cv2.filter2D(img2_gray,-1,np.transpose(kernel))
# The three CNN input branches per image: grayscale, X-gradient, Y-gradient.
test_image1_1=img1_gray
test_image1_2=img1_GX
test_image1_3=img1_GY
test_image2_1=img2_gray
test_image2_2=img2_GX
test_image2_3=img2_GY
source1=img1
source2=img2
# NOTE(review): j, MAP, score1/score2 and this FUSED are either unused or
# overwritten below; kept byte-identical for fidelity with the original.
j=0
MAP=np.zeros([img1.shape[0], img1.shape[1]])
score1=0
score2=0
FUSED=np.zeros(test_image1_1.shape)
windowsize_r = windows_size-1  # window extent minus one (inclusive slicing below)
windowsize_c = windows_size-1
# Vote maps accumulated over all windows: map1 holds focus votes for image 1,
# map2 counts how many windows covered each pixel.
map1=np.zeros([img1.shape[0], img1.shape[1]])
map2=np.zeros([img2.shape[0], img2.shape[1]])
# Slide a 32x32 window over both images; for every position, classify the
# stacked patch pair in both orders and vote on which image is in focus.
for r in tqdm(range(0,img1.shape[0] - windowsize_r, stride)):
    for c in range(0,img1.shape[1] - windowsize_c, stride):
        # Extract the co-located patch from each branch of both images.
        block_test1_1 = test_image1_1[r:r+windowsize_r+1,c:c+windowsize_c+1]
        block_test1_2 = test_image1_2[r:r+windowsize_r+1,c:c+windowsize_c+1]
        block_test1_3 = test_image1_3[r:r+windowsize_r+1,c:c+windowsize_c+1]
        block_test2_1 = test_image2_1[r:r+windowsize_r+1,c:c+windowsize_c+1]
        block_test2_2 = test_image2_2[r:r+windowsize_r+1,c:c+windowsize_c+1]
        block_test2_3 = test_image2_3[r:r+windowsize_r+1,c:c+windowsize_c+1]
        # Stack the two patches vertically in both orders (1-over-2 and
        # 2-over-1) so the network sees each ordering once.
        block1_1= np.concatenate((block_test1_1, block_test2_1), axis=0)
        block2_1= np.concatenate((block_test2_1, block_test1_1), axis=0)
        block1_1 = Image.fromarray(block1_1, 'L')
        block2_1 = Image.fromarray(block2_1, 'L')
        block1_2= np.concatenate((block_test1_2, block_test2_2), axis=0)
        block2_2= np.concatenate((block_test2_2, block_test1_2), axis=0)
        block1_2 = Image.fromarray(block1_2, 'L')
        block2_2 = Image.fromarray(block2_2, 'L')
        block1_3= np.concatenate((block_test1_3, block_test2_3), axis=0)
        block2_3= np.concatenate((block_test2_3, block_test1_3), axis=0)
        block1_3 = Image.fromarray(block1_3, 'L')
        block2_3 = Image.fromarray(block2_3, 'L')
        # Apply the per-branch resize/normalize transforms.
        imout1_1=tfms1(block1_1)
        imout2_1=tfms1(block2_1)
        imout1_2=tfms2(block1_2)
        imout2_2=tfms2(block2_2)
        imout1_3=tfms3(block1_3)
        imout2_3=tfms3(block2_3)
        if use_gpu:
            imout1_1=to_var(imout1_1)
            imout2_1=to_var(imout2_1)
            imout1_2=to_var(imout1_2)
            imout2_2=to_var(imout2_2)
            imout1_3=to_var(imout1_3)
            imout2_3=to_var(imout2_3)
        # No-op reassignments; kept byte-identical for fidelity.
        imout1_1=(imout1_1)
        imout2_1=(imout2_1)
        imout1_2=(imout1_2)
        imout2_2=(imout2_2)
        imout1_3=(imout1_3)
        imout2_3=(imout2_3)
        # Add the batch dimension expected by CNN.forward.
        inputs1_1 = imout1_1.unsqueeze(0)
        inputs2_1 = imout2_1.unsqueeze(0)
        inputs1_2 = imout1_2.unsqueeze(0)
        inputs2_2 = imout2_2.unsqueeze(0)
        inputs1_3 = imout1_3.unsqueeze(0)
        inputs2_3 = imout2_3.unsqueeze(0)
        # Predicted class index (0 or 1) for each stacking order; the class
        # presumably encodes which half of the stack is in focus -- confirm
        # against the training labels.
        model.eval()
        outputs1 = model(inputs1_1,inputs1_2,inputs1_3)
        _, predicted1 = torch.max(outputs1.data, 1)
        score1=predicted1.detach().cpu().numpy()
        model.eval()
        outputs2 = model(inputs2_1,inputs2_2,inputs2_3)
        _, predicted2 = torch.max(outputs2.data, 1)
        score2=predicted2.detach().cpu().numpy()
        # Record window coverage, and vote +1 (image 1 in focus) or -1
        # (image 2 in focus) over the whole window.
        map2[r:r+windowsize_r+1,c:c+windowsize_c+1] += 1
        if score1 <= score2:
            map1[r:r+windowsize_r+1,c:c+windowsize_c+1] += +1
        else:
            map1[r:r+windowsize_r+1,c:c+windowsize_c+1] += -1
# Bring the accumulated vote map back to the original image resolution.
map1 = cv2.resize(map1, dim1, interpolation = cv2.INTER_AREA)
test_image1 = img1_org
test_image2 = img2_org
# Binary decision map (1 = take pixel from image 1) and the fused output.
map3=np.zeros([img1_org.shape[0], img2_org.shape[1]])
FUSED=np.zeros(img1_org.shape)
for r in range(0,img1_org.shape[0], 1):
    for c in range(0,img1_org.shape[1], 1):
        # Negative net vote: image 2 won this pixel; otherwise image 1.
        if map1[r,c] < 0:
            map3[r,c] =0
            FUSED[r,c]=img2_org[r,c]
        else:
            map3[r,c] =1
            FUSED[r,c]=img1_org[r,c]
FUSED_8=FUSED.astype(np.uint8)  # back to 8-bit for display and saving
plt.imshow(map3, cm.gray)
plt.show()
plt.imshow(FUSED_8)
plt.show()
imageio.imwrite('./output.tif', FUSED_8)
imageio.imwrite('./output.jpg', FUSED_8)
# +
# x=test_image1.shape[0]//20
# z=test_image1.shape[1]//20
# kernel = np.ones((x,z),np.float32)/(x*z)
# MAP2 = cv2.filter2D(map1,-1,kernel)
# MAP2[MAP2<0.5] = 0
# MAP2[MAP2>=0.5] = 1
# FUSED_CV=np.zeros(img1.shape)
# FUSED=np.zeros(img1.shape)
# for r in range(0,img1.shape[0], 1):
# for c in range(0,img1.shape[1], 1):
# if MAP2[r,c] < 0.5:
# FUSED_CV[r,c]=img1[r,c]
# else:
# FUSED_CV[r,c]=img2[r,c]
# FUSED_CV=FUSED_CV.astype(np.uint8)
# # imageio.imwrite('./output_CV.tif', FUSED_CV)
# # imageio.imwrite('./output_CV.jpg', FUSED_CV)
# plt.imshow(MAP2, cm.gray)
# -
| ECNN_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py38
# language: python
# name: py38
# ---
import numpy as np
# Centre-of-mass energy for a fixed-target collision (natural units),
# with beam energy E_1 and a target nucleon of mass m_2 at rest:
# $$
# s = |(E_1, p_{1z}) + (m_2, 0)|^2 \\
# = (E_1 + m_2)^2 - p_{1z}^2 \\
# = E_1^2 + 2 E_1 m_2 + m_2^2 - p_{1z}^2 \\
# = m_1^2 + m_2^2 + 2 E_1 m_2 \\
# \approx 2 E_1 m_2
# $$
#
# $$
# \sqrt{s} = \sqrt{2 E_1 m_2}
# $$
# Charge-to-nucleon ratio Z/A for lead (82 protons / 208 nucleons); at fixed
# magnetic rigidity R the energy per nucleon scales with this factor.
r = 82/208.
# proton-fixed target R = 6.5 TV (0.93 GeV ~ nucleon rest mass; result in GeV)
(2 * 6.5e3 * 0.93)**0.5
# proton-fixed target R = 7 TV
(2 * 7e3 * 0.93)**0.5
# lead-fixed target R = 6.5 TV
(2 * 6.5e3 * 0.93 * r)**0.5
# lead-fixed target R = 7 TV (the original comment said 6.5 TV, but the
# expression uses 7e3)
(2 * 7e3 * 0.93 * r)**0.5
| Energy conversion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import time
import numpy as np
import torch
print("PyTorch version:",torch.__version__)
# Report every visible GPU and pick the compute device.
if torch.cuda.is_available():
    for i in range(torch.cuda.device_count()):
        # BUG FIX: the original called get_device_capability(0) for every
        # device in the loop; query device i so multi-GPU hosts report
        # each card's own compute capability.
        major, minor = torch.cuda.get_device_capability(i)
        print(f"CUDA GPU {i+1}: {torch.cuda.get_device_name(i)} [Compute Capability: {major}.{minor}]")
    device = torch.device('cuda')
    # DataLoader settings that only pay off when feeding a GPU.
    kwargs = {'num_workers': 8, 'pin_memory': True}
    torch.backends.cudnn.benchmark = True  # autotune kernels for fixed input shapes
else:
    device = torch.device('cpu')
    print("CUDA GPU is not available. :(")
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import pytorch_lightning as pl
from pytorch_lightning.loggers import TensorBoardLogger
print ("PyTorch Lightning version:",pl.__version__)
import scipy.sparse as sp
from argparse import Namespace
from utilities.custom_lightning import CSVProfiler
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logging.debug("Logging enabled at DEBUG level.")
from constants import (SEED, DATA_DIR, LOG_DIR, TRAIN_DATA_PATH, VAL_DATA_PATH, TEST_DATA_PATH)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
import optuna
from optuna.integration import PyTorchLightningPruningCallback
print("Optuna version:",optuna.__version__)
# -
NAME = r'AdamOptunaUXML'
# Build artifact paths portably with os.path.join; the original mixed '/'
# and '\\' separators, which breaks on non-Windows hosts.
SAVE_PATH = os.path.join(DATA_DIR, NAME + '.pt')
PROFILE_PATH = os.path.join(LOG_DIR, NAME, 'profile.csv')
# +
class Interactions(Dataset):
    """Torch dataset view over a scipy COO interactions matrix.

    Each item corresponds to one stored (non-zero) entry and is returned
    as ``((row, col), value)``.
    """

    def __init__(self, matrix):
        self.matrix = matrix
        # Matrix dimensions double as the user/item counts.
        self.n_users, self.n_items = matrix.shape

    def __getitem__(self, index):
        coo = self.matrix
        return (coo.row[index], coo.col[index]), coo.data[index]

    def __len__(self):
        # Number of stored entries, i.e. observed interactions.
        return self.matrix.nnz
interaction = Interactions
# -
class TestingCallbacks(pl.Callback):
    """Create, then persist, the test-time prediction matrix.

    A module-level sparse matrix ``y_hat`` is (re)created when testing
    starts; ``AdamUXML.test_step`` fills it entry by entry, and it is saved
    to disk when testing ends.
    """

    def on_test_start(self, trainer, pl_module):
        global y_hat
        # DOK format supports cheap incremental single-entry writes.
        y_hat = sp.dok_matrix((hparams.total_users, hparams.total_items), dtype=np.float32)

    def on_test_end(self, trainer, pl_module):
        logging.debug(f"Non-zero values in prediction matrix: {y_hat.nnz:,}")
        # BUG FIX: the original concatenated DATA_DIR and NAME without a path
        # separator (unlike SAVE_PATH above); join them explicitly.
        sp.save_npz(os.path.join(DATA_DIR, NAME + '-y_hat.npz'), y_hat.tocoo())
class MetricsCallback(pl.Callback):
    """Record ``trainer.callback_metrics`` after every validation run.

    ``objective`` reads ``metrics[-1]["avg_val_loss"]`` as the trial value.
    """

    def __init__(self):
        super().__init__()
        # One entry per validation end, in chronological order.
        self.metrics = []

    def on_validation_end(self, trainer, pl_module):
        # NOTE(review): this appends a reference to the trainer's (mutable)
        # metrics dict, not a copy — earlier entries may alias later state;
        # confirm if anything other than the last entry is ever used.
        self.metrics.append(trainer.callback_metrics)
class AdamUXML(pl.LightningModule):
    """Biased matrix-factorization recommender trained with Adam.

    The predicted score for a (user, item) pair is
    ``user_bias + item_bias + dot(dropout(user_vec), dropout(item_vec))``.
    ``n_factors`` and ``dropout_p`` are sampled from the Optuna ``trial``;
    all remaining settings come from the ``hparams`` namespace.
    """

    def __init__(self, trial, hparams):
        super(AdamUXML, self).__init__()
        self.hparams = hparams
        # Hyperparameters delegated to the Optuna search.
        n_factors = trial.suggest_int("n_factors", 5, 40)
        dropout_p = trial.suggest_uniform("dropout_p", 0.0, 0.8)
        # Latent factor tables plus per-user / per-item scalar biases.
        self.user_factors = nn.Embedding(hparams.total_users, n_factors, sparse=hparams.sparse)
        self.item_factors = nn.Embedding(hparams.total_items, n_factors, sparse=hparams.sparse)
        self.user_biases = nn.Embedding(hparams.total_users, 1, sparse=hparams.sparse)
        self.item_biases = nn.Embedding(hparams.total_items, 1, sparse=hparams.sparse)
        self.dropout = nn.Dropout(p=dropout_p)

    def forward(self, users, items):
        """Predict scores for aligned batches of user and item indices."""
        user_factors_users = self.user_factors(users)
        item_factors_items = self.item_factors(items)
        predictions = self.user_biases(users)
        predictions += self.item_biases(items)
        # Dot product of the (dropout-regularised) factor vectors.
        predictions += (self.dropout(user_factors_users) * self.dropout(item_factors_items)).sum(dim=1, keepdim=True)
        return predictions.squeeze()

    def MSELoss(self, logits, labels):
        """Mean squared error between predicted and observed interactions."""
        return nn.functional.mse_loss(logits, labels)

    def training_step(self, train_batch, batch_idx):
        x, y = train_batch
        row, column = x
        # Embedding lookups require int64 indices.
        row = row.long()
        column = column.long()
        logits = self.forward(row,column)
        loss = self.MSELoss(logits, y)
        logs = {'train_loss': loss}  # NOTE(review): built but never returned or logged
        return {'loss': loss}

    def validation_step(self, val_batch, batch_idx):
        x, y = val_batch
        row, column = x
        row = row.long()
        column = column.long()
        logits = self.forward(row,column)
        loss = self.MSELoss(logits, y)
        return {'val_loss': loss}

    def validation_epoch_end(self, outputs):
        # "avg_val_loss" is what the Optuna pruning callback monitors.
        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        return {"log": {"avg_val_loss": avg_loss}}

    def test_step(self, test_batch, batch_idx):
        x, y = test_batch
        row, column = x
        row = row.long()
        column = column.long()
        logits = self.forward(row,column)
        loss = self.MSELoss(logits, y)
        # Write every prediction into the module-level sparse matrix that
        # TestingCallbacks.on_test_start created.
        logits_array = logits.cpu().numpy()
        r = row.cpu().numpy()
        c = column.cpu().numpy()
        for i in range(len(logits_array)):
            y_hat[r[i],c[i]]=logits_array[i]
        return {'test_loss': loss}

    def test_epoch_end(self, outputs):
        avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()
        tensorboard_logs = {'MSE': avg_loss}  # NOTE(review): unused
        print(f"Test Mean Squared Error (MSE): {avg_loss}")
        return {"log": {"avg_test_loss": avg_loss}}

    def prepare_data(self):
        # Sparse matrices produced by the preprocessing step.
        self.train_dataset = sp.load_npz(TRAIN_DATA_PATH)
        self.val_dataset = sp.load_npz(VAL_DATA_PATH)
        self.test_dataset = sp.load_npz(TEST_DATA_PATH)

    def train_dataloader(self):
        return DataLoader(interaction(self.train_dataset), batch_size=self.hparams.batch_size, shuffle=True)

    def val_dataloader(self):
        return DataLoader(interaction(self.val_dataset), batch_size=self.hparams.batch_size, shuffle=False)

    def test_dataloader(self):
        return DataLoader(interaction(self.test_dataset), batch_size=self.hparams.batch_size, shuffle=False)

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(),
                                     lr=self.hparams.learning_rate,
                                     betas=self.hparams.betas,
                                     amsgrad=self.hparams.amsgrad)
        return optimizer
# Load the training matrix only to discover its dimensions, then free it.
uxm = sp.load_npz(TRAIN_DATA_PATH)
total_users, total_items = uxm.shape
del uxm
def objective(trial):
    """Optuna objective: train one AdamUXML model for this trial and return
    its final average validation loss (the study minimises this value)."""
    # FIX: build the checkpoint path with os.path.join throughout; the
    # original embedded a raw backslash ("\O"), which is non-portable and an
    # invalid escape sequence on modern Python.
    checkpoint_callback = pl.callbacks.ModelCheckpoint(
        os.path.join(LOG_DIR, "Optuna", "trial_{}".format(trial.number), "{epoch}"),
        monitor="avg_val_loss",
    )
    metrics_callback = MetricsCallback()
    trainer = pl.Trainer(
        logger=False,
        val_percent_check=hparams.val_percent_check,
        checkpoint_callback=checkpoint_callback,
        max_epochs=hparams.max_epochs,
        gpus=1,
        callbacks=[metrics_callback],
        # Reports "avg_val_loss" to Optuna and stops unpromising trials early.
        early_stop_callback=PyTorchLightningPruningCallback(trial, monitor="avg_val_loss"),
    )
    model = AdamUXML(trial, hparams)
    trainer.fit(model)
    return metrics_callback.metrics[-1]["avg_val_loss"]
# Fixed (non-searched) hyperparameters, bundled as an argparse-style
# namespace so the model can read them via `hparams.<name>`.
hparams = Namespace(**{
    'batch_size': 1024,
    'val_percent_check': 1.0, # set this to <1.0 to use data subset for validation
    'learning_rate': 0.001,
    'betas': (0.9, 0.999), # Adam's exponential-decay rates
    # 'dropout_p': 0.2, ### delegated to Optuna
    # 'n_factors': 20, ### delegated to Optuna
    'sparse': False, # use dense embedding gradients
    'max_epochs': 10,
    'amsgrad': False,
    'action':"store_true", # do we need this? NOTE(review): appears unused — likely an argparse leftover
    'pruning': True, # enable Optuna's MedianPruner (see the study setup below)
    'total_users': total_users,
    'total_items': total_items
})
# +
# Run the hyperparameter search, median-pruning unpromising trials if enabled.
start_time = time.time()
pruner = optuna.pruners.MedianPruner() if hparams.pruning else optuna.pruners.NopPruner()
study = optuna.create_study(direction='minimize', pruner=pruner)
study.optimize(objective, n_trials=100, timeout=None)
# Report the outcome of the study.
print(f"Number of finished trials: {len(study.trials)}")
print("Best trial:")
trial = study.best_trial
print(f" Value: {trial.value}")
print(" Params: ")
for key, value in trial.params.items():
    print(f" {key}: {value}")
print(f"Elapsed time: {time.time()-start_time:.2f} seconds")
# -
# Render each diagnostic figure once and export it to SVG; the original
# called every plot_* function twice (once per line), recomputing each
# figure just to export it.
export_figures = {
    'optuna-optimization-history.svg': optuna.visualization.plot_optimization_history(study),
    'optuna-plot-slice.svg': optuna.visualization.plot_slice(study),
    'optuna-plot-contour.svg': optuna.visualization.plot_contour(study, params=['n_factors', 'dropout_p']),
    'optuna-plot-parallel-coordinate.svg': optuna.visualization.plot_parallel_coordinate(study),
}
for filename, figure in export_figures.items():
    figure.write_image('./export/' + filename)
| train-uxml-adam-optuna.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
from gensim.models import word2vec
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from keras.preprocessing import sequence
from keras.models import Sequential,Model
from keras.layers import Dense, Embedding, SpatialDropout1D, Dropout, Activation, Input, LSTM, Bidirectional
from keras.optimizers import RMSprop, Adam
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
from keras.callbacks import EarlyStopping
from keras.utils import to_categorical
import re
#time
from datetime import datetime
from datetime import timedelta
import jieba
import jieba.analyse
jieba.set_dictionary('dict.idkrsi.txt') # swap in a custom default dictionary
jieba.analyse.set_stop_words("stopword.goatwang.kang.txt") # register the stop-word list for keyword extraction
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
# get data
# # ! conda install pandas-datareader s
#import pandas_datareader as pdr
# visual
# # ! pip install mpl-finance
#import matplotlib.pyplot as plt
#import mpl_finance as mpf
#import seaborn as sns
# https://github.com/mrjbq7/ta-lib
# # ! pip install ta-lib
#import talib
# +
# Load the three article sources (BBS, forum, news).
df_bbs = pd.read_csv("bda2019_dataset/bbs2.csv",encoding="utf-8")
df_forum = pd.read_csv("bda2019_dataset/forum2.csv",encoding="utf-8")
df_news = pd.read_csv("bda2019_dataset/news2.csv",encoding="utf-8")
# News articles carry no comment counter; add one so the frames align.
df_news['comment_count']=0
df_article = pd.concat([df_forum, df_bbs, df_news]) # merge the three sources
del df_bbs, df_forum, df_news
df_article['post_time'] = pd.to_datetime(df_article['post_time'])
df_article['post_time2'] = df_article['post_time'].dt.date # keep the date only (.dt.date for a Series, .date() for a scalar)
#df_article['label'] = 'even'
# Strip CSV-hostile characters (commas, newlines, quotes) from the text fields.
df_article['content'] = df_article['content'].astype(str).str.replace(',' , ' ').str.replace('\n' , ' ').str.replace('"' , ' ').str.replace("'" , ' ')
df_article['title'] = df_article['title'].astype(str).str.replace(',' , ' ').str.replace('\n' , ' ').str.replace('"' , ' ').str.replace("'" , ' ')
df_article = df_article.sort_values(by=['post_time']).reset_index(drop=True) # sort by post_time, then rebuild the index
df_article.head(2)
#df_article2 = df_article[['post_time2','title','content']]
#df_article2 = df_article[['post_time2','title','content']]
# +
# Load three years of TWSE daily quotes and merge them.
df_TWSE2018 = pd.read_csv("bda2019_dataset/TWSE2018.csv",encoding="utf-8")
df_TWSE2017 = pd.read_csv("bda2019_dataset/TWSE2017.csv",encoding="utf-8")
df_TWSE2016 = pd.read_csv("bda2019_dataset/TWSE2016.csv",encoding="utf-8")
df_TWSE = pd.concat([df_TWSE2016, df_TWSE2017, df_TWSE2018]) # merge the three years
del df_TWSE2016, df_TWSE2017, df_TWSE2018
# Security code as string; trading date as datetime.
df_TWSE['證券代碼'] = df_TWSE['證券代碼'].astype(str)
df_TWSE['年月日'] = pd.to_datetime(df_TWSE['年月日'])
# Thousands-separated numeric columns ("1,000" -> 1000.0): strip commas and
# cast. The original repeated this line-by-line for every column.
_float_cols = ['開盤價(元)', '最高價(元)', '最低價(元)', '收盤價(元)',
               '成交量(千股)', '成交值(千元)', '流通在外股數(千股)', '本益比-TSE']
for _col in _float_cols:
    df_TWSE[_col] = df_TWSE[_col].str.replace(',' , '').astype('float64')
df_TWSE['成交筆數(筆)'] = df_TWSE['成交筆數(筆)'].str.replace(',' , '').astype('int64')
df_TWSE['股價淨值比-TSE'] = df_TWSE['股價淨值比-TSE'].astype('float64')
df_TWSE.head(2)
# +
# Which stock to model.
#company_name = '國巨'
company_name = '奇力新'
# Keep only articles containing any of these keywords (passive-component
# industry terms plus peer-company names).
#company_words = '被動元件|積層陶瓷電容|MLCC|電感|晶片電阻|車用電子|凱美|同欣電|大毅|君耀|普斯|國巨'
company_words = '被動元件|積層陶瓷電容|MLCC|電感|晶片電阻|車用電子|飛磁|旺詮|美磊|美桀|向華科技|奇力新'
# Daily move threshold (fraction) for tagging a day as 'up' / 'down'.
PA = 0.05
# Half-width (fraction) of the 'even' band around zero.
PAE = 0.003
# +
# 用日期排序 再把index重排
#2327
#df_trend = df_TWSE[df_TWSE['證券代碼'].str.contains('國巨')].sort_values(by=['年月日']).reset_index(drop=True)
#2456
#df_trend = df_TWSE[df_TWSE['證券代碼'].str.contains('奇力新')].sort_values(by=['年月日']).reset_index(drop=True)
#2478
#df_trend = df_TWSE[df_TWSE['證券代碼'].str.contains('大毅')].sort_values(by=['年月日']).reset_index(drop=True)
#6271
#df_trend = df_TWSE[df_TWSE['證券代碼'].str.contains('同欣電')].sort_values(by=['年月日']).reset_index(drop=True)
df_trend = df_TWSE[df_TWSE['證券代碼'].str.contains(company_name)].sort_values(by=['年月日']).reset_index(drop=True)
del df_TWSE
df_trend.head(2)
# -
##增欄位:fluctuation幅度 tag漲跌平
df_trend['fluctuation'] = 0.0
df_trend['tag']='--'
df_trend['closeshift'] = 0.0
df_trend.head(2)
# +
# ##增欄位:fluctuation幅度 tag漲跌平
# df_trend['fluctuation'] = 0.0
# df_trend['tag']='--'
# ###計算漲跌
# for index, row in df_trend.iterrows():
# try:
# margin =(float(df_trend.loc[index,'收盤價(元)']) - float(df_trend.loc[index-1,'收盤價(元)']) )/ float(df_trend.loc[index-1,'收盤價(元)'])
# df_trend.loc[index,'fluctuation']=margin
# if margin >=0.03:
# df_trend.loc[index,'tag']='up'
# elif margin <= -0.03:
# df_trend.loc[index,'tag']='down'
# else:
# df_trend.loc[index,'tag']='even'
# except:
# continue
# -
# Previous trading day's close, shifted onto each row (first row becomes NaN).
df_trend['closeshift'] = df_trend['收盤價(元)'].shift(periods=1)#.fillna(value=0.0, inplace=True)
#df_trend['closeshift'].fillna(value= 0.0, inplace=True)
df_trend.head(2)
# Day-over-day relative change of the closing price.
df_trend['fluctuation'] = (df_trend['收盤價(元)'] - df_trend['closeshift']) / df_trend['closeshift']
df_trend.head(2)
# Tag each day: >= PA -> 'up', <= -PA -> 'down', within +/-PAE -> 'even'.
# Days whose move falls between PAE and PA keep the default '--' tag.
df_trend.loc[df_trend['fluctuation'] >= PA, 'tag'] = 'up'
df_trend.loc[df_trend['fluctuation'] <= -PA, 'tag'] = 'down'
df_trend.loc[(df_trend['fluctuation'] >= -PAE) & (df_trend['fluctuation'] <= PAE), 'tag'] = 'even'
df_trend.head(2)
# Class balance of the three tags.
len(df_trend[df_trend['tag']=='up'])
len(df_trend[df_trend['tag']=='down'])
len(df_trend[df_trend['tag']=='even'])
#df_company = df_article[ df_article['content'].str.contains('國巨')] # df 某欄位 string contains "國巨"
#df_company = df_article[ df_article['content'].str.contains('奇力新')]
#df_company = df_article[ df_article['content'].str.contains('大毅')]
#df_company = df_article[ df_article['content'].str.contains('同欣電 ')]
df_company = df_article[ df_article['content'].str.contains(company_words)]
print(len(df_company))
del df_article
df_company.head(2)
# Load the stop-word list, one word per line.
with open('stopword.goatwang.kang.txt', 'r', encoding='utf-8') as data:
    stopwords = [line.strip('\n') for line in data]
# +
# Tokenise every article's 'content' column with jieba.
corpus = [] # one space-joined token string per article
for index, row in df_company.iterrows():
    not_cut = df_company.loc[index,'content']
    # not_cut = row['description'] # equivalent to the line above
    seg_generator = jieba.cut(not_cut, cut_all=False) # generator of tokens
    seglist = list(seg_generator) # token list for the whole article
    seglist = list(filter(lambda a: a not in stopwords and a != '\n', seglist )) # drop stop words (optional step)
    corpus.append(' '.join(seglist)) # ' '.join(seg_generator) would also work
df_company["content2"]=corpus
# -
df_company.head(2)
df_trend.loc[2,'年月日'].date() + timedelta(days=-1) == df_trend.loc[1,'年月日'].date()
df_trend.loc[5,'年月日'].date() + timedelta(days=-1) == df_trend.loc[4,'年月日'].date()
d = df_trend.loc[1,'年月日'].date() - df_trend.loc[ 1-1 ,'年月日'].date() #相減差幾天
d
d.days #只取天數
int(d.days) #幾天 轉整數
df_trend.loc[3,'年月日'].date()
df_company[ df_company['post_time2'] == df_trend.loc[3,'年月日'].date() ].head() # 某欄位 == n 的 全部撈出來
# +
# # 演算法
# for index, row in df_2327.iterrows():
# try:
# if df_2327.loc[index,'年月日'].date() + timedelta(days=-1) == df_2327.loc[index-1,'年月日'].date():
# df_forum.loc[df_forum['post_time2'] == df_2327.loc[index,'年月日'].date() + timedelta(days=-1), 'label'] = df_2327.loc[index,'tag']
# # 如果股票前一筆差1天 # 那前1天的文章標上當天的漲跌
# elif df_2327.loc[index,'年月日'].date() + timedelta(days=-2) == df_2327.loc[index-1,'年月日'].date():
# df_forum.loc[df_forum['post_time2'] == df_2327.loc[index,'年月日'].date() + timedelta(days=-1), 'label'] = df_2327.loc[index,'tag']
# df_forum.loc[df_forum['post_time2'] == df_2327.loc[index,'年月日'].date() + timedelta(days=-2), 'label'] = df_2327.loc[index,'tag']
# # 如果股票前一筆差2天 #那前2天的文章標上當天的漲跌
# elif df_2327.loc[index,'年月日'].date() + timedelta(days=-3) == df_2327.loc[index-1,'年月日'].date():
# df_forum.loc[df_forum['post_time2'] == df_2327.loc[index,'年月日'].date() + timedelta(days=-1), 'label'] = df_2327.loc[index,'tag']
# df_forum.loc[df_forum['post_time2'] == df_2327.loc[index,'年月日'].date() + timedelta(days=-2), 'label'] = df_2327.loc[index,'tag']
# df_forum.loc[df_forum['post_time2'] == df_2327.loc[index,'年月日'].date() + timedelta(days=-3), 'label'] = df_2327.loc[index,'tag']
# elif df_2327.loc[index,'年月日'].date() + timedelta(days=-4) == df_2327.loc[index-1,'年月日'].date():
# df_forum.loc[df_forum['post_time2'] == df_2327.loc[index,'年月日'].date() + timedelta(days=-1), 'label'] = df_2327.loc[index,'tag']
# df_forum.loc[df_forum['post_time2'] == df_2327.loc[index,'年月日'].date() + timedelta(days=-2), 'label'] = df_2327.loc[index,'tag']
# df_forum.loc[df_forum['post_time2'] == df_2327.loc[index,'年月日'].date() + timedelta(days=-3), 'label'] = df_2327.loc[index,'tag']
# df_forum.loc[df_forum['post_time2'] == df_2327.loc[index,'年月日'].date() + timedelta(days=-4), 'label'] = df_2327.loc[index,'tag']
# except:
# continue
# +
# 看所有相差的天數
# for index, row in df_2327.iterrows():
# try:
# n = df_2327.loc[index,'年月日'].date() - df_2327.loc[index-1,'年月日'].date()
# print(n)
# except:
# continue
# 最多12天
# -
# If trading day t is n calendar days after the previous trading day, label
# every article posted in that n-day gap with day t's movement tag.
df_company['label5566']='--'
for index, row in df_trend.iterrows():
    try:
        # Calendar days since the previous trading row (timedelta -> int).
        n = int((df_trend.loc[index,'年月日'].date() - df_trend.loc[index-1,'年月日'].date()).days)
    except KeyError:
        # FIX: the original used a bare `except: continue`, which silently
        # hid *all* errors; only the first row (no index-1 neighbour)
        # legitimately needs to be skipped.
        continue
    for i in range(1, n+1):
        df_company.loc[df_company['post_time2'] == df_trend.loc[index,'年月日'].date() + timedelta(days=-i), 'label5566'] = df_trend.loc[index,'tag']
print(len(df_company[df_company['label5566']=='down']))
df_company[df_company['label5566']=='down'].head(2)
print(len(df_company[df_company['label5566']=='up']))
df_company[df_company['label5566']=='up'].head(2)
print(len(df_company[df_company['label5566']=='even']))
df_company[df_company['label5566']=='even'].head(2)
df_company2 = df_company[df_company['label5566'].str.contains('up|down|even')] #只取漲跌
# The maximum number of words to be used. (most frequent)
MAX_NB_WORDS = 50000
# Max number of words in each complaint.
MAX_SEQUENCE_LENGTH = 1000
# This is fixed.
EMBEDDING_DIM = 300
# ### 下面的几条语句,会自动帮助我们,把分词之后的评论信息,转换成为一系列的数字组成的序列。
# +
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(df_company2['content2'].to_numpy())
word_index = tokenizer.word_index # 每個詞的索引
print('Found %s unique tokens.' % len(word_index))
#print(word_index)
# -
X = tokenizer.texts_to_sequences(df_company2['content2'].to_numpy()) # 轉成向量
X = pad_sequences(X, maxlen=MAX_SEQUENCE_LENGTH) # 向量長短不一 統一化補0
print('Shape of X:', X.shape)
# +
# import re
# features = [] # features=list()
# with open('finance.words.txt', 'r',encoding='utf-8') as data:
# for line in data:
# # line = re.sub('[a-zA-Z0-9\W]', '', line) # 把數字英文去掉
# line = re.sub('[0-9]', '', line) # 把數字去掉
# features.append(line.replace('\n', '').replace(' ', '')) # 空格 \n去掉
# print(len(features))
# print(type(features))
# features[:10]
# -
le = LabelEncoder() # encode the string labels ('up'/'down'/'even') as integers
# NOTE(review): df_company2 is a filtered slice of df_company, so this
# assignment may raise SettingWithCopyWarning; consider .copy() upstream.
df_company2['label5567'] = le.fit_transform(df_company2['label5566'])
Y = pd.get_dummies(df_company2['label5567']).to_numpy() # one-hot 2-D label matrix (Keras does not accept 1-D labels here)
print('Shape of Y:', Y.shape)
from sklearn import model_selection
#將X:features array, Y:lable array 都切成 1:4
validation_size = 0.20
seed = 7
X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X, Y, test_size=validation_size,
random_state=seed,
stratify=Y)
print(X_train.shape, Y_train.shape)
print(X_validation.shape, Y_validation.shape)
# ### 在Keras模型中使用预训练的词向量
# https://keras-cn-docs.readthedocs.io/zh_CN/latest/blog/word_embedding/
#
# ### Embedding layer设置
# ### 下載的詞向量文件中解析出每个词和它所对应的词向量,并用字典的方式存储
#
# #### 詞向量載點
# https://fasttext.cc/docs/en/crawl-vectors.html
#
# https://github.com/lintseju/word_embedding
# +
import os
import sys
# Parse the pretrained fastText vectors: each line is "<word> <300 floats>".
embeddings_index = {}
with open('cc.zh.300.vec', 'r', encoding='utf-8') as vector_file:
    for line in vector_file:
        tokens = line.split()
        embeddings_index[tokens[0]] = np.asarray(tokens[1:], dtype='float32')
print('Found %s word vectors.' % len(embeddings_index))
# -
# ### 此时,我们可以根据得到的字典生成上文所定义的词向量矩阵
# +
# Rows follow the Tokenizer's 1-based word indices (hence the +1); words
# missing from the pretrained vocabulary keep an all-zero row.
embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
for token, row_idx in word_index.items():
    pretrained = embeddings_index.get(token)
    if pretrained is not None:
        embedding_matrix[row_idx] = pretrained
print(embedding_matrix.shape)
print(embedding_matrix)
# -
# ### 另一個讀取詞向量的方法
# https://sspai.com/post/49121
#
# https://github.com/wshuyi/demo-chinese-text-classification-lstm-keras/blob/master/demo.ipynb
# +
# from gensim.models import KeyedVectors
# zh_model = KeyedVectors.load_word2vec_format('zh_wiki_fasttext_300.txt')
# zh_model.vectors[0]
# +
# list(iter(zh_model.vocab))[:5]
# +
# len(zh_model[next(iter(zh_model.vocab))])
# +
# embedding_dim = len(zh_model[next(iter(zh_model.vocab))])
# +
# embedding_matrix = np.random.rand(MAX_NB_WORDS, EMBEDDING_DIM)
# +
# embedding_matrix
# +
# embedding_matrix = (embedding_matrix - 0.5) * 2
# +
# embedding_matrix
# +
# zh_model.get_vector('的')
# -
# for word, i in word_index.items():
# if i < MAX_NB_WORDS:
# try:
# embedding_vector = zh_model.get_vector(word)
# embedding_matrix[i] = embedding_vector
# except:
# pass
# +
# embedding_matrix
# -
# ### 现在我们将这个词向量矩阵加载到Embedding层中,注意,我们设置trainable=False使得这个编码层不可再训练。
# +
## LSTM
#inputs = Input(name='inputs',shape=[MAX_SEQUENCE_LENGTH])
model = Sequential()
## Embedding(词汇表大小,batch大小,每个新闻的词长)
# model.add( Embedding(MAX_NB_WORDS, EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH) )
model.add( Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False))
model.add( Bidirectional(LSTM(128, return_sequences=False )) )
model.add( Dense(128,activation="relu") )
model.add( Dense(128,activation="relu") )
model.add( Dropout(0.5))
model.add( Dense(3,activation="softmax") )
#model = Model(inputs=inputs,outputs=layer)
model.summary()
model.compile(loss="categorical_crossentropy",
optimizer='adam', # optimizer=Adam(lr=0.0001, decay=1e-6),
metrics=["accuracy"])
# -
model_fit = model.fit(X_train, Y_train, batch_size=64,epochs=20,
validation_data=(X_validation,Y_validation),
callbacks=[EarlyStopping(monitor='val_loss',min_delta=0.0001)] ## 当val-loss不再提升时停止训练
)
np.argmax(Y_train,axis=1) # 轉成1維
predictions = model.predict(X_validation)
print(accuracy_score(np.argmax(Y_validation,axis=1), np.argmax(predictions,axis=1)))
print(confusion_matrix(np.argmax(Y_validation,axis=1), np.argmax(predictions,axis=1)))
print(classification_report(np.argmax(Y_validation,axis=1), np.argmax(predictions,axis=1)))
# +
#del model
# -
| 3.per_article_pretain_vec_3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### DEMSLV03
# # Compute fixedpoint of $f(x) = x^{0.5}$
# Compute fixedpoint of $f(x) = x^{0.5}$ using Newton, Broyden, and function iteration methods.
#
# Initial values generated randomly. Some algorithms may fail to converge, depending on the initial value.
#
# True fixedpoint is $x=1$.
from demos.setup import np, tic, toc
from compecon import NLP
# ### Randomly generate starting point
xinit = np.random.rand(1) + 0.5
# ### Set up the problem
# +
def g(x):
    """Fixed-point map g(x) = sqrt(x); its fixed point is x = 1."""
    sqrt_x = np.sqrt(x)
    return sqrt_x
problem_as_fixpoint = NLP(g, xinit)
# -
# ### Equivalent Rootfinding Formulation
# +
def f(x):
    """Rootfinding form of the fixed-point problem.

    Returns ``(f(x), f'(x))`` for ``f(x) = x - sqrt(x)``, as expected by
    the NLP solver.
    """
    sqrt_x = np.sqrt(x)
    return x - sqrt_x, 1 - 0.5 / sqrt_x
problem_as_zero = NLP(f, xinit)
# -
# ### Compute fixed-point using Newton method
t0 = tic()
x1 = problem_as_zero.newton()
t1 = 100 * toc(t0)
n1 = problem_as_zero.fnorm
# ### Compute fixed-point using Broyden method
t0 = tic()
x2 = problem_as_zero.broyden()
t2 = 100 * toc(t0)
n2 = problem_as_zero.fnorm
# ### Compute fixed-point using function iteration
t0 = tic()
x3 = problem_as_fixpoint.fixpoint()
t3 = 100 * toc(t0)
n3 = np.linalg.norm(problem_as_fixpoint.fx - x3)
# ### Print results
print('Hundredths of seconds required to compute fixed-point of g(x)=sqrt(x)')
print('using Newton, Broyden, and function iteration methods, starting at')
print('x = %4.2f\n' % xinit)
print(' Method Time Norm of f x\n', '-' * 40)
ff = '%9s %8.2f %8.0e %5.2f'
print(ff % ('Newton', t1, n1, x1))
print(ff % ('Broyden', t2, n2, x2))
print(ff % ('Function', t3, n3, x3))
| notebooks/slv/03 Compute fixedpoint of f(x) = sqrt(x).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# What is pandas ?
#
# pandas is an open source python library that contains data structures and tools for dealing with data (http://pandas.pydata.org/).
#
# Why pandas ?
#
# pandas is very intuitive and used a dataframe theme to handle data.
# A bit like excel spreadsheet, but many, many times more powerful ad flexible.
#
# Which dataset are we doing ?
#
# The Titanic dataset which consists of details of the majority of passengers who travelled on RMS Titanic.
#
# "The sinking of the RMS Titanic is one of the most infamous shipwrecks in history. On April 15, 1912, during her maiden voyage, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 passengers and crew. This sensational tragedy shocked the international community and led to better safety regulations for ships.
#
# One of the reasons that the shipwreck led to such loss of life was that there were not enough lifeboats for the passengers and crew. Although there was some element of luck involved in surviving the sinking, some groups of people were more likely to survive than others, such as women, children, and the upper-class"
#
#
#
# source: kaggle.com
#
# +
import pandas as pd
#import seaborn as sns
# %matplotlib inline
# -
# `??pd.read_csv` (IPython-only syntax) shows the function's docstring and
# source; it is a SyntaxError in plain Python, so keep it commented out here:
# ??pd.read_csv
# +
# Read in the titanic dataset as a pandas dataframe. Change path_to_file to where the fule is stored in your laptop
my_df = pd.read_csv("Titanic.csv")
# Find out dimensions of the dataframe
my_df.shape
# +
# Look at top five rows of the dataframe
my_df.head()
# -
my_df.columns
# +
# Look at last five rows of our dataframe
my_df.tail()
# -
#Select any column/s by column name
my_df[["Name","Fare"]].head()
my_df.columns
#Select any column by integer position
my_df.iloc[0:3,5:7]
#Select any row by integer position
my_df[0:3]
# +
# Select specific rows that match a given condition
select_df = my_df[my_df["Sex"] == "male"]
select_df.shape
#select_df.tail()
# -
my_df["Sex"].unique()
#Different file types that can be read in (demo), sql, tables, json, etc.
# Tab-complete `pd.read_` in a notebook to list them. The original bare
# `pd.` line here is a SyntaxError when this script runs as plain Python:
# pd.
# +
# How to sort a dataframe
sorted_df = my_df.sort_values(by = ["Name","Age"],inplace = False)
sorted_df.head()
# +
# How to transpose a dataframe
my_df_T = my_df.T
my_df_T.head()
# -
my_df_T.shape
# # Drill Time !
# For each question, you can type in the response below it i.e.open up a new code cell below it and write code there !
my_df.head()
# From this dataset, how many passengers were male ?
#
my_df[my_df["Sex"]=='male'].shape
# From this dataset, how many passengers were female ?
my_df[my_df["Sex"]=='female'].shape
# How many male passengers survived the sinking ? (0 = No, 1 = Yes)
my_df[(my_df["Sex"]=='male') & (my_df['Survived']==1)].shape
# How many female passengers survived the sinking ?
my_df[(my_df["Sex"]=='female') & (my_df['Survived']==1)].shape
# How many people above age 25 were in the ship at the time of sinking ?
my_df[my_df['Age']>25].shape
# How many females below age 50 survived the sinking ?
my_df[(my_df["Sex"]=='female') & (my_df['Survived']==1)& (my_df['Age'] <50)].shape
# How many people who travelled Pclass 1 survived ?
my_df[(my_df["Pclass"]==1) & (my_df['Survived']==1)].shape
# How many people who travelled Pclass 3 survived ?
my_df[(my_df["Pclass"]==3) & (my_df['Survived']==1)].shape
# Can you make a dataframe of all passengers sorted by their age ?
sorted_by_age = my_df.sort_values(by = 'Age')
sorted_by_age.head()
# How many people below age 20 survived the sinking ?
my_df[(my_df["Age"]<20) & (my_df['Survived']==1)].shape
| 2020Fall/INFO6105/Module 1/pandas_I.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from mpl_toolkits import mplot3d
import pandas as pd
x=([[0,1,2,3,4],
[0,1,2,3,4],
[0,1,2,3,4],
[0,1,2,3,4],
[0,1,2,3,4]])
print(x)
y=([[0,1,2,3,4],
[0,1,2,3,4],
[0,1,2,3,4],
[0,1,2,3,4],
[0,1,2,3,4]])
print(y)
plt.plot(x,y, 'o',color='black')
xvalues=np.array([0,1,2,3,4]);
yvalues=np.array([0,1,2,3,4]);
xx,yy = np.meshgrid(xvalues, yvalues)
print(xx,yy)
def f(xvalues, yvalues):
    """Example scalar field used to generate the demo contour data."""
    ripple = np.sin(xvalues) ** 10
    envelope = np.cos(10 + yvalues * xvalues) * np.cos(xvalues)
    return ripple + envelope
zz = f(xx,yy)
print(zz)
np.shape(xx)
np.shape(yy)
np.shape(zz)
x2=xx.flatten()
print(x2)
y2=yy.flatten()
print(y2)
z2=zz.flatten()
print(z2)
data = {'x':x2,
'y':y2,
'z':z2}
df=pd.DataFrame(data)
df
df.plot(kind ='scatter',x='x',y='y',color='grey')
plt.savefig('q4plot.png')
df.plot.scatter(x='x',y='y',c='z', colormap='inferno')
plt.savefig('q5plot.png')
plt.contour(xx,yy,zz,colors='red')
plt.savefig('q6plot.png')
# !head -5 actualbathy.csv
df_bathy = pd.read_csv('actualbathy.csv')
df_bathy
df_bathy.plot.scatter(x='East',y=' North',c=' Elevation', colormap='inferno');
plt.savefig('Q9plot.png')
# FIX: the original first two lines ended with trailing commas, which
# wrapped each list in a 1-tuple and corrupted the 3-D scatter inputs;
# plain lists are intended.
eastlist = list(df_bathy['East'])
northlist = list(df_bathy[' North'])
elevationlist = list(df_bathy[' Elevation'])
# +
ax = plt.axes(projection='3d')
xdata= eastlist
ydata= northlist
zdata= elevationlist
ax.scatter3D(xdata, ydata, zdata, c=zdata, cmap='inferno');
plt.savefig('Q10plot.png')
| diaz/Diaz_PythonBasics3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''covid19-sir'': pipenv)'
# name: python_defaultSpec_1599558922114
# ---
# # Usage: phases
# Here, how to set phases in scenario analysis will be explained. As an example, use the records in India.
# ## Preparation
# +
# Standard users
# # !pip install covsirphy
# -
# Developers (Note: this notebook is in example directory)
import os
os.chdir("../")
# + tags=[]
import covsirphy as cs
cs.__version__
# + tags=[]
# Load datasets
data_loader = cs.DataLoader("input")
jhu_data = data_loader.jhu(verbose=False)
population_data = data_loader.population(verbose=False)
print(jhu_data.citation)
# -
# Create instance of `Scenario` class
snl = cs.Scenario(jhu_data, population_data, country="India", province=None)
# + tags=[]
# Summarize main scenario
snl.summary()
# -
# Note:
# This is empty because no phases are registered. Term "phase" means a sequential dates in which the parameters of SIR-derived models are fixed. The details will be explained later.
# Just for demonstration, we will fix the first/end date of the records here. This is un-necessary for your analysis.
# + tags=[]
# Fix the first date of records
snl.first_date = "01Mar2020"
# Fix the last date of records
snl.last_date = "16Jul2020"
# Summarize (Empty because no phases are registered)
snl.summary()
# -
# ## Set phases with S-R trend analysis
# How can we set phases?
#
# Here, we will use "S-R trend analysis" explained in [S-R trend analysis section in "Kaggle: COVID-19 data with SIR model"](https://www.kaggle.com/lisphilar/covid-19-data-with-sir-model#S-R-trend-analysis). $S$ (Susceptible) and $R$ (Recovered) show the following relationship in SIR-derived models. $S_{(R)}$ means $S$ as a function of $R$.
#
# $$S_{(R)} = N e^{-a R}$$
#
# where $N$ is total population and $a=\cfrac{\beta}{N \gamma}$ is a constant.
# This leads to
# $$\log S_{(R)} = - a R + \log N$$
# $\log S$ decreases constantly with increase of $R$, when the records follow a SIR-derived model and the parameter values of the model are constant. With logarithmic y-axis scale, plot of $(x, y) = (R, S)$ shows a line.
# The slope of the line may change when the parameter values ($\beta$ and $\gamma$) are changed. To detect the dates when the parameter values were changed, we will find the change points of $\log S$ as a function of $R$, using [ruptures: change point detection in Python](https://github.com/deepcharles/ruptures). We call the period from a change point to the next change point as a "phase" in this analysis.
# + tags=[]
# Set phase with S-R trend analysis
snl.trend()
snl.summary()
# If necessary, initial (0th) phase can be disabled
# scenario.disable(phases=["0th"])
# -
# ## Remove un-necessary change points manually
# The slope of 4th phase seems to equal to that of 3rd phase. Here, we will combine 3rd phase and 4th phase here and reset phase names.
# + tags=[]
# Before: 3rd is 12Jun2020-19Jun2020, 4th is 20Jun2020-29Jun2020
# After: 3rd is 12Jun2020-29Jun2020, 4th is the same as old 5th
snl.combine(phases=["3rd", "4th"])
snl.summary()
# -
# Show the trend with manually set change points.
# + tags=[]
snl.trend(force=False)
# -
# ## Separate phases manually
# Because point of $(x, y) = (S, R)$ jumped on 29May2020, we will separate the 1st phase with this change point and reset phase names.
# + tags=[]
# Before: 1st is 20May2020-02Jun2020
# After: 1st is 20May2020-28May2020, 2nd is 29May2020-02Jun2020
snl.separate(date="29May2020", phase="1st")
snl.summary()
# + tags=[]
snl.trend(force=False)
# -
# ## Disable/enable phases manually
# Phases can be disabled for parameter estimation. Disabled phases can be enabled with the `Scenario.enable()` method.
# Disable 0th phase
snl.disable(phases=["0th"])
snl.summary()
snl.trend(force=False)
# Enable 0th phase
snl.enable(phases=["0th"])
snl.summary()
# ## Add phases one by one
# In the previous sections, we started with S-R trend analysis. We can create phases without this analysis when you have a new idea to find change points.
# Clear all phases (include_past removes previously registered ones too)
snl.clear(include_past=True).summary()
# Set 0th phase to set the start date of 1st phase
snl.add(end_date="19May2020")
# Add 1st phase and disable 0th phase
snl.add(end_date="28May2020").disable(phases=["0th"])
# Add 2nd phase
snl.add(end_date="02Jun2020")
# Add 3rd phase
snl.add(end_date="11Jun2020")
# Add 4th phase
snl.add(end_date="29Jun2020")
# Add 5th phase to the last of the records (no end_date = run to last date)
snl.add().summary()
| docs/usage_phases.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:AKSDeploymentKeras]
# language: python
# name: conda-env-AKSDeploymentKeras-py
# ---
# # Test deployed web application
# This notebook pulls some images and tests them against the deployed web application on AKS.
# +
import json

import matplotlib.pyplot as plt
import numpy as np
import requests

from testing_utilities import *
# %matplotlib inline
# -
# Get the external url for the web application running on AKS cluster.
# Ask kubectl for the azure-dl service definition (captured as JSON lines).
# service_json = !kubectl get service azure-dl -o json
service_dict = json.loads(''.join(service_json))
# External IP assigned by the cluster's load balancer.
app_url = service_dict['status']['loadBalancer']['ingress'][0]['ip']
# Quickly check if the web application is working.
scoring_url = 'http://{}/score'.format(app_url)
version_url = 'http://{}/version'.format(app_url)
health_url = 'http://{}/'.format(app_url)
# !curl $health_url
# !curl $version_url # Reports the tensorflow version
# Pull an image of a Lynx to test it with.
IMAGEURL = "https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Lynx_lynx_poing.jpg/220px-Lynx_lynx_poing.jpg"
plt.imshow(to_img(IMAGEURL))
jsonimg = img_url_to_json(IMAGEURL)
headers = {'content-type': 'application/json'}
r = requests.post(scoring_url, data=jsonimg, headers=headers) # Run the request twice since the first time takes a
# %time r = requests.post(scoring_url, data=jsonimg, headers=headers) # little longer due to the loading of the model
print(r)
r.json()
# From the results above we can see that the model correctly classifies this as an Lynx.
# Let's try a few more images.
images = ('https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Lynx_lynx_poing.jpg/220px-Lynx_lynx_poing.jpg',
'https://upload.wikimedia.org/wikipedia/commons/3/3a/Roadster_2.5_windmills_trimmed.jpg',
'http://www.worldshipsociety.org/wp-content/themes/construct/lib/scripts/timthumb/thumb.php?src=http://www.worldshipsociety.org/wp-content/uploads/2013/04/stock-photo-5495905-cruise-ship.jpg&w=570&h=370&zc=1&q=100',
'http://yourshot.nationalgeographic.com/u/ss/fQYSUbVfts-T7pS2VP2wnKyN8wxywmXtY0-FwsgxpiZv_E9ZfPsNV5B0ER8-bOdruvNfMD5EbP4SznWz4PYn/',
'https://cdn.arstechnica.net/wp-content/uploads/2012/04/bohol_tarsier_wiki-4f88309-intro.jpg',
'http://i.telegraph.co.uk/multimedia/archive/03233/BIRDS-ROBIN_3233998b.jpg')
results = [requests.post(scoring_url, data=img_url_to_json(img), headers=headers) for img in images]
plot_predictions_dict(images, results)
# The labels predicted by our model seem to be consistent with the images supplied.
# Next let's quickly check what the request response performance is for the deployed model on AKS cluster.
image_data = list(map(img_url_to_json, images))  # Retrieve the images and data
timer_results = list()
for img in image_data:
    # The %timeit magic below binds its timing result object to `res`
    # (-r 1: one repeat, -o: return result, -q: quiet).
    # res=%timeit -r 1 -o -q requests.post(scoring_url, data=img, headers=headers)
    timer_results.append(res.best)
timer_results
# Average best-of-run latency in milliseconds across all test images.
print('Average time taken: {0:4.2f} ms'.format(10**3 * np.mean(timer_results)))
# We have tested that the model works and we can now move on to the [next notebook to get a sense of its throughput](06_SpeedTestWebApp.ipynb).
| Keras_Tensorflow/05_TestWebApp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8
# language: python
# name: python3.8
# ---
# # OPEN-CV PROJECT: LIS
# # Import all dependencies
import cv2
import time
import mediapipe as mp
#import tensorflow
import numpy as np
import os
from matplotlib import pyplot as plt
# # Holistic model
mp_holistic = mp.solutions.holistic
mp_drawing = mp.solutions.drawing_utils
def mediapipe_detection(image, model):
    """Run one MediaPipe inference pass on a BGR frame.

    Converts the frame to RGB (MediaPipe's expected format), marks it
    read-only while the model processes it (lets MediaPipe avoid a copy),
    then converts back to BGR for OpenCV display.
    Returns the BGR image and the detection results.
    """
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    rgb.flags.writeable = False
    results = model.process(rgb)
    rgb.flags.writeable = True
    bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
    return bgr, results
# +
def draw_landmarks(image, results):
    """Overlay every detected landmark group (face, pose, hands) on the image in place."""
    landmark_groups = (
        (results.face_landmarks, mp_holistic.FACEMESH_TESSELATION),
        (results.pose_landmarks, mp_holistic.POSE_CONNECTIONS),
        (results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS),
        (results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS),
    )
    for landmarks, connections in landmark_groups:
        mp_drawing.draw_landmarks(image, landmarks, connections)
def draw_landmarks_custom(image, results):
    """Overlay landmarks with custom colors and label the hands.

    Face mesh is drawn thin in red, the pose in green, the left hand in
    blue and the right hand in red (thicker markers), followed by
    color-matched "Left Hand"/"Right Hand" text labels.
    """
    mp_drawing.draw_landmarks(image, results.face_landmarks, mp_holistic.FACEMESH_TESSELATION,
                              mp_drawing.DrawingSpec(color=(0,0,255),thickness=1, circle_radius=1),
                              mp_drawing.DrawingSpec(color=(0,0,255),thickness=1, circle_radius=1),
                              )
    mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS,
                              mp_drawing.DrawingSpec(color=(80,110,10),thickness=1, circle_radius=1),
                              mp_drawing.DrawingSpec(color=(80,256,121),thickness=1, circle_radius=1),
                              )
    mp_drawing.draw_landmarks(image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS,
                              mp_drawing.DrawingSpec(color=(255,0,0),thickness=3, circle_radius=5),
                              mp_drawing.DrawingSpec(color=(255,0,0),thickness=3, circle_radius=5),
                              )
    mp_drawing.draw_landmarks(image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS,
                              mp_drawing.DrawingSpec(color=(0,0,255),thickness=3, circle_radius=5),
                              mp_drawing.DrawingSpec(color=(0,0,255),thickness=3, circle_radius=5),
                              )
    # BUG FIX: these labels were drawn on the global `frame` instead of the
    # `image` argument, so the function silently depended on the caller's
    # global state. Draw on the argument the rest of the function uses.
    cv2.putText(image, 'Left Hand', (15,132), cv2.FONT_HERSHEY_SIMPLEX, 1 ,(255,0,0), 4, cv2.LINE_AA)
    cv2.putText(image, 'Right Hand', (15,232), cv2.FONT_HERSHEY_SIMPLEX, 1 ,(0,0,255), 4, cv2.LINE_AA)
# -
# Live webcam preview with the custom landmark overlay; press "q" to quit.
cap = cv2.VideoCapture(0)
with mp_holistic.Holistic(min_detection_confidence=0.5,
                          min_tracking_confidence=0.5) as holistic:
    while cap.isOpened():
        #black = np.zeros((1080, 1920, 3))
        ret, frame = cap.read()
        #frame = cv2.flip(frame,1)
        # make detection
        image, results = mediapipe_detection(frame, holistic)
        #print(results)
        # NOTE(review): landmarks are drawn on the raw `frame`; the converted
        # `image` returned by mediapipe_detection is unused here.
        draw_landmarks_custom(frame, results)
        cv2.imshow('LIS', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
cap.release()
cv2.destroyAllWindows()
# # Extract point for training and prediction
def points_detection(results):
    """Flatten holistic landmarks into a single 1-D feature vector.

    Layout: 21 left-hand points, 21 right-hand points, 33 pose points,
    each as (x, y, z) — 63 + 63 + 99 = 225 values in total. Landmark
    groups that were not detected are zero-filled so the vector length
    is constant.
    """
    def _flatten(group, size):
        # A missing detection comes through as None/falsy -> zero vector.
        if not group:
            return np.zeros(size)
        return np.array([[p.x, p.y, p.z] for p in group.landmark]).flatten()

    left = _flatten(results.left_hand_landmarks, 21 * 3)
    right = _flatten(results.right_hand_landmarks, 21 * 3)
    pose = _flatten(results.pose_landmarks, 99)
    return np.concatenate([left, right, pose])
# # Setup Folders for dataset
# Root folder where recorded keypoint sequences would be stored.
DATA_PATH = os.path.join('LIS_data')
labels = np.array(['a', 'b', 'c']) # put the entire alphabet in the future
no_sequences = 101 # the first one will be skipped
# NOTE(review): name is misspelled ("lenght") and appears unused below.
sequence_lenght = 30
# +
#for label in labels:
# for sequence in range(no_sequences):
# try:
# os.makedirs(os.path.join(DATA_PATH, label, str(sequence)))
# except:
# pass
# -
# # Collect data for training
# +
# Record keypoint vectors per label from the webcam. Frame 0 of each label
# shows a "STARTING COLLECTION" banner and is skipped (hence no_sequences-1
# usable rows per label); the rest are appended to `data`.
data = []
cap = cv2.VideoCapture(0)
with mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:
    for label in labels:
        # NOTE(review): loop variable `id` shadows the builtin of the same name.
        for id in range(no_sequences):
            ret, frame = cap.read()
            # make detection
            image, results = mediapipe_detection(frame, holistic)
            #print(results)
            draw_landmarks_custom(frame, results)
            if id == 0:
                cv2.putText(frame, 'STARTING COLLECTION', (120,200), cv2.FONT_HERSHEY_SIMPLEX, 1 ,(0,255,0), 4, cv2.LINE_AA)
                #cv2.putText(frame, f'Collecting frame {id} for {label}', (15,32), cv2.FONT_HERSHEY_SIMPLEX, 1 ,(0,255,0), 4, cv2.LINE_AA)
                cv2.imshow('LIS', frame)
                # 2 s pause so the user can get into position for this label.
                cv2.waitKey(2000)
            else:
                data.append(points_detection(results))
                cv2.putText(frame, f'Collecting frame {id} for {label}', (15,32), cv2.FONT_HERSHEY_SIMPLEX, 1 ,(0,255,0), 4, cv2.LINE_AA)
                cv2.waitKey(100)
                cv2.imshow('LIS', frame)
            if cv2.waitKey(10) & 0xFF == ord('q'):
                break
cap.release()
cv2.destroyAllWindows()
# -
import pandas as pd
# NOTE(review): the two lines below load a previous data.csv, but `df` is
# immediately rebuilt from the in-memory `data` list, so the loaded frame
# is discarded — this looks like a leftover from an earlier session.
df =pd.read_csv('data.csv')
df = df.iloc[:,1:]
import pandas as pd
# Build the training table: one row per captured frame (225 features) plus
# a label column, then persist it to data.csv.
df = pd.DataFrame(np.array(data))
y=[]
for i in labels:
    # no_sequences-1 rows per label (frame 0 of each label was skipped).
    y = np.concatenate([y, [i] * (no_sequences-1)])
df['y'] = y
pd.DataFrame(df).to_csv('data.csv')
df
# # Model
from numpy import loadtxt
from keras.models import Sequential
from keras.layers import Dense
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from keras.callbacks import TensorBoard
from sklearn.preprocessing import OneHotEncoder, StandardScaler, LabelEncoder
import datetime
from datetime import datetime
# +
model_name = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
tensorboard = TensorBoard(
log_dir="logs/scalars/" + model_name,
histogram_freq=0,
write_graph=True,
write_images=True,
profile_batch=0)
# -
label_map = {label:num for num, label in enumerate(labels)}
label_map
# +
# split into input (X) and output (y) variables
X = np.array(df.iloc[:,0:-1])
y = np.array(df.iloc[:,-1])
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(y)
#print(integer_encoded)
# binary encode
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
y = onehot_encoder.fit_transform(integer_encoded)
# invert first example
#inverted = label_encoder.inverse_transform([argmax(onehot_encoded[0, :])])
#print(inverted)
# -
# define the keras model
model = Sequential()
# 225 inputs = feature vector length produced by points_detection.
model.add(Dense(32, input_dim=225, activation='relu'))
model.add(Dense(16, activation='relu'))
#model.add(Dense(16, activation='relu'))
#model.add(Dense(16, activation='relu'))
# One softmax output per letter in `labels`.
model.add(Dense(labels.shape[0], activation='softmax'))
# compile the keras model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])
# fit the keras model on the dataset
model.fit(X, y, epochs=100, batch_size=20, callbacks=[tensorboard], validation_split=0.2)
labels[np.argmax(model.predict(X)[233])]
# # Save models
# serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model.h5")
print("Saved model to disk")
# # Evaluate model
from sklearn.metrics import multilabel_confusion_matrix, accuracy_score
yhat = model.predict(X)
ytrue = np.argmax(y, axis=1).tolist()
yhat = np.argmax(yhat, axis=1).tolist()
multilabel_confusion_matrix(ytrue, yhat)
accuracy_score(ytrue, yhat)
# # Testing
# +
from keras.models import model_from_json
# load json and create model
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("model.h5")
print("Loaded model from disk")
# +
# Live recognition loop: grab a frame, extract keypoints, classify, and
# overlay the predicted letter with its confidence. Press "q" to quit.
threshold = 0.1  # minimum confidence (fraction) required to show a letter
cap = cv2.VideoCapture(0)
with mp_holistic.Holistic(min_detection_confidence=0.5,
                          min_tracking_confidence=0.5) as holistic:
    while cap.isOpened():
        ret, frame = cap.read()
        #frame = cv2.flip(frame,1)
        # make detection
        image, results = mediapipe_detection(frame, holistic)
        #print(results)
        # BUG FIX: predict once per frame (the model was previously run twice
        # on identical input) and use `loaded_model`, which this "Testing"
        # section loads from disk above but never actually used.
        probabilities = loaded_model.predict(np.array([points_detection(results)]))
        prediction = labels[np.argmax(probabilities)]
        pred_prob = int(np.max(probabilities) * 100)
        if pred_prob > int(threshold * 100):
            cv2.putText(frame, f'Lettera: {prediction} ({pred_prob}%)',
                        (120, 200),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        2,
                        (0, 255, 0),
                        4,
                        cv2.LINE_AA)
        else:
            # Typo fix in the on-screen label: "Uknown" -> "Unknown".
            cv2.putText(frame, f'Lettera: Unknown',
                        (120, 200),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        2,
                        (0, 255, 0),
                        4,
                        cv2.LINE_AA)
        #frame = cv2.flip(frame,1)
        #draw_landmarks_custom(frame, results)
        cv2.imshow('LIS', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
cap.release()
cv2.destroyAllWindows()
# -
| jupyter/LIS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="-N59E9S4BzSr"
# ##### Copyright 2020 The Cirq Developers
# + cellView="form" id="_SAdH7I0B2rz"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="NZv6mLU-B_b0"
# # Get started with Quantum Computing Service
# + [markdown] id="vBsLfco2CD12"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://quantumai.google/cirq/tutorials/google/start"><img src="https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png" />View on QuantumAI</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/quantumlib/Cirq/blob/master/docs/tutorials/google/start.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/quantumlib/Cirq/blob/master/docs/tutorials/google/start.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/github_logo_1x.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/Cirq/docs/tutorials/google/start.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/download_icon_1x.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] id="fe7e28f44667"
# ## Setup
# Note: this notebook relies on unreleased Cirq features. If you want to try these features, make sure you install cirq via `pip install cirq-google --pre`.
# + id="846b32703c5c"
try:
import cirq
import cirq_google as cg
except ImportError:
print("installing cirq-google...")
# !pip install --quiet cirq-google --pre
print("installed cirq-google.")
import cirq
import cirq_google as cg
# + [markdown] id="So5iLqkNPnFm"
# Quantum Computing Service enables researchers to run quantum programs on Google's quantum processors. This notebook is a tutorial to get you started with the typical setup, using the open source Python framework [Cirq](https://github.com/quantumlib/Cirq), in the free cloud Jupyter notebook environment, [Google Colab](https://colab.research.google.com/).
#
# Access is currently restricted to those in an approved group, and you must be in that group before running this tutorial.
#
# You can find more about running this in colaboratory in the [Colab documentation](https://colab.research.google.com/notebooks/welcome.ipynb) or in our Cirq-specific guide to [running in Colab](colab.ipynb). You can download this notebook from the [GitHub repository](https://github.com/quantumlib/Cirq/blob/master/docs/tutorials/google/start.ipynb).
# + [markdown] id="da8NOLsZqLsU"
# ## Before you begin
#
# * First, decide which project you will use the Quantum Computing Services from. All of your quantum programs and results will live under a project which you specify when creating and running these programs using Quantum Engine. You can use an existing project or create a new project. [Learn more about creating a project](https://cloud.google.com/docs/overview/).
# * Log in and agree to [Terms of Service](https://cloud.google.com/terms/).
# * Follow this link to [**enable the Quantum Engine API**](https://console.cloud.google.com/apis/library/quantum.googleapis.com?returnUrl=quantum) in your Google Cloud Platform project.
#
# After the API is enabled, you should be redirected to the [Quantum Engine console](https://console.cloud.google.com/quantum) and it should look like the following screenshot.
#
# 
#
# **Enter your project id into the input text box below**. To find your project id, click on the project menu in the blue bar at the top of the console. This will open a menu that displays your project name (e.g. "My project") and unique **project id** (e.g. my-project-1234). Enter the **project id** into the input below. ([Help on finding your project id](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects).)
#
# 
#
# **Run the code in the next block (the one with the text box)**, which will prompt you to authenticate Google Cloud SDK to use your project. You can run the code by either clicking the play button (pointed by arrow below) or by selecting the block and pressing CTRL-ENTER. After running the block you will see a link which you should click. This will open a new browser window. Follow the authentication flow for this window. After you authenticate and allow access to your project, you will be given a string which you should enter into the text box that appears in the run area (and then press return). If you see "Authentication complete" you have done this step successfully. If this fails, make sure that you have cut and paste the string correctly (e.g. the clipboard button seems to not work for some browser/OS combinations).
#
# 
# + cellView="form" id="YoqI9GrOPExP"
# The Google Cloud Project id to use.
project_id = "" #@param {type:"string"}
processor_id = "pacific" #@param {type:"string"}
from cirq_google.engine.qcs_notebook import get_qcs_objects_for_notebook
device_sampler = get_qcs_objects_for_notebook(project_id, processor_id)
if not device_sampler.signed_in:
raise Exception("Please setup project_id in this cell or set the `GOOGLE_CLOUD_PROJECT` env var to your project id.")
# + [markdown] id="ocy3JossWU57"
# **Authentication details** Double clicking on the project_id block above should expose the code that is run when you run this code block. This code uses the [colabtools](https://github.com/googlecolab/colabtools/blob/master/google/colab/auth.py) auth module to ensure that *Application Default Credentials* are set and then creates a variable `colab_auth` which can be used in Cirq to authenticate your calls to Quantum Computing Service.
#
# If you are going to run code outside of colab and want to authenticate, see the below section on running from the command-line.
# + [markdown] id="47n3bsx3Nl9g"
# ## Create a circuit
# Now that you've enabled Quantum Computing Service and configured the notebook, let's create a basic program with Cirq. After reviewing the code, **run this block** to run a circuit, and print a circuit diagram and results. To learn more, refer to the [Cirq overview](https://quantumai.google/cirq) and [Cirq basics](../basics.ipynb) pages.
# + cellView="both" id="EQoTYZIEPa9S"
# Define a qubit at an arbitrary grid location.
qubit = cirq.GridQubit(0, 0)
# Create a circuit (qubits start in the |0> state).
circuit = cirq.Circuit(
cirq.X(qubit), # NOT gate.
cirq.measure(qubit, key='result') # Measurement.
)
print("Circuit:")
print(circuit)
# + [markdown] id="BJIWCdhKgVo7"
# ## Simulate the circuit using Cirq
#
# Let's quickly use Cirq to simulate the circuit above.
# + id="TW_zU_pagVP0"
# Simulate the circuit, repeating 1000 times.
print("Simulating circuit using Cirq...\n")
results = cirq.sample(circuit, repetitions=1000)
print("Measurement results:")
print(results)
# + [markdown] id="4ZK4GQWmyo-u"
# ## Run on quantum hardware
# Approved users can access quantum hardware in two modes. First, all approved users have access to a processor in "open-swim" which is a first-in-first-out queue with fairness algorithm that balances jobs across users in the queue. Secondly, processors can be reserved in hourly blocks if the user is approved. You can learn more about the reservation system on the [concepts page](../../google/concepts.ipynb). We'll use the processor `pacific` in this demo.
# + [markdown] id="uz0xSlml3fkA"
# ### Create a Quantum Engine client
# Interactions with hardware are facilitated by the Quantum Computing Service. A client must first be initialized with your Google Cloud project to perform these interactions.
# + id="V40sPIi63f0I"
# Create an Engine client.
engine = cg.get_engine()
# + [markdown] id="FS42e_sx0Hqw"
# ### View the processor's topology
# Each processor has a set of available qubits laid out on a grid with limited couplings between qubits. The device specification can be printed to inspect the topology of a processor.
# + id="O-Jrib9y1TFY"
processor = engine.get_processor(processor_id)
# Print the device showing qubit connectivity.
device = processor.get_device()
print(device)
# + [markdown] id="bJ7ePCCNrh82"
# Note that the qubit that we used for the simulation above, `(0, 0)`, does not exist on the hardware. Since the grid of available qubits may change over time, we'll programmatically select a valid qubit by inspecting `device.qubits`. We then use the `transform_qubits()` method to remap the circuit onto that qubit.
#
# In order to run on hardware, we must also ensure that the circuit only contains gates that the hardware supports. The basic gates used here are always available, so this circuit can be run without any further changes, but in general you may need to apply additional transformations before running arbitrary circuits. See the [best practices](../../google/best_practices.md) guide for more information about running circuits on hardware.
# + id="ma8JDhTUR389"
valid_qubit = device.qubits[0]
# Transform circuit to use an available hardware qubit.
hw_circuit = circuit.transform_qubits(lambda q: valid_qubit)
print(hw_circuit)
# + [markdown] id="TH4vtUeq2ryV"
# ### Create a job on the Quantum Engine
#
# Cirq circuits are represented in the Quantum Computing Service as _Programs_. To run a _Program_, you must create a _Job_ that specifies details about the execution, _e.g._ the processor to use and the number of times to repeat the experiment. This enables a single circuit to be run multiple times in different configurations. For a one-off use, these steps can be done together by using the `engine.run_sweep` utility to create both a _Program_ and a _Job_.
#
# A new _Job_ will be scheduled on the requested hardware as it becomes available. The execution of your _Job_ will likely be completed within a few seconds and the results will be displayed below. The output will include a link to the [console](https://console.cloud.google.com/quantum), where you can view the status and results of your jobs.
# + id="-a0I0cbGyivS"
print("Uploading program and scheduling job on the Quantum Engine...\n")
# Upload the program and submit jobs to run in one call.
job = engine.run_sweep(
program=hw_circuit,
repetitions=10000,
processor_ids=[processor.processor_id])
print("Scheduled. View the job at: https://console.cloud.google.com/quantum/"
"programs/{}?&project={}".format(job.program_id, project_id))
# Print out the results. This blocks until the results are returned.
results = job.results()
print("\nMeasurement results:")
for result in results:
print(result)
# + [markdown] id="3u4dLfDugI9l"
# ## Running from the command line
#
# If you are planning to access Quantum Computing Service from the command line, follow these instructions to get started. If you plan on executing all of your programs from an ipython notebook, you can skip this section.
#
# ### Setup Cirq
#
# Follow the [Cirq Install](../../install.md) page to install Cirq locally. We highly recommend that you setup a virtual environment for this installation to isolate your development stack from your overall system installations. Make sure to setup the virtual environment for Python 3 and not Python 2.
#
# ### Setup Google Cloud authentication
#
# In this quickstart we will authenticate using the gcloud command line cool. To do this, one must first install gcloud. Follow the instructions for this at https://cloud.google.com/sdk/install. We will authenticate using Application Default Credentials. To do this simply run the following on your shell command line
#
# `gcloud auth application-default login`
#
# This will open up a browser window or give you a link to a webpage you can navigate to in order to go through an authentication flow. Complete this using your Google account. After this command is run, credentials will be stored on your local machine. If at any point you want to revoke these credentials you can run `gcloud auth application-default revoke`.
# + [markdown] id="quSdrNseg8LC"
# ### Write and run a short quantum program
#
# Using your favorite IDE or editor, read and then paste the following hello_qubit program into a file called `hello_qubit.py`. Make sure to replace the `'your-project-id'` string with the project id you created above.
#
# + id="4RXHE070gleY"
import cirq
import cirq_google as cg
def example():
    """Hello qubit example run against a quantum processor."""
    # Define a qubit.
    qubit = cirq.GridQubit(5, 2)
    # Create a circuit (qubits start in the |0> state).
    circuit = cirq.Circuit(
        cirq.X(qubit)**0.5, # Square root of NOT.
        cirq.measure(qubit, key='result') # Measurement.
    )
    # Create an Engine object. This uses the project id of your
    # Google cloud project.
    project_id = 'your-project-id'
    engine = cg.Engine(project_id=project_id)
    print("Uploading program and scheduling job on Quantum Engine...\n")
    # Upload the program and submit jobs to run in one call.
    # NOTE(review): `processor` is not defined inside this snippet — it comes
    # from the notebook cell above (engine.get_processor). When pasting this
    # into a standalone hello_qubit.py as instructed, replace
    # `processor.processor_id` with your processor's id string or the script
    # will raise a NameError.
    job = engine.run_sweep(
        program=circuit,
        repetitions=1000,
        processor_ids=[processor.processor_id])
    print("Scheduled. View the job at: https://console.cloud.google.com/quantum/"
          f"programs/{job.program_id}/jobs/{job.job_id}"
          f"/overview?project={project_id}")
    # Print out the results. This blocks until the results are returned.
    results = job.results()
    print("\nMeasurement results:")
    for result in results:
        print(result)
if __name__ == '__main__':
    example()
# + [markdown] id="TjYi3VOTgyvh"
# You should then be able to run this program from your command line using:
#
# `python hello_qubit.py`
#
# The output should be something like:
#
# ```
# Uploading program and scheduling job on Quantum Engine...
#
# Scheduled. View the job at: https://console.cloud.google.com/quantum/programs/example-T5K9Y9/jobs/job-0?mods=quantum_ng2&project=quantum-cloud-client
#
# Measurement results:
# 1010101010111011000011101010101011010011100111110001101101101011100011111010100100011000010101110010011000100001110000100010011100111101001101101010000000111101010100001001101110101010110000010011110101100000101111101000010000000100010111010101110001110101101100010001101111100110001001010000100000110110100000111111110011100101111000010110010101100010111111100010001100111011010101111011000110100001010000011100000100101010111110111010011010100011011111110110011101010001000101000011101000111000001001101000110000111111000111100001001000010101011111000111111010100001011001010011011000010110111010001011000001011110001010110100001101011101110011111100100101010011100010010001100101100001101010010011001010100011010010011000010010001010111101001000011000101111011111100111111111101001010010001010000101011101100100001101011100101010101110111001101000100101111101100000100010000101000011110110101001101100000000000111000000111011101101110000001110110001111011000100111111101110111111101001110110000110
# ```
#
# + [markdown] id="WCKQ1lIJSgJ7"
# ## Next steps
# * Use [this template colab](colab.ipynb) as a base for your own explorations.
# * Explore [best practices](../../google/best_practices.md) for getting circuits to run on hardware.
#
| docs/tutorials/google/start.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction aux outils numériques
#
# Nous allons faire un survol de quelques commandes que nous utiliserons dans les autres TP numériques.
#
# ## Objectifs de cette séance
#
# Comme vous avez tous une formation de base en Matlab, nous allons nous appuyer sur cela pour réaliser quelques opérations simples que nous employerons sur des problèmes d'aérodynamique par la suite.
#
# Réalisez les exercices avec Matlab et dans ce notebook pour vous familiariser avec les commandes python.
#
# ## Modules
#
# Comme Matlab, Python est langage interprété. On s'appuie sur des fonctions pré-compilées pour rendre les opérations de calcul efficace.
#
# Contrairement à Matlab, les modules ne sont pas chargés automatiquement. Il faut donc toujours importer ce dont on a besoin. Dans le cadre de ce cours, nous aurons besoin essentiellement de `numpy` et `matplotlib`.
#
# [NumPy](https://numpy.org) est une bibliothèque d'objets et de fonctions adaptés au calcul vectoriel c'est-à-dire à des opérations sur des listes ou tableaux (N-dimensions) homogènes de nombres (_array_). Grace à cette bibliothèque, les opérations principales que l'on réalise avec Matlab sont réalisable sous Python. Souvent les fonctions ont le même nom.
#
# [MatplotLib](https://matplotlib.org/) est une bibliothèque d'objets et de fonctions pour réaliser des représentations graphiques 2D comme dans Matlab (et certainement même plus !). On utilise le sous-module `pyplot` qui contient tout ce dont on a besoin.
#
# Pour charger un module, on utilise la fonction `import` et on lui donne un petit nom pour raccourcir nos appels.
import numpy as np
import matplotlib.pyplot as plt
# Pour avoir des graphiques de bonne qualité, nous utilisons les paramètres suivants.
plt.rcParams['figure.autolayout'] = True
plt.rcParams['figure.dpi'] = 100
plt.rcParams['font.size'] = 12
# Voilà vous êtes prêt !
# ## Exercice 1 - Intersection de deux droites
#
# On souhaite calculer le point d'intersection de deux droites définies par les équations suivantes :
# \begin{equation}
# y = 2 x -5 \hspace{2cm} \text{et} \hspace{2cm}
# y = -x + 3
# \end{equation}
#
# 1. Construisez les tableaux suivants :
# * `x` de 20 points uniformément répartis de -5 à 5. Utilisez la fonction `linspace` de `numpy`.
# * `y1` et `y2` les ordonnées des points aux abscisses `x` des points sur les droites 1 et 2.
# 2. Tracez les deux droites ainsi définies :
# * La droite 1 en bleu, la droite 2 en rouge et en pointillés.
# * Affichez une grille, une légende et des labels sur les axes.
# 3. Définissez le problème matriciel $M X = B$ à résoudre pour trouver l'intersection des droites et créez les tableaux numpy `M` et `B`. Utilisez la fonction `array`, vérifiez les formes avec `shape` et utilisez `reshape` pour corriger.
# 4. Résolvez le problème en utilisant le sous-module d'algèbre linéaire [`linalg`](https://numpy.org/doc/stable/reference/routines.linalg.html#solving-equations-and-inverting-matrices) de `numpy`. Le produit matriciel (`dot`) est implémenté avec le symbole `@`
# 5. Reprennez la figure de la question 2 et ajoutez le point d'intersection avec un symbole vert. Si l'intersection n'est pas correcte, vérifiez vos matrices et leurs formes. Enregistrez la figure sous un format image.
# ## Exercice 2 - Les profils NACA 4 chiffres
#
# Les profils NACA sont des formes d'aile testées par le [National Advisory Committee for Aeronautics](https://fr.wikipedia.org/wiki/National_Advisory_Committee_for_Aeronautics). C'est des profils dont la forme correspond à une équation mathématique dont les formes sont paramétrées et dont les coefficients aérodynamiques en fonction de l'incidence (angle d'attaque) ont été mesurés de manière systématique en soufflerie.
#
# Ici on s'intéressera aux profils dits 4 chiffres symmétrique `NACA00XX`. Il sont définis par 2 paramètres :
# * La corde du profil $c$ (distance bord d'attaque/bord de fuite).
# * L'épaisseur maximale du profil est caractérisée par le paramètre $t$ qui est un pourcentage de la corde :
# \begin{equation}
# t = \frac{\texttt{XX}}{100}
# \end{equation}
#
# On définit la coordoonnée $x_c$ qui varie de 0 à 1 le long de la corde. La demi-épaisseur du profil $y_t$ (distance entre ses deux côtés) est alors paramétrée en fonction de cette coordonnée par l'équation suivante :
# \begin{equation}
# y_t = 5 t \left( 0.2969 \sqrt{x_c} - 0.1260 \, x_c - 0.3516 \, x_c^2 + 0.2843 \, x_c^3 - 0.1036 \, x_c^4\right)
# \end{equation}
#
# Ainsi un profil dont la forme pourra être obtenue grace au deux courbes (redimmensionnées):
#
# \begin{align}
# x_{up} &= x_c \, c & et \hspace{1cm} y_{up} &= + y_t \, c \\
# x_{down} &= x_c \, c & et \hspace{1cm} y_{down} &= - y_t \, c
# \end{align}
#
# Pour les questions suivantes, n'hésitez pas à contacter l'enseignant. Les questions sont choisies pour vous faire réfléchir et découvrir des options des modules.
# 1. Construisez et tracez le profil NACA007 de corde 12 cm avec 15 points de chaque côté. Déterminez l'épaisseur maximum obtenue la position sur la corde où ce point est atteint. Que pouvez vous dire ?
# +
# Parametres du profil
# Parametre de discretisation
# +
# Coordonnee de corde
# Epaisseur
# +
# Extrados
# Intrados
# -
# Epaisseur max
# Trace dimensionnel et adimensionne par la corde
# 2. Créez une fonction qui permette d'obtenir les coordonnées dimensionnées à partir des paramètres du profil. Vous pourrez utiliser un dictionnaire pour retourner les tableaux nécessaires. Tracez sur un même graphique les profils `NACA0005`, `NACA0012` et `NACA0017` de corde 10 cm, 5 cm et 7.5 cm respectivement.
def NACA00_shape(chiffres34,corde,npts=15):
    """Return the dimensioned coordinates of a symmetric NACA00XX profile.

    (Exercise stub -- to be completed by the student, see question 2.)

    Parameters
    ----------
    chiffres34 : int
        Last two digits XX of the NACA00XX designation, i.e. the maximum
        thickness as a percentage of the chord.
    corde : float
        Chord length (the returned coordinates share its unit).
    npts : int, optional
        Number of points per side of the profile (default 15).

    Returns
    -------
    Currently ``None``; intended to return the upper/lower surface
    coordinates, e.g. gathered in a dictionary.
    """
    return
# +
# Traces
# -
# 3. Que peut-on faire pour rendre les tracés plus lisses ? Réalisez un zoom sur les 5 premiers millimètres de corde et commentez.
# +
# Nouveau trace
# -
# 4. La forme du bord d'attaque et du bord de fuite ont un rôle crucial sur la portance d'un profil aérodynamique. Il est parfois utile d'utiliser une distribution de points non uniforme pour calculer les efforts aérodynamiques.
# Reprennez la fonction de la question 2 et ajoutez une option pour avoir une distribution cosinusoidale selon :
# \begin{align}
# x_c = \frac{1}{2}\left(1 - \cos{\zeta}\right) \hspace{1cm} \text{avec : } \hspace{0.2cm} \zeta \in [0, \pi]
# \end{align}
def NACA00_shape_improved(chiffres34,corde,npts=15,distri='cos'):
    """Like NACA00_shape, with a selectable chordwise point distribution.

    (Exercise stub -- to be completed by the student, see question 4.)

    Parameters
    ----------
    chiffres34 : int
        Last two digits XX of the NACA00XX designation.
    corde : float
        Chord length.
    npts : int, optional
        Number of points per side (default 15).
    distri : str, optional
        'cos' selects the cosine distribution x_c = (1 - cos(zeta))/2
        with zeta in [0, pi]; presumably any other value falls back to a
        uniform distribution -- TODO confirm the intended convention.

    Returns
    -------
    Currently ``None``; intended to return the dimensioned coordinates.
    """
    return
# 5. Pour les 3 mêmes profils de la question 2, avec une discretisation cosinusoidale de 41 points, réalisez un zoom sur les 5 premiers millimètres de corde et comparez à la distribution uniforme avec le même nombre de points. Exportez la figure en image.
# +
# Traces cote a cote
fig, ax = plt.subplots(1,2,constrained_layout=True,tight_layout=False,figsize=(8,4))
plt.show()
# -
# ## Exercice 3 - Calcul symbolique
#
# Si vous n'avez pas de TI, sachez que le module [SymPy](https://www.sympy.org) vous permet de faire du calcul symbolique. Nous allons voir quelques fonctions de base qui pourront vous être utiles pour la théorie 2D et 3D des profils.
#
# À nouveau tout d'abord il faut l'importer. On active l'affichage sous forme formel.
import sympy as sb
sb.init_printing()
# On souhaite calculer la dérivée et la primitive de la fonction suivante :
# \begin{align}
# \int{{\left(\cos{\theta}\right)}^3 \, d\theta}
# \end{align}
# 1. Les variables formelles sont des objets de type `Symbol`. Définissez le symbole $\theta$.
theta = sb.Symbol('theta')
theta
# 2. Calculez la dérivée première et seconde de ${\left(\cos{\theta}\right)}^3$ à l'aide de la fonction `diff`
f = sb.cos(theta)**3
fp = sb.diff(f,theta)
fp
fpp = sb.diff(fp,theta)
fpp
fpp_2 = sb.diff(f,theta,2)
fpp_2
# 3. Calculez la primitive de ${\left(\cos{\theta}\right)}^3$ à l'aide de la fonction `integrate`
I = sb.integrate(f,theta)
I
| enonces/introduction/IntroNum.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sample Homework
# This homework illustrates the coding guidelines for this course. These guidelines are:
#
#
# 1. Whenever possible, use functions instead of scripts. This is because functions facilitate reuse, and functions are testable. Never use copy and paste for reuse.
#
# 1. Use meaningful names for functions and variables. Function names should be verbs. For example, a function that calculates a fast Fourier transform might be named ``calcFFT``. A bad name for this function would be the single letter ``f``.
#
# 1. Constants used in the notebook should have a name in all capital letters. For example, use PI, not pi. (By definition, a constant is a variable that is assigned a value only once.)
#
# 1. The following should be used for functions:
#
# 1. Code cells should contain at most one function definition.
#
# 1. Functions should contain documentation that specifies: (a) what it does; (b) data types and semantics of its input parameters; (c) data type and semantics of what it returns.
#
# 1. The code cell in which a function resides should contain an "assert" statement that runs the function and performs a test on its output.
#
# 1. Variables in a function are either parameters of the function, local to the function, or global constants.
#
# Install all packages needed. Only runs if change "False" to "True".
if False:
# !pip install numpy
# !pip install matplotlib
# !pip install tellurium
# !pip install lmfit
# !pip install SBMLLint
# !pip install SBstoat
# !pip install scipy
# Import packages at the top of the notebook
import numpy as np
# Global constants
PI = np.pi # Constants are assigned once. Their names are in all capitals.
# +
def calculateSum(vector):
    """
    Calculates the sum of values in a vector.

    Parameters
    ----------
    vector: numpy.array
        The values to be totaled.

    Returns
    -------
    float
        Sum over all entries of ``vector``.
    """
    total = np.sum(vector)
    return total
# Test
array = np.array([1, 3, 4])
assert(calculateSum(array) == 8)
# -
# Example of a failed assert
assert(calculateSum(array) == 7)
| coding_guidelines.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JuliusCaezarEugenio/Demo1/blob/main/CPEN21_A_CPE_1_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="wRAar_-8mZ6x"
# ##Introduction to Python Programming
#
# + colab={"base_uri": "https://localhost:8080/"} id="JXUIr_aDnwK2" outputId="6a9b9909-b177-4e92-abb5-2417115c88fd"
b = "Sally"
print(b)
# + colab={"base_uri": "https://localhost:8080/"} id="nIWvgUkEplWm" outputId="75dbc610-b02d-40a9-e7eb-dd7f8535a17a"
b = "Sally"
print(type(b))
# + [markdown] id="WLvO0Rt7oQ49"
# #Naming Variables
# + colab={"base_uri": "https://localhost:8080/"} id="vc1pnXeQpHlf" outputId="0373c201-c703-4597-85d5-b7f17afb0f5e"
a = 'Sally'
A = "John"
print(a)
print(A)
# + colab={"base_uri": "https://localhost:8080/"} id="uaEEsX2ep1Iv" outputId="b2436c22-a5aa-468b-d1e6-0ff21a006ccb"
a, b, c = 0, 1, 2
print(type(a)) #This is a program using typing function
print(b)
print(c)
# + colab={"base_uri": "https://localhost:8080/"} id="3aug3FOQqPr1" outputId="7dc00128-3702-4c4c-a989-f139eb0702a7"
a = 4.50
print(type(a))
# + [markdown] id="E00ZOIaaqm69"
# ##One Value to Multiple Variables
# + colab={"base_uri": "https://localhost:8080/"} id="TZx_mGI5qrbm" outputId="7e453312-3f05-40ef-8050-14f01f11c506"
x = y = z = "four"
print(x)
print(y)
print(z)
# + colab={"base_uri": "https://localhost:8080/"} id="6GNJXaIirhZY" outputId="a7686987-1c57-48b1-f784-51474110f105"
x = "enjoying"
print("Python programming is "+ x)
# + colab={"base_uri": "https://localhost:8080/"} id="6YzGdTVfruz-" outputId="d514c2e4-bd56-462d-d2f7-2a4fdcdb2539"
x = 4
y = 5
print(x+y)
print(x-y)
# + colab={"base_uri": "https://localhost:8080/"} id="MdeWhg98r14f" outputId="65fbb723-3701-41f2-b60c-075b9e145cee"
not(x>y or x==x) #This is an example of program using logical operator
| CPEN21_A_CPE_1_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import re
import glob
from datetime import datetime
import sys
import itertools
from collections import defaultdict
import pandas as pd
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
sys.path.append("..") # Adds higher directory to python modules path for importing from src dir
from src.algorithms import SimpleLSTM, MLPClassifier, SVC
from src.datasets import NyseStocksDataset
from src.evaluation import Evaluator
from src.preparation import prepare_data
from src.utils import print_metrics_as_md, pandas_df_to_markdown_table
from src.pipeline import run_pipeline, build_pipeline
# from src.experiments import get_metrics
# %matplotlib inline
# %load_ext autotime
# %load_ext autoreload
# %autoreload 2
# -
# #### Existing companies
companies = pd.Series(['A', 'AAL', 'AAP', 'AAPL', 'ABBV', 'ABC', 'ABT', 'ACN', 'ADBE', 'ADI', 'ADM', 'ADP', 'ADS', 'ADSK', 'AEE', 'AEP', 'AES', 'AET', 'AFL', 'AGN', 'AIG', 'AIV', 'AIZ', 'AJG', 'AKAM', 'ALB', 'ALK', 'ALL', 'ALLE', 'ALXN', 'AMAT', 'AME', 'AMG', 'AMGN', 'AMP', 'AMT', 'AMZN', 'AN', 'ANTM', 'AON', 'APA', 'APC', 'APD', 'APH', 'ARNC', 'ATVI', 'AVB', 'AVGO', 'AVY', 'AWK', 'AXP', 'AYI', 'AZO', 'BA', 'BAC', 'BAX', 'BBBY', 'BBT', 'BBY', 'BCR', 'BDX', 'BEN', 'BHI', 'BIIB', 'BK', 'BLK', 'BLL', 'BMY', 'BSX', 'BWA', 'BXP', 'C', 'CA', 'CAG', 'CAH', 'CAT', 'CB', 'CBG', 'CBS', 'CCI', 'CCL', 'CELG', 'CERN', 'CF', 'CFG', 'CHD', 'CHK', 'CHRW', 'CHTR', 'CI', 'CINF', 'CL', 'CLX', 'CMA', 'CMCSA', 'CME', 'CMG', 'CMI', 'CMS', 'CNC', 'CNP', 'COF', 'COG', 'COH', 'COL', 'COO', 'COP', 'COST', 'COTY', 'CPB', 'CRM', 'CSCO', 'CSRA', 'CSX', 'CTAS', 'CTL', 'CTSH', 'CTXS', 'CVS', 'CVX', 'CXO', 'D', 'DAL', 'DD', 'DE', 'DFS', 'DG', 'DGX', 'DHI', 'DHR', 'DIS', 'DISCA', 'DISCK', 'DLPH', 'DLR', 'DLTR', 'DNB', 'DOV', 'DOW', 'DPS', 'DRI', 'DTE', 'DUK', 'DVA', 'DVN', 'EA', 'EBAY', 'ECL', 'ED', 'EFX', 'EIX', 'EL', 'EMN', 'EMR', 'ENDP', 'EOG', 'EQIX', 'EQR', 'EQT', 'ES', 'ESRX', 'ESS', 'ETFC', 'ETN', 'ETR', 'EVHC', 'EW', 'EXC', 'EXPD', 'EXPE', 'EXR', 'F', 'FAST', 'FB', 'FBHS', 'FCX', 'FDX', 'FE', 'FFIV', 'FIS', 'FISV', 'FITB', 'FL', 'FLIR', 'FLR', 'FLS', 'FMC', 'FOX', 'FOXA', 'FRT', 'FSLR', 'FTI', 'FTR', 'FTV', 'GD', 'GE', 'GGP', 'GILD', 'GIS', 'GLW', 'GM', 'GOOG', 'GOOGL', 'GPC', 'GPN', 'GPS', 'GRMN', 'GS', 'GT', 'GWW', 'HAL', 'HAR', 'HAS', 'HBAN', 'HBI', 'HCA', 'HCN', 'HCP', 'HD', 'HES', 'HIG', 'HOG', 'HOLX', 'HON', 'HP', 'HPE', 'HPQ', 'HRB', 'HRL', 'HRS', 'HSIC', 'HST', 'HSY', 'HUM', 'IBM', 'ICE', 'IDXX', 'IFF', 'ILMN', 'INTC', 'INTU', 'IP', 'IPG', 'IR', 'IRM', 'ISRG', 'ITW', 'IVZ', 'JBHT', 'JCI', 'JEC', 'JNJ', 'JNPR', 'JPM', 'JWN', 'K', 'KEY', 'KHC', 'KIM', 'KLAC', 'KMB', 'KMI', 'KMX', 'KO', 'KORS', 'KR', 'KSS', 'KSU', 'L', 'LB', 'LEG', 'LEN', 'LH', 'LKQ', 'LLL', 'LLTC', 'LLY', 'LMT', 
'LNC', 'LNT', 'LOW', 'LRCX', 'LUK', 'LUV', 'LVLT', 'LYB', 'M', 'MA', 'MAA', 'MAC', 'MAR', 'MAS', 'MAT', 'MCD', 'MCHP', 'MCK', 'MCO', 'MDLZ', 'MDT', 'MET', 'MHK', 'MJN', 'MKC', 'MLM', 'MMC', 'MMM', 'MNK', 'MNST', 'MO', 'MON', 'MOS', 'MPC', 'MRK', 'MRO', 'MSFT', 'MSI', 'MTB', 'MTD', 'MU', 'MUR', 'MYL', 'NAVI', 'NBL', 'NDAQ', 'NEE', 'NEM', 'NFLX', 'NFX', 'NI', 'NKE', 'NLSN', 'NOC', 'NOV', 'NRG', 'NSC', 'NTAP', 'NTRS', 'NUE', 'NVDA', 'NWL', 'NWS', 'NWSA', 'O', 'OKE', 'OMC', 'ORCL', 'ORLY', 'OXY', 'PAYX', 'PBCT', 'PBI', 'PCAR', 'PCG', 'PCLN', 'PDCO', 'PEG', 'PEP', 'PFE', 'PFG', 'PG', 'PGR', 'PH', 'PHM', 'PKI', 'PLD', 'PM', 'PNC', 'PNR', 'PNW', 'PPG', 'PPL', 'PRGO', 'PRU', 'PSA', 'PSX', 'PVH', 'PWR', 'PX', 'PXD', 'PYPL', 'QCOM', 'QRVO', 'R', 'RAI', 'RCL', 'REGN', 'RF', 'RHI', 'RHT', 'RIG', 'RL', 'ROK', 'ROP', 'ROST', 'RRC', 'RSG', 'RTN', 'SBUX', 'SCG', 'SCHW', 'SE', 'SEE', 'SHW', 'SIG', 'SJM', 'SLB', 'SLG', 'SNA', 'SNI', 'SO', 'SPG', 'SPGI', 'SPLS', 'SRCL', 'SRE', 'STI', 'STT', 'STX', 'STZ', 'SWK', 'SWKS', 'SWN', 'SYF', 'SYK', 'SYMC', 'SYY', 'T', 'TAP', 'TDC', 'TDG', 'TEL', 'TGNA', 'TGT', 'TIF', 'TJX', 'TMK', 'TMO', 'TRIP', 'TROW', 'TRV', 'TSCO', 'TSN', 'TSO', 'TSS', 'TWX', 'TXN', 'TXT', 'UAA', 'UAL', 'UDR', 'UHS', 'ULTA', 'UNH', 'UNM', 'UNP', 'UPS', 'URBN', 'URI', 'USB', 'UTX', 'V', 'VAR', 'VFC', 'VIAB', 'VLO', 'VMC', 'VNO', 'VRSK', 'VRSN', 'VRTX', 'VTR', 'VZ', 'WAT', 'WBA', 'WDC', 'WEC', 'WFC', 'WFM', 'WHR', 'WLTW', 'WM', 'WMB', 'WMT', 'WRK', 'WU', 'WY', 'WYN', 'WYNN', 'XEC', 'XEL', 'XL', 'XLNX', 'XOM', 'XRAY', 'XRX', 'XYL', 'YHOO', 'YUM', 'ZBH', 'ZION', 'ZTS'])
companies[19::20] = ['\n'+x for x in companies[19::20]]
print(' '.join(companies))
ds = NyseStocksDataset('OC-3C', file_path='../data/nyse/prices.csv', companies=['GOOGL', 'GOOG', 'AMZN', 'APC'])
ds.load();
X_train, y_train, X_test, y_test = ds.data()
pdata = prepare_data(ds, train_size = 45000, test_size = 4500, downsample=True)
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
print(X_train.date.min(), 'to', X_train.date.max())
X_train.head()
print(*[x.shape for x in pdata])
pdata[0].head()
# +
_predictor = MLPClassifier()
pipeline = build_pipeline(_predictor, pdata)
def pipeline_transform(X, fit=False):
    """Push ``X`` through the first three pipeline steps by hand.

    Parameters
    ----------
    X : input accepted by the pipeline's first transformer
        (here one of the prepared feature tables).
    fit : bool, default False
        When True, (re)fit the second pipeline step on the output of the
        first step before transforming.  NOTE(review): the third step is
        transformed but never fit here -- presumably it is stateless or
        was fit inside build_pipeline; confirm against src.pipeline.

    Returns
    -------
    The array produced by the third pipeline step (a scaler, per the
    linked scikit-learn example).
    """
    X = pipeline.steps[0][1].transform(X)
    if fit:
        pipeline.steps[1][1].fit(X)
    X = pipeline.steps[1][1].transform(X)
    # https://scikit-learn.org/stable/auto_examples/preprocessing/plot_all_scaling.html#robustscaler
    return pipeline.steps[2][1].transform(X)
pX_train = pipeline_transform(pdata[0], fit=True)
pX_test = pipeline_transform(pdata[2])
pd.DataFrame(pX_train).head()
# -
pd.DataFrame(pX_train).hist(figsize=(20, 20))
# plt.gcf().savefig('plots/features-scaling.png')
pd.DataFrame(pX_test).hist(figsize=(20, 20))
plt.gcf().savefig('plots/features-scaling.png')
# +
ds2 = NyseStocksDataset('OC-3C', file_path='../data/nyse/prices.csv')
ds2.load()
pdata2 = prepare_data(ds2, 300000, 30000)
pX_train2 = pipeline_transform(pdata2[0], fit=True)
pX_test2 = pipeline_transform(pdata2[2])
pd.DataFrame(pX_train2).iloc[:, 0].hist(figsize=(10, 10), bins=100)
plt.gcf().savefig('day-7-open-after-robust-scaling.png')
| research/archive/9.0 Company Specific Data Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Model
path_img = '/examples/IMG/'
path_data = 'examples/driving_log.csv'
import csv
import cv2
import imageio as imgio
import numpy as np
from keras.layers import Conv2D, Dense, Dropout, Flatten, Lambda
from keras.models import Sequential
def read_data(img_path, driving_data): #this part was borrowed from the Udacity's prep video on this lab
    """Load center-camera images and steering angles from a driving log.

    Parameters
    ----------
    img_path : str
        Directory containing the recorded camera images.
    driving_data : str
        Path to the CSV driving log; column 0 holds the center-image
        path and column 3 the steering measurement.

    Returns
    -------
    (list, list)
        The center-camera images (as arrays) and their steering angles.
    """
    with open(driving_data) as csvfile:
        rows = list(csv.reader(csvfile))
    images = []
    measurements = []
    # The first row is the CSV header -- skip it.
    for row in rows[1:]:
        # Only the center camera (column 0) is used; the left and right
        # camera columns (1 and 2) are ignored here.
        source_path = row[0]
        filename = source_path.split('/')[-1]
        images.append(imgio.imread(img_path + filename))
        measurements.append(float(row[3]))
    return images, measurements
def image_manipulation(in_image):
    """Resize an RGB image to 200x66 and convert it to the YUV space.

    This matches the input format used by Nvidia's end-to-end driving
    network.

    Parameters
    ----------
    in_image : numpy array
        RGB image of arbitrary size.

    Returns
    -------
    numpy array
        The 66x200 image in YUV color space.
    """
    target_size = (200, 66)  # (width, height), as cv2.resize expects
    shrunk = cv2.resize(in_image, target_size, interpolation=cv2.INTER_AREA)
    return cv2.cvtColor(shrunk, cv2.COLOR_RGB2YUV)
# Load the training data: center-camera images and steering angles.
images, measurements = read_data(path_img, path_data)
X_train = np.array(images)
y_train = np.array(measurements)
## Create a CNN model. We are taking the Nvidia's End-to-End Deep Learning model
model = Sequential()
# Convert to YUV color scheme.
# NOTE(review): image_manipulation uses cv2, which cannot operate on the
# symbolic tensors a Lambda layer receives -- confirm this layer actually
# runs, or apply the conversion inside read_data before training instead.
model.add(Lambda(lambda x: image_manipulation(x), input_shape=(66,200,3)))
# Normalization to the range [-0.5, 0.5]
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(66,200,3)))
# Convolutional layers 1-3: 5x5 kernels, downsampling with 2x2 strides.
# Bug fix: the keyword is 'strides' -- Conv2D has no 'stride' argument.
model.add(Conv2D(3, (5,5), strides=(2,2), padding='valid'))
model.add(Conv2D(24, (5,5), strides=(2,2), padding='valid'))
model.add(Conv2D(36, (5,5), strides=(2,2), padding='valid'))
# Convolutional layers 4-6: 3x3 kernels, no downsampling.
model.add(Conv2D(48, (3,3), padding='valid'))
model.add(Conv2D(64, (3,3), padding='valid'))
model.add(Conv2D(64, (3,3), padding='valid'))
model.add(Flatten())
# Dropout layer to reduce overfitting
model.add(Dropout(0.6))
# Fully connected layers
model.add(Dense(1164))
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
# Bug fix: 'nb_epoch' was removed in Keras 2 -- the argument is 'epochs'.
model.fit(X_train, y_train, validation_split=0.2, shuffle=True, epochs=7)
model.save('model.h5')
| .ipynb_checkpoints/Untitled1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Aerospike Connect for Spark - H2O Tutorial for Python
# ## Tested with Java 8, Spark 2.4.0, H2O 3.30.1.2, h2o_pysparkling_2.4, Python 3.7, and Aerospike Spark Connector 2.5
# ### Setup
#
# Below, a seed address for your Aerospike database cluster is required
#
# Check the given namespace is available, and your feature key is located as per AS_FEATURE_KEY_PATH
#
# Finally, review https://www.aerospike.com/enterprise/download/connectors/ to ensure AEROSPIKE_SPARK_JAR_VERSION is correct
# +
# IP Address or DNS name for one host in your Aerospike cluster
AS_HOST ="127.0.0.1"
# Name of one of your namespaces. Type 'show namespaces' at the aql prompt if you are not sure
AS_NAMESPACE = "test"
AS_FEATURE_KEY_PATH = "/etc/aerospike/features.conf"
AEROSPIKE_SPARK_JAR_VERSION="2.5.0"
AS_PORT = 3000 # Usually 3000, but change here if not
AS_CONNECTION_STRING = AS_HOST + ":"+ str(AS_PORT)
# +
# Next we locate the Spark installation - this will be found using the SPARK_HOME environment variable that you will have set
# if you followed the repository README
import findspark
findspark.init()
# -
import h2o
h2o.init()
# +
# Here we download the Aerospike Spark jar
import urllib
import os
def aerospike_spark_jar_download_url(version=AEROSPIKE_SPARK_JAR_VERSION):
    """Build the download URL for an Aerospike Spark connector version.

    Parameters
    ----------
    version : str
        Connector version, e.g. "2.5.0"; defaults to the notebook-wide
        AEROSPIKE_SPARK_JAR_VERSION.

    Returns
    -------
    str
        Full URL of the connector jar artifact.
    """
    DOWNLOAD_PREFIX="https://www.aerospike.com/enterprise/download/connectors/aerospike-spark/"
    DOWNLOAD_SUFFIX="/artifact/jar"
    # Bug fix: honour the 'version' parameter -- previously the global
    # AEROSPIKE_SPARK_JAR_VERSION was always used, so passing an explicit
    # version had no effect.
    return DOWNLOAD_PREFIX + version + DOWNLOAD_SUFFIX
def download_aerospike_spark_jar(version=AEROSPIKE_SPARK_JAR_VERSION):
    """Download the Aerospike Spark connector jar if not already present.

    Parameters
    ----------
    version : str
        Connector version; defaults to AEROSPIKE_SPARK_JAR_VERSION.

    Returns
    -------
    str
        Absolute path of the (possibly pre-existing) jar file.
    """
    # Bug fix: 'import urllib' alone does not load the request submodule.
    import urllib.request
    # Bug fix: use the 'version' parameter -- previously the global was
    # always used, so passing an explicit version had no effect.
    JAR_NAME="aerospike-spark-assembly-"+version+".jar"
    if(not(os.path.exists(JAR_NAME))) :
        urllib.request.urlretrieve(aerospike_spark_jar_download_url(version),JAR_NAME)
    else :
        print(JAR_NAME+" already downloaded")
    return os.path.join(os.getcwd(),JAR_NAME)
AEROSPIKE_JAR_PATH=download_aerospike_spark_jar()
os.environ["PYSPARK_SUBMIT_ARGS"] = '--jars ' + AEROSPIKE_JAR_PATH + ' pyspark-shell'
# -
import pyspark
from pyspark.context import SparkContext
from pyspark.sql.context import SQLContext
from pyspark.sql.session import SparkSession
from pyspark.sql.types import StringType, StructField, StructType, ArrayType, IntegerType, MapType, LongType, DoubleType
from pysparkling import *
# Get a spark session object and set required Aerospike configuration properties
# Set up spark and point aerospike db to AS_HOST
sc = SparkContext.getOrCreate()
spark = SparkSession(sc)
sqlContext = SQLContext(sc)
spark.conf.set("aerospike.namespace",AS_NAMESPACE)
spark.conf.set("aerospike.seedhost",AS_CONNECTION_STRING)
spark.conf.set("aerospike.keyPath",AS_FEATURE_KEY_PATH )
h2oContext = H2OContext.getOrCreate()
# ## Create Sample Data and load it into Aerospike
# +
# We create age vs salary data, using three different Gaussian distributions
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
# Create covariance matrix from std devs + correlation
def covariance_matrix(std_dev_1,std_dev_2,correlation):
    """Assemble the 2x2 covariance matrix for two correlated variables.

    Built from the two standard deviations and their correlation
    coefficient: diagonal terms are the variances, off-diagonal terms
    are correlation * std_dev_1 * std_dev_2.
    """
    variance_1 = std_dev_1 ** 2
    variance_2 = std_dev_2 ** 2
    cross_term = correlation * std_dev_1 * std_dev_2
    return [[variance_1, cross_term],
            [cross_term, variance_2]]
# Return a bivariate sample given means/std dev/correlation
def age_salary_sample(distribution_params,sample_size):
    """Draw a bivariate (age, salary) Gaussian sample.

    The means, standard deviations and correlation are read from the
    ``distribution_params`` dictionary.  Returns the transposed sample,
    i.e. a (2, sample_size) array of ages and salaries.
    """
    mu = [distribution_params["age_mean"], distribution_params["salary_mean"]]
    sd_age = distribution_params["age_std_dev"]
    sd_salary = distribution_params["salary_std_dev"]
    rho = distribution_params["age_salary_correlation"]
    # Build the covariance matrix inline (same formula as covariance_matrix).
    cross = rho * sd_age * sd_salary
    cov = [[sd_age ** 2, cross],
           [cross, sd_salary ** 2]]
    return np.random.multivariate_normal(mu, cov, sample_size).T
# Define the characteristics of our age/salary distribution
age_salary_distribution_1 = {"age_mean":25,"salary_mean":50000,
"age_std_dev":1,"salary_std_dev":5000,"age_salary_correlation":0.3}
age_salary_distribution_2 = {"age_mean":45,"salary_mean":80000,
"age_std_dev":4,"salary_std_dev":10000,"age_salary_correlation":0.7}
age_salary_distribution_3 = {"age_mean":35,"salary_mean":70000,
"age_std_dev":2,"salary_std_dev":9000,"age_salary_correlation":0.1}
distribution_data = [age_salary_distribution_1,age_salary_distribution_2,age_salary_distribution_3]
# Sample age/salary data for each distributions
group_1_ages,group_1_salaries = age_salary_sample(age_salary_distribution_1,sample_size=100)
group_2_ages,group_2_salaries = age_salary_sample(age_salary_distribution_2,sample_size=120)
group_3_ages,group_3_salaries = age_salary_sample(age_salary_distribution_3,sample_size=80)
ages=np.concatenate([group_1_ages,group_2_ages,group_3_ages])
salaries=np.concatenate([group_1_salaries,group_2_salaries,group_3_salaries])
print("Data created")
# +
# Turn the above records into a Data Frame
# First of all, create an array of arrays
inputBuf = []
for i in range(0, len(ages)) :
id = i + 1 # Avoid counting from zero
name = "Individual: {:03d}".format(id)
# Note we need to make sure values are typed correctly
# salary will have type numpy.float64 - if it is not cast as below, an error will be thrown
age = float(ages[i])
salary = int(salaries[i])
inputBuf.append((id, name,age,salary))
# Convert to an RDD
inputRDD = spark.sparkContext.parallelize(inputBuf)
# Convert to a data frame using a schema
schema = StructType([
StructField("id", IntegerType(), True),
StructField("name", StringType(), True),
StructField("age", DoubleType(), True),
StructField("salary",IntegerType(), True)
])
inputDF=spark.createDataFrame(inputRDD,schema)
#Write the data frame to Aerospike, the id field is used as the primary key
inputDF \
.write \
.mode('overwrite') \
.format("com.aerospike.spark.sql") \
.option("aerospike.set", "salary_data")\
.option("aerospike.updateByKey", "id") \
.save()
# -
# ## Step 1: Load data into a DataFrame using user specified schema
# +
# If we explicitly set the schema, using the previously created schema object
# we effectively type the rows in the Data Frame
loadedDFWithSchema=spark \
.read \
.format("com.aerospike.spark.sql") \
.schema(schema) \
.option("aerospike.set", "salary_data").load()
loadedDFWithSchema.show(5)
# -
# ## Step 2: Load Data from Spark DataFrame into H2OFrame
#Save into an H2OFrame using a Key. A key is an entry in the H2O Key value store that maps to an object in H2O.
loadedDFWithSchema.write.format("h2o").option("key", "key_one").save()
# +
#List the current contents of the H2O cluster, you can use the h2o.ls.
h2o.ls()
h2oframe = h2o.get_frame("key_one")
# -
# ## Step 3: Create a model using SparkML
h2oframe.summary()
| spark/other_notebooks/AerospikeSparkH2ODemo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Testing script for installation #
# - Ignore warning messages! You may need it for public notebooks!
# - If you don't see error messages, the test is passed!
# + [markdown] tags=[]
# ### CS5489 ###
# +
# %matplotlib inline
import IPython.core.display
# setup output image format (Chrome works best)
# jpg is better for images
IPython.core.display.set_matplotlib_formats("jpg")
import matplotlib.pyplot as plt
import matplotlib
from numpy import *
from sklearn import *
import glob
import pandas as pd
import os
import csv
import string
import skimage.io
import skimage.color
import skimage.util
import zipfile
import fnmatch
import skimage.transform
import time
# +
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Activation, Conv2D, Flatten, Dropout, Input, BatchNormalization, \
GlobalAveragePooling2D, Concatenate, Average
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers.schedules import ExponentialDecay
import zipfile
import logging
logging.basicConfig()
import struct
print(keras.__version__, tf.__version__)
# use keras backend (K) to force channels-last ordering
K.set_image_data_format('channels_last')
# -
#2.5.0
from tensorflow.keras.applications import ResNet50, InceptionV3, EfficientNetB0
#import tensorflow.keras.applications.efficientnet.preprocess_input as pi_enb0
#import tensorflow.keras.applications.resnet.preprocess_input as pi_rn50
#import tensorflow.keras.applications.inception_v3.preprocess_input as pi_inv3
# +
# compatible with v1 or v2
def tf_do_i_have_gpu():
    """Print GPU diagnostics and return True when TensorFlow sees a GPU.

    Handles both TF 1.x and TF 2.x APIs by inspecting the version string.
    """
    print(tf.test.is_built_with_cuda())
    running_v1 = tf.__version__[0] == '1'
    if running_v1:
        print(tf.test.is_gpu_available())
        print(tf.test.gpu_device_name())
        return tf.test.is_gpu_available()
    gpus = tf.config.list_physical_devices('GPU')
    print(gpus)
    return len(gpus) > 0
if tf_do_i_have_gpu():
    # Cap per-process GPU memory so other processes can share the card.
    # Bug fixes: ConfigProto lives under tf.compat.v1 (tf.compat.ConfigProto
    # does not exist), and with 'from tensorflow import keras' there is no
    # keras.backend.tensorflow_backend -- the portable spelling is
    # tf.compat.v1.keras.backend.set_session.  tf.compat.v1 is available
    # from TF 1.14 onward, so one branch covers both major versions.
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.9  # 0.6 sometimes works better for folks
    tf.compat.v1.keras.backend.set_session(tf.compat.v1.Session(config=config))
# -
# ### CS6493 ###
# !nvidia-smi
import torch
print("PyTorch version: ", torch.__version__)
print("GPU support: ", torch.cuda.is_available())
print("Available devices count: ", torch.cuda.device_count())
import torchvision
import torchaudio
# ### CS5491 ###
# +
#import numpy as np
from scipy.spatial import Voronoi, voronoi_plot_2d
#import matplotlib.pyplot as plt
import networkx as nx
# -
#
| test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: DESI master
# language: python
# name: desi-master
# ---
# # Examining the choice of minimum $\Delta \chi^2$ with CMX Tile 70500
#
# Tile 70500 has 3 exposures of 450s from SV0. Using VI redshifts, I examine the choice of minimum $\Delta \chi^2 = 40$ vs $100$
# by looking at the true and false positive rates and looking at their spectra and redrock fits.
#
#
# Details of the exposures can be found in [TS SV0](https://desi.lbl.gov/trac/wiki/TargetSelectionWG/SV0)
# +
import os
import glob
import h5py
import fitsio
import numpy as np
from astropy.table import Table
from redrock import templates
from desitarget.cmx import cmx_targetmask
# -
# -- plotting --
import matplotlib as mpl
import matplotlib.pyplot as plt
#mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
# +
# read VI table
fvi = os.path.join('/global/cfs/cdirs/desi/sv/vi/TruthTables/Andes_reinspection/BGS', 'Truth_table_Andes_reinspection_BGS_70500_20200303_v1.csv')
fvi = os.path.join(fvi)
vi = Table.read(fvi, format='ascii.csv', include_names=['TARGETID', 'Redrock_z', 'best z', 'best quality', 'DELTACHI2', 'ZWARN', 'FLUX_R', 'TARGET_RA', 'TARGET_DEC'])
# target id
vi_id = vi['TARGETID']
vi_id = np.array(vi_id).astype(int)
# redrock redshift
zrr_vi = np.array(vi['Redrock_z'])
dchi2_vi = np.array(vi['DELTACHI2']) # delta chi2 from redrock
zwarn_vi = np.array(vi['ZWARN']) # zwarn flag from redrock
# visually inspected redshift
ztrue = np.array(vi['best z'])
# quality flag
qa_flag = np.array(vi['best quality'])
rflux_vi = np.array(vi['FLUX_R']) # r band flux
vi_ra = np.array(vi['TARGET_RA']) # RA
vi_dec = np.array(vi['TARGET_DEC']) # dec
# -
dir_coadd = '/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd/'
tile, date = 70500, 20200303
# +
def zsuccess_rate(prop, zsuccess_cond, range=None, nbins=20, bin_min=2):
''' measure the redshift success rate along with property `prop`
:params prop:
array of properties (i.e. Legacy r-band magnitude)
:params zsuccess_cond:
boolean array indicating redshift success
:params range: (default: None)
range of the `prop`
:params nbins: (default: 20)
number of bins to divide `prop` by
:params bin_min: (default: 2)
minimum number of objects in bin to exlcude it
:return wmean:
weighted mean of `prop` in the bins
:return e1:
redshift success rate in the bins
:return ee1:
simple poisson error on the success rate
'''
h0, bins = np.histogram(prop, bins=nbins, range=range)
hv, _ = np.histogram(prop, bins=bins, weights=prop)
h1, _ = np.histogram(prop[zsuccess_cond], bins=bins)
good = h0 > bin_min
hv = hv[good]
h0 = h0[good]
h1 = h1[good]
wmean = hv / h0 # weighted mean
rate = h1.astype("float") / (h0.astype('float') + (h0==0))
e_rate = np.sqrt(rate * (1 - rate)) / np.sqrt(h0.astype('float') + (h0 == 0))
return wmean, rate, e_rate
def zsuccess(zrr, ztrue, zwarn, deltachi2=None, min_deltachi2=9.):
    ''' Apply the redshift success criterion:
    |z_redrock - z_true|/(1+z_true) < 0.003 and ZWARN flag = 0,
    optionally also requiring deltachi2 > min_deltachi2.

    :params zrr:
        redrock best-fit redshift
    :params ztrue:
        true (visually inspected) redshift
    :params zwarn:
        zwarn flag value from redrock
    :params deltachi2: (default: None)
        redrock delta chi^2 values; if None the delta chi^2 cut is skipped
    :params min_deltachi2: (default: 9.)
        minimum delta chi^2 required when `deltachi2` is given
    :return crit:
        boolean array indicating which redshifts were successfully
        measured by redrock
    '''
    dz_1pz = np.abs(ztrue - zrr) / (1. + ztrue)
    crit = (dz_1pz < 0.003) & (zwarn == 0)
    if deltachi2 is not None:
        crit = crit & (deltachi2 > min_deltachi2)
    return crit
# -
# ## $z$ success rate with $\Delta \chi^2=40$ vs $100$
# Lets first see how much the redshift success rate is impacted by the choice of $\Delta \chi^2$
# +
# Per-exposure redshift success rate vs r-band magnitude, comparing
# minimum delta chi^2 thresholds of 40 and 100.
expids = np.unique([int(fcoadd.split('-')[-1].replace('.fits', '')) for fcoadd in glob.glob(os.path.join(dir_coadd, 'coadd-%i-%i-*.fits' % (tile, date)))])
fig = plt.figure(figsize=(15,5*(len(expids)//3)))
for i_exp, expid in enumerate(expids):
    print('--- %i ---' % expid)
    petals = np.unique([int(fcoadd.split('-')[-2]) for fcoadd in glob.glob(os.path.join(dir_coadd, 'coadd-%i-%i-*%i.fits' % (tile, date, expid)))])
    # compile coadds from the petals
    n_bgs = 0
    ids, fluxes, rmags, rfibs, z_rrs, zwarns, chi2s, dchi2s = [], [], [], [], [], [], [], []
    for petal in petals:
        # read coadd file
        f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tile, date, petal, str(expid).zfill(8)))
        coadd = fitsio.read(f_coadd)
        coadd_wave = fitsio.read(f_coadd, ext=2)
        coadd_flux = fitsio.read(f_coadd, ext=3)
        # read zbest files
        f_zbest = f_coadd.replace('coadd-', 'zbest-')
        zbest = fitsio.read(f_zbest)
        # only keep BGS targets
        is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
        # remove galaxies with no flux...
        # NOTE(review): the BGS/zero-flux cut is commented out, so every target passes
        gal_cut = np.ones(len(coadd['CMX_TARGET'])).astype(bool)#is_BGS & (np.sum(coadd_flux, axis=1) != 0)
        n_bgs += np.sum(gal_cut)
        fluxes.append(coadd_flux[gal_cut,:])
        ids.append(coadd['TARGETID'][gal_cut])
        # convert nanomaggie fluxes to AB magnitudes
        rmags.append(22.5 - 2.5 * np.log10(coadd['FLUX_R'][gal_cut]))
        rfibs.append(22.5 - 2.5 * np.log10(coadd['FIBERFLUX_R'][gal_cut]))
        z_rrs.append(zbest['Z'][gal_cut])
        zwarns.append(zbest['ZWARN'][gal_cut])
        chi2s.append(zbest['CHI2'][gal_cut])
        dchi2s.append(zbest['DELTACHI2'][gal_cut])
    ids = np.concatenate(ids)
    # match VI targets to coadd targets
    _, m_vi, _m_coadd = np.intersect1d(vi_id, ids, return_indices=True)
    # reorder the coadd match indices so they line up with the VI table order
    isort = np.argsort(m_vi)
    m_coadd = _m_coadd[isort]
    assert np.array_equal(m_vi[isort], np.arange(len(vi_id)))
    print(' %i of %i BGS targets have matches to VI' % (len(m_vi), n_bgs))
    assert np.array_equal(vi_id, ids[m_coadd])
    fluxes = np.concatenate(fluxes, axis=0)[m_coadd]
    rmags = np.concatenate(rmags)[m_coadd]
    rfibs = np.concatenate(rfibs)[m_coadd]
    z_rrs = np.concatenate(z_rrs)[m_coadd]
    zwarns = np.concatenate(zwarns)[m_coadd]
    chi2s = np.concatenate(chi2s)[m_coadd]
    dchi2s = np.concatenate(dchi2s)[m_coadd]
    # success masks for the two delta chi^2 thresholds under comparison
    zs_dchi2_40 = zsuccess(z_rrs, ztrue, zwarns, deltachi2=dchi2s, min_deltachi2=40)
    zs_dchi2_100 = zsuccess(z_rrs, ztrue, zwarns, deltachi2=dchi2s, min_deltachi2=100)
    print(' %i successful r < 20 redshifts delta chi2=40' % np.sum(zs_dchi2_40 & (rmags < 20.)))
    print(' %i successful r < 20 redshifts delta chi2=100' % np.sum(zs_dchi2_100 & (rmags < 20.)))
    sub = fig.add_subplot(len(expids)//3,3,i_exp+1)
    sub.plot([16, 21], [1.0, 1.0], c='k', ls='--')
    wmean, rate, err_rate = zsuccess_rate(rmags, zs_dchi2_40, range=[15,22], nbins=28, bin_min=10)
    # write out to file
    np.savetxt(
        os.path.join(dir_coadd, 'vi_zsuccess.dchi2_40.coadd-%i-%i-%i.txt' % (tile, date, expid)),
        np.array([wmean, rate, err_rate]).T)
    sub.errorbar(wmean, rate, err_rate, fmt='.C0', label=r'$\Delta \chi^2 = 40$')
    wmean, rate, err_rate = zsuccess_rate(rmags, zs_dchi2_100, range=[15,22], nbins=28, bin_min=10)
    sub.errorbar(wmean, rate, err_rate, fmt='.C1', label=r'$\Delta \chi^2 = 100$')
    sub.set_xlabel(r'$r$ magnitude', fontsize=20)
    sub.set_xlim(16., 20.5)
    sub.text(0.95, 0.95, 'exposure %i' % expid, transform=sub.transAxes, fontsize=20, ha='right', va='top')
    if i_exp == 0:
        sub.set_ylabel(r'redrock $z$ success rate', fontsize=20)
        sub.legend(loc='lower left', handletextpad=0.1, fontsize=15)
    sub.set_ylim([0.6, 1.1])
    sub.set_yticks([0.6, 0.7, 0.8, 0.9, 1.])
# -
# There's a **significant** impact, especially for faint galaxies: ~5% at r~19.5, ~10% at r~20.
# +
# Per-exposure redshift success rate vs r-band *fiber* magnitude, comparing
# minimum delta chi^2 thresholds of 40 and 100.
expids = np.unique([int(fcoadd.split('-')[-1].replace('.fits', '')) for fcoadd in glob.glob(os.path.join(dir_coadd, 'coadd-%i-%i-*.fits' % (tile, date)))])
fig = plt.figure(figsize=(15,5*(len(expids)//3)))
for i_exp, expid in enumerate(expids):
    print('--- %i ---' % expid)
    petals = np.unique([int(fcoadd.split('-')[-2]) for fcoadd in glob.glob(os.path.join(dir_coadd, 'coadd-%i-%i-*%i.fits' % (tile, date, expid)))])
    # compile coadds from the petals
    n_bgs = 0
    ids, fluxes, rmags, rfibs, z_rrs, zwarns, chi2s, dchi2s = [], [], [], [], [], [], [], []
    for petal in petals:
        # read coadd file
        f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tile, date, petal, str(expid).zfill(8)))
        coadd = fitsio.read(f_coadd)
        coadd_wave = fitsio.read(f_coadd, ext=2)
        coadd_flux = fitsio.read(f_coadd, ext=3)
        # read zbest files
        f_zbest = f_coadd.replace('coadd-', 'zbest-')
        zbest = fitsio.read(f_zbest)
        # only keep BGS targets
        is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
        # remove galaxies with no flux...
        # NOTE(review): the BGS/zero-flux cut is commented out, so every target passes
        gal_cut = np.ones(len(coadd['CMX_TARGET'])).astype(bool)#is_BGS & (np.sum(coadd_flux, axis=1) != 0)
        n_bgs += np.sum(gal_cut)
        fluxes.append(coadd_flux[gal_cut,:])
        ids.append(coadd['TARGETID'][gal_cut])
        # convert nanomaggie fluxes to AB magnitudes
        rmags.append(22.5 - 2.5 * np.log10(coadd['FLUX_R'][gal_cut]))
        rfibs.append(22.5 - 2.5 * np.log10(coadd['FIBERFLUX_R'][gal_cut]))
        z_rrs.append(zbest['Z'][gal_cut])
        zwarns.append(zbest['ZWARN'][gal_cut])
        chi2s.append(zbest['CHI2'][gal_cut])
        dchi2s.append(zbest['DELTACHI2'][gal_cut])
    ids = np.concatenate(ids)
    # match VI targets to coadd targets
    _, m_vi, _m_coadd = np.intersect1d(vi_id, ids, return_indices=True)
    # reorder the coadd match indices so they line up with the VI table order
    isort = np.argsort(m_vi)
    m_coadd = _m_coadd[isort]
    assert np.array_equal(m_vi[isort], np.arange(len(vi_id)))
    print(' %i of %i BGS targets have matches to VI' % (len(m_vi), n_bgs))
    assert np.array_equal(vi_id, ids[m_coadd])
    fluxes = np.concatenate(fluxes, axis=0)[m_coadd]
    rmags = np.concatenate(rmags)[m_coadd]
    rfibs = np.concatenate(rfibs)[m_coadd]
    z_rrs = np.concatenate(z_rrs)[m_coadd]
    zwarns = np.concatenate(zwarns)[m_coadd]
    chi2s = np.concatenate(chi2s)[m_coadd]
    dchi2s = np.concatenate(dchi2s)[m_coadd]
    # success masks for the two delta chi^2 thresholds under comparison
    zs_dchi2_40 = zsuccess(z_rrs, ztrue, zwarns, deltachi2=dchi2s, min_deltachi2=40)
    zs_dchi2_100 = zsuccess(z_rrs, ztrue, zwarns, deltachi2=dchi2s, min_deltachi2=100)
    print(' %i successful r_fib < 21 redshifts delta chi2=40' % np.sum(zs_dchi2_40 & (rfibs < 21.)))
    # BUG FIX: this count previously cut on rmags < 21., inconsistent with the
    # dchi2=40 line above and with the "r_fib < 21" text of the message.
    print(' %i successful r_fib < 21 redshifts delta chi2=100' % np.sum(zs_dchi2_100 & (rfibs < 21.)))
    sub = fig.add_subplot(len(expids)//3,3,i_exp+1)
    sub.plot([16, 25], [1.0, 1.0], c='k', ls='--')
    wmean, rate, err_rate = zsuccess_rate(rfibs, zs_dchi2_40, range=[17,24], nbins=28, bin_min=10)
    sub.errorbar(wmean, rate, err_rate, fmt='.C0', label=r'$\Delta \chi^2 = 40$')
    wmean, rate, err_rate = zsuccess_rate(rfibs, zs_dchi2_100, range=[17,24], nbins=28, bin_min=10)
    sub.errorbar(wmean, rate, err_rate, fmt='.C1', label=r'$\Delta \chi^2 = 100$')
    sub.set_xlabel(r'$r$ fiber magnitude', fontsize=20)
    sub.set_xlim(18.5, 23)
    sub.text(0.95, 0.95, 'exposure %i' % expid, transform=sub.transAxes, fontsize=20, ha='right', va='top')
    if i_exp == 0:
        sub.set_ylabel(r'redrock $z$ success rate', fontsize=20)
        sub.legend(loc='lower left', handletextpad=0.1, fontsize=15)
    sub.set_ylim([0.6, 1.1])
    sub.set_yticks([0.6, 0.7, 0.8, 0.9, 1.])
# -
# ## "true positives" with $40 < \Delta \chi^2 < 100$
# Spectra where redrock got the right redshifts but with $40 < \Delta \chi^2 < 100$
# load the redrock galaxy templates used to reconstruct best-fit model spectra
galaxy_templates = templates.Template('rrtemplate-galaxy.fits')
# +
fig = plt.figure(figsize=(15,5))
# NOTE(review): the globs below are hard-coded to tile 66003 / 20200315, while
# the rest of the notebook uses the `tile`/`date` variables (70500 / 20200303).
# Confirm this is intentional and not a copy-paste leftover.
expids = np.unique([int(fcoadd.split('-')[-1].replace('.fits', '')) for fcoadd in glob.glob(os.path.join(dir_coadd, 'coadd-66003-20200315-*.fits'))])
for i_exp, expid in enumerate(expids):
    print('--- %i ---' % expid)
    petals = np.unique([int(fcoadd.split('-')[-2]) for fcoadd in glob.glob(os.path.join(dir_coadd, 'coadd-66003-20200315-*%i.fits' % expid))])
    # compile coadds from the petals
    n_bgs = 0
    fluxes, ids, rmags, rfibs, z_rrs, zwarns, zcoeffs, chi2s, dchi2s = [], [], [], [], [], [], [], [], []
    for petal in petals:
        # read coadd file
        f_coadd = os.path.join(dir_coadd, 'coadd-66003-20200315-%i-%s.fits' % (petal, str(expid).zfill(8)))
        coadd = fitsio.read(f_coadd)
        coadd_wave = fitsio.read(f_coadd, ext=2)
        coadd_flux = fitsio.read(f_coadd, ext=3)
        # read zbest files
        f_zbest = f_coadd.replace('coadd-', 'zbest-')
        zbest = fitsio.read(f_zbest)
        # only keep BGS targets
        is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
        # remove galaxies with no flux...
        # NOTE(review): the BGS/zero-flux cut is commented out, so every target passes
        gal_cut = np.ones(len(coadd['CMX_TARGET'])).astype(bool)#is_BGS & (np.sum(coadd_flux, axis=1) != 0)
        n_bgs += np.sum(gal_cut)
        fluxes.append(coadd_flux[gal_cut])
        ids.append(coadd['TARGETID'][gal_cut])
        rmags.append(22.5 - 2.5 * np.log10(coadd['FLUX_R'][gal_cut]))
        rfibs.append(22.5 - 2.5 * np.log10(coadd['FIBERFLUX_R'][gal_cut]))
        z_rrs.append(zbest['Z'][gal_cut])
        zwarns.append(zbest['ZWARN'][gal_cut])
        zcoeffs.append(zbest['COEFF'][gal_cut])
        chi2s.append(zbest['CHI2'][gal_cut])
        dchi2s.append(zbest['DELTACHI2'][gal_cut])
    ids = np.concatenate(ids)
    # match VI targets to coadd targets
    _, m_vi, _m_coadd = np.intersect1d(vi_id, ids, return_indices=True)
    isort = np.argsort(m_vi)
    m_coadd = _m_coadd[isort]
    assert np.array_equal(m_vi[isort], np.arange(len(vi_id)))
    print(' %i of %i BGS targets have matches to VI' % (len(m_vi), n_bgs))
    assert np.array_equal(vi_id, ids[m_coadd])
    fluxes = np.concatenate(fluxes, axis=0)[m_coadd]
    rmags = np.concatenate(rmags)[m_coadd]
    rfibs = np.concatenate(rfibs)[m_coadd]
    z_rrs = np.concatenate(z_rrs)[m_coadd]
    zwarns = np.concatenate(zwarns)[m_coadd]
    zcoeffs = np.concatenate(zcoeffs, axis=0)[m_coadd]
    chi2s = np.concatenate(chi2s)[m_coadd]
    dchi2s = np.concatenate(dchi2s)[m_coadd]
    # true positives: redrock redshift agrees with VI, ZWARN clean, and
    # delta chi^2 falls in the contested 40-100 window, r < 20
    dz_1pz = np.abs(ztrue - z_rrs)/(1.+ztrue)
    true_positive = (dz_1pz < 0.003) & (zwarns == 0) & (dchi2s > 40.) & (dchi2s < 100.) & (rmags < 20.)
    n_tp = np.sum(true_positive)
    n_pass = np.sum((zwarns == 0) & (dchi2s > 40.) & (dchi2s < 100.) & (rmags < 20.))
    print(' %i of %i r < 20. galaxies with ZWARN=0 and 40 < delta chi^2 < 100 are true positives' % (np.sum(true_positive), n_pass))
    # plot the first true-positive spectrum against its best-fit redrock template
    bestfit_template = galaxy_templates.eval(zcoeffs[true_positive,:][0], coadd_wave, z_rrs[true_positive][0]) * (1+z_rrs[true_positive][0])
    fig = plt.figure(figsize=(15,5))
    sub = fig.add_subplot(111)
    sub.plot(coadd_wave, fluxes[true_positive,:][0])
    sub.plot(coadd_wave, bestfit_template, c='k', ls=':')
    sub.text(0.95, 0.95, r'$r=%.2f$' % rmags[true_positive][0], transform=sub.transAxes, ha='right', va='top', fontsize=20)
    sub.set_xlim(coadd_wave.min(), coadd_wave.max())
    sub.set_ylim(-1., 10)
# -
# ## "false positives" with $40 < \Delta \chi^2 < 100$
# Spectra that redrock *thinks* it got the right redshifts (ZWARN = 0 and $\Delta \chi^2 > 40$) but was wrong
# +
fig = plt.figure(figsize=(15,5))
# NOTE(review): as in the true-positive cell, these globs are hard-coded to
# tile 66003 / 20200315 rather than using the `tile`/`date` variables -- verify.
expids = np.unique([int(fcoadd.split('-')[-1].replace('.fits', '')) for fcoadd in glob.glob(os.path.join(dir_coadd, 'coadd-66003-20200315-*.fits'))])
for i_exp, expid in enumerate(expids):
    print('--- %i ---' % expid)
    petals = np.unique([int(fcoadd.split('-')[-2]) for fcoadd in glob.glob(os.path.join(dir_coadd, 'coadd-66003-20200315-*%i.fits' % expid))])
    # compile coadds from the petals
    n_bgs = 0
    fluxes, ids, rmags, rfibs, z_rrs, zwarns, zcoeffs, chi2s, dchi2s = [], [], [], [], [], [], [], [], []
    for petal in petals:
        # read coadd file
        f_coadd = os.path.join(dir_coadd, 'coadd-66003-20200315-%i-%s.fits' % (petal, str(expid).zfill(8)))
        coadd = fitsio.read(f_coadd)
        coadd_wave = fitsio.read(f_coadd, ext=2)
        coadd_flux = fitsio.read(f_coadd, ext=3)
        # read zbest files
        f_zbest = f_coadd.replace('coadd-', 'zbest-')
        zbest = fitsio.read(f_zbest)
        # only keep BGS targets
        is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
        # remove galaxies with no flux...
        # NOTE(review): the BGS/zero-flux cut is commented out, so every target passes
        gal_cut = np.ones(len(coadd['CMX_TARGET'])).astype(bool)#is_BGS & (np.sum(coadd_flux, axis=1) != 0)
        n_bgs += np.sum(gal_cut)
        fluxes.append(coadd_flux[gal_cut])
        ids.append(coadd['TARGETID'][gal_cut])
        rmags.append(22.5 - 2.5 * np.log10(coadd['FLUX_R'][gal_cut]))
        rfibs.append(22.5 - 2.5 * np.log10(coadd['FIBERFLUX_R'][gal_cut]))
        z_rrs.append(zbest['Z'][gal_cut])
        zwarns.append(zbest['ZWARN'][gal_cut])
        zcoeffs.append(zbest['COEFF'][gal_cut])
        chi2s.append(zbest['CHI2'][gal_cut])
        dchi2s.append(zbest['DELTACHI2'][gal_cut])
    ids = np.concatenate(ids)
    # match VI targets to coadd targets
    _, m_vi, _m_coadd = np.intersect1d(vi_id, ids, return_indices=True)
    isort = np.argsort(m_vi)
    m_coadd = _m_coadd[isort]
    assert np.array_equal(m_vi[isort], np.arange(len(vi_id)))
    print(' %i of %i BGS targets have matches to VI' % (len(m_vi), n_bgs))
    assert np.array_equal(vi_id, ids[m_coadd])
    fluxes = np.concatenate(fluxes, axis=0)[m_coadd]
    rmags = np.concatenate(rmags)[m_coadd]
    rfibs = np.concatenate(rfibs)[m_coadd]
    z_rrs = np.concatenate(z_rrs)[m_coadd]
    zwarns = np.concatenate(zwarns)[m_coadd]
    zcoeffs = np.concatenate(zcoeffs, axis=0)[m_coadd]
    chi2s = np.concatenate(chi2s)[m_coadd]
    dchi2s = np.concatenate(dchi2s)[m_coadd]
    # false positives: redrock passes its own cuts (ZWARN=0, contested delta
    # chi^2 window, r < 20) but disagrees with the VI redshift
    dz_1pz = np.abs(ztrue - z_rrs)/(1.+ztrue)
    false_positive = (dz_1pz >= 0.003) & (zwarns == 0) & (dchi2s > 40.) & (dchi2s < 100.) & (rmags < 20.)
    n_fp = np.sum(false_positive)
    n_pass = np.sum((zwarns == 0) & (dchi2s > 40.) & (dchi2s < 100.) & (rmags < 20.))
    print(' %i of %i r < 20. galaxies with ZWARN=0 and 40 < delta chi^2 < 100 are false positives' % (np.sum(false_positive), n_pass))
    if n_fp > 0:
        # plot the first false-positive spectrum against its (wrong) best-fit template
        bestfit_template = galaxy_templates.eval(zcoeffs[false_positive,:][0], coadd_wave, z_rrs[false_positive][0]) * (1+z_rrs[false_positive][0])
        fig = plt.figure(figsize=(15,5))
        sub = fig.add_subplot(111)
        sub.plot(coadd_wave, fluxes[false_positive,:][0])
        sub.plot(coadd_wave, bestfit_template, c='k', ls=':')
        sub.text(0.95, 0.95, r'$r=%.2f$' % rmags[false_positive][0], transform=sub.transAxes, ha='right', va='top', fontsize=20)
        sub.set_xlim(coadd_wave.min(), coadd_wave.max())
        sub.set_ylim(-1., 25)
# -
# Out of 170 spectra, 4 false positives
| notebook/cmx/cmx_dchi2_round2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: torch1.7(py3.8.5)
# language: python
# name: torch1.7
# ---
# +
# Train the MVSA text-CNN (text-only: images disabled below) with a fixed seed,
# monitoring losses/accuracy in a live Visdom dashboard.
import sys
sys.path.append("/home/ly/workspace/mmsa")
seed = 1000003
import numpy as np
import torch
from torch import nn
from torch import optim
# seed every RNG for reproducibility (CPU, CUDA, numpy)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
from models.mvsa_text_cnn import *
from utils.train import *
from typing import *
from collections import Counter
from utils.load_mvsa import *
from utils.dataset import *
# NOTE(review): duplicate of the `utils.train` import above
from utils.train import *
config
# -
# text-only run: drop the image branch
config["use_img"] = False
config
# +
# %%time
train_set, valid_set, test_set= load_glove_data(config)
batch_size = 64
workers = 4
train_loader, valid_loader, test_loader = get_loader(batch_size, workers, get_collate_fn(config), train_set, valid_set, test_set)
model = Model(config).cuda()
loss = nn.CrossEntropyLoss()
print(get_parameter_number(model), loss)
_interval = 5  # batches between loss-curve updates
lr = 1e-3
epoches = 50
stoping_step = 10  # early-stopping patience (epochs without improvement)
optimizer = get_regal_optimizer(model, optim.AdamW, lr)
# Visdom dashboard: batch loss, epoch train/valid loss, train/valid accuracy
viz = get_Visdom()
batch_loss_drawer = VisdomScalar(viz, f"batch_loss interval:{_interval}")
epoch_loss_drawer = VisdomScalar(viz, f"Train and valid loss", 2)
acc_drawer = VisdomScalar(viz, "Train and valid accuracy", 2)
text_writer = VisdomTextWriter(viz, "Training")
# histories filled in-place by train_visdom_v2
batch_loss = []
train_loss = []
valid_loss = []
train_acc = []
valid_acc = []
res, model = train_visdom_v2(model, optimizer, loss, viz, train_loader,
                             valid_loader, epoches, batch_loss, batch_loss_drawer,
                             train_loss, valid_loss, epoch_loss_drawer,
                             train_acc, valid_acc, acc_drawer, text_writer,
                             _interval=_interval, early_stop=stoping_step)
# -
# final evaluation on the held-out test set
eval_model(model, test_loader, loss)
# 
| new/mvsa-text-cnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # O que faremos aqui
# Neste notebook, iremos preparar as predições para serem submetidas ao site da competição. Lembrando que a estratégia que nós usamos foi escolher, para cada tarefa (campo a ser extraído) o modelo com melhor valor de word f1 nos dados de validação. Os melhores modelos escolhidos foram então treinados utilizando todos dados rotulados disponíveis (dados de treino e validação), com o número de epochs atingidos no checkpoint.
# Nos arquivos de submissão na competição, a resposta de cada campo será dada pelo modelo escolhido na fase anterior. Na tabela abaixo temos os modelos escolhidos para cada tarefa:
#
# | Task | Initial finetuning experiment id / name | Final finetuning experiment id / name|
# | :-: | :-: | :-: |
# | extract_company | [FIN-27 / t5-base_all_tasks_concat_newlines_as_spaces](https://ui.neptune.ai/marcospiau/final-project-ia376j-1/e/FIN-27/details) | [FIN-51 / FIN-27-t5-base_all_tasks_concat_newlines_as_spaces](https://ui.neptune.ai/marcospiau/final-project-ia376j-1/e/FIN-51/details)
# | extract_total | [FIN-38 / t5-base_extract_total_newlines_as_spaces](https://ui.neptune.ai/marcospiau/final-project-ia376j-1/e/FIN-38/details) | [FIN-53 / all_labelled_data-FIN-38-t5-base_extract_total_newlines_as_spaces](https://ui.neptune.ai/marcospiau/final-project-ia376j-1/e/FIN-53/details)
# | extract_address | [FIN-41 / t5-base_extract_address_newlines_as_pipes](https://ui.neptune.ai/marcospiau/final-project-ia376j-1/e/FIN-41/details) | [FIN-50 / t5-all_labelled_data-FIN-41-t5-base_extract_address_newlines_as_pipes](https://ui.neptune.ai/marcospiau/final-project-ia376j-1/e/FIN-50/details)
# | extract_date | [FIN-28 / t5-base_all_tasks_concat_newlines_as_spaces](https://ui.neptune.ai/marcospiau/final-project-ia376j-1/e/FIN-28/details) | [FIN-52 / all_labelled_data-FIN-28-t5-base_all_tasks_concat_newlines_as_spaces](https://ui.neptune.ai/marcospiau/final-project-ia376j-1/e/FIN-52/details)
# **extract_company**
#
# [FIN-27 / t5-base_all_tasks_concat_newlines_as_spaces](https://ui.neptune.ai/marcospiau/final-project-ia376j-1/e/FIN-27/details)
#
# [FIN-51 / FIN-27-t5-base_all_tasks_concat_newlines_as_spaces](https://ui.neptune.ai/marcospiau/final-project-ia376j-1/e/FIN-51/details)
#
# **extract_total**
#
# [FIN-38 / t5-base_extract_total_newlines_as_spaces](https://ui.neptune.ai/marcospiau/final-project-ia376j-1/e/FIN-38/details)
#
# [FIN-53 / all_labelled_data-FIN-38-t5-base_extract_total_newlines_as_spaces](https://ui.neptune.ai/marcospiau/final-project-ia376j-1/e/FIN-53/details)
#
# **extract_address**
#
# [FIN-41 / t5-base_extract_address_newlines_as_pipes](https://ui.neptune.ai/marcospiau/final-project-ia376j-1/e/FIN-41/details)
#
# [FIN-50 / t5-all_labelled_data-FIN-41-t5-base_extract_address_newlines_as_pipes](https://ui.neptune.ai/marcospiau/final-project-ia376j-1/e/FIN-50/details)
#
# **extract_date**
#
# [FIN-28 / t5-base_all_tasks_concat_newlines_as_spaces](https://ui.neptune.ai/marcospiau/final-project-ia376j-1/e/FIN-28/details)
#
# [FIN-52 / all_labelled_data-FIN-28-t5-base_all_tasks_concat_newlines_as_spaces](https://ui.neptune.ai/marcospiau/final-project-ia376j-1/e/FIN-52/details)
import os
from src.data.sroie import get_all_keynames_from_dir, load_labels
from src.evaluation.sroie_eval_utils import extract_base_keyname
from src.evaluation.sroie_eval_utils import save_predictions_in_dir
# +
# Per-field model selection: for each SROIE field, the Neptune experiment id of
# the best initial finetuning run (chosen by validation word-F1)...
map_field_to_initial_exp = {
    'company': 'FIN-27',
    'total': 'FIN-38',
    'address': 'FIN-41',
    'date': 'FIN-28'
}
# ...and the corresponding final run retrained on all labelled data.
map_initial_exp_to_final_exp = {
    'FIN-27': 'FIN-51',
    'FIN-38': 'FIN-53',
    'FIN-41': 'FIN-50',
    'FIN-28': 'FIN-52'
}
# compose the two maps: field -> final experiment id
map_field_to_final_exp = {
    k: map_initial_exp_to_final_exp[v]
    for k,v in map_field_to_initial_exp.items()
}
map_field_to_final_exp
# +
predictions_basedir = "/home/marcospiau/final_project_ia376j/data/sroie_receipt_dataset/predictions"

def get_keynames_from_experiment(
        experiment_id,
        partition,
        predictions_basedir=predictions_basedir
):
    """Return all prediction keyname files under <basedir>/<experiment>/<partition>."""
    experiment_dir = os.path.join(predictions_basedir, experiment_id, partition)
    return get_all_keynames_from_dir(experiment_dir)
def get_labels_dict(keynames):
    """Load labels (or predictions) for each file in *keynames*.

    Returns a dict mapping the base keyname to its loaded labels.
    """
    labels = {}
    for keyname in keynames:
        labels[extract_base_keyname(keyname)] = load_labels(keyname)
    return labels
def get_predictions_for_experiment(exp_id, field):
    """Load experiment *exp_id*'s predictions, keeping only *field* per keyname."""
    predictions = get_labels_dict(get_keynames_from_experiment(exp_id, ''))
    return {
        keyname: {field: preds[field]}
        for keyname, preds in predictions.items()
    }
def merge_predictions_for_all_experiments(map_field_to_exp):
    """Merge per-field predictions from several experiments into one dict.

    *map_field_to_exp* maps field name -> experiment id. Each experiment
    contributes its field's prediction for every keyname; all experiments
    must cover exactly the same set of keynames.

    Returns {keyname: {field: prediction, ...}}.
    """
    per_field_preds = [
        get_predictions_for_experiment(exp_id, field)
        for field, exp_id in map_field_to_exp.items()
    ]
    # keynames must be the same in all experiments
    key_sets = [set(preds.keys()) for preds in per_field_preds]
    keynames = key_sets[0]
    assert all(keynames == other for other in key_sets[1:])
    merged = {keyname: {} for keyname in keynames}
    for preds in per_field_preds:
        for keyname in keynames:
            merged[keyname].update(preds[keyname])
    return merged
# + jupyter={"outputs_hidden": true}
# merge the per-field winners into one prediction dict per receipt
final_test_predictions = merge_predictions_for_all_experiments(
    map_field_to_final_exp
)
final_test_predictions
# -
# write one file per receipt in the competition submission format
submissions_path = "/home/marcospiau/final_project_ia376j/data/sroie_receipt_dataset/submissions/v1_checkpoint_selection_by_task"
save_predictions_in_dir(final_test_predictions, submissions_path)
# Not recorded here, but as an extra test I will also try a submission using the single best overall model ([FIN-28 / t5-base_all_tasks_concat_newlines_as_spaces](https://ui.neptune.ai/marcospiau/final-project-ia376j-1/e/FIN-28/details) | [FIN-52 / all_labelled_data-FIN-28-t5-base_all_tasks_concat_newlines_as_spaces](https://ui.neptune.ai/marcospiau/final-project-ia376j-1/e/FIN-52/details)), which also happens to be the best model for the `date` field.
os.listdir(f'{submissions_path}/..')
| notebooks/sroie_t5_ocr_baseline_prepare_competition_submission.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from os import path
from CSVUtils import *
import ta
import matplotlib.pyplot as plt
import seaborn as sn
import calendar
from pprint import pprint
# +
# Load the three index price histories from Yahoo CSV exports.
DIR = "./input/yahoo"
nameList = ["^GSPC", "^GDAXI", "^HSI"]
df_list = []
# backtest window
startDate = pd.to_datetime("2015-01-01")
endDate = pd.to_datetime("2019-12-31")
for name in nameList:
    # BUG FIX: previously read nameList[2] ("^HSI") on every iteration, so the
    # same file was loaded three times; load the current `name` instead.
    df = csv2df(csv_path=DIR, csv_name=name+".csv", source="yahoo")
    df = df.dropna()
    df_list.append(df)
# +
# Keep only trading dates common to all three indices, within the backtest window.
intersect_dates = df_list[0]['Date']
for df in df_list[1:]:
    df.dropna(inplace = True)
    intersect_dates = np.intersect1d(intersect_dates, df['Date'])
# Remove all NAN in the df
for i, df in enumerate(df_list):
    # NOTE(review): dropna here is redundant -- already applied above and at load time
    df.dropna(inplace = True)
    df = df[df['Date'].isin(intersect_dates)].reset_index(drop=True)
    df = df[(df['Date'] >= startDate) & (df['Date'] <= endDate)]
    df_list[i] = df.reset_index(drop=True)
# -
# Calendar dates (month-day) at which each rebalancing scheme trades.
annual_rebalance_date = ["12-31"]
semiAnnual_rebalance_date = ["06-30", "12-31"]
quarter_rebalance_date = ["03-31", "06-30", "09-30", "12-31"]
monthly_rebalance_date = ["01-31", "02-28", "03-31", "04-30", "05-31", "06-30", "07-31", "08-31", "09-30", "10-31", "11-30", "12-31"]
# +
# Initial equal-weight portfolio: the starting capital is split evenly across
# the three indices plus a cash position (price fixed at 1).
init_net_worth = 100000
avg_net_worth = init_net_worth / (len(df_list)+1)
asset_list = nameList+['CASH']
# first available price of each asset; assumes csv2df exposes a 'Price' column
init_price_list = [df.Price[0] for df in df_list] + [1]
init_num_list = [avg_net_worth / price for price in init_price_list]
status_list = []
for i, asset in enumerate(asset_list):
    status = {
        'Asset': asset_list[i],
        'Number': init_num_list[i],
        'Price': init_price_list[i],
        'Net Value': avg_net_worth,
        'Delta Number': init_num_list[i],
    }
    status_list.append(status)
# record the initial holdings as the first backtest entry
final_result = []
final_result.append({
    'Date': df_list[0]['Date'][0],
    'Record': status_list
})
# -
def nearest(items, pivot):
return min(items, key=lambda x: abs(x - pivot))
# Backtest each rebalancing frequency: at every scheduled date, re-split the
# current net worth equally across the four assets and record the trades.
start_year = 2015
final_year = 2019
dateList_name = ['annual_rebalance', 'semiAnnual_rebalance', 'quarter_rebalance', 'monthly_rebalance']
for j, balance_freq_list in enumerate(
    [annual_rebalance_date, semiAnnual_rebalance_date, quarter_rebalance_date, monthly_rebalance_date]):
    # NOTE(review): final_result is re-seeded with status_list/init values
    # defined in the previous cell; each frequency starts from the same portfolio
    final_result = []
    final_result.append({
        'Date': df_list[0]['Date'][0],
        'Record': status_list
    })
    prev_num_list = init_num_list
    net_value_list = []
    for year in range(start_year, final_year+1):
        for date_str in balance_freq_list:
            rebalance_date = pd.to_datetime(str(year)+'-'+date_str)
            price_list = []
            # first trading date on/after the scheduled rebalance date
            nearest_date_after = nearest(df_list[0]['Date'][df_list[0]['Date']>=rebalance_date], rebalance_date)
            for df in df_list:
                price_list.append(df.Price[df.Date==nearest_date_after].values[0])
            price_list.append(1)  # cash price is constant
            # mark-to-market, then re-split equally across all assets
            current_net_worth = sum(np.array(prev_num_list)*np.array(price_list))
            current_num_list = (current_net_worth/len(asset_list))/np.array(price_list)
            status_list = []
            for i, asset in enumerate(asset_list):
                status = {
                    'Asset': asset_list[i],
                    'Number': current_num_list[i],
                    'Price': price_list[i],
                    'Net Value': current_num_list[i]*price_list[i],
                    # units bought (+) or sold (-) at this rebalance
                    'Delta Number': current_num_list[i]-prev_num_list[i],
                }
                status_list.append(status)
            prev_num_list = current_num_list
            final_result.append({
                'Date': nearest_date_after,
                'Record': status_list
            })
            net_value_list.append({
                'Date': nearest_date_after,
                'Net Value': current_net_worth
            })
    # equity curve for this rebalancing frequency
    pd.DataFrame(net_value_list).plot('Date', 'Net Value', title=dateList_name[j])
(final_result[0]['Record'][0]['Asset'], final_result[0]['Record'][0]['Net Value'])
# tabulate per-asset net values over time (for the last frequency run)
plot_dict_list = []
for record in final_result:
    plot_dict_list.append({
        'Date': record['Date'],
        'Asset Net Value': [tmp['Net Value'] for tmp in record['Record']],
    })
pd.DataFrame(plot_dict_list)
plot_dict_list
final_result
| 0225 - BuyAndHold with Balancing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Install package
# %pip install --upgrade git+git://github.com/rahulspsec/portfoliotools.git
# +
# Mutual-fund screening demo: search schemes, show details, plot historical NAV.
from portfoliotools.screener.mutual_fund_screener import MutualFundScreener
import pandas as pd
import warnings
import seaborn as sns
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
import plotly.graph_objects as go
from plotly.subplots import make_subplots
warnings.filterwarnings("ignore")
pd.options.display.max_columns = None
pd.options.display.max_rows = None
register_matplotlib_converters()
# %matplotlib inline
sns.set()
obj = MutualFundScreener()
# -
# ### Search Funds
# NOTE(review): the keyword-based search result is immediately overwritten by
# the ISIN-based search on the next line
schemes = obj.searchSchemes(includes = ('ICICI', 'GROWTH', 'DIRECT', 'Blue'), excludes = ('Dividend', 'Sensex', 'Debt'))
schemes = obj.searchSchemes(isin_list=['inf846k01ew2', 'INF843K01IZ3'])
schemes
# ### Fund Details
obj.get_scheme_details(codes=schemes.index.tolist())
# +
### Historical NAV
# one NAV time series per matched scheme, plotted on a shared axis
nav = obj.get_historical_nav(codes = schemes.index.tolist())
fig = make_subplots(rows = 1, cols = 1)
for col in nav.columns.tolist():
    fig.add_trace(go.Scatter(x=nav.index, y=nav[col], name=col),row = 1, col = 1)
fig.show()
# -
| jupyter notebooks/User Guide/Mutual Fund Reports.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="n_5oRe0SXilM"
# # Data Science Fundamentals 5
#
# Basic introduction on how to perform typical machine learning tasks with Python.
#
# Prepared by <NAME> & <NAME>,
# Science IT Support, University Of Bern, 2020
#
# This work is licensed under <a href="https://creativecommons.org/share-your-work/public-domain/cc0/">CC0</a>.
#
# # Solutions to Part 1.
# + colab_type="code" id="hVJn0ilgOS8F" colab={}
from sklearn import linear_model
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from sklearn import metrics
from matplotlib import pyplot as plt
import numpy as np
import os
from imageio import imread
import pandas as pd
from time import time as timer
import tensorflow as tf
# %matplotlib inline
from matplotlib import animation
from IPython.display import HTML
# + colab_type="code" id="8Y7aMevU3Ug8" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="eb38f6f4-848a-4c90-8c55-4b8b483efeb8"
# Download and unpack the course material archive on first run only.
if not os.path.exists('data'):
    path = os.path.abspath('.')+'/colab_material.tgz'
    tf.keras.utils.get_file(path, 'https://github.com/neworldemancer/DSF5/raw/master/colab_material.tgz')
    # !tar -xvzf colab_material.tgz > /dev/null 2>&1
# + [markdown] colab_type="text" id="pclZR6uFklf_"
# # Datasets
# + [markdown] colab_type="text" id="s_wxOrdWko8W"
# In this course we will use several synthetic and real-world datasets to ilustrate the behavior of the models and excercise our skills.
# + [markdown] colab_type="text" id="8UQgU5I-lEll"
# ## 1. Synthetic linear
# + colab_type="code" id="jGfWOWRjlWPa" colab={}
def get_linear(n_d=1, n_points=10, w=None, b=None, sigma=5):
    """Generate a synthetic linear dataset y = x.w + b + N(0, sigma).

    :param n_d: number of input dimensions
    :param n_points: number of samples
    :param w: weight vector of length n_d; drawn uniformly in [0.1, 10) if None
    :param b: bias; drawn uniformly in [-10, 10) if None
    :param sigma: standard deviation of the additive Gaussian noise
    :return: (x, y) with x of shape (n_points, n_d) and y of shape (n_points,)
    """
    x = np.random.uniform(0, 10, size=(n_points, n_d))
    # BUG FIX: the previous `w = w or ...` / `b = b or ...` raised
    # "truth value of an array is ambiguous" for array-valued w, and silently
    # replaced an intentional w=0 / b=0 with random values. Test for None.
    if w is None:
        w = np.random.uniform(0.1, 10, n_d)
    if b is None:
        b = np.random.uniform(-10, 10)
    y = np.dot(x, w) + b + np.random.normal(0, sigma, size=n_points)
    print('true w =', w, '; b =', b)
    return x, y
# + colab_type="code" id="5RLYxGy_nBZG" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="9726f29e-d20d-4f8f-e361-172fa7897464"
# noiseless 1-D example: points fall exactly on a line
x, y = get_linear(n_d=1, sigma=0)
plt.plot(x[:, 0], y, '*')
# + colab_type="code" id="10ODDOp4nX4S" colab={"base_uri": "https://localhost:8080/", "height": 501} outputId="1440792b-9b45-4917-ac20-9153ab8085d0"
# noisy 2-D example: samples scatter around a plane
n_d = 2
x, y = get_linear(n_d=n_d, n_points=100)
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x[:,0], x[:,1], y, marker='x', color='b',s=40)
# + [markdown] colab_type="text" id="FJ5rjq7fIe8Q"
# ## 2. House prices
# + [markdown] colab_type="text" id="A-45usskInlD"
# Subset of the house prices kaggle dataset: https://www.kaggle.com/c/house-prices-advanced-regression-techniques
# + colab_type="code" id="dVv2ID96IyN0" colab={}
def house_prices_dataset(return_df=False, price_max=400000, area_max=40000):
    """Load and clean the Ames housing dataset.

    Keeps a fixed subset of columns, maps ordinal categorical fields to
    integer codes, and filters out extreme prices / lot areas.

    Args:
        return_df: if True, also return the cleaned DataFrame.
        price_max: drop rows with SalePrice >= this value (0 disables).
        area_max: drop rows with LotArea >= this value (0 disables).

    Returns:
        (x, y) feature matrix and target vector as float32 arrays, or
        (x, y, df) when return_df is True. Note df is the frame *before*
        the price/area filtering, which is applied to x and y only.
    """
    path = 'data/AmesHousing.csv'
    # keep_default_na=False so the string 'NA' survives as a category (it
    # is mapped to an integer code below); only 'NaN' and '' are missing.
    df = pd.read_csv(path, na_values=('NaN', ''), keep_default_na=False)
    # Normalize column names: drop spaces and slashes.
    rename_dict = {k:k.replace(' ', '').replace('/', '') for k in df.keys()}
    df.rename(columns=rename_dict, inplace=True)
    # Columns used as regression features.
    useful_fields = ['LotArea',
                     'Utilities', 'OverallQual', 'OverallCond',
                     'YearBuilt', 'YearRemodAdd', 'ExterQual', 'ExterCond',
                     'HeatingQC', 'CentralAir', 'Electrical',
                     '1stFlrSF', '2ndFlrSF','GrLivArea',
                     'FullBath', 'HalfBath',
                     'BedroomAbvGr', 'KitchenAbvGr', 'KitchenQual', 'TotRmsAbvGrd',
                     'Functional','PoolArea',
                     'YrSold', 'MoSold'
                     ]
    target_field = 'SalePrice'
    # Drop rows with missing values in any used column.
    df.dropna(axis=0, subset=useful_fields+[target_field], inplace=True)
    # Ordinal encodings: quality/condition scales mapped to integers in
    # increasing order of quality.
    cleanup_nums = {'Street': {'Grvl': 0, 'Pave': 1},
                    'LotFrontage': {'NA':0},
                    'Alley': {'NA':0, 'Grvl': 1, 'Pave': 2},
                    'LotShape': {'IR3':0, 'IR2': 1, 'IR1': 2, 'Reg':3},
                    'Utilities': {'ELO':0, 'NoSeWa': 1, 'NoSewr': 2, 'AllPub': 3},
                    'LandSlope': {'Sev':0, 'Mod': 1, 'Gtl': 3},
                    'ExterQual': {'Po':0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex':4},
                    'ExterCond': {'Po':0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex':4},
                    'BsmtQual': {'NA':0, 'Po':1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex':5},
                    'BsmtCond': {'NA':0, 'Po':1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex':5},
                    'BsmtExposure':{'NA':0, 'No':1, 'Mn': 2, 'Av': 3, 'Gd': 4},
                    'BsmtFinType1':{'NA':0, 'Unf':1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ':5, 'GLQ':6},
                    'BsmtFinType2':{'NA':0, 'Unf':1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ':5, 'GLQ':6},
                    'HeatingQC': {'Po':0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex':4},
                    'CentralAir': {'N':0, 'Y': 1},
                    'Electrical': {'':0, 'NA':0, 'Mix':1, 'FuseP':2, 'FuseF': 3, 'FuseA': 4, 'SBrkr': 5},
                    'KitchenQual': {'Po':0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex':4},
                    'Functional': {'Sal':0, 'Sev':1, 'Maj2': 2, 'Maj1': 3, 'Mod': 4, 'Min2':5, 'Min1':6, 'Typ':7},
                    'FireplaceQu': {'NA':0, 'Po':1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex':5},
                    'PoolQC': {'NA':0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex':4},
                    'Fence': {'NA':0, 'MnWw': 1, 'GdWo': 2, 'MnPrv': 3, 'GdPrv':4},
                    }
    df_X = df[useful_fields].copy()
    df_X.replace(cleanup_nums, inplace=True)  # convert continous categorial variables to numerical
    df_Y = df[target_field].copy()
    x = df_X.to_numpy().astype(np.float32)
    y = df_Y.to_numpy().astype(np.float32)
    # Optionally drop price / lot-area outliers (0 disables each filter).
    if price_max>0:
        idxs = y<price_max
        x = x[idxs]
        y = y[idxs]
    if area_max>0:
        idxs = x[:,0]<area_max
        x = x[idxs]
        y = y[idxs]
    return (x, y, df) if return_df else (x,y)
# + colab_type="code" id="uUuJQDt9KwCs" colab={"base_uri": "https://localhost:8080/", "height": 268} outputId="6f8ef5d9-98ef-4b55-cb58-15bcee7fc244"
# Load features, target, and the cleaned DataFrame for inspection.
x, y, df = house_prices_dataset(return_df=True)
print(x.shape, y.shape)
df.head()
# + colab_type="code" id="91nj7znzMEpA" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="cd7441ea-495f-409a-9b01-560c28524bc3"
# Sale price vs. lot area (the first feature column).
plt.plot(x[:, 0], y, '.')
plt.xlabel('area, sq.ft')
plt.ylabel('price, $');
# + [markdown] colab_type="text" id="q7CNxkPdNB4L"
# ## 3. Blobs
# + colab_type="code" id="j8wXhleONKgZ" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="11d02762-6c76-458d-a0f3-faa4c1b40096"
# Three Gaussian blobs used for the classification demos.
x, y = make_blobs(n_samples=1000, centers=[[0,0], [5,5], [10, 0]])
colors = "bry"
# Plot each class in its own color (blue / red / yellow).
for i, color in enumerate(colors):
    idx = y == i
    plt.scatter(x[idx, 0], x[idx, 1], c=color, edgecolor='gray', s=25)
# + [markdown] colab_type="text" id="ITfbaOgfYNsq"
# ## 4. Fashion MNIST
# + [markdown] colab_type="text" id="jgzzOS7YYTru"
# `Fashion-MNIST` is a dataset of Zalando's article images—consisting of a training set of 60,000 examples and a test set of 10,000 examples. Each example is a 28x28 grayscale image, associated with a label from 10 classes. (from https://github.com/zalandoresearch/fashion-mnist)
# + colab_type="code" id="RcV2gzmuYljJ" colab={"base_uri": "https://localhost:8080/", "height": 155} outputId="c58d826b-ec5b-41c6-9899-44df28ff5147"
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# + [markdown] colab_type="text" id="SPw6-GoPbT6U"
# Let's check a few samples:
# + colab_type="code" id="tHFd0sFHY4Li" colab={"base_uri": "https://localhost:8080/", "height": 462} outputId="655076d3-5dd2-4c14-8846-386e384532f9"
# Show an n x n grid of randomly chosen training images with their labels.
n = 3
fig, ax = plt.subplots(n, n, figsize=(2*n, 2*n))
# Flatten the 2-D array of axes into a single list.
ax = [ax_xy for ax_y in ax for ax_xy in ax_y]
for axi, im_idx in zip(ax, np.random.choice(len(train_images), n**2)):
    im = train_images[im_idx]
    im_class = train_labels[im_idx]
    axi.imshow(im, cmap='gray')
    # Overlay the numeric class label in red.
    axi.text(1, 4, f'{im_class}', color='r', size=16)
# tight_layout's positional (pad, h_pad, w_pad) arguments were removed in
# Matplotlib 3.6; pass them by keyword instead.
plt.tight_layout(pad=0, h_pad=0, w_pad=0)
# + [markdown] colab_type="text" id="iHEA0tCLagoV"
# Each training and test example is assigned to one of the following labels:
#
# | Label | Description |
# | --- | --- |
# | 0 | T-shirt/top |
# | 1 | Trouser |
# | 2 | Pullover |
# | 3 | Dress |
# | 4 | Coat |
# | 5 | Sandal |
# | 6 | Shirt |
# | 7 | Sneaker |
# | 8 | Bag |
# | 9 | Ankle boot |
# + [markdown] colab_type="text" id="zI6s2Amob48j"
# # EXERCISE 1.
# + colab_type="code" id="LPur5e4E7pZs" colab={"base_uri": "https://localhost:8080/", "height": 400} outputId="1de1c311-4ab4-4c76-cc61-2b15a8645811"
# Solution:
x, y = house_prices_dataset()
# 1. make train/test split (80% train / 20% test)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
# 2. fit the model
reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)
# 3. evaluate MSE, MAE, and R2 on train and test datasets
# prediction:
y_p_train = reg.predict(x_train)
y_p_test = reg.predict(x_test)
# MSE: mean of squared residuals. (The previous np.std(...) reported the
# residual standard deviation, not the mean squared error.)
print('train mse =', np.mean((y_train - y_p_train)**2))
print('test mse =', np.mean((y_test - y_p_test)**2))
# MAE: mean of absolute residuals
print('train mae =', np.mean(np.abs(y_train - y_p_train)))
print('test mae =', np.mean(np.abs(y_test - y_p_test)))
# R2
print('train R2 =', reg.score(x_train, y_train))
print('test R2 =', reg.score(x_test, y_test))
# 4. plot y vs predicted y for test and train parts
plt.plot(y_train, y_p_train, 'b.', label='train')
plt.plot(y_test, y_p_test, 'r.', label='test')
plt.plot([0], [0], 'w.')  # invisible dummy point so the origin is in view
plt.xlabel('true')
plt.ylabel('predicted')
plt.gca().set_aspect('equal')
plt.legend()
# + [markdown] colab_type="text" id="AQ69XKdbZcA3"
# # EXERCISE 2.
# + colab_type="code" id="__9jcqXzZaQp" colab={}
# Load Fashion-MNIST (60k train / 10k test, 28x28 grayscale).
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# + [markdown] colab_type="text" id="304Ul40adUT2"
# We will reshape 2-d images to 1-d arrays for use in scikit-learn:
# + colab_type="code" id="DtD8C8_4a7dP" colab={}
# Flatten each 28x28 image into a 784-dimensional feature vector.
n_train = len(train_labels)
x_train = train_images.reshape((n_train, -1))
y_train = train_labels
n_test = len(test_labels)
x_test = test_images.reshape((n_test, -1))
y_test = test_labels
# + [markdown] colab_type="text" id="UJj7ofWD_Wp2"
# Now use a multinomial logistic regression classifier, and measure the accuracy:
# + colab_type="code" id="8-zcz6jfbj3n" colab={"base_uri": "https://localhost:8080/", "height": 103} outputId="4f70478b-883d-47c2-f7f3-d134a9ec0abe"
#solution
# 1. Create classifier
multi_class = 'multinomial'
# NOTE(review): max_iter=20 is small and 'sag' may not fully converge;
# kept as-is since this demo trades accuracy for training time.
clf = linear_model.LogisticRegression(solver='sag', max_iter=20,
                                      multi_class=multi_class)
# 2. fit the model (timed)
t1 = timer()
clf.fit(x_train, y_train)
t2 = timer()
print ('training time: %.1fs'%(t2-t1))
# 3. evaluate accuracy on train and test datasets
print("training score : %.3f" % (clf.score(x_train, y_train)))
print("test score : %.3f" % (clf.score(x_test, y_test)))
| Solutions_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Accuracy: Pitfalls and Edge Cases
#
# This notebook describes OpenDP's accuracy calculations, and ways in which an analyst might be tripped up by them.
#
# ### Overview
#
# #### Accuracy vs. Confidence Intervals
#
# Each privatizing mechanism (e.g. Laplace, Gaussian) in OpenDP has an associated accuracy that is a function of alpha and
# either sigma or epsilon. Imagine you have data $D$, and you want, for some function $\phi$ to return $\phi(D)$ in a
# differentially private way -- we will call this value $\phi_{dp}(D)$. An $\alpha$-level accuracy guarantee $a$ promises
# that, over infinite runs of the privatizing mechanism on the data in question,
# $$ \phi(D) \in [\phi_{dp}(D) - a, \phi_{dp}(D) + a] $$
# with probability $1 - \alpha$.
#
# This looks very much like the traditional confidence interval, but it is important to note a major difference. In a
# canonical confidence interval, the uncertainty being represented is due to sampling error -- that is, how often will it
# be the case that $\phi(P)$ (the value of $\phi$ on the underlying population) is within some range of the realized
# $\phi(D)$.
#
# In OpenDP (and differentially private data analysis generally), there is an extra layer of uncertainty due to the noise
# added to $\phi(D)$ to produce $\phi_{dp}(D)$. OpenDP's accuracy metrics deal only with the uncertainty of $\phi_{dp}(D)$
# relative to $\phi(D)$ and not the uncertainty of $\phi(D)$ relative to $\phi(P)$.
#
# #### What is $D$?
#
# OpenDP allows for analysis of data with an unknown number of rows by resizing the data to ensure consistency with an
# estimated size
# (see the [unknown dataset size notebook](https://github.com/opendp/opendp/blob/main/python/example/unknown_dataset_size.ipynb)
# for more details). Accuracy guarantees are always relative to the preprocessed data $\tilde{D}$ and operations such as
# imputation and clipping are not factored into the accuracy.
#
# #### Synopsis
#
# Let's say an analyst releases $\phi_{dp}(D)$ and gets an accuracy guarantee of $a$ at accuracy-level $\alpha$. $D$
# is a dataset of unknown size drawn from population $P$ and will be resized to $\tilde{D}$. This suggests that over
# infinite runs of this procedure,
#
# - $\phi_{dp}(D) \in [\phi(\tilde{D}) - a, \phi(\tilde{D}) + a]$ with probability $1 - \alpha$
# - It is likely that $\phi_{dp}(D) \in [\phi(D) - a, \phi(D) + a]$ with probability $\approx 1 - \alpha$, though we
# cannot make any guarantee. For many cases (e.g. resizing the data based on $n$ obtained from a differentially private
# count and reasonable bounds on the data elements), this is likely to be approximately true. In the next section, we will
# explore some examples of cases where this statement holds to varying extents.
#
# - We cannot directly make statements about the uncertainty of $\phi_{dp}(D)$ relative to $\phi(P)$.
#
# ### Accuracy Guarantees In Practice
#
# We now move to some empirical evaluations of how well our accuracy guarantees translate from $\phi(\tilde{D})$ to
# $\phi(D)$. We first consider the case where we actually know the size of the underlying data and are able to set
# plausible lower/upper bounds on `age`.
#
# + pycharm={"name": "#%%\n"}
# load libraries
import os
import numpy as np
import pandas as pd
from opendp.accuracy import laplacian_scale_to_accuracy
from opendp.meas import make_base_laplace, AllDomain, InherentNullDomain
from opendp.mod import enable_features, binary_search_param
from opendp.trans import make_split_dataframe, make_clamp, make_bounded_resize, \
make_sized_bounded_mean, make_select_column, make_cast_inherent, make_impute_constant, make_cast
# Opt in to OpenDP's contrib and floating-point feature flags.
enable_features("contrib")
enable_features("floating-point")
# Load the PUMS demographics sample (n = 1000).
data_path = os.path.join('.', 'data', 'PUMS_california_demographics_1000', 'data.csv')
var_names = ["age", "sex", "educ", "race", "income", "married", "pid"]
D = pd.read_csv(data_path, names=var_names)
age = D.age
# True (non-private) mean age on the raw data D.
D_mean_age = np.mean(age)
# This will provide the data that will be passed to the aggregator
with open(data_path, 'r') as infile:
    data = infile.read()
# establish extra information for this simulation
age_bounds = (0., 100.)
n_sims = 1_000
epsilon = 1.
alpha = 0.05
# True mean age after clamping to the declared bounds (i.e. on D-tilde).
D_tilde_mean_age = np.mean(np.clip(D.age, age_bounds[0], age_bounds[1]))
impute_constant = 50.
def make_mean_aggregator(data_size):
    """Build the preprocessing >> mean pipeline for the `age` column.

    The returned transformation parses the raw CSV text, selects `age`,
    casts to float, imputes nulls, clamps to `age_bounds`, resizes the
    data to exactly `data_size` rows, and computes the bounded mean.
    Relies on the module-level `var_names`, `age_bounds`, `impute_constant`.
    """
    return (
        # Convert data into a dataframe of string columns
        make_split_dataframe(separator=",", col_names=var_names) >>
        # Selects a column of df, Vec<str>
        make_select_column(key="age", TOA=str) >>
        # Cast the column as Vec<float>
        make_cast(TIA=str, TOA=float) >>
        # Impute null values
        make_impute_constant(impute_constant) >>
        # Clamp age values
        make_clamp(bounds=age_bounds) >>
        # Resize the dataset to length `data_size`.
        # If there are fewer than `data_size` rows in the data, fill with a constant.
        # If there are more than `data_size` rows in the data, only keep `data_size` rows
        make_bounded_resize(size=data_size, bounds=age_bounds, constant=impute_constant) >>
        make_sized_bounded_mean(size=data_size, bounds=age_bounds)
    )
# + pycharm={"name": "#%%\n"}
# Scenario 1: the analyst knows the true dataset size (n = 1000).
data_size = 1_000
mean_aggregator = make_mean_aggregator(data_size)
# Find a Laplace noise scale that satisfies the epsilon budget (d_in = 1).
scale = binary_search_param(lambda s: mean_aggregator >> make_base_laplace(s), 1, epsilon)
measurement = mean_aggregator >> make_base_laplace(scale)
# Simulate many DP releases to estimate empirical interval coverage.
releases = [measurement(data) for _ in range(n_sims)]
accuracy = laplacian_scale_to_accuracy(scale, alpha)
# Coverage relative to D-tilde (guaranteed ~1 - alpha) vs. raw data D.
print('Accuracy interval (with accuracy value {0}) contains the true mean on D_tilde with probability {1}'.format(
    round(accuracy, 4),
    np.mean([(D_tilde_mean_age >= val - accuracy) & (D_tilde_mean_age <= val + accuracy) for val in releases])))
print('Accuracy interval (with accuracy value {0}) contains the true mean on D with probability {1}'.format(
    round(accuracy, 4),
    np.mean([(D_mean_age >= val - accuracy) & (D_mean_age <= val + accuracy) for val in releases])))
# + [markdown] pycharm={"name": "#%% md\n"}
# This performance is as expected. $D$ and $\tilde{D}$ are actually the exact same data (the maximum age in the raw data
# is 93, so our clamp to $[0, 100]$ does not change any values, and we know the correct $n$), so our theoretical
# guarantees on $\tilde{D}$ map exactly to guarantees on $D$.
#
# We now move to a scenario that is still realistic, but where the performance does not translate quite as well. In this
# case, we imagine that the analyst believes the data to be of size 1050 and uses the default imputation within resize
# so that the extra 50 elements are replaced with a constant.
#
# Note that our diagnostic testing of $\tilde{D}$ in the code above is not trivial in this case. In the first example, we
# knew that clamp/resize did not change the underlying data, so we could predict exactly the data on which the DP mean
# would actually be calculated. This will not be true for the following examples, so we will simulate finding the true
# underlying mean by releasing an extra DP mean with very high epsilon.
# + pycharm={"name": "#%%\n"}
# This estimate is larger than the true size of 1000, so we will impute 50 values using the impute constant
data_size = 1_050
mean_aggregator = make_mean_aggregator(data_size)
# This value contains the true mean of the data after resizing and imputation
D_tilde_mean = mean_aggregator(data)
# Calibrate the Laplace noise scale to the epsilon budget (d_in = 1).
scale = binary_search_param(lambda s: mean_aggregator >> make_base_laplace(s), 1, epsilon)
measurement = mean_aggregator >> make_base_laplace(scale)
# Simulate repeated DP releases to estimate empirical interval coverage.
releases = [measurement(data) for _ in range(n_sims)]
accuracy = laplacian_scale_to_accuracy(scale, alpha)
# Coverage relative to D-tilde (formally guaranteed ~1 - alpha) ...
print('Accuracy interval (with accuracy value {0}) contains the true mean on D_tilde with probability {1}'.format(
    round(accuracy, 4),
    np.mean([(D_tilde_mean >= dp_mean - accuracy) & (D_tilde_mean <= dp_mean + accuracy)
             for dp_mean in releases])))
# ... versus coverage relative to the raw data D (no formal guarantee).
print('Accuracy interval (with accuracy value {0}) contains the true mean on D with probability {1}'.format(
    round(accuracy, 4),
    np.mean([(D_mean_age >= dp_mean - accuracy) & (D_mean_age <= dp_mean + accuracy) for dp_mean in releases])))
# + [markdown] pycharm={"name": "#%% md\n"}
# The accuracy guarantee still holds on $\tilde{D}$ (as it should), but we now see much worse performance relative to the
# true underlying data $D$.
| python/example/accuracy_pitfalls.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from math import pi
from random import uniform
from ipywidgets import Button
from ipycanvas import Canvas, hold_canvas
# -
canvas = Canvas(width=800, height=600)
def recursive_draw_leaf(canvas, length, r_angle, r_factor, l_angle, l_factor):
    """Draw one branch segment, then recurse into right and left sub-branches.

    Each call draws a vertical segment of `length` pixels upward from the
    current origin and moves the origin to the segment's tip. Recursion
    stops once a branch is 5 pixels or shorter.
    """
    canvas.stroke_line(0, 0, 0, -length)
    canvas.translate(0, -length)
    if length <= 5:
        return
    # Right sub-branch first, then left; save/restore isolates each
    # rotation so the siblings share the same starting transform.
    for angle, factor in ((r_angle, r_factor), (l_angle, l_factor)):
        canvas.save()
        canvas.rotate(angle)
        recursive_draw_leaf(
            canvas, length * factor, r_angle, r_factor, l_angle, l_factor
        )
        canvas.restore()
def draw_tree(canvas):
    """Clear the canvas and draw one randomized fractal tree from the bottom center."""
    with hold_canvas(canvas):
        canvas.save()
        canvas.clear()
        # Put the origin at the bottom center so the trunk grows upward.
        canvas.translate(canvas.width / 2.0, canvas.height)
        canvas.stroke_style = "black"
        # Randomize the shrink factors and branching angles for this tree.
        right_factor = uniform(0.6, 0.8)
        left_factor = uniform(0.6, 0.8)
        right_angle = uniform(pi / 10.0, pi / 5.0)
        left_angle = uniform(-pi / 5.0, -pi / 10.0)
        recursive_draw_leaf(canvas, 150, right_angle, right_factor, left_angle, left_factor)
        canvas.restore()
# +
# Button that redraws a new random tree each time it is clicked.
button = Button(description="Generate tree!")
def click_callback(*args, **kwargs):
    # ipywidgets passes the button instance to the callback -- ignore it.
    # `global` is unnecessary for a read-only reference but harmless.
    global canvas
    draw_tree(canvas)
button.on_click(click_callback)
# -
# Initial render: draw one tree and show the canvas and the button.
draw_tree(canvas)
display(canvas)
display(button)
# ## Acknowledgment:
#
# This Notebook is adapted from a Notebook by [<NAME>](https://github.com/ericecmorlaix) which was itself adapted from https://medium.com/better-programming/learning-p5-js-by-making-fractals-cbdcac5c651e
| examples/fractals_tree.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/gmihaila/machine_learning_things/blob/master/notebooks%20/EstimateDuration.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="V8gFFNxISKIb" colab_type="text"
# # EstimateDuration
# + [markdown] id="lEPujXhASbaV" colab_type="text"
# # Imports
# + id="9XsGO6ZXScgc" colab_type="code" colab={}
import time
from datetime import datetime, timedelta
# + [markdown] id="t__KXKfTSeTA" colab_type="text"
# # Code
# + id="JgyBO3heSEGr" colab_type="code" outputId="e115cd00-11ca-4887-9f93-3b06149afc9f" colab={"base_uri": "https://localhost:8080/", "height": 221}
class EstimateDuration(object):
    """Track per-step durations and estimate the remaining run time (ETA).

    Usage: call `start_step()` / `end_step()` around each unit of work,
    then query `elapsed()` and `eta()`.
    """

    def __init__(self, steps):
        # Total number of steps the run will take; must be positive.
        assert steps > 0
        self.steps = steps
        self.start_time = time.time()
        self.time_steps = []        # duration of each completed step (seconds)
        self.current_time_step = 0  # number of completed steps
        self.start_date = datetime.now()

    def start_step(self,):
        """Mark the beginning of a step."""
        self.start_time = time.time()
        return

    def end_step(self,):
        """Mark the end of a step and record its duration."""
        end_time = time.time()
        self.time_steps.append(end_time - self.start_time)
        self.current_time_step += 1
        return

    def elapsed(self,):
        """Return the total recorded step time as a 'd:h:m:s' string."""
        return self.convert_seconds(sum(self.time_steps))

    def convert_seconds(self, seconds):
        """Convert a duration in seconds to a 'd:h:m:s' string."""
        # divmod expresses the cascade of unit conversions directly and
        # produces the same values as the original floor-division chain.
        d, rem = divmod(seconds, 86400)
        h, rem = divmod(rem, 3600)
        m, s = divmod(rem, 60)
        return "%d:%d:%d:%d"%(d,h,m,s)

    def eta(self, return_strings=True):
        """Estimate remaining time from the mean completed-step duration.

        Must be called after at least one completed step (the mean below
        divides by `current_time_step`). Returns (duration, finish_date):
        formatted strings when `return_strings` is true, otherwise
        (seconds as float, datetime).
        """
        length_step = sum(self.time_steps)/self.current_time_step
        estimated_seconds = length_step * (self.steps - self.current_time_step)
        estimated_date = datetime.now() + timedelta(seconds=estimated_seconds)
        if return_strings:
            estimated_time = self.convert_seconds(estimated_seconds)
            return estimated_time, estimated_date.strftime("%d-%b-%Y (%H:%M:%S)")
        else:
            return estimated_seconds, estimated_date
# test
# Simulate 10 steps alternating 3s and 1s of work, printing the ETA after
# each step; the estimate converges to the 2s-per-step average.
tracker = EstimateDuration(10)
print(datetime.now().strftime("%d-%b-%Y (%H:%M:%S)"))
for i in range(10):
    tracker.start_step()
    if i %2 == 0:
        time.sleep(3)
    else:
        time.sleep(1)
    tracker.end_step()
    print('eta',tracker.eta(return_strings=False))
tracker.elapsed(), tracker.eta(), datetime.now().strftime("%d-%b-%Y (%H:%M:%S)")
# + id="I0smj3wsTfBn" colab_type="code" outputId="e327d4f8-e565-452a-bb77-c5006cafeebf" colab={"base_uri": "https://localhost:8080/", "height": 34}
from datetime import datetime, timedelta
x = datetime.now() + timedelta(seconds=3)
x += timedelta(seconds=300)
x
# + id="tZrwoZOxEk68" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2e57dd48-01ad-4934-e261-600d4bb2cecc"
x.strftime("%d-%b-%Y (%H:%M:%S)")
| notebooks/old_notebooks/EstimateDuration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
import keras
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn.metrics import confusion_matrix, classification_report
from keras_vggface.vggface import VGGFace
from keras_vggface import utils
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Loading development set
# +
# Annotation list for the AffectNet validation split.
images = 'Manually_Annotated_file_lists/'
filename = 'validation.csv'
names_list = ['subDirectory_filePath','face_x','face_y','face_width','face_height','facial_landmarks','expression','valence','arousal']
df_valid = pd.read_csv(images + filename, names = names_list)
# Keep only the 7 basic emotion classes (codes 0-6); codes 7-10 are
# presumably non-emotion categories (e.g. none/uncertain) -- confirm
# against the AffectNet label definitions.
df_valid_need = df_valid[~df_valid.expression.isin([7,8,9,10])]
df_valid_need = df_valid_need.reset_index(drop=True)
dict_name_labels = {0: 'Neutral', 1: 'Happiness', 2: 'Sadness', 3: 'Surprise', 4: 'Fear', 5: 'Disgust', 6: 'Anger'}
# Replace the numeric code with a '<code>_<name>' string label (the code
# prefix presumably keeps the class ordering stable in the generator).
labels_valid = ['{}_{}'.format(i,dict_name_labels[i])for i in df_valid_need.expression]
df_valid_need.expression = labels_valid
# +
# Root directory holding the (resized) AffectNet validation images.
images = 'D:/Databases/AffectNet_short_set/'
batch_size = 64
height_width = 224
def preproc(x):
    # VGGFace version-2 preprocessing (see keras_vggface.utils).
    x = utils.preprocess_input(x, version = 2)
    return x
datagen_valid = ImageDataGenerator(preprocessing_function=preproc)
# shuffle=False keeps predictions aligned with validation_generator.labels.
validation_generator = datagen_valid.flow_from_dataframe(df_valid_need,
                                                         directory=images,
                                                         x_col='subDirectory_filePath',
                                                         y_col='expression',
                                                         target_size=(height_width, height_width),
                                                         class_mode="categorical",
                                                         batch_size=batch_size,
                                                         shuffle=False)
# -
# Building the model
def network(model='resnet50', input_shape=(224, 224, 3), path_weight_model = ''):
    """Build the VGGFace-based emotion classifier and load trained weights.

    Args:
        model: VGGFace backbone name (e.g. 'resnet50').
        input_shape: input image shape (H, W, C).
        path_weight_model: path to the .h5 weights file for the full model.

    Returns:
        A tf.keras Model mapping images to 7 softmax emotion outputs.
    """
    # Backbone without its classification head.
    # NOTE(review): pooling=None leaves a 4-D feature map, so the Dense
    # layers below act per spatial position -- confirm this matches the
    # architecture the saved weights were trained with.
    resnet50_features = VGGFace(model=model, include_top=False, input_shape=input_shape, pooling=None)
    # Gaussian noise acts as a regularizer (active only in training mode).
    gaus = tf.keras.layers.GaussianNoise(0.1)(resnet50_features.output)
    x = tf.keras.layers.Dense(units=512, kernel_regularizer=keras.regularizers.l2(1e-4), activation = 'relu')(gaus)
    x = tf.keras.layers.Dropout(0.5)(x)
    # 7-way softmax over the basic emotion classes.
    x = tf.keras.layers.Dense(7, activation = 'softmax')(x)
    model_resnet50_features = tf.keras.models.Model(resnet50_features.input, x)
    model_resnet50_features.load_weights(path_weight_model)
    return model_resnet50_features
# Getting class probabilities and prediction
# Load trained weights and predict class probabilities on the validation set.
model = network(path_weight_model='models/EmoAffectnet/weights_66_37.h5')
val_predict=model.predict(validation_generator, verbose=1)
# Collapse any extra dimensions into (n, 7); a no-op if output is already
# (n, 7) -- presumably guards against a residual spatial dimension.
val_predict = val_predict.reshape(-1,7)
val_predict=np.argmax(val_predict,axis=1)
# Integer ground-truth labels, in the same (unshuffled) order.
val_targ = validation_generator.labels
# Drawing the confusion matrix
def draw_conf_matrix_clas_report(y_true, y_pred, name_labels='', name_model=''):
    """Plot and save a row-normalized confusion-matrix heatmap.

    Each cell is annotated with the raw count and the percentage of its
    true class (rows divided by their row sums). The figure is written to
    'confusion_matrix_<name_model>.png'.
    """
    c_m = confusion_matrix(y_true, y_pred)
    conf_matrix = pd.DataFrame(c_m, name_labels, name_labels)
    plt.figure(figsize = (9,9))
    # Raw counts, flattened in row-major order.
    group_counts = ['{0:0.0f}'.format(value) for value in
                    conf_matrix.values.flatten()]
    # Percentages: each row divided by its row total (div aligns the row
    # sums on the index via axis=0).
    group_percentages = ['{0:.1%}'.format(value) for value in
                         conf_matrix.div(np.sum(conf_matrix, axis=1), axis=0).values.flatten()]
    # Combine "count\npercent" per cell for the heatmap annotations.
    labels = ['{}\n{}'.format(v1,v2) for v1,v2 in zip(group_counts, group_percentages)]
    labels = np.asarray(labels).reshape(c_m.shape)
    sns.set(font_scale=1.8)
    chart = sns.heatmap(conf_matrix,
                        cbar=False ,
                        annot=labels,
                        square=True,
                        fmt='',
                        annot_kws={ 'size': 18},
                        cmap="Blues",
                        )
    chart.set_xticklabels(name_labels)
    chart.set_yticklabels(name_labels, rotation=360, verticalalignment='center')
    plt.savefig('confusion_matrix_{}.png'.format(name_model), bbox_inches='tight', pad_inches=0)
# Short two-letter names for the 7 emotion classes, then plot and report.
name_labels = ['NE', 'HA', 'SA', 'SU', 'FE', 'DI', 'AN']
draw_conf_matrix_clas_report(val_targ, val_predict, name_labels, 'AffectNet_valid_2')
print(classification_report(val_targ, val_predict, target_names=name_labels, digits=3))
| check_valid_set_Affectnet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="QHfJAkdQk3jv"
import os
project_name = "reco-tut-vrr"; branch = "main"; account = "sparsh-ai"
project_path = os.path.join('/content', project_name)
# + colab={"base_uri": "https://localhost:8080/"} id="3APW7y3clBlH" executionInfo={"status": "ok", "timestamp": 1628408434630, "user_tz": -330, "elapsed": 1378, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="17900773-0b16-41fd-c1d3-57d20c5a748b"
if not os.path.exists(project_path):
    # First run in this session: fetch git credentials from Drive, create
    # the project directory, and pull the repo. Lines starting with "# !"
    # or "# %" are jupytext-encoded shell/IPython magics.
    # !cp /content/drive/MyDrive/mykeys.py /content
    import mykeys
    # !rm /content/mykeys.py
    path = "/content/" + project_name;
    # !mkdir "{path}"
    # %cd "{path}"
    import sys; sys.path.append(path)
    # !git config --global user.email "<EMAIL>"
    # !git config --global user.name "reco-tut"
    # !git init
    # !git remote add origin https://"{mykeys.git_token}":x-oauth-basic@github.com/"{account}"/"{project_name}".git
    # !git pull origin "{branch}"
    # !git checkout main
else:
    # Repo already present: just switch into it.
    # %cd "{project_path}"
# + id="yGo_LJd6lBlN"
# !git status
# + colab={"base_uri": "https://localhost:8080/"} id="SWzR_TDJlBlO" executionInfo={"status": "ok", "timestamp": 1628408390337, "user_tz": -330, "elapsed": 2087, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="1f0aa5a6-f8e0-4bd1-8b83-cea3865fa6dc"
# !git add . && git commit -m 'commit' && git push origin "{branch}"
# + [markdown] id="ra084G6QmDcL"
# ---
# + id="Nrb7EyBKmEmk"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import nltk
import re
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics.pairwise import linear_kernel
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from numpy import array
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras import models
from keras.layers.embeddings import Embedding
import warnings
warnings.filterwarnings('ignore')
# + colab={"base_uri": "https://localhost:8080/"} id="ce393Lpposh_" executionInfo={"status": "ok", "timestamp": 1628408439309, "user_tz": -330, "elapsed": 714, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="2db10727-181b-4a94-bc29-e6d5422a74cf"
# Fetch the NLTK corpora needed below (lemmatizer + stopword list).
nltk.download('wordnet')
nltk.download('stopwords')
# + colab={"base_uri": "https://localhost:8080/", "height": 289} id="HF4J7q62mJ7u" executionInfo={"status": "ok", "timestamp": 1628408460344, "user_tz": -330, "elapsed": 832, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="deeca935-07f5-4b92-9835-da83411264d5"
# Load the preprocessed reviews dataset (silver-layer parquet file).
df = pd.read_parquet('./data/silver/reviews.parquet.gzip')
df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="knyLYsCEmT0d" executionInfo={"status": "ok", "timestamp": 1628408464417, "user_tz": -330, "elapsed": 456, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="d600bbe4-a1f2-4401-8f92-ede0b17210db"
df.info()
# + id="UdcHVDdS0BOx"
# Clean each review: strip punctuation, lowercase, lemmatize, drop stopwords.
corpus = []
# Hoist loop invariants: the tokenizer, lemmatizer, and stopword list are
# identical for every review, so build them once. (The PorterStemmer the
# original created per iteration was never used and is removed.)
tokenizer = nltk.tokenize.WhitespaceTokenizer()
lemmatizer = nltk.WordNetLemmatizer()
stop_words = set(stopwords.words("english"))  # set gives O(1) membership tests
for i in range(len(df)):
    # Keep only alphanumeric characters, then lowercase.
    review = re.sub('[^a-zA-Z0-9]', ' ', df['Reviews'][i])
    review = review.lower()
    tokens = [lemmatizer.lemmatize(token) for token in tokenizer.tokenize(review)]
    filtered_words = [w for w in tokens if w not in stop_words]
    corpus.append(' '.join(filtered_words))
# + colab={"base_uri": "https://localhost:8080/"} id="pe1b_dKWop6y" executionInfo={"status": "ok", "timestamp": 1628410285697, "user_tz": -330, "elapsed": 12, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="6dd4296d-c2de-4146-cde5-d4876f6510d1"
print('length of coprus is {} and first item is "{}"'.format(len(corpus), corpus[0]))
# + id="F8vG-YKC0Fal"
#Bag of Words model to convert corpus into X
cv = CountVectorizer()
cv.fit(corpus)
# Sort vocabulary terms alphabetically for the column names; this matches
# the transform's column order since CountVectorizer assigns feature
# indices to sorted terms -- confirm if upgrading scikit-learn.
key = list(cv.vocabulary_.keys())
key.sort()
X = pd.DataFrame(cv.transform(corpus).toarray(),columns = key)
y = df.Rating
# + id="MdLG9_a_0JIv"
#TF_IDF model to convert corpus into X
tfidf = TfidfVectorizer()
# Dense TF-IDF matrix; columns are unnamed integer feature indices here.
X2 = pd.DataFrame(tfidf.fit_transform(corpus).toarray())
# + id="Jo4ZW6-w1cQG"
Rating = df.Rating
# + colab={"base_uri": "https://localhost:8080/"} id="r8YKopCu0YiU" executionInfo={"status": "ok", "timestamp": 1628410321928, "user_tz": -330, "elapsed": 911, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="12db105a-f12a-4397-d031-436b6326d278"
# We need to get unique words to determine the vocabulary size
reviews = df.Reviews
# Collect the set of distinct space-separated tokens over all raw reviews.
uniq_words=set()
for doc in reviews:
    for word in doc.split(" "):
        uniq_words.add(word)
vocab_size=len(uniq_words)
print ("Total Unique words:",vocab_size)
# + [markdown] id="gRpPNRWAy16I"
# ## Review text to rating prediction
# + [markdown] id="WAZX4K6x0oK_"
# We need to convert each of the words in the reviews to one-hot vectors. Below is the code to get integer indexes of the words for one hot vector.
#
# Note that we don't need to store all zeros as only the integer index for the word in a vector will have a value of 1.
#
# + colab={"base_uri": "https://localhost:8080/"} id="O0mbja6W0t_5" executionInfo={"status": "ok", "timestamp": 1628410407382, "user_tz": -330, "elapsed": 520, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="640d4589-6639-4e4a-ae7a-3f7a15dfd75e"
# Integer encode the documents
# one_hot hashes each word to an integer index in [1, vocab_size); it is a
# hashing trick, so distinct words can collide.
encoded_reviews = [one_hot(review, vocab_size) for review in reviews]
print(encoded_reviews[1])
# + colab={"base_uri": "https://localhost:8080/"} id="0c6xxGn00wES" executionInfo={"status": "ok", "timestamp": 1628410416732, "user_tz": -330, "elapsed": 799, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="545e1714-5ef0-4d6d-b7a5-3a77fff0b55d"
# We fix the maximum length to 100 words.
# pad documents to a max length of n words
max_length = 100
# 'post' padding appends zeros after the tokens; longer docs are truncated.
padded_reviews = pad_sequences(encoded_reviews, maxlen=max_length, padding='post')
print(padded_reviews)
# + id="xKrq1eXG1AdB"
# We have completed our pre-processing, it is now time to build the neural network based classifier. We start by splitting the reviews into training and test set.
# 70/30 split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(padded_reviews,Rating,test_size=0.3, random_state=0)
# + [markdown] id="CBUQuT4G1CCd"
# Now we need to define the basics of model for neural network
# + colab={"base_uri": "https://localhost:8080/"} id="kw1F5g8QzhDY" executionInfo={"status": "ok", "timestamp": 1628410513708, "user_tz": -330, "elapsed": 1781, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="aa51c7d8-bbc0-45ce-c420-f5dd3fd3d9d5"
# define the model
model = Sequential()
# Embedding layer: vocab_size vectors of 16 dimensions each; with an input
# window of 100 words the flattened output is a 100*16 = 1600-dimensional
# vector.
model.add(Embedding(vocab_size, 16, input_length=max_length))
model.add(Flatten())
model.add(Dense(units=3, activation='relu'))
model.add(Dense(units=2, activation='relu'))
model.add(Dense(units=1, activation='relu'))
# compile the model with stochastic gradient descent and binary cross entropy
# NOTE(review): binary_crossentropy with a single relu output assumes a
# binary 0/1 target; if Rating is a multi-level score this loss/metric
# combination is questionable -- confirm the target encoding.
model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])
# summarize the model
print(model.summary())
# + colab={"base_uri": "https://localhost:8080/"} id="AR1ZNTIG1HuL" executionInfo={"status": "ok", "timestamp": 1628410635710, "user_tz": -330, "elapsed": 15384, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="40185b4e-13d5-49c4-929a-7bfc499fd00f"
# Fit the model. batch_size=30 is used here; change or remove the argument
# to fall back to the default batch size.
model.fit(X_train, y_train, epochs=10, batch_size=30, verbose=1)
# + [markdown] id="JAm6R4kI1kDf"
# Now, we shall evaluate our model against the test set that we kept separate earlier.
# + colab={"base_uri": "https://localhost:8080/"} id="ptyT08xP1T50" executionInfo={"status": "ok", "timestamp": 1628410666934, "user_tz": -330, "elapsed": 850, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="865a00ea-c763-4504-af67-37d9e1c9fff4"
# evaluate the model on the held-out test set
loss, accuracy = model.evaluate(X_test, y_test, verbose=1)
# accuracy is a fraction in [0, 1]; report it as a percentage
print('Accuracy: %f' % (accuracy*100))
# + [markdown] id="bopd8Ytd1pP7"
# Precision and Recall
# + colab={"base_uri": "https://localhost:8080/"} id="7tEDCIkL1oY4" executionInfo={"status": "ok", "timestamp": 1628410697860, "user_tz": -330, "elapsed": 503, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="e1e1b91d-42b4-4a49-fb8b-4bc24e77072c"
predictions = model.predict(X_test, batch_size=100, verbose=1)
# The network ends in a single output unit, so `predictions` has shape
# (n_samples, 1). np.argmax(..., axis=1) over a length-1 axis always
# returns 0, which labels every review as class 0. Threshold the score at
# 0.5 instead to obtain the binary class labels.
predictions_bool = (predictions > 0.5).astype('int32').ravel()
print(classification_report(y_test, predictions_bool))
| notebooks/reco-tut-vrr-02-predictions.ipynb |
(* -*- coding: utf-8 -*-
(* --- *)
(* jupyter: *)
(* jupytext: *)
(* text_representation: *)
(* extension: .ml *)
(* format_name: light *)
(* format_version: '1.5' *)
(* jupytext_version: 1.14.4 *)
(* kernelspec: *)
(* display_name: OCaml *)
(* language: ocaml *)
(* name: iocaml *)
(* --- *)
(* + [markdown] deletable=true editable=true
(* <h1>Fentes d'Young</h1> *)
(* *)
(* <h2>Présentation</h2> *)
(* *)
(* *)
(* <img src="OptPhys-Fig03.svg" /> *)
(* *)
(* On considère une source de lumière émettant des photons de longueur d'onde $\lambda$ et d'impulsion $\vec p=\hbar.\vec k$. *)
(* <br> *)
(* *)
(* Ces photons arrivent sur une plaque opaque possédant deux fentes de largeur $a$, parallèles, distantes de $d$. *)
(* <br> *)
(* *)
(* Un calcul purement quantique permet de montrer (<a href="https://arxiv.org/ftp/quant-ph/papers/0703/0703126.pdf">ref</a>) *)
(* que la probabilité qu'<b> un</b> photon soit diffracté dans la direction $\theta$ est donnée par: *)
(* $$P(\theta)=\frac{2.a}{\pi}.\left[\cos^2(\phi/2).\textrm{sinc}^2(\alpha)\right]$$ *)
(* avec $\phi=p.d.\sin(\theta)/\hbar$, $\alpha=p.a.\sin(\theta)/(2.\hbar)$ et $\textrm{sinc}(x)=\frac{\sin(x)}{x}$. *)
(* *)
(* <h2>Questions</h2> *)
(* <ol> *)
(* <li> Représenter la fonction $P(\theta)$ pour $\theta\in[-\pi/2,\,\pi/2]$. *)
(* <li> Simuler le passage de 50 000 photons à travers les deux fentes d'Young. On prendra $a=\lambda$ et $d=4.\lambda$. *)
(* </ol> *)
(* + deletable=true editable=true
open Random;;
(* () is required to actually apply self_init: without it the expression
   merely evaluates to the function value and the generator is never
   seeded, so every run produces the same "random" photons. *)
Random.self_init ();;
#use "topfind";;
#require "plplot";;
open Plplot;;
module P = Plot;;
(* Colour palette for plplot's colour map 0: each entry is
   [index; red; green; blue] with components in 0..255. *)
let couleurs_list = [[ 0;255;255;255]; (*`white*)
    [ 1;  0;  0;  0]; (*`black*)
    [ 2;  0;  0;255]; (*`blue*)
    [ 3;255;  0;  0]; (*`red*)
    [ 4;165; 42; 42]; (*`brown*)
    [ 5;  0;  0;  0]; [ 6;  0;  0;  0]; [ 7;  0;  0;  0]; [ 8;  0;  0;  0]; [ 9;  0;  0;  0];
    [10;200;200;200]; (*`gray*)
    [11;  0;255;255]; (*`light_blue*)
    [12;  0;255;  0]; (*`green*)
    [13;255;255;  0]; (*`yellow*)
    [14;255;  0;255]; (*`pink*)
    [15;160;  0;213]; (*`purple*) ]

(* Register each [index; r; g; b] entry with plscol0. The wildcard case
   silently stops on the empty list or any malformed entry. *)
let rec loop couleurs_list = match couleurs_list with
    | [n;r;g;b]::tl -> plscol0 n r g b; loop tl
    | _ -> ();;

(* Set a white background, then install the palette above. *)
let couleurs = (fun () -> plscolbg 255 255 255; loop couleurs_list)

(* Open an SVG plot stream written to [filename].svg, covering the given
   world-coordinate window, with the colour setup applied first. *)
let initialisation filename xmin xmax ymin ymax =
    P.init (xmin, ymin) (xmax, ymax) `greedy (`svg `core) ~filename:(filename^".svg") ~pre:couleurs
(* + deletable=true editable=true
(* NOTE(review): 6.62e-34 is Planck's constant h, not hbar ~ 1.05e-34.
   The discrepancy cancels wherever the ratio p /. hbar is used (phi and
   alpha below), but the name is misleading — confirm intent. *)
let hbar=6.62e-34;;
let lambda=1e-7;;(* wavelength, in metres *)
let a=lambda;; (* width of each slit *)
let d=4.*.lambda;; (* distance between the two slits *)
let pi = 4.*.atan 1.;;
let p=hbar*.(2.*.pi/.lambda);; (* photon momentum p = hbar * k *)
(* + deletable=true editable=true
(* Student exercise: the three stubs below are to be completed ("à
   compléter"). From the formulas in the markdown above:
     alpha theta = p * a * sin(theta) / (2 * hbar)
     phi theta   = p * d * sin(theta) / hbar
     proba theta = (2*a/pi) * cos^2(phi/2) * sinc^2(alpha)             *)
let alpha theta =
    "à compléter";;
let phi theta =
    "à compléter";;
let proba theta =
    "à compléter";;
(* + deletable=true editable=true
(* [linspace debut fin nbre] builds the list of [nbre] evenly spaced
   floats from [debut] to [fin] inclusive. *)
let linspace debut fin nbre =
    let pas = (fin -. debut) /. (float_of_int (nbre - 1)) in
    List.init nbre (fun i -> debut +. (float_of_int i) *. pas);;
(* + deletable=true editable=true
(* Smallest / largest element of a non-empty float array. *)
let array_min tab = Array.fold_left min tab.(0) tab;;
let array_max tab = Array.fold_left max tab.(0) tab;;
(* + deletable=true editable=true
(* Plot P(theta) over [-pi/2, pi/2] and write it to graph.svg.
   NOTE(review): this cell only works once [proba] above has been
   completed; as shipped the string stub would make it type-error. *)
let liste_theta=linspace(-.pi/.2.)(pi/.2.) 1000 in
let liste_proba=List.map (fun theta -> proba theta) liste_theta in
let xs = Array.of_list liste_theta in
let ys = Array.of_list liste_proba in
let p = initialisation "graph" (array_min xs) (array_max xs) 0. (array_max ys) in
P.plot ~stream:p [P.lines `blue xs ys;
    P.label "Angle θ en rad" "Probabilité" "Fentes d'Young: λ et d=4.λ"];
P.finish ~stream:p ();;
(* + [markdown] deletable=true editable=true
(* <img src="graph.svg" width="500"/> *)
(* + deletable=true editable=true
open Unix;;
(* + deletable=true editable=true
(* List the frame directory; Unix.system returns the exit status. *)
Unix.system "ls -l ./tmp"
(* + deletable=true editable=true
(* Create ./tmp for the animation frames; if it already exists, just reset
   its permissions. 511 decimal = 0o777 (rwx for everyone). *)
try
    Unix.mkdir "tmp" 511 (* O777 *)
with _ -> Unix.chmod "tmp" 511;;
(* + deletable=true editable=true
let zeros_of_i i = if i<10 then "00" else if i<100 then "0" else "";;
(* Student exercise ("à compléter"): presumably renders one animation
   frame per call into ./tmp/proba<num>.png and recurses until [max] —
   see the ffmpeg command below for the expected file pattern. *)
let rec loop num max liste_theta liste_y =
    "à compléter";;
(* + deletable=true editable=true
Unix.system "rm young.ogv";;
Unix.system "ffmpeg -framerate 25 -pattern_type sequence -i ./tmp/proba%03d.png -s 720x480 -c:v libtheora young.ogv"
(* + deletable=true editable=true
Unix.system "ls -l *.ogv"
(* + [markdown] deletable=true editable=true
(* <video width="640" height="480" src="./young.ogv" controls> *)
(* + deletable=true editable=true
| Fentes_Young/Fentes_Young_OCaml_sujet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [Table of Contents](./table_of_contents.ipynb)
# # Particle Filters
#format the book
# %matplotlib inline
from __future__ import division, print_function
from book_format import load_style
# Apply the book's notebook CSS styling (project helper).
load_style()
# ## Motivation
#
# Here is our problem. We have moving objects that we want to track. Maybe the objects are fighter jets and missiles, or maybe we are tracking people playing cricket in a field. It doesn't really matter. Which of the filters that we have learned can handle this problem? Unfortunately, none of them are ideal. Let's think about the characteristics of this problem.
#
# * **multimodal**: We want to track zero, one, or more than one object simultaneously.
#
# * **occlusions**: One object can hide another, resulting in one measurement for multiple objects.
#
# * **nonlinear behavior**: Aircraft are buffeted by winds, balls move in parabolas, and people collide into each other.
#
# * **nonlinear measurements**: Radar gives us the distance to an object. Converting that to an (x,y,z) coordinate requires a square root, which is nonlinear.
#
# * **non-Gaussian noise:** as objects move across a background the computer vision can mistake part of the background for the object.
#
# * **continuous:** the object's position and velocity (i.e. the state space) can smoothly vary over time.
#
# * **multivariate**: we want to track several attributes, such as position, velocity, turn rates, etc.
#
# * **unknown process model**: we may not know the process model of the system
#
# None of the filters we have learned work well with all of these constraints.
#
# * **Discrete Bayes filter**: This has most of the attributes. It is multimodal, can handle nonlinear measurements, and can be extended to work with nonlinear behavior. However, it is discrete and univariate.
#
# * **Kalman filter**: The Kalman filter produces optimal estimates for unimodal linear systems with Gaussian noise. None of these are true for our problem.
#
# * **Unscented Kalman filter**: The UKF handles nonlinear, continuous, multivariate problems. However, it is not multimodal nor does it handle occlusions. It can handle noise that is modestly non-Gaussian, but does not do well with distributions that are very non-Gaussian or problems that are very nonlinear.
#
# * **Extended Kalman filter**: The EKF has the same strengths and limitations as the UKF, except that is it even more sensitive to strong nonlinearities and non-Gaussian noise.
# ## Monte Carlo Sampling
#
# In the UKF chapter I generated a plot similar to this to illustrate the effects of nonlinear systems on Gaussians:
# Book helper module that generates this chapter's illustrative figures.
import kf_book.pf_internal as pf_internal
pf_internal.plot_monte_carlo_ukf()
# The left plot shows 3,000 points normally distributed based on the Gaussian
#
# $$\mu = \begin{bmatrix}0\\0\end{bmatrix},\, \, \, \Sigma = \begin{bmatrix}32&15\\15&40\end{bmatrix}$$
#
# The right plot shows these points passed through this set of equations:
#
# $$\begin{aligned}x&=x+y\\
# y &= 0.1x^2 + y^2\end{aligned}$$
#
# Using a finite number of randomly sampled points to compute a result is called a [*Monte Carlo*](https://en.wikipedia.org/wiki/Monte_Carlo_method) (MC) method. The idea is simple. Generate enough points to get a representative sample of the problem, run the points through the system you are modeling, and then compute the results on the transformed points.
#
# In a nutshell this is what particle filtering does. The Bayesian filter algorithm we have been using throughout the book is applied to thousands of particles, where each particle represents a *possible* state for the system. We extract the estimated state from the thousands of particles using weighted statistics of the particles.
# ## Generic Particle Filter Algorithm
#
# 1. **Randomly generate a bunch of particles**
#
# Particles can have position, heading, and/or whatever other state variable you need to estimate. Each has a weight (probability) indicating how likely it matches the actual state of the system. Initialize each with the same weight.
#
# 2. **Predict next state of the particles**
#
# Move the particles based on how you predict the real system is behaving.
#
# 3. **Update**
#
# Update the weighting of the particles based on the measurement. Particles that closely match the measurements are weighted higher than particles which don't match the measurements very well.
#
# 4. **Resample**
#
# Discard highly improbable particle and replace them with copies of the more probable particles.
#
# 5. **Compute Estimate**
#
# Optionally, compute weighted mean and covariance of the set of particles to get a state estimate.
#
# This naive algorithm has practical difficulties which we will need to overcome, but this is the general idea. Let's see an example. I wrote a particle filter for the robot localization problem from the UKF and EKF chapters. The robot has steering and velocity control inputs. It has sensors that measures distance to visible landmarks. Both the sensors and control mechanism have noise in them, and we need to track the robot's position.
#
# Here I run a particle filter and plotted the positions of the particles. The plot on the left is after one iteration, and on the right is after 10. The red 'X' shows the actual position of the robot, and the large circle is the computed weighted mean position.
pf_internal.show_two_pf_plots()
# If you are viewing this in a browser, this animation shows the entire sequence:
# <img src='animations/particle_filter_anim.gif'>
# After the first iteration the particles are still largely randomly scattered around the map, but you can see that some have already collected near the robot's position. The computed mean is quite close to the robot's position. This is because each particle is weighted based on how closely it matches the measurement. The robot is near (1,1), so particles that are near (1, 1) will have a high weight because they closely match the measurements. Particles that are far from the robot will not match the measurements, and thus have a very low weight. The estimated position is computed as the weighted mean of positions of the particles. Particles near the robot contribute more to the computation so the estimate is quite accurate.
#
# Several iterations later you can see that all the particles have clustered around the robot. This is due to the *resampling* step. Resampling discards particles that are very improbable (very low weight) and replaces them with particles with higher probability.
#
# I haven't fully shown *why* this works nor fully explained the algorithms for particle weighting and resampling, but it should make intuitive sense. Make a bunch of random particles, move them so they 'kind of' follow the robot, weight them according to how well they match the measurements, only let the likely ones live. It seems like it should work, and it does.
# ## Probability distributions via Monte Carlo
#
# Suppose we want to know the area under the curve $y= \mathrm{e}^{\sin(x)}$ in the interval [0, $\pi$]. The area is computed with the definite integral $\int_0^\pi \mathrm{e}^{\sin(x)}\, \mathrm{d}x$. As an exercise, go ahead and find the answer; I'll wait.
#
# If you are wise you did not take that challenge; $\mathrm{e}^{\sin(x)}$ cannot be integrated analytically. The world is filled with equations which we cannot integrate. For example, consider calculating the luminosity of an object. An object reflects some of the light that strike it. Some of the reflected light bounces off of other objects and restrikes the original object, increasing the luminosity. This creates a *recursive integral*. Good luck with that one.
#
# However, integrals are trivial to compute using a Monte Carlo technique. To find the area under a curve create a bounding box that contains the curve in the desired interval. Generate randomly positioned point within the box, and compute the ratio of points that fall under the curve vs the total number of points. For example, if 40% of the points are under the curve and the area of the bounding box is 1, then the area under the curve is approximately 0.4. As you tend towards infinite points you can achieve any arbitrary precision. In practice, a few thousand points will give you a fairly accurate result.
#
# You can use this technique to numerically integrate a function of any arbitrary difficulty. This includes non-integrable and noncontinuous functions. This technique was invented by <NAME> at Los Alamos National Laboratory to allow him to perform computations for nuclear reactions which were unsolvable on paper.
#
# Let's compute $\pi$ by finding the area of a circle. We will define a circle with a radius of 1, and bound it in a square. The side of the square has length 2, so the area is 4. We generate a set of uniformly distributed random points within the box, and count how many fall inside the circle. The area of the circle is computed as the area of the box times the ratio of points inside the circle vs. the total number of points. Finally, we know that $A = \pi r^2$, so we compute $\pi = A / r^2$.
#
# We start by creating the points.
#
# ```python
# N = 20000
# pts = uniform(-1, 1, (N, 2))
# ```
#
# A point is inside a circle if its distance from the center of the circle is less than or equal to the radius. We compute the distance with `numpy.linalg.norm`, which computes the magnitude of a vector. Since vectors start at (0, 0) calling norm will compute the point's distance from the origin.
#
# ```python
# dist = np.linalg.norm(pts, axis=1)
# ```
#
# Next we compute which of these distances fit the criteria. This code returns a bool array that contains `True` if it meets the condition `dist <= 1`:
#
# ```python
# in_circle = dist <= 1
# ```
#
# All that is left is to count the points inside the circle, compute pi, and plot the results. I've put it all in one cell so you can experiment with alternative values for `N`, the number of points.
# +
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import uniform

N = 20000  # number of points
radius = 1
area = (2*radius)**2  # bounding square is [-1, 1] x [-1, 1], area = 4

# N points uniformly distributed over the bounding square
pts = uniform(-1, 1, (N, 2))

# distance from (0,0); a point is inside the circle when dist <= radius
dist = np.linalg.norm(pts, axis=1)
in_circle = dist <= 1
pts_in_circle = np.count_nonzero(in_circle)

# circle area ~ square area * fraction of points inside; pi = A / r^2 = A
pi = area * (pts_in_circle / N)

# plot results
plt.scatter(pts[in_circle,0], pts[in_circle,1],
            marker=',', edgecolor='k', s=1)
plt.scatter(pts[~in_circle,0], pts[~in_circle,1],
            marker=',', edgecolor='r', s=1)
plt.axis('equal')

print('mean pi(N={})= {:.4f}'.format(N, pi))
print('err pi(N={})= {:.4f}'.format(N, np.pi-pi))
# -
# This insight leads us to the realization that we can use Monte Carlo to compute the probability density of any probability distribution. For example, suppose we have this Gaussian:
from filterpy.stats import plot_gaussian_pdf
# Example Gaussian PDF: N(mean=2, variance=3).
plot_gaussian_pdf(mean=2, variance=3);
# The probability density function (PDF) gives the probability that the random value falls between 2 values. For example, we may want to know the probability of x being between 0 and 2 in the graph above. This is a continuous function, so we need to take the integral to find the area under the curve, as the area is equal to the probability for that range of values to occur.
#
# $$P[a \le X \le b] = \int_a^b f_X(x) \, dx$$
#
# It is easy to compute this integral for a Gaussian. But real life is not so easy. For example, the plot below shows a probability distribution. There is no way to analytically describe an arbitrary curve, let alone integrate it.
pf_internal.plot_random_pd()
# We can use Monte Carlo methods to compute any integral. The PDF is computed with an integral, hence we can compute the PDF of this curve using Monte Carlo.
# ## The Particle Filter
#
# All of this brings us to the particle filter. Consider tracking a robot or a car in an urban environment. For consistency I will use the robot localization problem from the EKF and UKF chapters. In this problem we tracked a robot that has a sensor which measures the range and bearing to known landmarks.
#
# Particle filters are a family of algorithms. I'm presenting a specific form of a particle filter that is intuitive to grasp and relates to the problems we have studied in this book. This will leave a few of the steps seeming a bit 'magical' since I haven't offered a full explanation. That will follow later in the chapter.
#
# Taking insight from the discussion in the previous section we start by creating several thousand *particles*. Each particle has a position that represents a possible belief of where the robot is in the scene, and perhaps a heading and velocity. Suppose that we have no knowledge of the location of the robot. We would want to scatter the particles uniformly over the entire scene. If you think of all of the particles representing a probability distribution, locations where there are more particles represent a higher belief, and locations with fewer particles represents a lower belief. If there was a large clump of particles near a specific location that would imply that we were more certain that the robot is there.
#
# Each particle needs a weight - ideally the probability that it represents the true position of the robot. This probability is rarely computable, so we only require it be *proportional* to that probability, which is computable. At initialization we have no reason to favor one particle over another, so we assign a weight of $1/N$, for $N$ particles. We use $1/N$ so that the sum of all probabilities equals one.
#
# The combination of particles and weights forms the *probability distribution* for our problem. Think back to the *Discrete Bayes* chapter. In that chapter we modeled positions in a hallway as discrete and uniformly spaced. This is very similar except the particles are randomly distributed in a continuous space rather than constrained to discrete locations. In this problem the robot can move on a plane of some arbitrary dimension, with the lower right corner at (0,0).
#
# To track our robot we need to maintain states for x, y, and heading. We will store `N` particles in a `(N, 3)` shaped array. The three columns contain x, y, and heading, in that order.
#
# If you are passively tracking something (no control input), then you would need to include velocity in the state and use that estimate to make the prediction. More dimensions requires exponentially more particles to form a good estimate, so we always try to minimize the number of random variables in the state.
#
# This code creates a uniform and Gaussian distribution of particles over a region:
# +
# randn is needed by create_gaussian_particles; the original cell relied on
# a *later* cell's `from numpy.random import randn`, so calling it before
# that cell ran raised NameError. Import it here.
from numpy.random import uniform, randn


def create_uniform_particles(x_range, y_range, hdg_range, N):
    """Return an (N, 3) array of particles [x, y, heading] drawn uniformly.

    x_range, y_range, hdg_range : (low, high) bounds for each state
    variable. Headings are wrapped into [0, 2*pi).
    """
    particles = np.empty((N, 3))
    particles[:, 0] = uniform(x_range[0], x_range[1], size=N)
    particles[:, 1] = uniform(y_range[0], y_range[1], size=N)
    particles[:, 2] = uniform(hdg_range[0], hdg_range[1], size=N)
    particles[:, 2] %= 2 * np.pi
    return particles


def create_gaussian_particles(mean, std, N):
    """Return an (N, 3) array of particles sampled from N(mean, std**2).

    mean, std : length-3 sequences for (x, y, heading).
    Headings are wrapped into [0, 2*pi).
    """
    particles = np.empty((N, 3))
    particles[:, 0] = mean[0] + (randn(N) * std[0])
    particles[:, 1] = mean[1] + (randn(N) * std[1])
    particles[:, 2] = mean[2] + (randn(N) * std[2])
    particles[:, 2] %= 2 * np.pi
    return particles
# -
# For example:
create_uniform_particles((0,1), (0,1), (0, np.pi*2), 4)
# ### Predict Step
#
# The predict step in the Bayes algorithm uses the process model to update the belief in the system state. How would we do that with particles? Each particle represents a possible position for the robot. Suppose we send a command to the robot to move 0.1 meters while turning by 0.007 radians. We could move each particle by this amount. If we did that we would soon run into a problem. The robot's controls are not perfect so it will not move exactly as commanded. Therefore we need to add noise to the particle's movements to have a reasonable chance of capturing the actual movement of the robot. If you do not model the uncertainty in the system the particle filter will not correctly model the probability distribution of our belief in the robot's position.
def predict(particles, u, std, dt=1.):
    """Advance every particle one time step using control input u.

    u   : (heading change, velocity) command.
    std : (heading-change std, velocity std) — noise added so the particle
          cloud captures the uncertainty in the real robot's motion.
    dt  : time step length.
    Modifies `particles` in place; headings stay wrapped in [0, 2*pi).
    """
    n = len(particles)

    # turn, with per-particle heading noise
    particles[:, 2] = (particles[:, 2] + u[0] + randn(n) * std[0]) % (2 * np.pi)

    # travel the (noisy) commanded distance along each particle's new heading
    step = u[1] * dt + randn(n) * std[1]
    particles[:, 0] += np.cos(particles[:, 2]) * step
    particles[:, 1] += np.sin(particles[:, 2]) * step
# ### Update Step
#
# Next we get a set of measurements - one for each landmark currently in view. How should these measurements be used to alter our probability distribution as modeled by the particles?
#
# Think back to the **Discrete Bayes** chapter. In that chapter we modeled positions in a hallway as discrete and uniformly spaced. We assigned a probability to each position which we called the *prior*. When a new measurement came in we multiplied the current probability of that position (the *prior*) by the *likelihood* that the measurement matched that location:
#
# ```python
# def update(likelihood, prior):
# posterior = prior * likelihood
# return normalize(posterior)
# ```
#
# which is an implementation of the equation
#
# $$x = \| \mathcal L \bar x \|$$
#
# which is a realization of Bayes theorem:
#
# $$\begin{aligned}P(x \mid z) &= \frac{P(z \mid x)\, P(x)}{P(z)} \\
# &= \frac{\mathtt{likelihood}\times \mathtt{prior}}{\mathtt{normalization}}\end{aligned}$$
# We do the same with our particles. Each particle has a position and a weight which estimates how well it matches the measurement. Normalizing the weights so they sum to one turns them into a probability distribution. The particles that are closest to the robot will generally have a higher weight than ones far from the robot.
def update(particles, weights, z, R, landmarks):
    """Reweight particles by the likelihood of the range measurements.

    z : measured distance to each landmark; R : sensor std deviation.
    Previous weights are discarded; the new weights are written in place
    and normalized to sum to one.
    """
    weights.fill(1.)
    for i, landmark in enumerate(landmarks):
        # distance from every particle to this landmark
        dist_to_lm = np.linalg.norm(particles[:, 0:2] - landmark, axis=1)
        # likelihood of the measurement under N(dist_to_lm, R)
        weights *= scipy.stats.norm.pdf(z[i], loc=dist_to_lm, scale=R)

    weights += 1.e-300  # avoid round-off to zero
    weights /= sum(weights)  # normalize
# In the literature this part of the algorithm is called *Sequential Importance Sampling*, or SIS. The equation for the weights is called the *importance density*. I will give these theoretical underpinnings in a following section. For now I hope that this makes intuitive sense. If we weight the particles according to how well they match the measurements they are probably a good sample for the probability distribution of the system after incorporating the measurements. Theory proves this is so. The weights are the *likelihood* in Bayes theorem. Different problems will need to tackle this step in slightly different ways but this is the general idea.
# ### Computing the State Estimate
#
# In most applications you will want to know the estimated state after each update, but the filter consists of nothing but a collection of particles. Assuming that we are tracking one object (i.e. it is unimodal) we can compute the mean of the estimate as the sum of the weighted values of the particles.
#
# $$ \mu = \frac{1}{N}\sum\limits_{i=1}^N w^ix^i$$
#
# Here I adopt the notation $x^i$ to indicate the i$^{th}$ particle. A superscript is used because we often need to use subscripts to denote time steps the k$^{th}$ or k+1$^{th}$ particle, yielding the unwieldy $x^i_{k+1}$.
#
# This function computes both the mean and variance of the particles:
def estimate(particles, weights):
    """Weighted mean and variance of the particles' (x, y) positions."""
    pos = particles[:, 0:2]
    mu = np.average(pos, axis=0, weights=weights)
    spread = np.average((pos - mu) ** 2, axis=0, weights=weights)
    return mu, spread
# If we create a uniform distribution of points in a 1x1 square with equal weights we get a mean position very near the center of the square at (0.5, 0.5) and a small variance.
# Sanity check: uniform particles over the unit square should average to
# roughly (0.5, 0.5) with small variance. np.average normalizes by the
# weight sum, so equal weights of .25 behave like equal weights of 1/N.
particles = create_uniform_particles((0,1), (0,1), (0, 5), 1000)
weights = np.array([.25]*1000)
estimate(particles, weights)
# ### Particle Resampling
#
# The SIS algorithm suffers from the *degeneracy problem*. It starts with uniformly distributed particles with equal weights. There may only be a handful of particles near the robot. As the algorithm runs any particle that does not match the measurements will acquire an extremely low weight. Only the particles which are near the robot will have an appreciable weight. We could have 5,000 particles with only 3 contributing meaningfully to the state estimate! We say the filter has *degenerated*. This problem is usually solved by some form of *resampling* of the particles.
#
# Particles with very small weights do not meaningfully describe the probability distribution of the robot. The resampling algorithm discards particles with very low probability and replaces them with new particles with higher probability. It does that by duplicating particles with relatively high probability. The duplicates are slightly dispersed by the noise added in the predict step. This results in a set of points in which a large majority of the particles accurately represent the probability distribution.
#
# There are many resampling algorithms. For now let's look at one of the simplest, *simple random resampling*, also called *multinomial resampling*. It samples from the current particle set $N$ times, making a new set of particles from the sample. The probability of selecting any given particle should be proportional to its weight.
#
# We accomplish this with NumPy's `cumsum` function. `cumsum` computes the cumulative sum of an array. That is, element one is the sum of elements zero and one, element two is the sum of elements zero, one and two, etc. Then we generate random numbers in the range of 0.0 to 1.0 and do a binary search to find the weight that most closely matches that number:
def simple_resample(particles, weights):
    """Multinomial resampling: draw N particles with replacement, each
    selected with probability proportional to its weight.

    Both arrays are modified in place; afterwards all weights are 1/N.
    """
    N = len(particles)
    cumulative_sum = np.cumsum(weights)
    cumulative_sum[-1] = 1.  # avoid round-off error
    # Use np.random.random explicitly: this notebook never imports the bare
    # name `random` from numpy.random, so the original call raised NameError.
    indexes = np.searchsorted(cumulative_sum, np.random.random(N))

    # resample according to indexes
    particles[:] = particles[indexes]
    weights.fill(1.0 / N)
# We don't resample at every epoch. For example, if you received no new measurements you have not received any information from which the resample can benefit. We can determine when to resample by using something called the *effective N*, which approximately measures the number of particles which meaningfully contribute to the probability distribution. The equation for this is
#
# $$\hat{N}_\text{eff} = \frac{1}{\sum w^2}$$
#
# and we can implement this in Python with
def neff(weights):
    """Effective sample size: roughly how many particles meaningfully
    contribute to the estimate (1 / sum of squared weights)."""
    return 1. / np.dot(weights, weights)
# If $\hat{N}_\text{eff}$ falls below some threshold it is time to resample. A useful starting point is $N/2$, but this varies by problem. It is also possible for $\hat{N}_\text{eff} = N$, which means the particle set has collapsed to one point (each has equal weight). It may not be theoretically pure, but if that happens I create a new distribution of particles in the hopes of generating particles with more diversity. If this happens to you often, you may need to increase the number of particles, or otherwise adjust your filter. We will talk more of this later.
# ## SIR Filter - A Complete Example
#
# There is more to learn, but we know enough to implement a full particle filter. We will implement the *Sampling Importance Resampling filter*, or SIR.
#
# I need to introduce a more sophisticated resampling method than I gave above. FilterPy provides several resampling methods. I will describe them later. They take an array of weights and returns indexes to the particles that have been chosen for the resampling. We just need to write a function that performs the resampling from these indexes:
def resample_from_index(particles, weights, indexes):
    """Keep only the particles selected by `indexes` (duplicates allowed)
    and give every survivor the same weight. Operates in place."""
    particles[:] = particles[indexes]
    n = len(weights)
    weights[:] = np.full(n, 1.0 / n)
# To implement the filter we need to create the particles and the landmarks. We then execute a loop, successively calling `predict`, `update`, resampling, and then computing the new state estimate with `estimate`.
# +
from filterpy.monte_carlo import systematic_resample
from numpy.linalg import norm
from numpy.random import randn
import scipy.stats
def run_pf1(N, iters=18, sensor_std_err=.1,
do_plot=True, plot_particles=False,
xlim=(0, 20), ylim=(0, 20),
initial_x=None):
landmarks = np.array([[-1, 2], [5, 10], [12,14], [18,21]])
NL = len(landmarks)
plt.figure()
# create particles and weights
if initial_x is not None:
particles = create_gaussian_particles(
mean=initial_x, std=(5, 5, np.pi/4), N=N)
else:
particles = create_uniform_particles((0,20), (0,20), (0, 6.28), N)
weights = np.zeros(N)
if plot_particles:
alpha = .20
if N > 5000:
alpha *= np.sqrt(5000)/np.sqrt(N)
plt.scatter(particles[:, 0], particles[:, 1],
alpha=alpha, color='g')
xs = []
robot_pos = np.array([0., 0.])
for x in range(iters):
robot_pos += (1, 1)
# distance from robot to each landmark
zs = (norm(landmarks - robot_pos, axis=1) +
(randn(NL) * sensor_std_err))
# move diagonally forward to (x+1, x+1)
predict(particles, u=(0.00, 1.414), std=(.2, .05))
# incorporate measurements
update(particles, weights, z=zs, R=sensor_std_err,
landmarks=landmarks)
# resample if too few effective particles
if neff(weights) < N/2:
indexes = systematic_resample(weights)
resample_from_index(particles, weights, indexes)
mu, var = estimate(particles, weights)
xs.append(mu)
if plot_particles:
plt.scatter(particles[:, 0], particles[:, 1],
color='k', marker=',', s=1)
p1 = plt.scatter(robot_pos[0], robot_pos[1], marker='+',
color='k', s=180, lw=3)
p2 = plt.scatter(mu[0], mu[1], marker='s', color='r')
xs = np.array(xs)
#plt.plot(xs[:, 0], xs[:, 1])
plt.legend([p1, p2], ['Actual', 'PF'], loc=4, numpoints=1)
plt.xlim(*xlim)
plt.ylim(*ylim)
print('final position error, variance:\n\t', mu - np.array([iters, iters]), var)
plt.show()
# Fix the RNG seed so this stochastic filter run is reproducible.
from numpy.random import seed
seed(2)
run_pf1(N=5000, plot_particles=False)
# -
# Most of this code is devoted to initialization and plotting. The entirety of the particle filter processing consists of these lines:
#
# ```python
# # move diagonally forward to (x+1, x+1)
# predict(particles, u=(0.00, 1.414), std=(.2, .05))
#
# # incorporate measurements
# update(particles, weights, z=zs, R=sensor_std_err,
# landmarks=landmarks)
#
# # resample if too few effective particles
# if neff(weights) < N/2:
# indexes = systematic_resample(weights)
# resample_from_index(particles, weights, indexes)
#
# mu, var = estimate(particles, weights)
# ```
#
# The first line predicts the position of the particles with the assumption that the robot is moving in a straight line (`u[0] == 0`) and moving 1 unit in both the x and y axis (`u[1]==1.414`). The standard deviation for the error in the turn is 0.2, and the standard deviation for the distance is 0.05. When this call returns the particles will all have been moved forward, but the weights are no longer correct as they have not been updated.
#
# The next line incorporates the measurement into the filter. This does not alter the particle positions, it only alters the weights. If you recall the weight of the particle is computed as the probability that it matches the Gaussian of the sensor error model. The further the particle from the measured distance the less likely it is to be a good representation.
#
# The final two lines examine the effective particle count ($\hat{N}_\text{eff}$). If it falls below $N/2$ we perform resampling to try to ensure our particles form a good representation of the actual probability distribution.
#
# Now let's look at this with all the particles plotted. Seeing this happen interactively is much more instructive, but this format still gives us useful information. I plotted the original random distribution of points in a very pale green and large circles to help distinguish them from the subsequent iterations where the particles are plotted with black pixels. The number of particles makes it hard to see the details, so I limited the number of iterations to 8 so we can zoom in and look more closely.
# Re-run with the particle cloud plotted; only 8 iterations so the
# clouds near the start are easy to inspect when zoomed in.
seed(2)
run_pf1(N=5000, iters=8, plot_particles=True,
        xlim=(0,8), ylim=(0,8))
# From the plot it looks like there are only a few particles at the first two robot positions. This is not true; there are 5,000 particles, but due to resampling most are duplicates of each other. The reason for this is the Gaussian for the sensor is very narrow. This is called *sample impoverishment* and can lead to filter divergence. I'll address this in detail below. For now, looking at the second step at x=2 we can see that the particles have dispersed a bit. This dispersion is due to the motion model noise. All particles are projected forward according to the control input `u`, but noise is added to each particle proportional to the error in the control mechanism in the robot. By the third step the particles have dispersed enough to make a convincing cloud of particles around the robot.
#
# The shape of the particle cloud is an ellipse. This is not a coincidence. The sensors and robot control are both modeled as Gaussian, so the probability distribution of the system is also a Gaussian. The particle filter is a sampling of the probability distribution, so the cloud should be an ellipse.
#
# It is important to recognize that the particle filter algorithm *does not require* the sensors or system to be Gaussian or linear. Because we represent the probability distribution with a cloud of particles we can handle any probability distribution and strongly nonlinear problems. There can be discontinuities and hard limits in the probability model.
# ### Effect of Sensor Errors on the Filter
#
# The first few iterations of the filter resulted in many duplicate particles. This happens because the model for the sensors is Gaussian, and we gave it a small standard deviation of $\sigma=0.1$. This is counterintuitive at first. The Kalman filter performs better when the noise is smaller, yet the particle filter can perform worse.
#
#
# We can reason about why this is true. If $\sigma=0.1$, the robot is at (1, 1) and a particle is at (2, 2) the particle is 14 standard deviations away from the robot. This gives it a near zero probability. It contributes nothing to the estimate of the mean, and it is extremely unlikely to survive after the resampling. If $\sigma=1.4$ then the particle is only $1\sigma$ away and thus it will contribute to the estimate of the mean. During resampling it is likely to be copied one or more times.
#
# This is *very important* to understand - a very accurate sensor can lead to poor performance of the filter because few of the particles will be a good sample of the probability distribution. There are a few fixes available to us. First, we can artificially increase the sensor noise standard deviation so the particle filter will accept more points as matching the robots probability distribution. This is non-optimal because some of those points will be a poor match. The real problem is that there aren't enough points being generated such that enough are near the robot. Increasing `N` usually fixes this problem. This decision is not cost free as increasing the number of particles significantly increase the computation time. Still, let's look at the result of using 100,000 particles.
# Same run with 20x more particles: better coverage of the state space
# at the cost of memory and run time.
seed(2)
run_pf1(N=100000, iters=8, plot_particles=True,
        xlim=(0,8), ylim=(0,8))
# There are many more particles at x=1, and we have a convincing cloud at x=2. Clearly the filter is performing better, but at the cost of large memory usage and long run times.
#
# Another approach is to be smarter about generating the initial particle cloud. Suppose we guess that the robot is near (0, 0). This is not exact, as the simulation actually places the robot at (1, 1), but it is close. If we create a normally distributed cloud near (0, 0) there is a much greater chance of the particles matching the robot's position.
#
# `run_pf1()` has an optional parameter `initial_x`. Use this to specify the initial position guess for the robot. The code then uses `create_gaussian_particles(mean, std, N)` to create particles distributed normally around the initial guess. We will use this in the next section.
# ### Filter Degeneracy From Inadequate Samples
#
# The filter as written is far from perfect. Here is how it performs with a different random seed.
# A seed chosen to demonstrate sample impoverishment: the initial
# uniform draw leaves no particles near the robot and the filter diverges.
seed(6)
run_pf1(N=5000, plot_particles=True, ylim=(-20, 20))
# Here the initial sample of points did not generate any points near the robot. The particle filter does not create new points during the resample operation, so it ends up duplicating points which are not a representative sample of the probability distribution. As mentioned earlier this is called *sample impoverishment*. The problem quickly spirals out of control. The particles are not a good match for the landscape measurement so they become dispersed in a highly nonlinear, curved distribution, and the particle filter diverges from reality. No particles are available near the robot, so it cannot ever converge.
#
# Let's make use of the `create_gaussian_particles()` method to try to generate more points near the robot. We can do this by using the `initial_x` parameter to specify a location to create the particles.
# Same seed, but seed the particles near the true start via `initial_x`.
seed(6)
run_pf1(N=5000, plot_particles=True, initial_x=(1,1, np.pi/4))
# This works great. You should always try to create particles near the initial position if you have any way to roughly estimate it. Do not be *too* careful - if you generate all the points very near a single position the particles may not be dispersed enough to capture the nonlinearities in the system. This is a fairly linear system, so we could get away with a smaller variance in the distribution. Clearly this depends on your problem. Increasing the number of particles is always a good way to get a better sample, but the processing cost may be a higher price than you are willing to pay.
# ## Importance Sampling
#
# I've hand waved a difficulty away which we must now confront. There is some probability distribution that describes the position and movement of our robot. We want to draw a sample of particles from that distribution and compute the integral using MC methods.
#
# Our difficulty is that in many problems we don't know the distribution. For example, the tracked object might move very differently than we predicted with our state model. How can we draw a sample from a probability distribution that is unknown?
#
# There is a theorem from statistics called [*importance sampling*](https://en.wikipedia.org/wiki/Importance_sampling)[1]. Remarkably, it gives us a way to draw samples from a different and known probability distribution and use those to compute the properties of the unknown one. It's a fantastic theorem that brings joy to my heart.
#
# The idea is simple, and we already used it. We draw samples from the known probability distribution, but *weight the samples* according to the distribution we are interested in. We can then compute properties such as the mean and variance by computing the weighted mean and weighted variance of the samples.
#
# For the robot localization problem we drew samples from the probability distribution that we computed from our state model prediction step. In other words, we reasoned 'the robot was there, it is perhaps moving at this direction and speed, hence it might be here'. Yet the robot might have done something completely different. It may have fallen off a cliff or been hit by a mortar round. In each case the probability distribution is not correct. It seems like we are stymied, but we are not because we can use importance sampling. We drew particles from that likely incorrect probability distribution, then weighted them according to how well the particles match the measurements. That weighting is based on the true probability distribution, so according to the theory the resulting mean, variance, etc, will be correct!
#
# How can that be true? I'll give you the math; you can safely skip this if you don't plan to go beyond the robot localization problem. However, other particle filter problems require different approaches to importance sampling, and a bit of math helps. Also, the literature and much of the content on the web uses the mathematical formulation in favor of my rather imprecise "imagine that..." exposition. If you want to understand the literature you will need to know the following equations.
#
# We have some probability distribution $\pi(x)$ which we want to take samples from. However, we don't know what $\pi(x)$ is; instead we only know an alternative probability distribution $q(x)$. In the context of robot localization, $\pi(x)$ is the probability distribution for the robot, but we don't know it, and $q(x)$ is the probability distribution of our measurements, which we do know.
#
# The expected value of a function $f(x)$ with probability distribution $\pi(x)$ is
#
# $$\mathbb{E}\big[f(x)\big] = \int f(x)\pi(x)\, dx$$
#
# We don't know $\pi(x)$ so we cannot compute this integral. We do know an alternative distribution $q(x)$ so we can add it into the integral without changing the value with
#
# $$\mathbb{E}\big[f(x)\big] = \int f(x)\pi(x)\frac{q(x)}{q(x)}\, dx$$
#
# Now we rearrange and group terms
#
# $$\mathbb{E}\big[f(x)\big] = \int f(x)q(x)\, \, \cdot \, \frac{\pi(x)}{q(x)}\, dx$$
#
# $q(x)$ is known to us, so we can compute $\int f(x)q(x)$ using MC integration. That leaves us with $\pi(x)/q(x)$. That is a ratio, and we define it as a *weight*. This gives us
#
# $$\mathbb{E}\big[f(x)\big] = \sum\limits_{i=1}^N f(x^i)w(x^i)$$
#
# Maybe that seems a little abstract. If we want to compute the mean of the particles we would compute
#
# $$\mu = \sum\limits_{i=1}^N x^iw^i$$
#
# which is the equation I gave you earlier in the chapter.
#
# It is required that the weights be proportional to the ratio $\pi(x)/q(x)$. We normally do not know the exact value, so in practice we normalize the weights by dividing them by $\sum w(x^i)$.
#
# When you formulate a particle filter algorithm you will have to implement this step depending on the particulars of your situation. For robot localization the best distribution to use for $q(x)$ is the particle distribution from the `predict()` step of the filter. Let's look at the code again:
#
# ```python
# def update(particles, weights, z, R, landmarks):
# weights.fill(1.)
# for i, landmark in enumerate(landmarks):
# dist = np.linalg.norm(particles[:, 0:2] - landmark, axis=1)
# weights *= scipy.stats.norm(dist, R).pdf(z[i])
#
# weights += 1.e-300 # avoid round-off to zero
# weights /= sum(weights) # normalize
# ```
#
# The reason for `self.weights.fill(1.)` might have confused you. In all the Bayesian filters up to this chapter we started with the probability distribution created by the `predict` step, and this appears to discard that information by setting all of the weights to 1. Well, we are discarding the weights, but we do not discard the particles. That is a direct result of applying importance sampling - we draw from the known distribution, but weight by the unknown distribution. In this case our known distribution is the uniform distribution - all are weighted equally.
#
# Of course if you can compute the posterior probability distribution from the prior you should do so. If you cannot, then importance sampling gives you a way to solve this problem. In practice, computing the posterior is incredibly difficult. The Kalman filter became a spectacular success because it took advantage of the properties of Gaussians to find an analytic solution. Once we relax the conditions required by the Kalman filter (Markov property, Gaussian measurements and process) importance sampling and monte carlo methods make the problem tractable.
# ## Resampling Methods
#
# The resampling algorithm affects the performance of the filter. For example, suppose we resampled particles by picking particles at random. This would lead us to choosing many particles with a very low weight, and the resulting set of particles would be a terrible representation of the problem's probability distribution.
#
# Research on the topic continues, but a handful of algorithms work well in practice across a wide variety of situations. We desire an algorithm that has several properties. It should preferentially select particles that have a higher probability. It should select a representative population of the higher probability particles to avoid sample impoverishment. It should include enough lower probability particles to give the filter a chance of detecting strongly nonlinear behavior.
#
# FilterPy implements several of the popular algorithms. FilterPy doesn't know how your particle filter is implemented, so it cannot generate the new samples. Instead, the algorithms create a `numpy.array` containing the indexes of the particles that are chosen. Your code needs to perform the resampling step. For example, I used this for the robot:
def resample_from_index(particles, weights, indexes):
    """Resample the particle set in place from precomputed indexes.

    Overwrites `particles` and `weights` with the rows selected by
    `indexes`, then resets every weight to the uniform value 1/N.
    """
    n = len(weights)
    particles[:] = particles[indexes]
    weights[:] = weights[indexes]
    weights.fill(1.0 / n)
# ### Multinomial Resampling
#
# Multinomial resampling is the algorithm that I used while developing the robot localization example. The idea is simple. Compute the cumulative sum of the normalized weights. This gives you an array of increasing values from 0 to 1. Here is a plot which illustrates how this spaces out the weights. The colors are meaningless, they just make the divisions easier to see.
# Visualize how the cumulative sum of the normalized weights partitions
# [0, 1]: larger weights occupy proportionally more of the interval.
from kf_book.pf_internal import plot_cumsum
# fixed typo in the printed label ("sume" -> "sum")
print('cumulative sum is', np.cumsum([.1, .2, .1, .6]))
plot_cumsum([.1, .2, .1, .6])
# To select a weight we generate a random number uniformly selected between 0 and 1 and use binary search to find its position inside the cumulative sum array. Large weights occupy more space than low weights, so they are more likely to be selected.
#
# This is very easy to code using NumPy's [ufunc](http://docs.scipy.org/doc/numpy/reference/ufuncs.html) support. Ufuncs apply functions to every element of an array, returning an array of the results. `searchsorted` is NumPy's binary search algorithm. If you provide it with an array of search values it will return an array of answers: a single answer for each search value.
def multinomal_resample(weights):
    """Multinomial resampling: draw N uniform variates and map each one
    through the inverse CDF of the weights via binary search.

    Returns an array of N particle indexes; high-weight particles span
    more of the CDF and are therefore selected more often.
    """
    cdf = np.cumsum(weights)
    cdf[-1] = 1.  # guard against floating-point round-off in the sum
    draws = random(len(weights))
    return np.searchsorted(cdf, draws)
# Here is an example:
# Show which particles multinomial resampling selects for a sample weighting.
from kf_book.pf_internal import plot_multinomial_resample
plot_multinomial_resample([.1, .2, .3, .4, .2, .3, .1])
# This is an $O(n \log(n))$ algorithm. That is not terrible, but there are $O(n)$ resampling algorithms with better properties with respect to the uniformity of the samples. I'm showing it because you can understand the other algorithms as variations on this one. There is a faster implementation of this multinomial resampling that uses the inverse of the CDF of the distribution. You can search on the internet if you are interested.
#
# Import the function from FilterPy using
#
# ```python
# from filterpy.monte_carlo import multinomal_resample
# ```
# ### Residual Resampling
#
# Residual resampling both improves the run time of multinomial resampling, and ensures that the sampling is uniform across the population of particles. It's fairly ingenious: the normalized weights are multiplied by *N*, and then the integer value of each weight is used to define how many samples of that particle will be taken. For example, if the weight of a particle is 0.0012 and $N$=3000, the scaled weight is 3.6, so 3 samples will be taken of that particle. This ensures that all higher weight particles are chosen at least once. The running time is $O(N)$, making it faster than multinomial resampling.
#
# However, this does not generate all *N* selections. To select the rest, we take the *residual*: the weights minus the integer part, which leaves the fractional part of the number. We then use a simpler sampling scheme such as multinomial, to select the rest of the particles based on the residual. In the example above the scaled weight was 3.6, so the residual will be 0.6 (3.6 - int(3.6)). This residual is very large so the particle will be likely to be sampled again. This is reasonable because the larger the residual the larger the error in the round off, and thus the particle was relatively under sampled in the integer step.
def residual_resample(weights):
    """Residual resampling.

    Each particle is first copied floor(N*w) times, which guarantees
    every high-weight particle is selected at least once and runs in
    O(N). The remaining slots are filled by multinomial sampling on the
    fractional residuals.

    Parameters
    ----------
    weights : array_like
        Normalized particle weights (summing to 1).

    Returns
    -------
    numpy.ndarray of int, shape (N,)
        Indexes of the selected particles.
    """
    N = len(weights)
    indexes = np.zeros(N, 'i')

    # take int(N*w) copies of each weight
    # BUGFIX: the original referenced an undefined name `w` below; keep
    # the scaled weights in a local so both steps use the same values.
    scaled_weights = N * np.asarray(weights)
    num_copies = scaled_weights.astype(int)
    k = 0
    for i in range(N):
        for _ in range(num_copies[i]):  # make n copies
            indexes[k] = i
            k += 1

    # use multinomial resampling on the residuals to fill up the rest
    residual = scaled_weights - num_copies  # get fractional part
    residual /= sum(residual)               # normalize
    cumulative_sum = np.cumsum(residual)
    cumulative_sum[-1] = 1.  # ensures sum is exactly one
    indexes[k:N] = np.searchsorted(cumulative_sum, random(N-k))
    return indexes
# You may be tempted to replace the inner for loop with a slice `indexes[k:k + num_copies[i]] = i`, but very short slices are comparatively slow, and the for loop usually runs faster.
#
# Let's look at an example:
# Show which particles residual resampling selects for a sample weighting.
from kf_book.pf_internal import plot_residual_resample
plot_residual_resample([.1, .2, .3, .4, .2, .3, .1])
# You may import this from FilterPy using
#
# ```python
# from filterpy.monte_carlo import residual_resample
# ```
# ### Stratified Resampling
#
# This scheme aims to make selections relatively uniformly across the particles. It works by dividing the cumulative sum into $N$ equal sections, and then selects one particle randomly from each section. This guarantees that each sample is between 0 and $\frac{2}{N}$ apart.
#
# The plot below illustrates this. The colored bars show the cumulative sum of the array, and the black lines show the $N$ equal subdivisions. Particles, shown as black circles, are randomly placed in each subdivision.
# Show the N equal subdivisions and the one random draw made in each.
from kf_book.pf_internal import plot_stratified_resample
plot_stratified_resample([.1, .2, .3, .4, .2, .3, .1])
# The code to perform the stratification is quite straightforward.
def stratified_resample(weights):
    """Stratified resampling.

    Divides [0, 1) into N equal strata and draws one uniform sample
    from each, so consecutive samples are guaranteed to be less than
    2/N apart. Returns an array of N selected particle indexes.
    """
    n = len(weights)
    # one random position inside each of the n equal subdivisions
    positions = (random(n) + np.arange(n)) / n
    indexes = np.zeros(n, 'i')
    cumulative_weight = np.cumsum(weights)
    sample = 0
    division = 0
    while sample < n:
        if positions[sample] < cumulative_weight[division]:
            indexes[sample] = division
            sample += 1
        else:
            division += 1
    return indexes
# Import it from FilterPy with
#
# ```python
# from filterpy.monte_carlo import stratified_resample
# ```
# ### Systematic Resampling
#
# The last algorithm we will look at is systematic resampling. As with stratified resampling the space is divided into $N$ divisions. We then choose a random offset to use for all of the divisions, ensuring that each sample is exactly $\frac{1}{N}$ apart. It looks like this.
# Show the N subdivisions sampled with one shared random offset.
from kf_book.pf_internal import plot_systematic_resample
plot_systematic_resample([.1, .2, .3, .4, .2, .3, .1])
# Having seen the earlier examples the code couldn't be simpler.
def systematic_resample(weights):
    """Systematic resampling.

    A single random offset shifts N evenly spaced positions across
    [0, 1), so successive samples are exactly 1/N apart. Returns an
    array of N selected particle indexes.
    """
    n = len(weights)
    # evenly spaced positions sharing one consistent random offset
    offset = random()
    positions = (np.arange(n) + offset) / n
    indexes = np.zeros(n, 'i')
    cumulative_weight = np.cumsum(weights)
    sample = 0
    division = 0
    while sample < n:
        if positions[sample] < cumulative_weight[division]:
            indexes[sample] = division
            sample += 1
        else:
            division += 1
    return indexes
#
# Import from FilterPy with
#
# ```python
# from filterpy.monte_carlo import systematic_resample
# ```
# ### Choosing a Resampling Algorithm
#
# Let's look at the four algorithms at once so they are easier to compare.
# Compare all four resampling schemes on the same weight vector, with a
# fixed seed so their selections can be contrasted directly.
a = [.1, .2, .3, .4, .2, .3, .1]
np.random.seed(4)
plot_multinomial_resample(a)
plot_residual_resample(a)
plot_systematic_resample(a)
plot_stratified_resample(a)
# The performance of the multinomial resampling is quite bad. There is a very large weight that was not sampled at all. The largest weight only got one resample, yet the smallest weight was sampled twice. Most tutorials on the net that I have read use multinomial resampling, and I am not sure why. Multinomial resampling is rarely used in the literature or for real problems. I recommend not using it unless you have a very good reason to do so.
#
# The residual resampling algorithm does excellently at what it tries to do: ensure all the largest weights are resampled multiple times. It doesn't evenly distribute the samples across the particles - many reasonably large weights are not resampled at all.
#
# Both systematic and stratified perform very well. Systematic sampling does an excellent job of ensuring we sample from all parts of the particle space while ensuring larger weights are proportionality resampled more often. Stratified resampling is not quite as uniform as systematic resampling, but it is a bit better at ensuring the higher weights get resampled more.
#
# Plenty has been written on the theoretical performance of these algorithms, and feel free to read it. In practice I apply particle filters to problems that resist analytic efforts, and so I am a bit dubious about the validity of a specific analysis to these problems. In practice both the stratified and systematic algorithms perform well and similarly across a variety of problems. I say try one, and if it works stick with it. If performance of the filter is critical try both, and perhaps see if there is literature published on your specific problem that will give you better guidance.
# ## Summary
#
# This chapter only touches the surface of what is a vast topic. My goal was not to teach you the field, but to expose you to practical Bayesian Monte Carlo techniques for filtering.
#
# Particle filters are a type of *ensemble* filtering. Kalman filters represents state with a Gaussian. Measurements are applied to the Gaussian using Bayes Theorem, and the prediction is done using state-space methods. These techniques are applied to the Gaussian - the probability distribution.
#
# In contrast, ensemble techniques represent a probability distribution using a discrete collection of points and associated probabilities. Measurements are applied to these points, not the Gaussian distribution. Likewise, the system model is applied to the points, not a Gaussian. We then compute the statistical properties of the resulting ensemble of points.
#
# These choices have many trade-offs. The Kalman filter is very efficient, and is an optimal estimator if the assumptions of linearity and Gaussian noise are true. If the problem is nonlinear than we must linearize the problem. If the problem is multimodal (more than one object being tracked) then the Kalman filter cannot represent it. The Kalman filter requires that you know the state model. If you do not know how your system behaves the performance is poor.
#
# In contrast, particle filters work with any arbitrary, non-analytic probability distribution. The ensemble of particles, if large enough, form an accurate approximation of the distribution. It performs wonderfully even in the presence of severe nonlinearities. Importance sampling allows us to compute probabilities even if we do not know the underlying probability distribution. Monte Carlo techniques replace the analytic integrals required by the other filters.
#
# This power comes with a cost. The most obvious costs are the high computational and memory burdens the filter places on the computer. Less obvious is the fact that they are fickle. You have to be careful to avoid particle degeneracy and divergence. It can be very difficult to prove the correctness of your filter. If you are working with multimodal distributions you have further work to cluster the particles to determine the paths of the multiple objects. This can be very difficult when the objects are close to each other.
#
# There are many different classes of particle filter; I only described the naive SIS algorithm, and followed that with a SIR algorithm that performs well. There are many classes of filters, and many examples of filters in each class. It would take a small book to describe them all.
#
# When you read the literature on particle filters you will find that it is strewn with integrals. We perform computations on probability distributions using integrals, so using integrals gives the authors a powerful and compact notation. You must recognize that when you reduce these equations to code you will be representing the distributions with particles, and integrations are replaced with sums over the particles. If you keep in mind the core ideas in this chapter the material shouldn't be daunting.
# ## References
#
# [1] *Importance Sampling*, Wikipedia.
# https://en.wikipedia.org/wiki/Importance_sampling
#
| 12-Particle-Filters.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Restoring a Noisy Image
# > Written by <NAME>
#
# **This problem is *not* graded. The autograder will tell you if your solution is correct, but will not provide a hash. The solution notebook is available on EdX. **
#
# ## Understanding image restoration
# In this problem, we will learn about a simple algorithm for removing noise from ('denoising') an image. We will want to use vectorization to write an efficient algorithm for this.
#
# Suppose we have an image that we want to transmit to a friend. During transmission, there is a 10% chance that any given pixel gets distorted. After transmission, about 10% of our pixels have changed colors due to this distortion. For example, we might start with the image on the left and our friend may receive the image on the right:
#
#
# 
#
# We want to write an algorithm that will automatically remove as much noise from the received image as possible. The algorithm we'll use for this is called Iterated Conditional Modes (ICM). We'll only concern ourselves with grayscale images for this, but note that the same technique can be applied to color images as well.
#
# #### Iterated Conditional Modes
# The idea behind ICM is pretty straightforward. It hinges on a very simple observation: in an image, color changes are very infrequent. Objects tend to have the same color throughout them, so the only color differences we run into are when we transition from one object to another. For example, in the image above we see a color transition when we look at the boundary of the circle and the triangle. However, within an object the color is the same.
#
# Taking a look at the noisy image, we can see that there are a lot of color changes; far more than in the original image. Most of these are isolated to a single pixel. For example, see this blowup of the triangle:
#
# 
#
# In the middle of the red rectangle we can see a dark gray pixel that's all by itself. We can be almost certain that this kind of discontinuity is caused by noise. The chances of a single pixel being a different color than all of its neighbors in a normal image is very low. If we change the color of that dark gray pixel to light gray, we'll probably improve the image quality.
#
# This is exactly what the Iterated Conditional Modes algorithm does. We define the *energy* of a pixel $p$ as the number of $p$'s neighbors that differ in color from $p$. We will define $p$'s neighborhood as the four pixels immediately adjacent to $p$. If $p$ is at $(3, 5)$, then $p$'s neighborhood is $\{(2, 5), (4, 5), (3, 4), (3, 6)\}$.
#
# The energy of the whole image is the sum of each pixel's energy. The aim of the ICM algorithm is to minimize the energy of the noisy image by appropriately choosing color values for the most energetic pixels.
#
# As you can probably tell by the name, *Iterated* Conditional Modes is an iterative algorithm. At each iteration, we change the color value of the highest-energy pixel to match the most neighbors it can.
#
# Let's fix a concrete example. Suppose we're transmitting a 5x8 image that looks like this:
#
# ```
# 0 0 0 0 1 1 1 1
# 0 0 0 0 1 1 1 1
# 0 0 0 0 1 1 1 1
# 0 0 0 0 1 1 1 1
# 0 0 0 0 1 1 1 1
# ```
#
# Not very interesting, but it illustrates our point. Suppose the transmitted image that our friend receives is:
#
# ```
# 0 0 0 0 1 1 0 1
# 0 1 0 0 1 1 1 1
# 0 0 0 0 0 1 1 1
# 0 0 0 0 1 1 1 1
# 0 0 1 0 1 1 1 1
# ```
#
# We can compute the energy at each pixel by recording how many of the pixels above, below, left, and right are a different value. If we do this, we get these energy values:
#
# ```
# 0 1 0 1 1 1 3 1
# 1 4 1 1 2 0 1 0
# 0 1 0 1 3 1 0 0
# 0 0 1 1 2 0 0 0
# 0 1 3 2 1 0 0 0
# ```
#
# Remember, these are *energy* values, not *color* values. We'll pick the highest-energy pixel, which is at $(1, 1)$ and has energy 4. We change its color to minimize its energy. This is the value $0$. Flipping it yields the image:
#
# ```
# 0 0 0 0 1 1 0 1
# 0 0 0 0 1 1 1 1
# 0 0 0 0 0 1 1 1
# 0 0 0 0 1 1 1 1
# 0 0 1 0 1 1 1 1
# ```
#
# We can now recompute the energy across the entire image and choose the next-highest value. Eventually, we reach a point where we don't swap any labels, and our algorithm terminates, yielding the restored image:
#
# ```
# 0 0 0 0 1 1 1 1
# 0 0 0 0 1 1 1 1
# 0 0 0 0 1 1 1 1
# 0 0 0 0 1 1 1 1
# 0 0 0 0 1 1 1 1
# ```
#
# which perfectly restores what we transmitted. Note that in general, the restoration will not be perfect. For an example of an imperfect restoration, if we get this transmission:
#
# ```
# 0 0 0 0 1 1 1 1
# 0 1 1 0 1 1 1 1
# 0 1 1 0 1 1 1 1
# 0 0 0 0 1 1 1 1
# 0 0 0 0 1 1 1 1
# ```
#
# we will not remove that block of $1$s in the middle of the $0$s.
# ## Problem 1: Local energy of each pixel
# Write a Python function that takes in a 2-dimensional NumPy array containing color values, and returns the energy at each pixel in the image. Recall that the energy of a pixel is the number of a pixel's neighbors that differ from it.
#
# Hint: the vectorized solution to the heat equation problem can be easily adapted to compute the energy at each pixel.
#
# Fun fact: `True` and `False` can be interpreted as integers! A `True` value is a binary 1, while `False` is binary 0. We can use this fact to help with our vectorization. Try it out!
#
# ```
# >>> np.sum([False, True, False])
# 1
# ```
# +
import numpy as np
def compute_energy(img):
''' Given a 2D array of color values, produces a 2D array with
the energy at each pixel, where energy is defined as the sum
of each's pixels neighbors differing in color from that pixel.
Parameters
----------
img : numpy.ndarray, shape=(M, N)
An MxN array of color values.
Returns
-------
numpy.ndarray, shape=(M, N)
An MxN array of energy values.
'''
# student code goes here
# -
from bwsi_grader.python.image_restoration import grader1
grader1(compute_energy)
# ## Problem 2: Neighbor colors
# Once we get the highest-energy pixel, we need to get the color values of its neighbors, so we know what color will minimize the energy. Write a Python function that takes in a 2D NumPy array of color values and (row, column) coordinates of a pixel, and returns a list containing the color values of $(r, c)$'s neighbors.
def get_neighbor_colors(img, pixel):
    ''' Given a 2D array of color values and the position of a pixel,
    returns a list of `pixel`'s neighboring color values.

    Out-of-bounds neighbors (for pixels on an edge or corner) are
    simply omitted, so the list has between 2 and 4 entries.

    Parameters
    ----------
    img : numpy.ndarray, shape=(M, N)
        An MxN array of color values
    pixel : tuple[int, int]
        The (r, c) index of the pixel whose neighbors to retrieve.

    Returns
    -------
    List[int]
        The color (or label) value of each of `pixel`'s in-bounds
        neighbors, in (up, down, left, right) order.
    '''
    r, c = pixel
    neighbors = []
    for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
        rr, cc = r + dr, c + dc
        if 0 <= rr < img.shape[0] and 0 <= cc < img.shape[1]:
            neighbors.append(img[rr, cc])
    return neighbors
from bwsi_grader.python.image_restoration import grader2
grader2(get_neighbor_colors)
# ## Problem 3: Iterated Conditional Modes
# With these two functions in place, we can implement Iterated Conditional Modes. Write a Python function that takes as input a 2D NumPy array of color values and returns a new image with the highest-energy pixel replaced.
#
# Your function should:
# - Use the `compute_energy` function to get the energy at each pixel
# - Find the pixel coordinates of the highest-energy pixel (*hint: NumPy's argmax may be useful*)
# - Use the `get_neighbor_colors` function to get the colors of the highest-energy pixel's neighbors
# - Change the highest-energy pixel color to best match its neighbors
def denoise_iter(noisy):
    ''' Given a 2D array of color values, performs one step of the
    Iterated Conditional Modes algorithm, changing the color of
    the highest-energy pixel to the majority color among its
    neighbors (which minimizes that pixel's local energy).

    Parameters
    ----------
    noisy : numpy.ndarray, shape=(M, N)
        An MxN array of color values.

    Returns
    -------
    numpy.ndarray, shape=(M, N)
        A new MxN array of color values, after applying one step of ICM.
    '''
    # energy at each pixel (computed inline so this function is
    # self-contained; equivalent to compute_energy above)
    energy = np.zeros(noisy.shape, dtype=int)
    vert = noisy[1:] != noisy[:-1]
    energy[1:] += vert
    energy[:-1] += vert
    horiz = noisy[:, 1:] != noisy[:, :-1]
    energy[:, 1:] += horiz
    energy[:, :-1] += horiz

    # coordinates of the highest-energy pixel (first one on ties)
    r, c = np.unravel_index(np.argmax(energy), energy.shape)

    # colors of that pixel's in-bounds neighbors
    neighbors = []
    for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
        rr, cc = r + dr, c + dc
        if 0 <= rr < noisy.shape[0] and 0 <= cc < noisy.shape[1]:
            neighbors.append(noisy[rr, cc])

    # the majority neighbor color minimizes the pixel's energy
    vals, counts = np.unique(neighbors, return_counts=True)
    out = noisy.copy()
    out[r, c] = vals[np.argmax(counts)]
    return out
from bwsi_grader.python.image_restoration import grader3
grader3(denoise_iter)
# Now we'll see your functions in action. Let's generate a noisy image.
# +
# This cell generates a noisy image and displays it
# You need not modify this code, simply run the cell
import matplotlib.pyplot as plt
# %matplotlib notebook
def generate_noisy_copy(img, pct_noise):
    ''' Given a 2D array of color values and an approximate noise percentage,
    returns a noisy copy of the image.

    Each pixel is independently replaced, with probability `pct_noise`,
    by a color drawn uniformly from the set of colors present in `img`.

    Parameters
    ----------
    img : numpy.ndarray, shape=(M, N)
        An MxN array of color values -- the pristine image.
    pct_noise : float
        The amount of noise to add, as an approximate percentage.

    Returns
    -------
    numpy.ndarray, shape=(M, N)
        An MxN array of color values -- a noisy copy of the pristine image.
    '''
    # a full-size "noise image" whose pixels are random colors from img
    palette = np.unique(img)
    noise = np.random.choice(palette, img.shape).astype(np.uint8)
    # per-pixel coin flips deciding which pixels get corrupted
    rands = np.random.rand(img.size).reshape(img.shape)
    # copy the pristine image and overwrite the selected pixels
    noisy = img.copy()
    replace = np.where(rands < pct_noise)
    noisy[replace] = noise[replace]
    return noisy
# load the original image
pristine = (plt.imread('original-image.png')*255).astype(np.uint8)
# add about 10% noise
noisy = generate_noisy_copy(pristine, 0.1)
# display the images
fig, axs = plt.subplots(1, 2, figsize=(8, 5))
axs[0].imshow(pristine, 'gray')
axs[1].imshow(noisy, 'gray')
# -
# With all the code in place, let's finally denoise our image!
# +
# You may simply run this cell
# ICM driver: denoise_iter changes at most one pixel per call, so we keep
# iterating until a full step leaves the image unchanged (a local energy
# minimum has been reached).
num_iters = 0  # how many iterations we have performed, to see progress
cleaned_up = noisy.copy()  # the denoised image
old = np.zeros_like(cleaned_up)  # the previous iteration, for a stopping condition
while np.any(old != cleaned_up):  # loop until no labels change values
    num_iters += 1
    if (num_iters % 1000) == 0:  # print progress
        print(num_iters, 'Energy {}'.format(compute_energy(cleaned_up).sum()))
    old = cleaned_up.copy()
    cleaned_up = denoise_iter(cleaned_up)
# -
# Display the noisy version, our denoised image, and the original alongside each other
# +
# Again, you only need to run this
fig, axs = plt.subplots(1, 3, figsize=(8, 3))
axs[0].imshow(noisy, 'gray')
axs[1].imshow(cleaned_up, 'gray')
axs[2].imshow(pristine, 'gray')
# -
# Now we can see the difference in energy between the original and denoised version
compute_energy(cleaned_up).sum()
compute_energy(pristine).sum()
| PythonHW-1.2.4/image_restoration/HW_image_restoration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Binder Magic Demo
#
# Example of using Binder magic to launch a MyBinder container from Github and then access it from a notebook running the magic.
#
# Install from the Github repo:
#
# `pip install --upgrade git+https://github.com/innovationOUtside/ipython_binder_magic.git`
#
# The magic can then be loaded as:
#
# `%load_ext binder_magic`
#
# or
#
# `%reload_ext binder_magic`
# %reload_ext binder_magic
# The MyBinder connection should be initiated by calling the `%binder` line magic with a `-r` argument containing the name of the Github repository you want to use as the Binder target.
# %binder -r https://github.com/ouseful-demos/ordo
# Once the Binder image is running, we can start to run commands against it using `%%binder` cell magic:
# %%binder
# !ls
# %%binder
a=1
# %%binder
a
# %%binder
# !pip install pandas
import pandas as pd
pd.DataFrame({'a':[1,2], 'b':['foo','bar']})
# %%binder
print('a')
# Note that whilst a heartbeat is sent back to the Binder kernel every 30s, the connection seems to die quite quickly (after about 2 minutes of inactivity?). A warning should be raised if the connection is detected to have died.
#
# I wonder if we can con the Binder instance into thinking something is alive by creating a dummy notebook / kernel and writing adding a new cell to it in the background every 30s or so, so that the Binder container knows it's still being used (?!) and doesn't time out so quickly...?
| Binder magic demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from PIL import Image
from sklearn.cluster import MeanShift, estimate_bandwidth
from skimage.filters import gaussian
from skimage.measure import label
import matplotlib.pyplot as plt
import numpy as np
from math import floor
from scipy.ndimage.morphology import binary_dilation, binary_fill_holes
import json
from pycocotools import mask
from skimage import measure
import cv2
from matplotlib import rcParams
# +
# Read in the image
im = Image.open("images/test_image.jpg")
# Convert image to numpy array
im = np.array(im)
# Display numpy array
plt.imshow(im)
# +
# Read in the image
im = Image.open("images/test_image.jpg")
# NOTE(review): PIL's Image.size is (width, height), so `original_rows`
# actually holds the width and `original_cols` the height -- the later
# np.zeros((original_cols, original_rows)) relies on exactly this swap;
# confirm before renaming.
original_rows, original_cols = im.size
# Declare bounding box -- (left, upper, right, lower) pixel coordinates
crop_rectangle = (1095, 468, 1432, 788)
# Crop the image around the bounding box
im = im.crop(crop_rectangle)
# Load pixel data from cropped image (indexed as pix[x, y] below)
pix = im.load()
cols, rows = im.size
print("Rows: {}".format(rows))
print("Cols: {}".format(cols))
# Convert cropped image to numpy array
im = np.array(im)
# im = gaussian(im, sigma=6)
original_shape = im.shape
# Display numpy array
plt.imshow(im)
# -
# Load positive and negative histograms (3D per-channel RGB bin counts)
RGB_Water_Histogram = np.load('histograms/boat_positive_histogram.npy')
RGB_Non_Water_Histogram = np.load('histograms/boat_negative_histogram.npy')
# +
# Get the height and width of the sample image
rows = np.shape(im)[0]
cols = np.shape(im)[1]
print("Rows: {}".format(rows))
print("Cols: {}".format(cols))
# Get histogram bins
histogram_bins = np.shape(RGB_Water_Histogram)[0]
# Get the factor based on the histogram bins
# (maps the 256 possible color values onto `histogram_bins` bins per channel)
factor = 256 / histogram_bins
# -
# Declare a results numpy array that contains only zeros
result = np.zeros((rows, cols))
# +
# Determine the probability of water given RGB and histograms representing water and non water
# NOTE(review): the bare `except:` treats *any* failure in the first branch
# (including the largest-connected-component assert) as a cue to retry with a
# median-based threshold and fewer dilation iterations; catching specific
# exceptions would make the fallback intent explicit.
try:
    iterations = 4
    # P(water | RGB): look each pixel up in both histograms and normalize.
    for row in range(rows):
        for col in range(cols):
            red = float(pix[col, row][0])
            green = float(pix[col, row][1])
            blue = float(pix[col, row][2])
            red_index = floor(red / factor)
            green_index = floor(green / factor)
            blue_index = floor(blue / factor)
            water_value = RGB_Water_Histogram[red_index, green_index, blue_index]
            non_water_value = RGB_Non_Water_Histogram[red_index, green_index, blue_index]
            total = water_value + non_water_value
            # BUG FIX: `total is not 0` is an identity test and is always
            # True for numpy scalars, so empty histogram bins caused a
            # divide-by-zero; compare by value instead.
            if total != 0:
                result[row, col] = water_value / total
    # Binarize: pixels below the fixed threshold become candidate boat pixels.
    threshold = 0.48
    for row in range(rows):
        for col in range(cols):
            if result[row, col] < threshold:
                result[row, col] = 1
            else:
                result[row, col] = 0
    plt.imshow(result)
    # Keep only the largest connected component of the binary mask.
    labels = label(result)
    assert( labels.max() != 0 )  # assume at least 1 CC
    largestCC = labels == np.argmax(np.bincount(labels.flat)[1:])+1
except:
    iterations = 2
    # Fallback: same probability map, but binarize on the median response.
    for row in range(rows):
        for col in range(cols):
            red = float(pix[col, row][0])
            green = float(pix[col, row][1])
            blue = float(pix[col, row][2])
            red_index = floor(red / factor)
            green_index = floor(green / factor)
            blue_index = floor(blue / factor)
            water_value = RGB_Water_Histogram[red_index, green_index, blue_index]
            non_water_value = RGB_Non_Water_Histogram[red_index, green_index, blue_index]
            total = water_value + non_water_value
            # BUG FIX: same identity-comparison fix as in the branch above.
            if total != 0:
                result[row, col] = water_value / total
    threshold = 0.48
    for row in range(rows):
        for col in range(cols):
            if result[row, col] < np.median(result):
                result[row, col] = 1
            else:
                result[row, col] = 0
    plt.imshow(result)
    # Keep only the largest connected component of the binary mask.
    labels = label(result)
    assert( labels.max() != 0 )  # assume at least 1 CC
    largestCC = labels == np.argmax(np.bincount(labels.flat)[1:])+1
plt.imshow(largestCC)
# +
# Fill holes in the boat
# (binary_fill_holes closes any enclosed background pixels inside the mask)
largestCC = binary_fill_holes(largestCC)
plt.imshow(largestCC)
# +
# Dialate to expand the mask
# `iterations` was set by the thresholding cell above (4 normally, 2 fallback)
largestCC = binary_dilation(largestCC, iterations=iterations)
plt.imshow(largestCC)
# +
# Create numpy zeros array the same size as the original image before cropping
# NOTE(review): despite the names, (original_cols, original_rows) is
# (height, width) here because im.size returned (width, height) -- see the
# loading cell; confirm before renaming.
image_with_mask = np.zeros((original_cols, original_rows))
# Overlay binary mask onto zeros array at the crop rectangle's position
image_with_mask[crop_rectangle[1]:crop_rectangle[1] + rows, crop_rectangle[0]:crop_rectangle[0] + cols] = largestCC
plt.imshow(image_with_mask)
# +
# Now that we have the mask, we can convert the mask into COCO JSON format
# Convert mask to type uint8
image_with_mask = np.array(image_with_mask, dtype=np.uint8)
# pycocotools' RLE encoder expects a Fortran-ordered array
fortran_ground_truth_binary_mask = np.asfortranarray(image_with_mask)
encoded_ground_truth = mask.encode(fortran_ground_truth_binary_mask)
ground_truth_area = mask.area(encoded_ground_truth)
ground_truth_bounding_box = mask.toBbox(encoded_ground_truth)
# Trace the mask outline(s) at the 0.5 level; each contour is flattened into
# the [x0, y0, x1, y1, ...] polygon format used by COCO segmentations
contours = measure.find_contours(image_with_mask, 0.5)
segmentations = list()
for contour in contours:
    contour = np.flip(contour, axis=1)
    segmentation = contour.ravel().tolist()
    segmentations.append(segmentation)
print(json.dumps(segmentations, indent=4))
| demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "0be9b083-4903-434d-bac6-14bef8a12a3e", "showTitle": false, "title": ""}
ACCESS_KEY = ""
SECRET_KEY = ""
ENCODED_SECRET_KEY = SECRET_KEY.replace("/", "%2F")
AWS_BUCKET_NAME = ""
MOUNT_NAME = "database"
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "ef25c7fc-bae4-4fcf-8027-a617961beea2", "showTitle": false, "title": ""}
# BUG FIX: '/mnt/database'.format(MOUNT_NAME) ignored its argument because the
# string contained no replacement field; build the mount point from MOUNT_NAME
# (same result while MOUNT_NAME == "database", but no longer silently wrong).
mount_dir = '/mnt/{}'.format(MOUNT_NAME)
try:  # Check whether the mount directory already exists
    dbutils.fs.ls(mount_dir)
except:  # If it does not, mount the S3 bucket at that path
    dbutils.fs.mount("s3a://%s:%s@%s" % (ACCESS_KEY, ENCODED_SECRET_KEY, AWS_BUCKET_NAME), mount_dir)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "55b600c9-890c-4423-8bfa-c4d9a10876ac", "showTitle": false, "title": ""}
#dbutils.fs.unmount("/mnt/database")
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "674fa997-fb93-4a6d-a51a-b16eef81f873", "showTitle": false, "title": ""}
# %fs
# ls /mnt/database/
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "c5b42bde-f582-452b-8152-bc8db117acdd", "showTitle": false, "title": ""}
data_dir = mount_dir
print('Tamanho total dos arquivos (em formato Parquet):', sum([x.size for x in dbutils.fs.ls(data_dir)]) / 2000000000.0, 'GB\n')
print('Quantidade de arquivos:', len(dbutils.fs.ls(data_dir)))
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "6482be4c-f067-47c9-b0ac-35c938b94601", "showTitle": false, "title": ""}
#Fazer Leitura dos Arquivos
#avaliacoes_filme = spark.read.format("csv").option("header", "true").load("/FileStore/tables/part_02-1.csv")
avaliacoes_filme = spark.read.format("csv").option("header", "true").load("/mnt/database/part_**.csv")
display(avaliacoes_filme)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "bd82bb99-1479-4d5c-be10-8c36df0f1d44", "showTitle": false, "title": ""}
# Cada usuário pode dar notas para diversos filmes
# Com isso contar a quantidade de usuários
display(avaliacoes_filme.select(avaliacoes_filme.User_Id).summary('count'))
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "2602995a-da34-40c5-9b65-ac0b0e46da94", "showTitle": false, "title": ""}
import pyspark.sql.functions as f
# Limpeza - Anos
## Utilizar regex para fazer separação dos Movie_Name - Que são composto pelo nome + (ANO)
## Regex
### get last six character = (year) = (.{6})\s*$
### .+?(?=(.{6})\s*$) - 1Grupo = Titulo, 2 Grupo = (Ano)
avaliacoes_filme = avaliacoes_filme.withColumn('Titulo',f.split('Movie_Name',"(.{6})\s*$")[0]).\
withColumn('Ano',f.split('Movie_Name',".+?(?=(.{6})\s*$)")[1])
## Remover parentes da coluna Ano
avaliacoes_filme = avaliacoes_filme.withColumn('Ano', avaliacoes_filme['Ano'].substr(2, 4))
## Remover Coluna antiga
avaliacoes_filme = avaliacoes_filme.drop('Movie_Name')
display(avaliacoes_filme)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "757bffd6-0d00-4784-835d-38c9a4218dac", "showTitle": false, "title": ""}
# Mostrar anos que tiveram mais avaliações
anos_filmes = avaliacoes_filme.groupBy("Ano", "User_Id").count().groupBy("Ano").count()
# Remover strings - Filmes que não possuem data na base
anos_filmes = anos_filmes.filter(anos_filmes.Ano > 0).orderBy(anos_filmes.Ano, ascending=True).collect()
display(anos_filmes)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "f696fb5e-acf9-498e-9bb9-8b5f07061ad7", "showTitle": false, "title": ""}
from pyspark.sql import types
from pyspark.sql.functions import sum as _sum
from pyspark.sql.functions import count as _count
# BUG FIX: DataFrames are immutable, so the result of the cast chain must be
# reassigned -- the original discarded it and Rating stayed a string.
avaliacoes_filme = avaliacoes_filme.withColumn("ratingTemp", avaliacoes_filme.Rating.cast("Double")).drop("Rating").withColumnRenamed("ratingTemp", "Rating")
# Per genre: total of the ratings and number of ratings
generos = avaliacoes_filme.groupBy("Genre").agg(_sum("Rating"), _count("Genre"))
# Drop the "(no genres listed)" bucket
generos = generos.filter(generos.Genre != "(no genres listed)")
# Average rating per genre
generos = generos.withColumn('Media', f.col('sum(Rating)') / f.col('count(Genre)'))
display(generos)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "0dd69afe-8d6d-4a60-866d-5e1f9921b249", "showTitle": false, "title": ""}
# Essa base de dados possui bastante criticas para os filmes entre os anos 70 e 2000
# A hipotese inicial era que os filmes mais recentes teriam mais avaliações devido a ser consumidos mais recentemente
# Outro analise feita foi a questão de generos
# Verificando todas os generos e suas mistura,
# os que tiveram maior pontuação na base analisada foi a mistura de um ou mais generos
# Essas informações poderiam ser utilizadas por streaming de video para produção de novos filmes por exemplo
| databricks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sorting Results
# Imports
import numpy as np
import pandas as pd
import scanpy as sc
from anndata import read_h5ad
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import accuracy_score
from sklearn.feature_selection import RFE
from sklearn.feature_selection import RFECV
# cd scrfe test results
kidney_facs_1000_age = pd.read_csv('KidneyFacsAge1000TissReset.csv')
# +
# kidney_facs_1000_age
# -
def sortVals(df):
    """Print the name of *df*'s second column and return a copy of *df*
    sorted by its third column in descending order.

    NOTE(review): the printed column (index 1) and the sort column
    (index 2) differ -- confirm this asymmetry is intentional.
    """
    column_names = list(df.columns)
    print(column_names[1])
    return df.sort_values(by=column_names[2], ascending=False)
sortVals(kidney_facs_1000_age)
kidney_facs_1000_cell = pd.read_csv('KidneyFacsCell1000TissReset.csv')
sortVals(kidney_facs_1000_cell).head()
heart_droplet_1000_age = pd.read_csv('HeartDropletAge1000TissReset.csv')
sortVals(heart_droplet_1000_age).head()
| scripts/practiceScripts/.ipynb_checkpoints/SortingResults-checkpoint.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Chemometrics
# <br>
# **<NAME> / 2017 / Universidad del Valle**
# <br>
# **<NAME> / 2017 / ???**
#
# An up-to-date version of this notebook can be found here: https://github.com/jwist/chemometrics/
options(repr.plot.width=4, repr.plot.height=4) # change these setting to plot larger figures
# ## Experimental design
#
#
# 10.1016/j.aca.2009.06.015
| 3.5_chemometrics-DOE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 列表List
# - 一个列表可以储存任意大小的数据集合,你可以理解为他是一个容器
a=[1,1,1.5,'ab',True]
a
a=[1,1,1.5,'ab',True,[2,3]]
a
c = 'abd'
list(c)
# BUG FIX: `d=(-,2,3)` was a SyntaxError -- a bare minus sign is not an
# expression; a negative literal appears to have been intended.
d = (-1, 2, 3)
list(d)
# ## 先来一个例子爽一爽
# 
# ## 创建一个列表
# - a = [1,2,3,4,5]
# ## 列表的一般操作
# 
a=100
b=[1,2,3,4,[100,200]]
a in b
a=[1,2]
b=[3]
b+a
a=[1,2]
a*10
a=[1,2,3]
b=[3,2,1]
a==b
# # 列表索引操作
# - Mylist[index]
# - 正序索引,逆序索引
# - 列表一定注意越界
# - 
a=[1,2,3,4,5]
a[2]
a=[1,2,3,4,5]
a[-2]
a=[1,2,3,4,5]
a[::-1]
a=[1,2,3,4,5]
a[2:0:-1]
a=[1,2,3,4,5,[100,200]]
a[5][0]
a=[1,2,3,4,5,[100,200,[1000,[4000]]]]
a[5][2][1][0]
b=[1,2,3,5]
b[1]=100
b
b=[1,2,3,4,5,6,7,8,9,10]
for i in range(0,10,2):
b[i]=100
print(b)
b=[1,2,3,4,5,6,7,8,9,10,11]
b[2]
for i in range(0,10,3):
print(b[i:i+2])
en=[1,2,3,[3,4]]
len(en)
count=0
en=[1,2,3,[3,4]]
for i in en:
if type(i)==list:
for j in i:
count+=1
else:
count+=1
print(count)
en.__iter__
a=[1,2,3]
b=[1,2,3]
a==b
b = [4, 5, 1]

def du(lst=None):
    """Bubble-sort a list in place (ascending), print it, and return it.

    BUG FIX: the original `def du:` was a SyntaxError (missing parentheses).
    The list to sort is now a parameter; when omitted it defaults to the
    module-level list `b` that the original body closed over.

    Parameters
    ----------
    lst : list or None
        The list to sort in place; None means "sort the global `b`".

    Returns
    -------
    list
        The same list object, now sorted.
    """
    if lst is None:
        lst = b  # preserve the original behavior of sorting the global list
    remaining = len(lst)
    while remaining > 0:
        # one bubble pass: swap out-of-order adjacent pairs
        for i in range(remaining - 1):
            if lst[i] > lst[i + 1]:
                lst[i], lst[i + 1] = lst[i + 1], lst[i]
        remaining -= 1
    print(lst)
    return lst
b=[4,3,2,1]
length=len(b)
for i in range(length):
for j in range(length-i-1):
if b[j]>b[j+1]:
b[j],b[j+1]=b[j+1],b[j]
b
a=[1,2,3]
b=[1,2,100]
a.append(b)
a
a=[1,2,3,4]
b=[1,2,100]
a[2].append(b)
a
a=[1,2,3,4]
b=[1,2,100]
b.extend(a)
b
c=[1,2,3]
d=[[100],[200],[300]]
c.extend(d)
c
d
c=[1,1,2,3]
c.index(1)
c=[1,1,2,3]
c.insert(0,100)
c
c=[1,2,3,4,5,6]
for i in range(0,len(c)+3,3):
c.insert(i,100)
c
s = [1, 2, 4, 5, 3, 7]
for i in s:
    if i % 2 != 0:
        print(i)
        # NOTE(review): `int(i)` is an integer and integers are not iterable,
        # so this inner loop raises TypeError on the first odd element. The
        # intended behavior (possibly inserting 100 before each odd value)
        # needs to be clarified before this can be fixed.
        for k in int(i):
            d = s.index(k)
            s.insert(d, 100)
s
# ## 列表切片操作
# - Mylist[start:end]
# - 正序切片,逆序切片
# ## 列表 +、*、in 、not in
# ## 使用for循环遍历元素
# - for 循环可以遍历一切可迭代元素
# ## EP:
# - 使用while 循环遍历列表
# ## 列表的比较
# - \>,<,>=,<=,==,!=
# ## 列表生成式
# [x for x in range(10)]
a=[]
for i in range(10):
a.append(i)
a
[x for x in range(10)]
a=[]
for i in range(10):
if i%2==0:
a.append(i)
a
[x for x in range(10) if x%2==0]#简写
[x**2 for x in range(10) if x%2==0]
import random
[random.random() for i in range(10)]
# ## 列表的方法
# 
# +
#extend合并 reverse反转
# -
a=[1,2,3]
a.pop()
a
a.pop()
# ## 将字符串分割成列表
# - split 按照自定义的内容拆分
a='a b c d'
a.split(' ')
b='a!gh!c!'
b.split('!')
a='a b c d'
a.split(' ',2)
# ## EP:
# 
# 
list1=[30,1,12,14,10,0]
random.shuffle(list1)#打乱
list1
list1=[30,1,12,14,10,0]
list1.extend([1,43])
list1
lst=[30,1,2]
lst.sort()
lst
# ## 列表的复制
# - copy 浅复制
# - deepcopy import copy 深复制
# - http://www.pythontutor.com/visualize.html#mode=edit
a=[1,2,3]
b=a
b
a[0]=100
a
b
c=[1,2,3]
d=c.copy()
d
c[0]=100
c
d
e=[1,2,3,[0,1]]
f=e.copy()
f
e[3][0]=100
e
f
import copy
a=[1,2,3,[100,200]]
b=copy.deepcopy(a)
b
a[3][0]=1
a
b
# ## 列表排序
# - sort
# - sorted
# - 列表的多级排序
# - 匿名函数
(lambda x:print(x))(100)
c=[1,3,4]
c.sort(reverse=True)
c
# +
#多级列表排序#sort默认升序,会更改原列表,sorted不会
# -
c=[['hhhh',100],['yyyy',90],['dddd',200],['rrrr',0]]
c.sort(key=lambda x:x[1])
c
c=[['hhhh',100,[20,80]],['yyyy',90,[70,20]],['dddd',200,[60,140]],['rrrr',0,[0,0]]]
c.sort(key=lambda x:x[2][0])
c
# ## EP:
# - 手动排序该列表[5,3,8,0,17],以升序或者降序
# - 1
# 
def chengji():
    """Read four comma-separated scores from the user and print a letter
    grade for each, relative to a fixed "best" score of 70:
    >= best-10 -> A, >= best-20 -> B, >= best-30 -> C, >= best-40 -> D
    (scores below best-40 produce no output).

    NOTE(review): `eval` on raw user input executes arbitrary code; parsing
    with split()/int() would be safer.
    """
    c1,c2,c3,c4=eval(input('输入成绩: '))
    list_=[c1,c2,c3,c4]  # collect the entered scores into a list
    best=70
    for i in list_:
        if i>=best-10:  # score >= best-10
            print('Student score is ',i,'A')
        else:
            if i>=best-20:  # score >= best-20
                print('Student score is ',i,'B')
            else:
                if i>=best-30:  # score >= best-30
                    print('Student score is ',i,'C')
                else:
                    if i>=best-40:  # score >= best-40
                        print('Student score is ',i,'D')
chengji()
# - 2
# 
a=[1,2,3,4]
a[::-1]#倒序
# - 3
# 
# - 4
# 
# - 5
# 
# - 6
# 
def index0fsmallestelement(lst):
    """Print the index of every occurrence of the smallest element of *lst*,
    and return the index of the first occurrence.

    Parameters
    ----------
    lst : list
        A non-empty list of comparable values (min() raises ValueError on
        an empty list, exactly as the original did).

    Returns
    -------
    int
        Index of the first occurrence of the minimum (new convenience
        return; the original returned None).
    """
    smallest = min(lst)
    # print each position where the minimum occurs (same output as the
    # original's manual counter, but without the off-by-one bookkeeping)
    for pos, value in enumerate(lst):
        if value == smallest:
            print(pos)
    return lst.index(smallest)
index0fsmallestelement([1,2,-2,-1])
# - 7
# 
# 
import random
def shuffle(l):
    """Return a new list with the elements of *l* in random order.

    Elements are drawn one at a time from a uniformly random position;
    note that *l* itself is emptied as a side effect (each drawn value's
    first occurrence is removed), just like the original implementation.
    """
    drawn = []
    while l:
        pick = random.randint(0, len(l) - 1)
        value = l[pick]
        drawn.append(value)
        l.remove(value)  # removes the first occurrence of the drawn value
    return drawn
shuffle([1,2,37])
# - 8
# 
def eliminateduplicates(lst):
    """Print and return a copy of *lst* with duplicates removed, keeping
    the first occurrence of each value in its original order.

    Parameters
    ----------
    lst : list
        List possibly containing duplicate values.

    Returns
    -------
    list
        The de-duplicated list (new convenience return -- the original
        only printed it, forcing callers to capture stdout).
    """
    unique = []
    for item in lst:
        if item not in unique:  # keep only first-seen values
            unique.append(item)
    print(unique)
    return unique
eliminateduplicates([2,2,6,2])
# - 9
# 
def isSorted(lst):
    """Print whether *lst* is already in ascending order and return the verdict.

    BUG FIX: the original tested `lst != lst.sort()`; list.sort() sorts in
    place and returns None, so the comparison was always True (every list was
    reported "not sorted") and the caller's list was mutated as a side effect.
    Comparing against sorted(lst) leaves the input untouched.

    Returns
    -------
    bool
        True if *lst* is non-decreasing, else False (new convenience return).
    """
    if lst == sorted(lst):
        print('The list is already sorted')
        return True
    print('The list is not sorted')
    return False
isSorted([2,1,66,88,5])
# - 10
# 
# - 11
# 
# - 12
# 
def isConsecutiveFour(v):
    """Return True if *v* contains four consecutive equal values.

    BUG FIX: the original counted adjacent equal pairs anywhere in the list
    (so e.g. [1,1,2,2,3,3] accumulated the same count as a genuine run of
    four) and printed that count instead of answering the question. This
    version tracks the current run length and returns a boolean.

    Parameters
    ----------
    v : list
        Sequence of comparable values.

    Returns
    -------
    bool
        True if some value appears four (or more) times in a row.
    """
    run = 1  # length of the current run of equal values
    for i in range(1, len(v)):
        run = run + 1 if v[i] == v[i - 1] else 1
        if run >= 4:
            return True
    return False
isConsecutiveFour([11,11,11,11])
| 7.25.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# The structure of GPflow
# --
# *<NAME>, 2016*
# *Small edits <NAME> 2017*
#
# In this document I'll try to give some insight into what's happening under the hood in GPflow. First I'll explain how the hierarchical structure works: GPflow models, kernels, likelihoods and parameters are arranged in a tree. Next I'll attempt to explain what happens when we run `_compile`, which builds a tensorflow graph ready for execution. Lots of these ideas are taken from GPy, where I was strongly influenced by <NAME>'s work. You can find some of that in the [paramz project](http://github.com/sods/paramz).
# ### Tree structure
# GPflow has some core classes. They are
# - The `Param` class (`GPflow.param.Param`) which is used to represent parameters.
#
# - The DataHolder class (`GPflow.data_holders.DataDict`) which is used similarly to represent data.
#
# - The `Parameterized` class (`GPflow.param.Parameterized`) which is a container for `Param`s. Subclasses include
#
# - Kernels (`GPflow.kernels.Kern`)
#
# - Likelihoods (`GPflow.likelihoods.Likelihood`)
#
# - Mean functions (`GPflow.mean_functions.MeanFunction`)
#
# - The model class (`GPflow.model.Model`) which, which is a special class of `Parameterized`.
#
#
# Together these form a tree: the model class is the trunk, `Parameterized` classes are on branches, and `Param` and `DataHolder` classes are at the leaves.
# It possible to have as many layers (branches) in this structure as you want. Here's a silly example.
from __future__ import print_function
import GPflow
import numpy as np
m = GPflow.model.Model()                    # the trunk of the tree
m.p = GPflow.param.Parameterized()          # each Parameterized attribute becomes a branch
m.p.p = GPflow.param.Parameterized()
m.p.p.p = GPflow.param.Parameterized()
m.p.p.p.p = GPflow.param.Parameterized()
m.p.p.p.p.foo = GPflow.param.Param(np.array([7, 12]))  # a Param leaf, four levels deep
print(m)                                    # prints the whole parameter tree
# #### Going down the tree
# The tree is implicitly defined by the attributes of each class. To find all the child nodes of a node, we just search through the `__dict__` of the node, looking for `Parameterized` or `Param` objects. There's a built-in `sorted_params` property, which does this, and ensures that they always appear in the same order. A good example of a function that makes use of this is the `build_prior` function. In `Parameterized` this just calls `build_prior` on all the child nodes. In a `Param` object, this computes the log prior density, if applicable (else 0). Another good example that we'll see shortly is the `make_tf_array` function.
# #### Going up the tree
# All the elements of this tree inherit from `Parentable`. This base class doesn't do much, apart from pointing 'up' the tree. Each element in the tree has a `_parent` attribute, which points at the member in the layer above. It's only possible to have one parent: this is a tree. The model (trunk) has a `None` parent. It's therefore possible to get a pointer to the trunk by recursing the `_parent` attribute, which is how we have defined `highest_parent`. This is used to tell the model if something has changed (like if a parameter has been switched to `fixed`).
# ### What happens in `compile()` ?
#
# + [markdown] run_control={"marked": true}
# To understand the ideas behind the structure of GPflow, let's first have a look at how to compute things in tensorflow. Tensorflow works by building a symbolic graph, and then executing the graph using `session().run(graph)`. Data are fed into the graph using a dictionary in the form `session().run(graph, feed_dict={X_tf:X_np})`, where `X_tf` is a tensorflow placeholder, and `X_np` is a numpy array containing the data to be fed in.
#
# Let's have a look at a simple Linear regression example, without any GPlow machinery. We'll write a build_likelihood() function which will construct the tensorflow graph, and then we'll feed the graph with some data.
# + code_folding=[]
import numpy as np
import tensorflow as tf

# generate a dataset
X = np.random.randn(10, 2)
# BUG FIX: np.dot(X, [2.3, -1.2]) has shape (10,), and adding (10, 1) noise
# broadcast the target to a spurious (10, 10) matrix; keeping the true
# coefficients as a column vector makes Y (10, 1), matching the shape of
# tf.matmul(X_tf, coeffs_tf) in build_likelihood below.
Y = np.dot(X, np.array([[2.3], [-1.2]])) + np.random.randn(10, 1) * 0.2

# placeholders for the data and coefficients
X_tf = tf.placeholder(tf.float64)
Y_tf = tf.placeholder(tf.float64)
coeffs_tf = tf.placeholder(tf.float64)
noise_var_tf = tf.placeholder(tf.float64)

def build_likelihood():
    """Build the (unnormalized) Gaussian log-likelihood graph for the
    linear model Y = X @ coeffs with isotropic noise variance."""
    err = Y_tf - tf.matmul(X_tf, coeffs_tf)
    log_likelihood = -0.5*tf.log(noise_var_tf) - 0.5 / noise_var_tf * tf.reduce_sum(tf.square(err))
    return log_likelihood

ll_graph = build_likelihood()
# gradients of the log-likelihood w.r.t. the parameters
grads_tf = tf.gradients(ll_graph, [coeffs_tf, noise_var_tf])
sess = tf.InteractiveSession()
coeffs_guess = np.random.randn(2, 1)
noise_guess = np.random.rand(1)
# evaluate the likelihood and its gradients at the random initial guess
log_lik, grad_coeff, grad_noise = sess.run([ll_graph]+grads_tf, feed_dict={X_tf:X,
                                                                           Y_tf:Y,
                                                                           coeffs_tf:coeffs_guess,
                                                                           noise_var_tf:noise_guess})
print(log_lik, '\n\n', grad_coeff, '\n\n', grad_noise)
# -
# That's quite a lot of code for such a simple task! Here's the same code using GPflow
# +
import GPflow
class LinReg(GPflow.model.Model):
    """Minimal linear-regression model illustrating GPflow's Param and
    DataHolder machinery; see the surrounding text for the walk-through."""

    def __init__(self, X, Y):
        GPflow.model.Model.__init__(self)
        self.coeffs = GPflow.param.Param(np.random.randn(2, 1))  # Param contains initial guess at value
        self.noise_var = GPflow.param.Param(np.random.rand(), transform=GPflow.transforms.Exp())  # Exp keeps it positive
        self.X, self.Y = GPflow.param.DataHolder(X), GPflow.param.DataHolder(Y)

    def build_likelihood(self):
        """Build and return the TF graph of the Gaussian log-likelihood;
        called by GPflow inside tf_mode during _compile()."""
        err = self.Y - tf.matmul(self.X, self.coeffs)  # we can use self.coeffs as if it were a tf object
        log_likelihood = -0.5*tf.log(self.noise_var) -0.5 / self.noise_var * tf.reduce_sum(tf.square(err))
        return log_likelihood
m = LinReg(X, Y)
m._compile()
print(m._objective(m.get_free_state()))
# -
# Sidenote: that that's not the recommended way to get the log likelihood out of the model. Use `m.compute_log_likelihood()` instead.
#
# That's not a huge saving in code, but the savings mount up for more complex models. Here's what happens in the above code.
#
# 1. When the Model is instantiated (`m = LinReg(X, Y)`), the constructor `__init__` is called. In this function:
# - `Model.__init__` creates a general purpose model. This contains its own `Session`, a placeholder `m._free_vars` for all our parameters, and a some convenient functions for e.g. optimization.
# - We define a `Param` object `self.coeffs` for the coefficients, and `self.noise_var` for the noise variance. This lets us pass the current guess at the parameter into `model._session`. The current state of each parameter is stored as `self._array`; we have initialized each parameter with a guess.
# - We store the data as `DataHolders`, which allows us to pass the data into `m._session`.
#
# 2. When we manually compile the model (`m._compile`), the models calls the build_likelihod function. But, in order to let us use the parameters and dataholders as a tensorflow object (see e.g. `tf.matmul(self.X, self.coeffs)`), this is done inside a python 'context' that we call `tf_mode`. Before we can use `tf_mode`, we must first call `make_tf_array`.
#
# 3. `make_tf_array` takes the tensorflow placeholder `m._free_vars` and distributes it amongst the parameters. We go through all the parameters in turn, and take a chunk of `_free_vars` of the correct length, and reshape it (and possibly transform it, see later) into a tensorflow variable that can represent the parameter in the graph. In our simple example, `_free_vars` has length three. The first two elements are transformed into a 2x1 matrix of coeffs, and the last one is exponentiated to become the noise variance. Each Param object stores its transformed, reshaped section of `_free_vars` as `self._tf_array`. Similiarly, the DataHolder objects hold placeholders into which we later feed the data as `self._tf_array`.
#
# 4. Inside the clause `with m.tf_mode()`, all of the parameters (and dataholders) are replaced with their `_tf_array` attributes. The `_compile` function can then call the `build_likelhood` function which builds a representation of the likelihood as a function of `_free_vars` (via the `_tf_array` attributes). It also builds a convenient function `self._objective` which allows us to link up with scipy's `optimize` routines.
#
# 5. The `_objective` function is a wrapper around a call to the model's `_session.run`. It calls `session.run()`, requesting the negative log_likelihood and the gradients with respect to `_free_vars`. Into the feed_dict, it passes the
print(m.coeffs._tf_array)
print(m.noise_var._tf_array)
# ### get_feed_dict()
#
| doc/source/notebooks/structure.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## this notebook is specifically for BERT classifier
import tensorflow as tf
import pandas as pd
#from pandas import ExcelWriter
#from pandas import ExcelFile
# NOTE: these were written as relative imports (`from ..transformers import ...`),
# which fail at notebook top level; `transformers` is an installed package
# (see the `import transformers` version check below), so absolute imports
# are the correct form.
from transformers import BertTokenizer, BertForSequenceClassification
import torch
from transformers import AutoModel, AutoTokenizer
import numpy as np
torch.__version__
tf.__version__
import transformers;transformers.__version__
import os;os.getcwd()
os.listdir()
import json
# Read the raw TREC training file.  The file handle is named `fh` rather than
# `tf` (as in the original) so it does not shadow the `tensorflow` module
# imported above.
with open('TREC.train.all', 'r', errors='ignore') as fh:
    lines = fh.readlines()
lines
### process data: each raw line is "<label> <token> <token> ...".
labels, sentences = [], []
for it in lines:
    its = it.rstrip().split(' ')
    # first field is the integer class label, the rest is the sentence
    label_field, *word_fields = its
    labels.append(int(label_field))
    sentences.append(' '.join(word_fields))
set(labels)
int(its[0]),
# +
# Pick the compute device: prefer the GPU when CUDA is available,
# otherwise fall back to the CPU.
gpu_available = torch.cuda.is_available()
if gpu_available:
    device = torch.device("cuda")
    print('There are %d GPU(s) available.' % torch.cuda.device_count())
    print('We will use the GPU:', torch.cuda.get_device_name(0))
else:
    print('No GPU available, using the CPU instead.')
    device = torch.device("cpu")
# -
import os;os.getcwd()
import sys; sys.version
# ## load pre-trained bio-bert into tokenizer and model
# NOTE(review): despite the Bio-BERT heading, the plain `bert-base-uncased`
# checkpoint is what is actually loaded here; the Bio-BERT lines are kept
# commented out below.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#lm_model = AutoModelWithLMHead.from_pretrained("biobert_v1.1_pubmed", from_tf=True)
#clf_model=BertForSequenceClassification.from_pretrained("biobert_v1.1_pubmed", from_tf=True) ## use Bio-BERT
clf_model=BertForSequenceClassification.from_pretrained('bert-base-uncased')
clf_model.config
## set the padding token explicitly so padded positions map to pad_token_id
tokenizer.pad_token = '[PAD]'
tokenizer.pad_token_id
# +
# Show what the tokenizer does to the first training sentence:
# raw text -> wordpiece tokens -> vocabulary IDs.
sample = sentences[0]
sample_tokens = tokenizer.tokenize(sample)
print(' Original: ', sample)
print('Tokenized: ', sample_tokens)
print('Token IDs: ', tokenizer.convert_tokens_to_ids(sample_tokens))
# -
# ## Sentences to IDs
# +
# `tokenizer.encode` tokenizes each sentence, prepends `[CLS]`, appends
# `[SEP]`, and maps every token to its vocabulary ID.  Truncation and
# tensor conversion are NOT requested here because padding is applied
# separately below.
input_ids = [tokenizer.encode(sent, add_special_tokens=True)
             for sent in sentences]

# Print sentence 0, now as a list of IDs.
print('Original: ', sentences[0])
print('Token IDs:', input_ids[0])
# -
# ## padding and truncating
## First, what’s the maximum sentence length in our dataset?
seq_lens = [len(sen) for sen in input_ids]
print('Max sentence length: ', max(seq_lens),
      'Average:', np.mean(seq_lens), 'Std:',
      np.std(seq_lens))
# #### Given that, let’s choose MAX_LEN = 64 and apply the padding.
# +
# We'll borrow the `pad_sequences` utility function to do this.
from keras.preprocessing.sequence import pad_sequences

# Fixed sequence length for every encoded sentence (the longest training
# sentence is 87 tokens).
MAX_LEN = 64

print('\nPadding/truncating all sentences to %d values...' % MAX_LEN)
print('\nPadding token: "{:}", ID: {:}'.format(tokenizer.pad_token, tokenizer.pad_token_id))

# "post" means both padding and truncation happen at the END of each
# sequence; the pad value is 0.
input_ids = pad_sequences(input_ids, maxlen=MAX_LEN, dtype="long",
                          value=0, truncating="post", padding="post")

print('\nDone.')
# -
input_ids.shape
# ### 3.4. Attention Masks
#
# The attention mask simply makes it explicit which tokens are actual words versus which are padding.
#
# The BERT vocabulary does not use the ID 0, so if a token ID is 0, then it’s padding, and otherwise it’s a real token.
# +
# Mask value is 1 for real tokens (ID > 0) and 0 for padding (ID == 0).
attention_masks = [[int(token_id > 0) for token_id in sent]
                   for sent in input_ids]
# -
# ### 3.5. Training & Validation Split
#
# Divide up our training set to use 80% for training and 20% for validation.
from sklearn.model_selection import train_test_split
from collections import Counter
# +
# Hold out 20% of the data for validation, stratified on the labels so the
# class balance is preserved in both splits.
train_inputs, validation_inputs, train_labels, validation_labels = train_test_split(
    input_ids, labels, random_state=2020, stratify=labels, test_size=0.2)
# The identical seed / stratify settings reproduce exactly the same split
# for the attention masks.
train_masks, validation_masks, _, _ = train_test_split(
    attention_masks, labels, random_state=2020, stratify=labels, test_size=0.2)
# -
from sklearn.preprocessing import LabelEncoder
# Fit the label -> index mapping on the TRAINING labels only, then re-use
# that fitted mapping for the validation labels.  The original code called
# fit_transform on both sets, which can silently assign a different
# class->index mapping to the validation set if its label set differs from
# the training set's.
Encoder = LabelEncoder()
train_labels = Encoder.fit_transform(train_labels)
validation_labels = Encoder.transform(validation_labels)
# ### pre-processing data
def preprocess_data(tokenizer, sentences, MAX_LEN = 256):
    """Encode sentences, pad/truncate them and build attention masks.

    :params[in]: tokenizer, the configured tokenizer (must provide
        ``encode``, ``pad_token`` and ``pad_token_id``)
    :params[in]: sentences, list of strings
    :params[in]: MAX_LEN, fixed output sequence length; longer encodings
        are truncated at the end, shorter ones padded at the end with 0
    :returns: ``(input_ids, attention_masks)`` where ``input_ids`` is an
        int64 numpy array of shape ``(len(sentences), MAX_LEN)`` and
        ``attention_masks`` is a parallel list of 0/1 lists
    """
    # 1. Tokenize all of the sentences and map the tokens to their word IDs.
    #    `encode` tokenizes, prepends `[CLS]`, appends `[SEP]`, and maps
    #    each token to its vocabulary ID.
    input_ids = [tokenizer.encode(sent, add_special_tokens=True)
                 for sent in sentences]

    print('\nPadding/truncating all sentences to %d values...' % MAX_LEN)
    print('\nPadding token: "{:}", ID: {:}'.format(tokenizer.pad_token, tokenizer.pad_token_id))

    # 2. Pad with 0 / truncate at the END of each sequence, exactly like
    #    keras' pad_sequences(..., padding="post", truncating="post",
    #    value=0), but implemented with numpy so the heavyweight keras
    #    dependency is no longer needed here.
    padded = np.zeros((len(input_ids), MAX_LEN), dtype=np.int64)
    for row, seq in enumerate(input_ids):
        trimmed = seq[:MAX_LEN]
        padded[row, :len(trimmed)] = trimmed
    input_ids = padded
    print('\nDone.')

    # 3. Attention mask: 1 for real tokens (ID > 0), 0 for padding (ID == 0).
    attention_masks = [[int(token_id > 0) for token_id in sent]
                       for sent in input_ids]
    return input_ids, attention_masks
import json

# Load the pre-split augmented dataset from disk.
with open('eda_augment_data.json', 'r') as handle:
    data = json.load(handle)

x_train = data['x_train']
x_test = data['x_test']
y_train = data['y_train']
y_test = data['y_test']

Counter(y_train).most_common()
Counter(y_test).most_common()

train_inputs, train_masks = preprocess_data(tokenizer, x_train, MAX_LEN = 256)
validation_inputs, validation_masks = preprocess_data(tokenizer, x_test, MAX_LEN = 256)
# ### 3.6. Converting to PyTorch Data Types
#
# Our model expects PyTorch tensors rather than numpy.ndarrays, so convert all of our dataset variables.
# +
# Inputs are token-ID matrices, so they become int64 ("long") tensors;
# labels and masks are integer lists/arrays and default to int64 as well.
train_inputs = torch.tensor(train_inputs, dtype=torch.long)
validation_inputs = torch.tensor(validation_inputs, dtype=torch.long)
train_labels = torch.tensor(train_labels)
validation_labels = torch.tensor(validation_labels)
train_masks = torch.tensor(train_masks)
validation_masks = torch.tensor(validation_masks)
# -
train_inputs.shape,train_labels.shape
Counter(train_labels.tolist()).most_common()
# We’ll also create an iterator for our dataset using the torch DataLoader class. This helps save on memory during training because, unlike a for loop, with an iterator the entire dataset does not need to be loaded into memory.
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
# ## 4. Train Our Classification Model
#
# Now that our input data is properly formatted, it’s time to fine tune the BERT model.
# ### 4.1. BertForSequenceClassification
#
# For this task, we first want to modify the pre-trained BERT model to give outputs for classification, and then we want to continue training the model on our dataset so that the entire model, end-to-end, is well-suited for our task.
#
# Thankfully, the huggingface pytorch implementation includes a set of interfaces designed for a variety of NLP tasks. Though these interfaces are all built on top of a trained BERT model, each has different top layers and output types designed to accommodate their specific NLP task.
#
# Here is the current list of classes provided for fine-tuning:
#
# BertModel
# BertForPreTraining
# BertForMaskedLM
# BertForNextSentencePrediction
# BertForSequenceClassification - The one we’ll use.
# BertForTokenClassification
# BertForQuestionAnswering
#
# Load bert-base-uncased with a freshly-initialised 6-way classification head.
# num_labels = 6 sizes the output layer; presumably it matches the label set
# built above -- verify against set(labels).
clf_model=BertForSequenceClassification.from_pretrained(
    'bert-base-uncased', num_labels = 6)
clf_model.config.num_labels
# +
## Get all of the model's parameters as a list of tuples.
params = list(clf_model.named_parameters())

print('The BERT model has {:} different named parameters.\n'.format(len(params)))

def _show_params(header, items):
    # helper: print a section header, then one "name  shape" row per parameter
    print(header)
    for name, tensor in items:
        print("{:<55} {:>12}".format(name, str(tuple(tensor.size()))))

_show_params('==== Embedding Layer ====\n', params[0:5])
_show_params('\n==== First Transformer ====\n', params[5:21])
_show_params('\n==== Output Layer ====\n', params[-4:])
# -
# ### 4.2. Optimizer & Learning Rate Scheduler
#
# Now that we have our model loaded we need to grab the training hyperparameters from within the stored model.
#
# For the purposes of fine-tuning, the authors recommend choosing from the following values:
#
# Batch size: 16, 32 (we use 8 when creating our DataLoaders below).
# Learning rate (Adam): 5e-5, 3e-5, 2e-5 (We’ll use 2e-5).
# Number of epochs: 2, 3, 4 (We’ll use 4).
#
# The epsilon parameter eps = 1e-8 is “a very small number to prevent any division by zero in the implementation” (from here).
from transformers import AdamW
from transformers import get_linear_schedule_with_warmup
# ### Observation from Feb 27th:
# #### use bio-bert:
# #### 1. batch size=4, learning rate=5e-5; performance is good
# #### 2. Easy Data augmentation, batch size=4, learning rate=2e-5, 4 epochs, the best macro -f1 = .78
# #### 3. Easy data augmentation, batch size=8, learning rate=2e-5, 4 epochs, f1=.79, accuracy: .89
# +
# The DataLoader needs to know our batch size for training, so we specify it
# here.  (For fine-tuning BERT the authors recommend 16 or 32; 8 is used here.)
batch_size = 8

# Training set: batches are drawn in random order each epoch.
train_data = TensorDataset(train_inputs, train_masks, train_labels)
train_dataloader = DataLoader(train_data,
                              sampler=RandomSampler(train_data),
                              batch_size=batch_size)

# Validation set: deterministic, sequential order.
validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)
validation_dataloader = DataLoader(validation_data,
                                   sampler=SequentialSampler(validation_data),
                                   batch_size=batch_size)
# -
# +
# Note: AdamW is a class from the huggingface library (as opposed to pytorch).
# The 'W' stands for decoupled Weight decay (Loshchilov & Hutter, "Decoupled
# Weight Decay Regularization"), not "warm up".
optimizer = AdamW(clf_model.parameters(),
                  lr = 2e-5, # args.learning_rate - default is 5e-5, our notebook had 2e-5
                  eps = 1e-8 # args.adam_epsilon - default is 1e-8.
                  )
# +
# Number of training epochs (authors recommend between 2 and 4)
epochs = 4

# Total number of training steps is number of batches * number of epochs.
total_steps = len(train_dataloader) * epochs

# Create the learning rate scheduler: linear decay after warmup.
# NOTE(review): num_warmup_steps=1 is effectively no warmup; usually a
# fraction of total_steps is used -- confirm this is intentional.
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=1,
                                            num_training_steps = total_steps)
# +
import numpy as np

def flat_accuracy(preds, labels):
    """Fraction of rows where the argmax of `preds` matches `labels`."""
    predicted = np.argmax(preds, axis=1).flatten()
    gold = labels.flatten()
    return np.sum(predicted == gold) / len(gold)
# +
import time
import datetime

def format_time(elapsed):
    '''
    Takes a time in seconds and returns a string hh:mm:ss
    '''
    # Round to the nearest whole second, then let timedelta do the formatting.
    whole_seconds = int(round(elapsed))
    return str(datetime.timedelta(seconds=whole_seconds))
# -
from sklearn.metrics import classification_report
# +
import random

# This training code is based on the `run_glue.py` script here:
# https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128

# Set the seed value all over the place to make this reproducible.
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)

# Move the model to the selected device.  `.to(device)` works for both CPU
# and GPU; the previous `.cuda(device)` call crashes when `device` is the
# CPU fallback chosen above.
clf_model.to(device)

# Store the average loss after each epoch so we can plot them.
loss_values = []
# For each epoch...
for epoch_i in range(0, epochs):
    # ========================================
    #               Training
    # ========================================
    # Perform one full pass over the training set.
    print("")
    print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
    print('Training...')
    ## predicted/true labels, re-collected each epoch for the per-epoch report
    pred_labels, true_labels = [],[]
    # Measure how long the training epoch takes.
    t0 = time.time()
    # Reset the total loss for this epoch.
    total_loss = 0
    # Put the model into training mode. Don't be mislead--the call to
    # `train` just changes the *mode*, it doesn't *perform* the training.
    # `dropout` and `batchnorm` layers behave differently during training
    # vs. test (source: https://stackoverflow.com/questions/51433378/what-does-model-train-do-in-pytorch)
    clf_model.train()
    # For each batch of training data...
    for step, batch in enumerate(train_dataloader):
        # Progress update every 40 batches.
        if step % 40 == 0 and not step == 0:
            # Calculate the elapsed time so far (formatted hh:mm:ss).
            elapsed = format_time(time.time() - t0)
            # Report progress.
            print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))
        # Unpack this training batch from our dataloader.
        #
        # As we unpack the batch, we'll also copy each tensor to the GPU using the
        # `to` method.
        #
        # `batch` contains three pytorch tensors:
        #   [0]: input ids
        #   [1]: attention masks
        #   [2]: labels
        b_input_ids = batch[0].to(device)
        b_input_mask = batch[1].to(device)
        b_labels = batch[2].to(device)
        # Always clear any previously calculated gradients before performing a
        # backward pass. PyTorch doesn't do this automatically because
        # accumulating the gradients is "convenient while training RNNs".
        # (source: https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch)
        clf_model.zero_grad()
        # Perform a forward pass (evaluate the model on this training batch).
        # This will return the loss (rather than the model output) because we
        # have provided the `labels`.
        # The documentation for this `model` function is here:
        # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
        outputs = clf_model(b_input_ids,
                            token_type_ids=None,
                            attention_mask=b_input_mask,
                            labels=b_labels)
        # The call to `model` always returns a tuple, so we need to pull the
        # loss value out of the tuple.
        loss = outputs[0]
        # Accumulate the training loss over all of the batches so that we can
        # calculate the average loss at the end. `loss` is a Tensor containing a
        # single value; the `.item()` function just returns the Python value
        # from the tensor.
        total_loss += loss.item()
        # Perform a backward pass to calculate the gradients.
        loss.backward()
        # Clip the norm of the gradients to 1.0.
        # This is to help prevent the "exploding gradients" problem.
        torch.nn.utils.clip_grad_norm_(clf_model.parameters(), 1.0)
        # Update parameters and take a step using the computed gradient.
        # The optimizer dictates the "update rule"--how the parameters are
        # modified based on their gradients, the learning rate, etc.
        optimizer.step()
        # Update the learning rate.
        scheduler.step()
    # Calculate the average loss over the training data.
    avg_train_loss = total_loss / len(train_dataloader)
    # Store the loss value for plotting the learning curve.
    loss_values.append(avg_train_loss)
    print("")
    print(" Average training loss: {0:.3f}".format(avg_train_loss))
    print(" Training epcoh took: {:}".format(format_time(time.time() - t0)))
    # ========================================
    #               Validation
    # ========================================
    # After the completion of each training epoch, measure our performance on
    # our validation set.
    print("")
    print("Running Validation...")
    t0 = time.time()
    # Put the model in evaluation mode--the dropout layers behave differently
    # during evaluation.
    clf_model.eval()
    # Tracking variables
    eval_loss, eval_accuracy = 0, 0
    nb_eval_steps, nb_eval_examples = 0, 0
    # Evaluate data for one epoch
    for batch in validation_dataloader:
        # Add batch to GPU
        batch = tuple(t.to(device) for t in batch)
        # Unpack the inputs from our dataloader
        b_input_ids, b_input_mask, b_labels = batch
        # Telling the model not to compute or store gradients, saving memory and
        # speeding up validation
        with torch.no_grad():
            # Forward pass, calculate logit predictions.
            # This will return the logits rather than the loss because we have
            # not provided labels.
            # token_type_ids is the same as the "segment ids", which
            # differentiates sentence 1 and 2 in 2-sentence tasks.
            # The documentation for this `model` function is here:
            # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
            outputs = clf_model(b_input_ids,
                                token_type_ids=None,
                                attention_mask=b_input_mask)
        # Get the "logits" output by the model. The "logits" are the output
        # values prior to applying an activation function like the softmax.
        logits = outputs[0]
        # Move logits and labels to CPU
        logits = logits.detach().cpu().numpy()
        label_ids = b_labels.to('cpu').numpy()
        # Calculate the accuracy for this batch of test sentences.
        tmp_eval_accuracy = flat_accuracy(logits, label_ids)
        ## pred_labels/true_labels in a batch flatten
        pred_flat = np.argmax(logits, axis=1).flatten()
        true_flat = label_ids.flatten()
        # true labels and predicted labels, accumulated across batches
        true_labels += true_flat.tolist()
        pred_labels += pred_flat.tolist()
        # Accumulate the total accuracy.
        eval_accuracy += tmp_eval_accuracy
        # Track the number of batches
        nb_eval_steps += 1
    # Report the final accuracy for this validation run.
    print(" Accuracy: {0:.3f}".format(eval_accuracy/nb_eval_steps))
    print(" Validation took: {:}".format(format_time(time.time() - t0)))
    # Per-epoch precision/recall/F1 breakdown over the whole validation set.
    print(classification_report(true_labels, pred_labels))
print("")
print("Training complete!")
# -
# Reload the fine-tuned checkpoint saved after epoch 2 of split 5.
clf_model=BertForSequenceClassification.from_pretrained('fine_tuned_bio_bert/bio_bert_split_5/epoch2_done')
# Move the reloaded model onto the same device as the batches; without this
# the forward pass in the loop below fails whenever `device` is a GPU,
# because the reloaded model starts on the CPU.
clf_model.to(device)
# Put the model in evaluation mode--the dropout layers behave differently
# during evaluation.
clf_model.eval()
pred_labels, true_labels = [],[]
# Tracking variables
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
# Evaluate data for one epoch
for batch in validation_dataloader:
    # Add batch to GPU
    batch = tuple(t.to(device) for t in batch)
    # Unpack the inputs from our dataloader
    b_input_ids, b_input_mask, b_labels = batch
    # Telling the model not to compute or store gradients, saving memory and
    # speeding up validation
    with torch.no_grad():
        # Forward pass, calculate logit predictions.
        # This will return the logits rather than the loss because we have
        # not provided labels.
        # token_type_ids is the same as the "segment ids", which
        # differentiates sentence 1 and 2 in 2-sentence tasks.
        # The documentation for this `model` function is here:
        # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
        outputs = clf_model(b_input_ids,
                            token_type_ids=None,
                            attention_mask=b_input_mask)
    # Get the "logits" output by the model. The "logits" are the output
    # values prior to applying an activation function like the softmax.
    logits = outputs[0]
    # Move logits and labels to CPU
    logits = logits.detach().cpu().numpy()
    label_ids = b_labels.to('cpu').numpy()
    # Calculate the accuracy for this batch of test sentences.
    tmp_eval_accuracy = flat_accuracy(logits, label_ids)
    ## pred_labels/true_labels in a batch flatten
    pred_flat = np.argmax(logits, axis=1).flatten()
    true_flat = label_ids.flatten()
    # true labels and predicted labels, accumulated across batches
    true_labels += true_flat.tolist()
    pred_labels += pred_flat.tolist()
    # Accumulate the total accuracy.
    eval_accuracy += tmp_eval_accuracy
    # Track the number of batches
    nb_eval_steps += 1
# Report the final accuracy for this validation run.
print(" Accuracy: {0:.3f}".format(eval_accuracy/nb_eval_steps))
#print(" Validation took: {:}".format(format_time(time.time() - t0)))
# Full precision/recall/F1 report over the reloaded model's predictions.
print(classification_report(true_labels, pred_labels))
# NOTE(review): duplicate of the line above -- the same report prints twice.
print(classification_report(true_labels, pred_labels))
| python_notebooks/bert_classification.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .js
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Javascript (Node.js)
// language: javascript
// name: javascript
// ---
// # In the official SOWPODS Scrabble dictionary...
// +
var fetch = require('node-fetch');
var src = 'https://raw.githubusercontent.com/jesstess/Scrabble/master/scrabble/sowpods.txt';
var dict;
// fetch and clean the dictionary
fetch(src)
.then(resp => resp.text())
.then(text => {
dict = text.split(/\r*\n/g);
dict = dict.filter(word => word.length); // remove any empty words
console.log(dict.length + ' words in total');
});
// -
// # What words contain "UU"?
// keep every word whose letters include the substring "UU"
dict.filter(word => /UU/.test(word));
// ### What is the big-O complexity of the solution?
//
// The solution is $O(n)$ for an input word of length $n$ (with the coefficient of $n$ depending on the [regex matcher implementation](https://en.wikibooks.org/wiki/Regular_Expressions/Implementation)).
//
// ### What is another way of solving the problem?
//
// Other ways of solving the problem are either directly equivalent to or less efficient than this method.
// For example, we could implement filtering and regex matching ourselves ($O(n)$):
// + deletable=false editable=false run_control={"frozen": true}
// var UU = [];
//
// for (var word of dict) {
// for (var i = 0; i < word.length - 1; i++) {
// // equivalent to regex matching
// if (word[i] == 'U') {
// if (word[i+1] == 'U') {
// UU.push(word);
// break;
//
// } else {
// i++; // skip the following letter, since it can't be the start of 'UU'
// }
// }
// }
// }
// -
// ### Is there a faster solution?
//
// There is no faster solution: in the worst case (the word doesn't contain "UU"), all $n$ letters in a word must be examined to check for a match with this regular expression.
// # What words contain "Q" without "U"?
// match any number of characters other than 'U', then 'Q', then any number of characters other than 'U'
// (anchored with ^...$ so a 'U' anywhere in the word rejects it)
dict.filter(word => /^[^U]*Q[^U]*$/.test(word));
// ### What is the big-O complexity of the solution?
//
// The solution is $O(n)$ for an input word of length $n$ (with the coefficient of $n$ depending on the [regex matcher implementation](https://en.wikibooks.org/wiki/Regular_Expressions/Implementation)).
//
// ### What is another way of solving the problem?
//
// Other ways of solving the problem are either directly equivalent to or less efficient than this method.
// For example, we could filter by words containing "Q" and then by words not containing "U" (still $O(n)$ overall, but with two passes over the data instead of one):
// + deletable=false editable=false run_control={"frozen": true}
// dict.filter(word => /Q/.test(word))
// .filter(word => !/U/.test(word));
// -
// ### Is there a faster solution?
//
// There is no faster solution: in the worst case (the word doesn't contain either "Q" or "U"), all $n$ letters in a word must be examined to check for a match with this regular expression.
// # What letters, if any, never appear doubled?
// +
var undoubled = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'; // letters that have not appeared doubled

for (var word of dict) {
    for (var i = 0; i + 1 < word.length; i++) {
        // once every letter has been seen doubled, there is nothing left to find
        if (!undoubled) {
            break;
        }
        if (word[i] === word[i + 1]) {
            // drop the doubled letter from the candidates
            undoubled = undoubled.replace(word[i], '');
            i++; // skip the second letter of the pair
        }
    }
}
console.log(undoubled.split(''));
// -
// ### What is the big-O complexity of the solution?
//
// The solution is $O(n)$ for a set of input words of total length $n$ (that is, $n$ is the sum of the lengths of all of the words in the set).
// Alternatively, the solution is $O(m)$ for a set of input words of size $m$ (that is, there are $m$ words in the set).
//
// ### What is another way of solving the problem?
//
// Other ways of solving the problem are either directly equivalent to or less efficient than this method.
// For example, we could search for each pair of doubled letters individually ($O(n)$, with a larger coefficient of $n$ than in the original solution):
// + deletable=false editable=false run_control={"frozen": true}
// var undoubled = [],
// doubled;
//
// for (var letter of 'ABCDEFGHIJKLMNOPQRSTUVWXYZ') {
// var regex = new RegExp(letter + letter); // construct a regex matching the current letter, doubled
// doubled = false;
//
// // search for the doubled letter in each word
// for (var word of dict) {
// if (regex.test(word)) {
// doubled = true;
// break;
// }
// }
//
// // if the letter has not appeared doubled in any word, add it to undoubled
// if (!doubled) {
// undoubled.push(letter);
// }
// }
// -
// ### Is there a faster solution?
//
// There is no faster solution: in the worst case (no letters ever appear doubled), every letter of every word must be examined to look for a doubled letter.
// # What is the longest palindrome?
// +
var maxLen = 0,
    maxPal = [];

// true when `word` reads the same forwards and backwards
function isPalindrome(word) {
    for (var start = 0, end = word.length - 1; start < end; start++, end--) {
        if (word[start] != word[end]) {
            return false;
        }
    }
    return true;
}

for (var word of dict) {
    if (!isPalindrome(word)) {
        continue;
    }
    if (word.length > maxLen) {
        // this palindrome is longer than the previous longest
        maxLen = word.length;
        maxPal = [word];
    } else if (word.length == maxLen) {
        // this palindrome is as long as the previous longest
        maxPal.push(word);
    }
}
console.log(maxPal);
// -
// ### What is the big-O complexity of the solution?
//
// The solution is $O(n)$ for a set of input words of total length $n$ (that is, $n$ is the sum of the lengths of all of the words in the set).
//
// ### What is another way of solving the problem?
//
// Other ways of solving the problem are either directly equivalent to or less efficient than this method.
// For example, we could sort the words by decreasing length and then search for a palindrome from the start.
// With a typical comparison-based sorting algorithm, in the worst case (none of the words are palindromes), this is $O(m\log(m) + n)$, where $m$ is the total number of input words.
// + deletable=false editable=false run_control={"frozen": true}
// // make a copy of the list of words and sort it in-place by decreasing length
// var sortDict = [...dict];
// sortDict.sort((wordA, wordB) => wordA.length > wordB.length ? -1 : 1);
//
// var maxPal = [],
// maxLen = 0;
//
// for (var word of sortDict) {
// // we have checked all the words of the same or greater length as the longest
// // palindrome, so stop looking
// if (word.length < maxLen) {
// break;
// }
//
// // advance the start and end indices until they meet
// for (var start = 0, end = word.length - 1;
// // if the letters at start and end do not match, the word is not a palindrome
// start < end && word[start] == word[end];
// start++, end--);
//
// // if the start and end indices met, the word is a palindrome
// if (start >= end) {
// maxPal.push(word);
// maxLen = word.length;
// }
// }
// -
// ### Is there a faster solution?
//
// There is no faster solution: in the worst case (every word is a palindrome), every letter of each word must be examined to verify that it is a palindrome.
// # What words contain all of the vowels and Y, in any order?
// +
var all = [],
    // map of vowels and Y to powers of 2 (one bit per letter)
    bits = { 'A': 1, 'E': 2, 'I': 4, 'O': 8, 'U': 16, 'Y': 32 };

for (var word of dict) {
    // bitmask of the vowels/Y seen so far in this word; `var` added -- the
    // original assigned `vowels` without declaring it, creating an implicit
    // global (a crash under strict mode).
    var vowels = 0;
    for (var letter of word) {
        // if the letter is a vowel or Y, set the bit corresponding to this letter
        if (letter in bits) {
            vowels |= bits[letter];
        }
        // 63 == 1+2+4+8+16+32: all six bits set, so the word qualifies
        if (vowels == 63) {
            all.push(word);
            break;
        }
    }
}
console.log(all);
// -
// ### What is the big-O complexity of the solution?
//
// The solution is $O(n)$ for an input word of length $n$.
//
// ### What is another way of solving the problem?
//
// Other ways of solving the problem are either directly equivalent to or less efficient than this method.
// For example, we could search that a word contains each vowel and Y, independently ($O(n)$, with a larger coefficient of $n$ than in the original solution):
// + deletable=false editable=false run_control={"frozen": true}
// dict.filter(word => (/A/.test(word) &&
// /E/.test(word) &&
// /I/.test(word) &&
// /O/.test(word) &&
// /U/.test(word) &&
// /Y/.test(word)));
// -
// ### Is there a faster solution?
//
// There is no faster solution: in the worst case (either the word contains all of the vowels, with a unique vowel at the end of the word, or the word contains all but one of the vowels, with a consonant at the end of the word), all $n$ letters in a word must be examined.
// # What words contain all of the vowels and Y, in alphabetical order?
// match any number of non-vowel characters, then A, then any number of non-vowel characters or A,
// then E, then any number of non-vowel characters or E, etc.
// (each class re-admits the vowel just matched, so repeats like "AA...E" are allowed)
dict.filter(word => /^[^AEIOUY]*A[^EIOUY]*E[^AIOUY]*I[^AEOUY]*O[^AEIUY]*U[^AEIOY]*Y[^AEIOU]*$/.test(word));
// ### What is the big-O complexity of the solution?
//
// The solution is $O(n)$ for an input word of length $n$ (with the coefficient of $n$ depending on the [regex matcher implementation](https://en.wikibooks.org/wiki/Regular_Expressions/Implementation)).
//
// ### What is another way of solving the problem?
//
// Other ways of solving the problem are either directly equivalent to or less efficient than this method.
// For example, we could remove all of the letters that are not vowels or Y, then check that the result is "AEIOUY", possibly with repeated letters ($O(n)$):
// + deletable=false editable=false run_control={"frozen": true}
// dict.filter(word => /^A+E+I+O+U+Y+$/.test(word.replace(/[^AEIOUY]/g, '')));
// -
// ### Is there a faster solution?
//
// There is no faster solution: in the worst case (the word contains all of the vowels in alphabetical order, without Y or with the only Y at the end of the word), all $n$ letters in a word must be examined to check for a match with this regular expression.
// # What letter makes the most appearances in a single word, and what is that word?
// +
// return the letter(s) that appears most frequently in word, and the number of appearances
function freqLetter(word) {
    // tally how many times each letter occurs in the word
    var counts = {};
    for (var ch of word) {
        counts[ch] = (counts[ch] || 0) + 1;
    }
    // collect the letter(s) with the highest tally
    var maxFreq = 0,
        maxLetter = [];
    for (var ch in counts) {
        if (counts[ch] > maxFreq) {
            maxFreq = counts[ch];
            maxLetter = [ch];
        } else if (counts[ch] == maxFreq) {
            maxLetter.push(ch);
        }
    }
    return { maxFreq, maxLetter };
}
// maxLetterAll: the letter(s) that appear most frequently in any word, mapped
// to a list of the word(s) in which they appear most frequently
var maxFreqAll = 0,
    maxLetterAll = {};
for (var word of dict) {
    // most frequent letter(s) within this single word
    var result = freqLetter(word);
    if (result.maxFreq > maxFreqAll) {
        // this word sets a new record frequency: rebuild the map from scratch
        maxFreqAll = result.maxFreq;
        maxLetterAll = {};
        for (var letter of result.maxLetter) {
            maxLetterAll[letter] = [word];
        }
    } else if (result.maxFreq == maxFreqAll) {
        // this word ties the record frequency: file it under each tied letter
        for (var letter of result.maxLetter) {
            if (!maxLetterAll[letter]) {
                maxLetterAll[letter] = [];
            }
            maxLetterAll[letter].push(word);
        }
    }
}
console.log(maxLetterAll);
// -
// ### What is the big-O complexity of the solution?
//
// ### What is another way of solving the problem?
//
//
// ### Is there a faster solution?
// # What words are the longest anagrams of each other?
// +
// map of letters to unique primes
var primes = { 'A': 2, 'B': 3, 'C': 5, 'D': 7, 'E': 11, 'F': 13, 'G': 17, 'H': 19, 'I': 23,
               'J': 29, 'K': 31, 'L': 37, 'M': 41, 'N': 43, 'O': 47, 'P': 53, 'Q': 59, 'R': 61,
               'S': 67, 'T': 71, 'U': 73, 'V': 79, 'W': 83, 'X': 89, 'Y': 97, 'Z': 101 };
// hash each word as a product of the primes corresponding to its letters;
// by unique factorization, two words share a hash exactly when they use the
// same multiset of letters (i.e. are anagrams)
// NOTE(review): the product is a JS double — for long words it can exceed
// Number.MAX_SAFE_INTEGER and lose precision, risking false collisions;
// confirm the dictionary's word lengths keep the product in safe range
var hashmap = {};
for (var word of dict) {
    var key = word.split('').reduce((prod, letter) => primes[letter] * prod, 1);
    if (hashmap[key]) {
        hashmap[key].push(word);
    } else {
        hashmap[key] = [word];
    }
}
// scan the buckets for the longest group of mutual anagrams
var maxLen = 0,
    maxAna = [];
for (var key in hashmap) {
    // multiple words had the same hash (i.e., these words are anagrams of each other)
    if (hashmap[key].length > 1) {
        var anaLen = hashmap[key][0].length; // length of the words
        // these words are the same length as the previous longest anagrams
        if (anaLen == maxLen) {
            maxAna.push(hashmap[key]);
        // these words are longer than the previous longest anagrams
        } else if (anaLen > maxLen) {
            maxLen = anaLen;
            maxAna = [hashmap[key]];
        }
    }
}
console.log(maxAna);
// -
// ### What is the big-O complexity of the solution?
//
// ### What is another way of solving the problem?
//
//
// ### Is there a faster solution?
// # Scrabble cheater
// Write a program that takes a Scrabble rack as an argument and returns all valid Scrabble words that can be constructed from that rack, along with their Scrabble scores, sorted by score.
// +
// Return every dictionary word that can be built from the tiles in `rack`,
// as [score, word] pairs sorted by decreasing Scrabble score.
function cheat(rack) {
    // map of letters to unique primes
    const primes = { 'A': 2, 'B': 3, 'C': 5, 'D': 7, 'E': 11, 'F': 13, 'G': 17, 'H': 19, 'I': 23,
                     'J': 29, 'K': 31, 'L': 37, 'M': 41, 'N': 43, 'O': 47, 'P': 53, 'Q': 59, 'R': 61,
                     'S': 67, 'T': 71, 'U': 73, 'V': 79, 'W': 83, 'X': 89, 'Y': 97, 'Z': 101 };
    // hash the rack as a product of primes; by unique factorization a word's
    // letters form a sub-multiset of the rack's exactly when each letter's
    // prime divides the remaining product
    var rackHash = rack.split('').reduce((prod, letter) => primes[letter] * prod, 1);
    var valid = [],
        factoredHash, subset;
    // for each word, check whether the rack hash can be (at least partially) factored by its letters
    for (var word of dict) {
        factoredHash = rackHash; // rack hash being factored
        subset = true; // whether this word is a subset of the rack
        for (var letter of word) {
            // check if the prime corresponding to this letter is a factor of the rack hash (prime product)
            if (factoredHash % primes[letter] == 0) {
                // the letter is in the rack, so factor it out from the hash ("remove it from the rack")
                factoredHash /= primes[letter];
            } else {
                // the letter is not in the rack, so this word can't be made from this rack
                subset = false;
                break;
            }
        }
        if (subset) {
            valid.push(word);
        }
    }
    // scores for each letter in Scrabble
    const scores = { 'A': 1, 'C': 3, 'B': 3, 'E': 1, 'D': 2, 'G': 2,
                     'F': 4, 'I': 1, 'H': 4, 'K': 5, 'J': 8, 'M': 3,
                     'L': 1, 'O': 1, 'N': 1, 'Q': 10, 'P': 3, 'S': 1,
                     'R': 1, 'U': 1, 'T': 1, 'W': 4, 'V': 4, 'Y': 4,
                     'X': 8, 'Z': 10 };
    // map valid words to [score, word] pairs
    valid = valid.map(word => ([word.split('').reduce((sum, letter) => scores[letter] + sum, 0),
                                word]));
    // sort valid words by decreasing score
    // (fix: the previous comparator `a[0] > b[0] ? -1 : 1` never returned 0 for
    // equal scores, violating the sort comparator contract and leaving the
    // order of ties implementation-defined; a numeric difference is consistent)
    valid.sort((a, b) => b[0] - a[0]);
    return valid;
}
console.log(cheat('ZAEFIEE'));
// -
// ## Bonus
// Modify your program to allow blank tiles, which can be used as any letter but contribute no points to the word score.
// +
// use '_' to represent a blank tile in the rack
// Like cheat(), but blank tiles may stand in for any letter while scoring 0.
function cheatWithBlanks(rack) {
    // map of letters to unique primes
    // (map blank tiles to 1 so that they do not contribute to the rack hash)
    const primes = { 'A': 2, 'B': 3, 'C': 5, 'D': 7, 'E': 11, 'F': 13, 'G': 17, 'H': 19, 'I': 23,
                     'J': 29, 'K': 31, 'L': 37, 'M': 41, 'N': 43, 'O': 47, 'P': 53, 'Q': 59, 'R': 61,
                     'S': 67, 'T': 71, 'U': 73, 'V': 79, 'W': 83, 'X': 89, 'Y': 97, 'Z': 101, '_': 1 };
    // hash the rack as a product of primes
    var rackHash = rack.split('').reduce((prod, letter) => primes[letter] * prod, 1);
    // count the blank tiles
    var blanks = rack.match(/_/g);
    blanks = blanks ? blanks.length : 0;
    // scores for each letter in Scrabble
    const scores = { 'A': 1, 'C': 3, 'B': 3, 'E': 1, 'D': 2, 'G': 2,
                     'F': 4, 'I': 1, 'H': 4, 'K': 5, 'J': 8, 'M': 3,
                     'L': 1, 'O': 1, 'N': 1, 'Q': 10, 'P': 3, 'S': 1,
                     'R': 1, 'U': 1, 'T': 1, 'W': 4, 'V': 4, 'Y': 4,
                     'X': 8, 'Z': 10 };
    var valid = [],
        factoredHash, blanksLeft, score, subset;
    // for each word, check whether the rack hash can be (at least partially) factored by its letters
    for (var word of dict) {
        factoredHash = rackHash; // rack hash being factored
        blanksLeft = blanks; // how many blank tiles are left to use
        score = 0; // score for this word
        subset = true; // whether this word is a subset of the rack
        for (var letter of word) {
            // check if the prime corresponding to this letter is a factor of the rack hash (prime product)
            if (factoredHash % primes[letter] == 0) {
                // the letter is in the rack, so factor it out from the hash ("remove it from the rack");
                // preferring a real tile over a blank is always score-optimal since blanks score 0
                factoredHash /= primes[letter];
                score += scores[letter]; // add this letter to the score
            } else if (blanksLeft) {
                // the letter is not in the rack, but we can use a blank tile to represent it
                blanksLeft -= 1;
            } else {
                // the letter is not in the rack, and we have no blank tiles left
                subset = false;
                break;
            }
        }
        if (subset) {
            valid.push([score, word]);
        }
    }
    // sort valid words by decreasing score
    // (fix: the previous comparator `a[0] > b[0] ? -1 : 1` never returned 0 for
    // equal scores, violating the sort comparator contract; a numeric
    // difference is a consistent comparator)
    valid.sort((a, b) => b[0] - a[0]);
    return valid;
}
console.log(cheatWithBlanks('ZAEFIE_'));
// -
| wordplay/wordplay.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: csm_local
# language: python
# name: csm_local
# ---
# +
import os
os.environ["ISISROOT"] = "/usgs/pkgs/isis3.7.0/install"
os.environ["ISIS3DATA"] = "/usgs/cpkgs/isis3/data"
from pysis import isis
from plio.io import io_controlnetwork
from knoten.csm import create_csm
from scipy import sparse
import ale
import csmapi
import numpy as np
import matplotlib.pyplot as plt
from knoten.bundle import *
# -
# ## Load in Network
# +
# List of ISIS cube files that make up the image network.
cubes = '/work/projects/control_network_metrics/registration_quality/cubes.lis'
# cubes = 'data_lak/cubes.lis'
# Build a CSM sensor model per image (keyed by ISIS serial number).
sensors = generate_sensors(cubes, directory='data_lak/tmp/', clean=True)
# ISIS control network with the measures to be screened for outliers.
network_path = '/work/projects/control_network_metrics/registration_quality/measure_error_50px_1pts_5mean_2std.net'
# network_path = 'data_lak/hand_dense.net'
network = io_controlnetwork.from_isis(network_path)
network = compute_apriori_ground_points(network, sensors) # autoseed did not generate ground points, calculate and repopulate the data frame
# -
# ## Determine Solve Parameters
all_parameters = {sn: get_sensor_parameters(sensor) for sn, sensor in sensors.items()} #all parameters
# NOTE(review): slice [6:12] is assumed to select the camera angle and angular
# velocity parameters for these sensors -- confirm against the sensor's
# parameter ordering before solving a different mission's data.
parameters = {sn: parameter[6:12] for sn, parameter in all_parameters.items()} #just solving for camera angles and angle velocity
# ## Functions
# +
def compute_sigma(V, W_parameters, W_observations):
    """
    Compute the a-posteriori reference standard deviation (sigma naught)
    of the residuals for the current state of the bundle network.

    Parameters
    ----------
    V : np.ndarray
        Vector of observation residuals.
    W_parameters : ndarray
        The parameter weight matrix (i.e.: sensor parameters and point weights)
    W_observations : ndarray
        The observation weight matrix (i.e.: point weights)

    Returns
    -------
    : float64
        Standard deviation of the residuals
    """
    # degrees of freedom: observations minus solved-for parameters
    dof = W_observations.shape[0] - W_parameters.shape[0]
    # weighted sum of squared residuals, V^T * W * V
    vtpv = V.dot(W_observations).dot(V)
    return np.sqrt(vtpv / dof)
def bundle_iteration(J, V, W_parameters, W_observations):
    """
    Perform one least-squares iteration of the bundle adjustment.

    Parameters
    ----------
    J : ndarray
        The Jacobian (design) matrix of partial derivatives.
    V : np.ndarray
        Vector of observation residuals.
    W_parameters : ndarray
        The parameter weight matrix (i.e.: sensor parameters and point weights)
    W_observations : ndarray
        The observation weight matrix (i.e.: measure weights)

    Returns
    -------
    N : ndarray
        The normal equations matrix.
    dX : np.ndarray
        The parameter update (correction) vector solving N * dX = C.
    """
    # normal equations matrix and right-hand side
    N = J.T.dot(W_observations).dot(J) + W_parameters
    C = J.T.dot(W_observations).dot(V)
    # Solve the linear system directly instead of forming the explicit
    # inverse of N (np.linalg.inv(N).dot(C)) -- faster and numerically
    # more stable for ill-conditioned normal matrices.
    dX = np.linalg.solve(N, C)
    return N, dX
# For data snooping we need to calculate updated residuals
def compute_normalized_residual(J, V, N, W_parameters, W_observations):
"""
Computes the normalized residual statistic for the data snooping method. Method derived from
Forstner 1985 "The Reliability of Block Triangulation"
Parameters
----------
V : np.array
The control network dataframe with updated ground points
N :
W_parameters : ndarray
The parameter weight matrix (i.e.: sensor parameters and point weights)
W_observations : ndarray
The observation weight matrix (i.e.: point weights)
Returns
-------
: np.array
Normalized residual statistic for the data snooping
"""
sigma0 = compute_sigma(V, W_parameters, W_observations)
Qxx = np.linalg.inv(N)
Qvv = np.linalg.inv(W_observations) - J.dot(Qxx).dot(J.T)
qvv = np.diagonal(Qvv)
sigma_vi = sigma0*np.sqrt(qvv)
wi = -V/sigma_vi
return wi
# -
# ## Data Snooping Function
# Critical value for the normalized-residual rejection test.
k = 3.29  # critical value from Forstner
# k = 4.1  # critical value from Baarda
def data_snooping(network, sensors, parameters, k):
    """
    Iteratively reject outlier measures from a control network by data
    snooping: run a bundle iteration, compute normalized residuals, reject
    the single worst measure whose statistic exceeds the critical value k,
    and repeat until no measure exceeds k.

    Parameters
    ----------
    network : DataFrame
        The control network as a dataframe generated by plio
    sensors : dict
        A dictionary that maps ISIS serial numbers to CSM sensors
    parameters : list
        The list of CsmParameter to compute the partials W.R.T.
    k : float64
        Critical value used for rejection criteria; defaults to Forstner's 3.29
        (or Baarda's 4.1??)

    Returns
    -------
    : list
        Indices of the network DataFrame that were rejected during data snooping
    """
    # NOTE(review): `net` aliases `network`, so the 'mask' column is added to
    # the caller's DataFrame in place -- confirm this side effect is intended.
    net = network
    # The mask is stored as the *strings* "True"/"False", not booleans.
    net['mask'] = "True"
    rejected_indices = []
    awi = np.array([5, 5, 5, 5]) #initialize larger than k so you get into first iteration
    while (awi > k).any():
        # number of measures still active
        print(len(net[net['mask']=='True']))
        # weight matrices
        # NOTE(review): these two calls pass the full `network` rather than the
        # masked subset used everywhere below -- confirm this is intentional.
        coefficient_columns = compute_coefficient_columns(network, sensors, parameters)
        num_parameters = max(col_range[1] for col_range in coefficient_columns.values())
        W_parameters = compute_parameter_weights(network, sensors, parameters, coefficient_columns)
        # two observations (sample, line) per active measure
        num_observations = 2 * len(net[net['mask']=="True"])
        # identity weights: all observations weighted equally
        W_observations = np.eye(num_observations)
        # bundle iteration (and set up)
        V = compute_residuals(net[net['mask']=="True"], sensors)
        J = compute_jacobian(net[net['mask']=="True"], sensors, parameters, coefficient_columns)
        # NOTE(review): num_parameters, sigma0 and dX are computed but unused here.
        sigma0 = compute_sigma(V, W_parameters, W_observations)
        N, dX = bundle_iteration(J, V, W_parameters, W_observations)
        # calculate test statistic
        wi = compute_normalized_residual(J, V, N, W_parameters, W_observations)
        awi = abs(wi)
        #find maximum
        imax = np.argmax(awi)
        print(f'max wi = {awi[imax]}') # display
        if awi[imax] <= k:
            print('Data Snooping Outlier Rejection Complete')
            break
        else:
            # flag the worst measure as rejected and record its index
            reject = net.index[net['mask']=="True"][imax]
            net.loc[net.index == reject, ['mask']] = "False"
            rejected_indices.append(reject)
    return rejected_indices
rejected_indices = data_snooping(network, sensors, parameters, k)
# plt.figure()
# plt.boxplot(wi)
# NOTE(review): `wi` below is a *local* variable of data_snooping and is not
# defined at this scope, so this cell raises NameError as written -- consider
# having data_snooping also return the final wi array.
plt.figure()
plt.hist(wi[abs(wi) < k], bins=np.linspace(-4,5,50));
plt.hist(wi[abs(wi) > k], bins=np.linspace(-4,5,50));
| examples/data_snooping.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Read CSV of apartment sale data (living area in m^2, sale price).
df = pd.read_csv("../res/wohnungspreise.csv")
# Print first five rows
df.head()
# +
# Scatter plot: living area ("Quadratmeter") vs. sale price ("Verkaufspreis").
import matplotlib.pyplot as plt
plt.scatter(df["Quadratmeter"], df["Verkaufspreis"])
plt.show()
# +
# Fit an ordinary least squares line: price ~ living area.
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(df[["Quadratmeter"]], df[["Verkaufspreis"]])
print("Intercept: " + str(model.intercept_))
print("Coeff: " + str(model.coef_))
# +
# Use/show regression: draw the fitted line across the data's actual x-range.
# (Bug fix: the original predicted at x = 20 and x = 100 but plotted those two
# y-values at x = min_x and x = max_x, so the drawn line was wrong whenever the
# data range differed from [20, 100]. Predict at min_x/max_x instead.)
min_x = min(df["Quadratmeter"])
max_x = max(df["Quadratmeter"])
predicted = model.predict([[min_x], [max_x]])
plt.plot([min_x, max_x], predicted, color = "red")
plt.scatter(df["Quadratmeter"], df["Verkaufspreis"])
| common/tutorials/src/ML_05_linear_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Running TREDMAD
#
# Instructions for installing and running of TREDMAD are as follows.
#
# This is a little involved, more hacking and less pedestrian computer user. Get in touch if you need assistance.
#
# Assuming a Ubuntu 18.04 installation.
#
# Assuming you have cloned this repo https://github.com/jun2tong/bnp-anomaly.git
#
# You need a python 3 venv running. The best instructions for that are here https://www.tensorflow.org/install/pip
#
# Activate your python 3 venv
#
# Change dir to:
#
# /your-path/bnp-anomaly
#
# In that directory run:
#
# pip install -e .
#
# This will install the bnpy code we have packaged in our repo into your venv
#
# You will have problems - you need to pip install things like cython.
#
# The requirements.txt file located in the directory beside this jupyter file has all the packages and version numbers of a working TREDMAD
# installation. You won't need to install everything - install only the things you get errors/warnings/complaints about when you run the
# "pip install -e ."
#
# For instance, running "pip install -e ." the first time will give you an error about cython. Referring to requirements.txt note
# that a working TREDMAD installation has Cython==0.29.20, and issue the following command:
#
# pip install cython==0.29.20
#
# Getting through this part is a little tricky... it changes week to week as the pypi repo evolves... we find randomly
# smashing the keyboard helps. Again if you are this far, and stuck, get in touch.
#
# Once you've successfully installed using "pip install -e ."
#
# Try running
#
# ~/git/bnp-anomaly$ python src/TREDMAD.py
#
# You will again get errors like "hey you don't have xxyy installed"
#
# Use pip to install those missing packages. Again the requirements.txt has every required package so look there for version numbers.
#
# Try running the code.
#
# ~/git/bnp-anomaly$ python src/TREDMAD.py
#
# Again... rinse and repeat until the code runs
#
# By default the code runs SMOVB with Cohesion Mark II against an industrial data file bnp-anomaly/data/test/ds00.csv
#
# The comments in the code will help you run it against other files. If you are interested in a detailed walk through, get in touch
# we can set something up. We have a multiprocessor version that works well, which we can provide if interested.
#
# When the run is complete you will get output bnp-anomaly/data/test_output/ds00.test_alg-0_bs-5_wsib-5.csv
#
# This csv is ready for reading into a dataframe with pandas... and from there the real fun begins.
#
# Thanks Nick and Jun.
| jupyter/running_tredmad.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## Matrix Arithmetic
import numpy as np
# +
# Two 2x2 matrices used throughout the examples below.
x=np.matrix([[1, 2], [3, 4]])
y=np.matrix([[5, 3], [8, 7]])
#Numpy matrices are strictly 2-dimensional, while numpy arrays (ndarrays) are N-dimensional.
#provide a convenient notation for matrix multiplication
# -
print(type(y))
print(x)
# element-wise addition
print(x+y)
# scalar multiplication
print(x*3)
# ## Subtraction
print(x-y)
# ## Multiplication
# +
A=np.matrix([[1, 2], [3, 4]])
print(A)
# -
# matrix product: for np.matrix, * is true matrix multiplication,
# NOT the element-wise (Hadamard) product
print (A*A)
# element-wise (Hadamard) multiplication
print(np.multiply(A,A))
# ## Inverse
from numpy.linalg import inv
print(inv(A))
# ## Transpose
print(A.T) #flip rows with columns
| section2/lecture16_matrix.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# from xrayutilities import Experiment, GID, QConversion
from xrayutilities import Experiment, QConversion, cxrayutilities
from xrayutilities.materials import Si,Ge
# %matplotlib inline
from pylab import *
from diffractometer_swissfel import GID as GID_SF
from xrayutilities import config, math
# +
# Beam energy used by all Q <-> angle conversions below.
config.ENERGY = 9000.
# QConversion(sample circle axes, detector circle axes, primary beam direction)
# -- here two 'y+'/'z+' circles on each side with the beam along x.
qconv = QConversion(['y+', 'z+'], ['y+', 'z+'], [1, 0, 0])
gid = GID_SF(qconv=qconv)
gid.Transform([1,0,0])
gid.Ang2Q(.5,0,1,0)
# gid.scatplane
# config.ENERGY
# gid.Transform([1,0,0])
# print(qconv.transformSample2Lab([0,0,1],45,30)
# gid.Ang2Q.init_area('y+','x+',500,0,1001,601, distance=10,pwidth1=0.075,pwidth2=0.075)
# angs = gid.Q2Ang(Ge.Q(0,1,1),alpha_i=0)
# Fixed incidence angle for the grazing-incidence geometry (presumably degrees
# -- confirm against the GID_SF/xrayutilities convention).
alpha_i = 2
# Diffractometer angles that bring the Ge (011) reflection into diffraction.
angs = gid.Q2Ang(Ge.Q(0,1,1),alpha_i=alpha_i)
print(angs)
# Round-trip check: convert the angles back to Q and compare with Ge.Q(0,1,1).
print(gid.Ang2Q.point(alpha_i,angs[0], angs[1],angs[2]))
print(Ge.Q(0,1,1))
# gid.Ang2Q.point(1,10,10)
# -
math.VecNorm([0.92585652, 0.26200425, 0.272293 ])
| calx/diffractometer/.ipynb_checkpoints/examples-Copy1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import panel as pn
import xarray as xr
import holoviews as hv
import geoviews as gv
import cartopy.crs as ccrs
from earthsim.annotators import PolyAndPointAnnotator
from earthsim.grabcut import GrabCutPanel, SelectRegionPanel
gv.extension('bokeh')
# -
# The GrabCut algorithm provides a way to annotate an image using polygons or lines to demark the foreground and background. The algorithm estimates the color distribution of the target object and that of the background using a Gaussian mixture model. This is used to construct a Markov random field over the pixel labels, with an energy function that prefers connected regions having the same label, and running a graph cut based optimization to infer their values. This procedure is repeated until convergence, resulting in an image mask denoting the foreground and background.
#
# In this example this algorithm is applied to map tiles to automatically extract a coast- and shoreline contour. First we specify a region to download the map tiles in using the `SelectRegionPanel`, then we can declare the ``GrabCutPanel`` to annotate the region and let the algorithm compute a contour.
# Select the map region to download tiles for, with a magnification factor.
select_region = SelectRegionPanel(hv.Bounds((-77.5, 34.4, -77.3, 34.8)), magnification=2)
pn.Row(select_region.param, select_region.view())
# The toolbar in the plot on the left contains two polygon/polyline drawing tools to annotate the image with foreground and background regions respectively. To demonstrate this process in a static notebook there are already two polygons declared, one marking the sea as the foreground and one marking the land as the background.
# (lon, lat) vertices of the closed polygon marking land (background seed).
background = np.array([
    [-77.3777271 , 34.66037492], [-77.35987035, 34.62251189], [-77.34130751, 34.64016586],
    [-77.35563287, 34.65360275], [-77.36083954, 34.66560481], [-77.3777271 , 34.66037492]
])
# (lon, lat) vertices of the closed polygon marking sea (foreground seed).
foreground = np.array([
    [-77.46585666, 34.66965009], [-77.46451121, 34.62795592], [-77.43105867, 34.64501054],
    [-77.41376085, 34.62573423], [-77.37886112,34.63780581], [-77.41283172, 34.6800562 ],
    [-77.46585666, 34.66965009]
])
# Run GrabCut on the downloaded tiles, seeded with the two annotation polygons.
dashboard = GrabCutPanel(select_region.get_tiff(), fg_data=[foreground], bg_data=[background], height=600, width=300)
pn.Row(dashboard.param, dashboard.view())
# We can trigger an update in the extracted contour by pressing the ``Update contour`` button. To speed up the calculation we can also downsample the image before applying the Grabcut algorithm. Once we are done we can view the result in a separate cell:
# Overlay the extracted contour (in red) on ESRI map tiles.
gv.tile_sources.ESRI * dashboard.result.options(width=500, height=600, color='red')
| examples/topics/GrabCut.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## 1. Example: [Plotting Learning Curves](http://scikit-learn.org/stable/auto_examples/model_selection/plot_learning_curve.html#sphx-glr-auto-examples-model-selection-plot-learning-curve-py)
#
# On the left side the learning curve of a naive Bayes classifier is shown for the digits dataset. Note that the training score and the cross-validation score are both not very good at the end. However, the shape of the curve can be found in more complex datasets very often: the training score is very high at the beginning and decreases and the cross-validation score is very low at the beginning and increases. On the right side we see the learning curve of an SVM with RBF kernel. We can see clearly that the training score is still around the maximum and the validation score could be increased with more training samples.
#
#
# +
# print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    """
    Generate a simple plot of the test and training learning curve.
    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    title : string
        Title for the chart.
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    ylim : tuple, shape (ymin, ymax), optional
        Defines minimum and maximum yvalues plotted.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
          - None, to use the default 3-fold cross-validation,
          - integer, to specify the number of folds.
          - An object to be used as a cross-validation generator.
          - An iterable yielding train/test splits.
        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` used. If the estimator is not a classifier
        or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validators that can be used here.
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    train_sizes : array-like, optional
        Relative or absolute numbers of training examples used to generate
        the curve (default: 5 evenly spaced fractions from 10% to 100%).
    Returns
    -------
    plt : module
        The matplotlib.pyplot module with the learning-curve figure drawn,
        ready for ``show()``.
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    # Cross-validated train/test scores at each requested training-set size.
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    # Mean and spread of the scores across CV folds, per training size.
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    # Shade +/- one standard deviation around each mean curve.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")
    return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
# NOTE(review): "$\gamma$" uses a non-raw backslash escape; on Python 3 this
# emits a DeprecationWarning -- prefer a raw string r"..." for LaTeX labels.
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
# -
# ## 2. Exercise
# +
# In this exercise we'll examine a learner which has high variance, and tries to learn
# nonexistant patterns in the data.
# Use the learning curve function from sklearn.learning_curve to plot learning curves
# of both training and testing error.
# CODE YOU HAVE TO TYPE IN IS IN LINE 35
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# PLEASE NOTE:
# In sklearn 0.18, the import would be from sklearn.model_selection import learning_curve
from sklearn.learning_curve import learning_curve # sklearn version 0.17
from sklearn.cross_validation import KFold
from sklearn.metrics import explained_variance_score, make_scorer
import numpy as np
# Set the learning curve parameters; you'll need this for learning_curves
size = 1000
# K-fold CV over `size` samples; shuffle to randomize fold membership.
cv = KFold(size,shuffle=True)
score = make_scorer(explained_variance_score)
# Create a series of data that forces a learner to have high variance:
# 2-D Gaussian inputs rounded to 2 decimals with a nonlinear sine target.
X = np.round(np.reshape(np.random.normal(scale=5,size=2*size),(-1,2)),2)
y = np.array([[np.sin(x[0]+np.sin(x[1]))] for x in X])
def plot_curve():
    """Fit a high-variance regressor and plot its train/test learning curves."""
    # Defining our regression algorithm
    reg = DecisionTreeRegressor()
    # Fit our model using X and y
    # NOTE(review): this fit only feeds the score printed below;
    # learning_curve clones and refits the estimator itself.
    reg.fit(X,y)
    print "Regressor score: {:.4f}".format(reg.score(X,y))
    # TODO: Use learning_curve imported above to create learning curves for both the
    # training data and testing data. You'll need reg, X, y, cv and score from above.
    n_jobs = 4
    # NOTE(review): train_sizes is passed the scalar `size` (1000); sklearn's
    # learning_curve documents an array-like of sizes or fractions -- confirm
    # this behaves as intended on sklearn 0.17.
    train_sizes, train_scores, test_scores = learning_curve(reg, X, y, cv=cv, n_jobs=n_jobs, train_sizes=size)
    # Taking the mean of the test and training scores
    train_scores_mean = np.mean(train_scores,axis=1)
    test_scores_mean = np.mean(test_scores,axis=1)
    # Plotting the training curves and the testing curves using train_scores_mean and test_scores_mean
    plt.plot(train_sizes ,train_scores_mean,'-o',color='b',label="train_scores_mean")
    plt.plot(train_sizes,test_scores_mean ,'-o',color='r',label="test_scores_mean")
    # Plot aesthetics
    plt.ylim(-0.1, 1.1)
    plt.ylabel("Curve Score")
    plt.xlabel("Training Points")
    plt.legend(bbox_to_anchor=(1.1, 1.1))
    plt.show()
# -
| Lesson6-Causes-of-Error/learning_curve_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **[Pandas Home Page](https://www.kaggle.com/learn/pandas)**
#
# ---
#
# # Introduction
#
# The first step in most data analytics projects is reading the data file. In this exercise, you'll create Series and DataFrame objects, both by hand and by reading data files.
#
# Run the code cell below to load libraries you will need (including code to check your answers).
import pandas as pd
# NOTE(review): recent pandas requires the full option name
# 'display.max_rows'; the 'max_rows' shorthand errors on newer versions --
# confirm the kernel's pandas version.
pd.set_option('max_rows', 5)
from learntools.core import binder; binder.bind(globals())
from learntools.pandas.creating_reading_and_writing import *
print("Setup complete.")
# # Exercises
# ## 1.
#
# In the cell below, create a DataFrame `fruits` that looks like this:
#
# 
# +
# Your code goes here. Create a dataframe matching the above diagram and assign it to the variable fruits.
# One-row DataFrame: each dict key becomes a column, each list its values.
fruits = pd.DataFrame({"Apples":[30], "Bananas":[21]})
# Check your answer
q1.check()
fruits
# +
#q1.hint()
#q1.solution()
# -
# ## 2.
#
# Create a dataframe `fruit_sales` that matches the diagram below:
#
# 
# +
# Your code goes here. Create a dataframe matching the above diagram and assign it to the variable fruit_sales.
# Same column layout as before, but with explicit row labels via `index`.
fruit_sales = pd.DataFrame({"Apples":[35, 41], "Bananas":[21, 34]}, index=["2017 Sales", "2018 Sales"])
# Check your answer
q2.check()
fruit_sales
# +
#q2.hint()
#q2.solution()
# -
# ## 3.
#
# Create a variable `ingredients` with a Series that looks like:
#
# ```
# Flour 4 cups
# Milk 1 cup
# Eggs 2 large
# Spam 1 can
# Name: Dinner, dtype: object
# ```
# +
# A Series of quantities indexed by ingredient; `name` labels the Series itself.
ingredients = pd.Series(["4 cups", "1 cup", "2 large", "1 can"], index=['Flour', 'Milk', 'Eggs', 'Spam'], name='Dinner')
# Check your answer
q3.check()
ingredients
# +
#q3.hint()
#q3.solution()
# -
# ## 4.
#
# Read the following csv dataset of wine reviews into a DataFrame called `reviews`:
#
# 
#
# The filepath to the csv file is `../input/wine-reviews/winemag-data_first150k.csv`. The first few lines look like:
#
# ```
# ,country,description,designation,points,price,province,region_1,region_2,variety,winery
# 0,US,"This tremendous 100% varietal wine[...]",Martha's Vineyard,96,235.0,California,Napa Valley,Napa,Cabernet Sauvignon,Heitz
# 1,Spain,"Ripe aromas of fig, blackberry and[...]",Carodorum Selección Especial Reserva,96,110.0,Northern Spain,Toro,,Tinta de Toro,Bodega Carmen Rodríguez
# ```
# index_col=0 uses the CSV's first (unnamed) column as the DataFrame index.
reviews = pd.read_csv("../input/wine-reviews/winemag-data_first150k.csv", index_col=0)
reviews.head()
# Check your answer
q4.check()
reviews
q4.hint()
#q4.solution()
# ## 5.
#
# Run the cell below to create and display a DataFrame called `animals`:
animals = pd.DataFrame({'Cows': [12, 20], 'Goats': [22, 19]}, index=['Year 1', 'Year 2'])
animals
# In the cell below, write code to save this DataFrame to disk as a csv file with the name `cows_and_goats.csv`.
# Your code goes here
# to_csv writes the row index ('Year 1'/'Year 2') as the first column by default.
animals.to_csv("cows_and_goats.csv")  # Check your answer
q5.check()
q5.hint()
q5.solution()
# # Keep going
#
# Move on to learn about **[indexing, selecting and assigning](https://www.kaggle.com/residentmario/indexing-selecting-assigning)**.
# ---
# **[Pandas Home Page](https://www.kaggle.com/learn/pandas)**
#
#
#
#
#
# *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/161299) to chat with other Learners.*
| Pandas/1.Creating, Reading and Writing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import pickle
df_english = pd.read_pickle('english_reviews.pkl')
# NOTE(review): assigning `.dropna()` back to the column realigns on the index,
# so the dropped rows come back as NaN -- this is effectively a no-op; to
# actually drop rows use df_english.dropna(subset=['reviews']). Confirm intent.
df_english['reviews'] = df_english['reviews'].dropna()
# Replace every digit run with the placeholder token 'NUM'.
# ('\d+' should be a raw string r'\d+' to avoid a DeprecationWarning.)
df_english['reviews'] = df_english['reviews'].replace('\d+', 'NUM', regex=True)
df_english.head()
df_french = pd.read_pickle('french_reviews.pkl')
df_french['reviews'] = df_french['reviews'].replace('\d+', 'NUM', regex=True)
df_french.head()
from sklearn.feature_extraction.text import CountVectorizer
# Raw term counts with English stop words removed.
vectorizer = CountVectorizer(stop_words = 'english')
bag_of_words = vectorizer.fit_transform(df_english['reviews'])
bag_of_words
# Learned vocabulary, one entry per column of bag_of_words.
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in favor
# of get_feature_names_out() -- confirm the installed sklearn version.
dictionary = vectorizer.get_feature_names()
dictionary[:10]
from sklearn.decomposition import TruncatedSVD
svd = TruncatedSVD(n_components = 10)
lsa = svd.fit_transform(bag_of_words)
topics_encoded_df = pd.DataFrame(lsa , columns = ['topic_1' , 'topic_2' , 'topic_2', 'topic_2', 'topic_2', 'topic_2', 'topic_2', 'topic_2', 'topic_2', 'topic_2'])
topics_encoded_df['reviews'] = df_english.reviews
topics_encoded_df
encoding_matrix = pd.DataFrame(svd.components_ , index = ['topic_1' , 'topic_2', 'topic_2', 'topic_2', 'topic_2', 'topic_2', 'topic_2', 'topic_2', 'topic_2', 'topic_2'] , columns = dictionary).T
encoding_matrix.tail(50)
from sklearn.feature_extraction.text import TfidfVectorizer
# Same pipeline as above, but with TF-IDF weighting instead of raw counts.
vectorizer_tfidf = TfidfVectorizer(stop_words = 'english')
bag_of_words_tfidf = vectorizer_tfidf.fit_transform(df_english['reviews'])
dictionary_tfidf = vectorizer_tfidf.get_feature_names()
dictionary_tfidf[:10]
from sklearn.decomposition import TruncatedSVD
# LSA on the TF-IDF matrix, this time keeping only 2 latent topics.
svd_tfidf = TruncatedSVD(n_components = 2)
lsa_tfidf = svd_tfidf.fit_transform(bag_of_words_tfidf)
# Document-topic scores, one row per review.
topics_encoded_df_tfidf = pd.DataFrame(lsa_tfidf , columns = ['topic_1' , 'topic_2'])
topics_encoded_df_tfidf['reviews'] = df_english.reviews
topics_encoded_df_tfidf
# Term-to-topic loadings (vocabulary x topics).
encoding_matrix_tfidf = pd.DataFrame(svd_tfidf.components_ , index = ['topic_1' , 'topic_2'] , columns = dictionary_tfidf).T
encoding_matrix_tfidf.head(50)
| LSA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# cd ~/Inzynierka/pythonpic/
from pythonpic.visualization.animation import animation
from pythonpic.classes import load_simulation
from matplotlib import animation as mpl_anim
# Load a finished two-stream-instability run from its HDF5 output file.
S = load_simulation("data_analysis/twostream/TS1/TS1.hdf5")
# Render frame 10 of the simulation animation.
# NOTE(review): the meaning of the second positional argument (`True`) is not
# visible here -- confirm against pythonpic's animation() signature.
animation(S, True, frame_to_draw=10)
# List every matplotlib animation writer and whether it is usable on this system.
for w in mpl_anim.writers.list():
    print(w, mpl_anim.writers.is_available(w), mpl_anim.writers[w])
| docs/ipynb/animation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# metadata:
# interpreter:
# hash: 31862cc71836a94b0e0781803a3648767fc4cb197cc35bade0ddf231ddce7d7c
# name: python3
# ---
# # Usage Overview
# The following is example usage for `harmony-py`.
#
# +
# Under this proposed interface harmony-py would have the concept of three (3) entities: a Request, a Client, and a Job:
# - The Request contains request parameters
# - The Client represents the ability to perform authenticated HTTP with the Harmony endpoint
# - The Job represents what Harmony is working on as well as retrieving the finished work results. It's referenced via 'job_id' and used by the Client.
# Individual parameters are validated when set.
# Not all keyword args need be supplied at once. Also, parameters may be replaced.
# Fix: the original was missing the comma after the `spatial` dict, which made
# this cell a SyntaxError.
request = Request(
    collection=Collection(id='C1940468263-POCLOUD'),
    spatial={'ll': (40, -107),
             'ur': (42, -105)},
    temporal={'start': datetime.date(2020, 6, 1),
              'stop': datetime.date(2020, 6, 30)},
    format=Format.ZARR
)
# Authentication is stored in a client object for subsequent server interaction.
client = Client(Authentication())
# Validation may be performed prior to job processing; uses Harmony server-side checking.
client.validate(request)
# Starts job processing; async by default.
# Fix: `async` is a reserved keyword in Python 3.7+ and cannot be a keyword
# argument name (SyntaxError); renamed to `asynchronous` in this design sketch.
job_id = client.submit(request, verbose=True, asynchronous=True)
# Optional
client.status(job_id)
# Optional
client.cancel(job_id)
# Retrieve results in-region; returns a generator
urls = client.result_urls(job_id, region=Region.US_WEST_2)
# - or -
# Download files to a local directory
client.download(job_id, region=Region.US_WEST_2, directory='./research', overwrite=True)
# -
# # Further Examples
# ## Authentication
# +
# Authentication options:
# .) specify `username` and `password`
# .) specify `username` and receive password prompt
# .) specify .netrc
# .) read .netrc in default location
# .) read .env file
# .) read ENV vars
# Explicit credentials ('<PASSWORD>' is an anonymization placeholder, not a real value).
auth = Authenticate(username='myusername', password='<PASSWORD>')
# or: username only -- per the options above, the password comes from a prompt
auth = Authenticate(username='myusername')
# or: read credentials from an explicitly named .netrc file
auth = Authenticate(netrc='/usr/local/etc/some/path/.netrc')
# or: no arguments -- fall back to default .netrc / .env file / environment variables
auth = Authenticate()
# -
# ## Determine Service Availability and Variables / Working with CMR
# +
# Notes from a previous meeting:
# extend CMR library to UMM-S; nice output
# when and where do we use Harmony's Capabilities documents?
# stick with Python data structures (dicts, lists, etc.)
# output of CMR should be acceptable input to Harmony python lib
# but also allow a user to submit strings as input for Harmony python lib
# understand UMM-Var response (coupled to their metadata format)
# More Notes: We may want to contribute to the CMR python library in order to make feeding data into the Harmony python library easier than what's shown here.
import re
# Import CMR's python library
import cmr.search.collection as coll
import cmr.search.granule as gran
cmr_res = coll.search({'keyword':'MOD09*',
'archive_center': 'lp daac'})
# regex uses a negative look-around assertion
brief = [[r['meta']['concept-id'],
r['umm']['ShortName'],
r['meta']['native-id']] for r in cmr_res if re.search('^((?!mmt_collection_).)*$', r['meta']['native-id'])]
[print(b) for b in brief]
# ['C193529903-LPDAAC_ECS', 'MOD09GQ', 'MODIS/Terra Surface Reflectance Daily L2G Global 250m SIN Grid V006']
# ['C193529902-LPDAAC_ECS', 'MOD09GA', 'MODIS/Terra Surface Reflectance Daily L2G Global 1km and 500m SIN Grid V006']
# ['C193529899-LPDAAC_ECS', 'MOD09A1', 'MODIS/Terra Surface Reflectance 8-Day L3 Global 500m SIN Grid V006']
# ['C193529944-LPDAAC_ECS', 'MOD09Q1', 'MODIS/Terra Surface Reflectance 8-Day L3 Global 250m SIN Grid V006']
# ['C193529901-LPDAAC_ECS', 'MOD09CMG', 'MODIS/Terra Surface Reflectance Daily L3 Global 0.05Deg CMG V006']
####
# The CMR python library does not support variable browsing at this time.
####
# The output from CMR may be used as input to the Harmony python library
req = Request(
collection=cmr_res[0],
spatial=Bbox(lat=(40, 42), lon=(-107, -105)), # could accept bbox, shapely, geojson (polygon)
temporal=Temporal(start=datetime.date(2020, 6, 1), stop=datetime.date(2020, 6, 30)),
format=Format.ZARR
)
# -
# ## Async vs. Sync Request Submit()
# +
# Sync Request
res = Request.submit(req, auth, sync=True)
# Async request; default behavior
res = Request.submit(req, auth)
# Async usage: Poll Harmony status page and display progress updates.
res.update()
# Async usage: Cancel an ongoing job (Result)
res.cancel()
# -
# ## Retrieve Results in Cloud: In / Out of Region; Internet Retrieval
# +
# Notes:
# require the user to be explicit on style of retrieval: in-cloud/in-region vs. over internet
# if in same region, collect k/v pairs for parameters to boto3 constructor
# End-user _must_ specify region keyword argument.
import boto3
import requests
# Downloads files to a local directory. Easy method.
Response.download(res, region=Response.IN_REGION, directory='./research', overwrite=True)
# Downloads files to a local directory but skips files which already exist; Note: doesn't verify existing file size. Easy method.
Response.download(res, region=Response.IN_REGION, directory='./research', overwrite=False)
####
# In-Region; alternative to the above.
# In-Region: download each output object directly from S3.
s3 = boto3.client('s3')
files = Response.files(res, region=Response.IN_REGION)
for f in files:
    # The parameters for each output file are the inputs to this boto method.
    s3.download_file(f.bucket_name, f.object_name, './research/' + f.filename)
# Out-of-Region; alternative to the above easy methods.
files = Response.files(res, region=Response.OUT_OF_REGION)
for f in files:
    r = requests.get(f.url, allow_redirects=True)
    # Fix: use a context manager so the file handle is closed deterministically
    # (the original `open(...).write(...)` relied on garbage collection).
    with open(f.filename, 'wb') as out:
        out.write(r.content)
# -
# ## Error Notification and Handling
#
# Open for suggestions. We probably should raise exceptions as needed and output friendly messages via logging. The same logging will be used for async operation. Async is futures based so the GIL and cooperative multitasking will handle contention for logging output destinations. STDOUT will be the default logging target.
# Notes from 2021/2/8 meeting:
# - no Temporal class
# - use datetime objects but not strings; tuple is fine; allow None on either side
# - could also use a NamedTuple or @dataclass
# - possible python construct here
# - for Bbox
# - don't provide just numbers of the form "12:34" (OGC standard)
# - follow CMR python library usage? Or send them a PR to align things with harmony-py
# - remove req.action
# - Request.validate(req)
# - throw an exception that contains the individual errors
# - 'validate' should exist in Harmony and be a function
# - perform HEAD request (or something that allows for responses in body) and send to Harmony; use server-side validation
# - Request.submit() lift up
# - will need to get Harmony to have status page even on sync requests
# - Response object could yield a sequence of e.g. URLs; watch for coroutine usage on resetting sequence
# - download() would continue to download files in the background
# - instead of IN_REGION it should specify us-west-2 somehow
# - everything requires auth for server communication; a 'client' or something should have auth as a part of it. Does not need to hold up short-term development.
#
# - UMM-S and UMM-Var browsing via CMR python library
# - ... meeting ended here
#
#
| design/checkpoint_02/harmony_py.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Author : <NAME>
# github link : https://github.com/amirshnll/Wine
# dataset link : http://archive.ics.uci.edu/ml/datasets/Wine
# email : <EMAIL>
# -
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
# +
# UCI Wine dataset: 13 chemical features plus the cultivar class label.
# NOTE(review): ' Nonflavanoid phenols' carries a leading space -- confirm intended.
col_names = ['class', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium', 'Total phenols', 'Flavanoids', ' Nonflavanoid phenols','Proanthocyanins','Color intensity','Hue','OD280/OD315 of diluted wines','Proline']
wine =pd.read_csv("wine.csv",header=None, names=col_names)
# -
wine.head()
# Features = everything except the target column.
inputs =wine.drop('class',axis='columns')
target = wine['class']
inputs
# 70/30 train/test split (random each run: no fixed random_state).
X_train, X_test, y_train, y_test = train_test_split(inputs, target, test_size=0.3)
# +
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics  # hoisted: the original re-imported inside the loop

# Evaluate k-NN accuracy on the held-out set for several (odd) neighbourhood sizes.
# Iterate the values directly instead of indexing via range(len(k)).
k=[1,3,5,7,9]
for n_neighbors in k:
    knn = KNeighborsClassifier(n_neighbors=n_neighbors)
    knn.fit(X_train, y_train)
    y_pred = knn.predict(X_test)
    print("Accuracy for k = ",n_neighbors," : ",metrics.accuracy_score(y_test, y_pred))
# -
| knn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 価値反復
#
# 千葉工業大学 上田 隆一
#
# (c) 2017 <NAME>
#
# This software is released under the MIT License, see LICENSE.
#
# ## はじめに
#
# このコードは、価値反復の実装の例です。
#
#
# ## 問題
#
# 次のような簡単な問題を扱います。
#
# 3x3の碁盤の世界を準備します。ロボットはマス目のどこかに存在し、1歩で上下左右に1マス移動できます。右上をゴールにして、あるマス目をスタート地点とするとき、何歩でゴールに達するかを、全マス目に対して計算しましょう。ただし、この世界は壁に囲まれていて、壁を超えて移動しようとすると元のマスに戻されます。
#
#
# ## 準備
#
# 次のような変数を準備します。
#
# * costs: 各マスをスタートにしたときのゴールまでの歩数を記録する2次元のリスト
# * goal: ゴールの座標。ゴールは2つ以上でも良いが、この問題では1個だけ
# * actions: ロボットが選択できる行動のリスト
#
# costsの初期値はデタラメで大丈夫ですが、ゴールに相当するマス目のものだけ正解の0にセットしておきます。
#
# +
# %matplotlib inline
import numpy as np
size = 3 # 3 cells per side
costs = [[1,2,3],[4,5,6],[7,8,0]] # the 3x3 grid world; coordinates are the list indices
goal = (2,2) # a single goal placed at coordinate (2,2)
actions = ["up","down","left","right"] # the actions the robot can choose from
# Helper: return the cost stored at the given coordinate. Coordinates outside
# the grid are clamped back onto it (bumping into a wall keeps the robot in place).
def cost(pos):
    col = min(max(pos[0], 0), size - 1)
    row = min(max(pos[1], 0), size - 1)
    return costs[col][row]
# -
# ## 描画
# +
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def draw(mark_pos, action): # mark_pos is used to label a cell with the chosen action
    """Render the cost grid as a heat map.

    Each cell is annotated with its current cost and the goal cell with "G".
    If mark_pos is given, the action's name is drawn inside that cell.
    """
    fig, ax = plt.subplots()
    # Heat-map of the costs; colour scale fixed to [0, 8].
    mp = ax.pcolor(costs, cmap=plt.cm.YlOrRd,vmin=0,vmax=8)
    ax.set_aspect(1)
    ax.set_xticks(range(size), minor=False)
    ax.set_yticks(range(size), minor=False)
    # Write each cell's cost at its centre (+0.5 offsets into the cell).
    for x in range(len(costs)):
        for y in range(len(costs[0])):
            plt.text(x+0.5,y+0.5,costs[x][y],ha = 'center', va = 'center', size=20)
    plt.text(goal[0]+0.75,goal[1]+0.75,"G",ha = 'center', va = 'center', size=20)
    if mark_pos != None:
        plt.text(mark_pos[0]+0.5,mark_pos[1]+0.25,action,ha = 'center', va = 'center', size=20)
    plt.show()
    fig.clear()
draw(None,None)
# -
# ## 問題を価値反復で解く
#
#
# ### 行動価値の計算式
#
# 歩行の即時コスト1+遷移先のコストを返します。
def action_value(pos, action, goal):
    """Return the action value: the 1-step cost plus the cost of the cell reached.

    The goal cell's value is fixed regardless of the action taken.
    """
    if pos == goal:
        return cost(pos)  # the goal's value never changes
    step = {"up": (0, 1), "down": (0, -1), "left": (-1, 0), "right": (1, 0)}.get(action)
    if step is None:
        return None  # unknown action: same implicit None as the original if/elif chain
    return 1 + cost((pos[0] + step[0], pos[1] + step[1]))
# ### スイープ
#
# 各マス目について、各行動の行動価値を求めて最もコストが低いものを選び、そのときのコストでcostsリストを更新していきます。
# +
def sweep():
    """One value-iteration pass over every cell.

    For each cell, evaluate every action's value, keep the cheapest one
    (first action wins ties, as before), and write the winning cost back
    into `costs`, drawing the grid whenever a cell changes.
    Returns True if any cell was updated.
    """
    updated = False
    for cx in range(size):
        for cy in range(size):
            # min() returns the first action attaining the minimum value,
            # matching the original strict `<` comparison.
            best_action = min(actions, key=lambda a: action_value((cx, cy), a, goal))
            best_cost = action_value((cx, cy), best_action, goal)
            if costs[cx][cy] != best_cost:
                costs[cx][cy] = best_cost
                draw((cx, cy), best_action)
                updated = True
    return updated
draw(None,None)
# Repeat sweeps until a full pass changes nothing, i.e. the values have converged.
changed = True
n = 1
while changed:
    print("sweep",n)
    changed = sweep()
    n = n + 1
draw(None,None)
| value_iteration/simple_value_iteration_no_noise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Calculate the values to be used for normalization the entire training data
#
# We need to looad it all into memory unfortunately
#
# We calculate 1 mean per temperature and save it
# +
# OPTIONAL: Load the "autoreload" extension so that code can change
# %load_ext autoreload
# OPTIONAL: always reload modules so that as you change code in src, it gets loaded
# %autoreload 2
# +
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from joblib import dump, load
from src.data import make_dataset
from src.data import read_dataset
from src.data import util
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# -
# Set seed for reprodubility
np.random.seed(42)
# +
from src.data.read_dataset import read_temperature
def make_temperatures(conversion, testing=False):
    """Split, scale and persist the wav data for each temperature t02..t07.

    For every temperature: load its labelled data, split the rows flagged
    training==1 into train/validation (80/20), fit a StandardScaler on the
    training rows only, and save both the scaler and the three splits as
    compressed numpy archives under ../data/interim/.

    Args:
        conversion: preprocessing variant name ("repeat" or "zero" per the
            calls below), forwarded to read_temperature and used in filenames.
        testing: unused in this body -- TODO confirm whether it can be removed.
    """
    # The 11025 sample columns are named 0..11024; keep them plus the label.
    num_cols = [x for x in range(11025)]
    save_cols = num_cols + ["label"]
    for i in range(2, 8):
        temperature = f"t0{i}"
        df = read_temperature(temperature, conversion)
        train_idx = df["training"] == 1 # get train data
        train_data = df.loc[train_idx]
        test_data = df.loc[~train_idx]
        # Create validation
        train_data, val_data = train_test_split(train_data, test_size=0.2)
        # Train scaler (fit on training rows only, avoiding leakage into val/test)
        scaler = StandardScaler()
        scaler.fit(train_data[num_cols])
        dump(scaler, f"../data/interim/scaler_{conversion}_{temperature}.pkl")
        # Save the data as compressed numpy arrays
        np.savez_compressed(f"../data/interim/all_wavs_{conversion}_{temperature}",
                            train=train_data[save_cols].astype(int),
                            val=val_data[save_cols].astype(int),
                            test=test_data[save_cols].astype(int))
# -
make_temperatures("repeat")
make_temperatures("zero")
| notebooks_tcc/0.2-BrunoGomesCoelho-Prepare-data-temperature.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.6 64-bit (''base'': conda)'
# language: python
# name: python37664bitbaseconda6d6ca68311e247d3a1f06ea5c841201a
# ---
# # Pandas
# Bu doküman pandas paketi ve veri madenciliği uygulamaları hakkında bilgi içerir. pandas kütüphanesi bilgisayarınızda kurulu değilse, pip install pandas komutu ile yükleyebilirsiniz.
# ```python
# pip install pandas
# ```
# ya da
# ```
# conda install pandas
# ```
#
# Pandas Excel gibi görülebilir. Tablo şeklinde verilerle ilgili analiz ve işlemler yapabiliriz. Pandas içerisinde DataFrame nesnesi tanımlanır.
#
# Bu derste verilerin çağrılması, gösterilmesi, dilimlenmesi gibi işlemler gerçekleştirilecek. Ayrıca eksik verilerin doldurulması gibi işlemler yapılacaktır.
#
# Burada kullanılacak olan verileri bu [adresten](https://media.geeksforgeeks.org/wp-content/uploads/employees.csv) indirebilirsiniz.
# +
# pandas kütüphanesinin çağrılması
import pandas as pd
import numpy as np
# Verilerin pandas objesi olarak alınması
df = pd.read_csv('employees.csv')
# -
# Verisetine ait ilk 5 satırın gösterilmesi
df.head()
# ## Reading as a DataFrame
# Any column of a DataFrame can be accessed as an attribute:
# DataFrame.column_name
print (df.Gender)
# Show the minimum, maximum and mean of the Salary column
print (df.Salary.min(), df.Salary.max(), df.Salary.mean())
# ## Eksik Verilerin Tanımlanması
#
# ### None: Python'daki boş olan veriler için kullanılır.
#
#
degerler1 = np.array([1,2, None, 5, 2])
type(degerler1[2]) # None has its own type (NoneType)
degerler1.sum() # Arithmetic fails: None forces an object-dtype array
# ### NaN(Not a Number)
degerler2 = np.array([1, np.nan, 2, 5])
type(degerler2[1]) # NaN is a numpy float
degerler2.sum() # NaN breaks the arithmetic (the result is NaN)
degerler2[1]+1 # Adding any number to NaN yields NaN again
# NaN therefore has to be handled with care.
# To aggregate over the values while ignoring NaN:
np.nansum(degerler2), np.nanmin(degerler2), np.nanmax(degerler2)
# ## Pandas ile Eksik Veri İşlemleri
# 1. `isnull()`: Eksik(`NaN`) veriler için `True` değerini dönderir.
# 2. `notnull()`: Eksik(`NaN`) veriler için `False` değerini dönderir.
# 3. `dropna()`: Eksik verileri filtreleyip dönderir.
# 4. `fillna()`: Eksik verileri doldurup dönderir.
#
data = pd.Series([1, np.nan, 'merhaba', None])
data.isnull()
data.notnull()
data[data.notnull()]
data.dropna() # `data` itself is unchanged
data
data.dropna(inplace=True) # modifies the `data` Series in place
df = pd.DataFrame([[1, np.nan, 2],[2, 3, 5],[np.nan, 4, 6]]) # removing NaN values from a 2-D table
df.dropna() # drops every row that contains a NaN
df.dropna(axis='columns') # drops every column that contains a NaN
df
# To drop only the columns that are entirely NaN:
df[3]=np.nan
df # let's drop only column 3.
df.dropna(axis='columns', how='all')
df.dropna(axis='rows', thresh=3) # thresh = how many non-NaN values a row must have to be kept
# ### Filling in NaN values
data = pd.Series([1, np.nan, 2, None, 3], index=list('abcde'))
data
data.fillna(0) # fill missing values with 0
data.fillna(method='ffill') # fill with the previous value (forward fill)
data.fillna(method='bfill') # fill with the next value (backward fill)
df
df.fillna(method='ffill', axis=1) # the NaN in row 3 remains: there is no earlier value to copy
# ### Groupby
# Fix: the 'key' list was garbled to "['<KEY> 'A', 'B', 'C']" (anonymization
# residue), which is a SyntaxError and, once repaired to three keys, would not
# match range(6). Restore six keys so both columns have equal length.
df = pd.DataFrame({'key': ['A', 'B', 'C', 'A', 'B', 'C'], 'data': range(6)}, columns=['key', 'data'])
df
# Sum of `data` within each key group
df.groupby('key').sum()
# apply: run an arbitrary function over each group's rows
df.groupby('key').apply(lambda x:x['data']+1)
df.groupby('key').sum()
# ### Pivot Tablolar (Çapraz Tablolar)
dogumlar = pd.read_csv("births.csv")
dogumlar.head()
dogumlar.groupby('gender')['births'].sum() # number of female and male births, separately
dogumlar.groupby(['gender','year'])['births'].sum()
# +
# Or with the pivot_table function
dogumlar.pivot_table('births', index='gender', columns='month') # how many births per month?
# -
# Bucket the years into decades (integer division by 10, then scale back up).
dogumlar['onyil'] = 10 * (dogumlar['year'] // 10)
dogumlar
dogumlar.pivot_table('births', index='onyil', columns='gender', aggfunc='sum')
# ## Pandas dokümantasyonları
#
# 1. [Pandas sayfası](https://pandas.pydata.org/)
# 2. [Pandas soru cevap](https://stackoverflow.com/questions/tagged/pandas)
# 3. [Pandas Video](https://pyvideo.org/tag/pandas/)
| uygulama_dersleri/pandas_egitim.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
### duffing oscillator
import matplotlib
import numpy as np
from numpy import zeros, linspace, pi, cos, array
from matplotlib import pyplot as plt
# Duffing oscillator parameters for u'' = -delta*u' + alpha*u - beta*u**3 + gamma*cos(omega*t)
t0=0
tf=30*pi  # total integration time
omega=1.2  # driving (forcing) frequency
beta=1  # cubic stiffness coefficient
delta=0.3  # damping coefficient
gamma=0.38  # forcing amplitude
alpha=1  # linear stiffness coefficient
n=10000 #iteration
h=(tf-t0)/(n-1) #stepsize
print('the value of h is',h)
u0=0 #initial displacement
t=linspace(t0,tf,n)  # uniform time grid
v=zeros([n])  # velocity history
u=zeros([n])  # displacement history
u[0]=u0
v[0]=0 #initial velocity
##### DEFINING FUNCTIONS
def dudt(t, u, v):
    """First equation of the Duffing system: the displacement rate u' equals the velocity v."""
    return v
def funt(t,u,v): #### v' = -delta*v+alpha*u-beta*u**3+gamma*cos(omega*t)
    # Duffing acceleration: damping, linear + cubic stiffness, periodic forcing.
    # Reads the module-level parameters delta, alpha, beta, gamma, omega.
    return (-delta*v+alpha*u-beta*u**3+gamma*cos(omega*t))
###### RK4 ALGORITHM USING FOR LOOP
# Classic 4th-order Runge-Kutta for the coupled system (u, v):
# k1..k4 are the (h-scaled) slope samples for u, l1..l4 for v; the pairs are
# advanced together because each slope depends on both u and v.
for i in range(1,n):
    k1=h*dudt(t[i-1],u[i-1],v[i-1])
    l1=h*funt(t[i-1],u[i-1],v[i-1])
    # two midpoint evaluations at t + h/2 using the previous slope estimates
    k2=h*dudt(t[i-1]+(0.5*h),u[i-1]+(k1*0.5),v[i-1]+(l1*0.5))
    l2=h*funt(t[i-1]+(0.5*h),u[i-1]+(k1*0.5),v[i-1]+(l1*0.5))
    k3=h*dudt(t[i-1]+(0.5*h),u[i-1]+(k2*0.5),v[i-1]+(l2*0.5))
    l3=h*funt(t[i-1]+(0.5*h),u[i-1]+(k2*0.5),v[i-1]+(l2*0.5))
    # full-step evaluation at t + h
    k4=h*dudt(t[i-1]+h,u[i-1]+(k3),v[i-1]+(l3))
    l4=h*funt(t[i-1]+h,u[i-1]+(k3),v[i-1]+(l3))
    # weighted average of the four slope samples (1:2:2:1)
    u[i]=u[i-1]+(1/6)*(k1+(2*k2)+(2*k3)+k4)
    v[i]=v[i-1]+(1/6)*(l1+(2*l2)+(2*l3)+l4)
### PLOT
# Time series of the displacement.
plt.plot(t,u,'-r')
plt.xlabel('time(t)')
plt.ylabel('displacement(u)')
plt.show()
print('The value of GAMMA =',gamma)
# Phase portrait (u, v), discarding the first half of the run as transient.
fig = plt.figure()
plt.title("DUFFING_phaseplot_gamma=0.38")
plt.plot(u[5000:10000],v[5000:10000],'-g')
plt.xlabel('displacement(u)')
plt.ylabel('velocity(v)')
plt.show()
fig.savefig('duff_plot_gamma=0.38.png')
fig.savefig('duff_plot_gamma=0.38.eps')
#### SAMPLING
# Subsample the phase portrait every 300 steps and save the points to disk.
print("SAMPLING")
fig = plt.figure()
plt.plot(u[5000:10000:300],v[5000:10000:300],'.g')
data300=u[5000:10000:300],v[5000:10000:300]
np.savetxt('RAW_t=300_data(0.38).dat', data300)
plt.title("SAMPLING_t=300,gamma=0.38")
plt.xlabel('displacement(u)')
plt.ylabel('velocity(v)')
plt.show()
fig.savefig('duff_sampling_gamma=0.38_t=300.png')
fig.savefig('duff_sampling_gamma=0.38_t=300.eps')
# +
### PH for t=300------gamma=0.38
# Persistent homology of the sampled phase-space points, using ripser/persim.
import matplotlib
import numpy as np
from ripser import ripser
from persim import plot_diagrams
from matplotlib import pyplot as plt
# Load the (2, N) array saved by the sampling cell above.
data = np.loadtxt('RAW_t=300_data(0.38).dat')
#print(data)
fig = plt.figure()
# Transpose to (N, 2): ripser expects one point per row.
data1=data.transpose()
#print(data1)
plt.title("PERSISTENCE DIAGRAM")
# Compute persistence diagrams up to dimension 2.
diagrams = ripser(data1, maxdim=2)['dgms']
#fig.savefig('persistence diagram_gamma=0.38,t=300.png')
#fig.savefig('persistence diagram_gamma=0.38,t=300.eps')
#print(diagrams)
np.savetxt('ripser_plot_gamma=0.38,t=300.dat', diagrams, fmt='%s')
plot_diagrams(diagrams, show=True)
# Lifetime view: plot (birth, death - birth) instead of (birth, death).
plot_diagrams(diagrams, lifetime=True)
# -
| Stuff From Joel&Rishab/Duff(28-5-20)/gamma=0.38/t=300/duff_gamma=0.38_t=300.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="23R0Z9RojXYW"
# # Ungraded Lab: Feature Engineering Pipeline
#
#
# In this lab, you will continue exploring [Tensorflow Transform](https://www.tensorflow.org/tfx/transform/get_started). This time, it will be in the context of a machine learning (ML) pipeline. In production-grade projects, you want to streamline tasks so you can more easily improve your model or find issues that may arise. [Tensorflow Extended (TFX)](https://www.tensorflow.org/tfx) provides components that work together to execute the most common steps in a machine learning project. If you want to dig deeper into the motivations behind TFX and the need for machine learning pipelines, you can read about it in [this paper](https://storage.googleapis.com/pub-tools-public-publication-data/pdf/b500d77bc4f518a1165c0ab43c8fac5d2948bc14.pdf) and in this [blog post](https://blog.tensorflow.org/2020/09/brief-history-of-tensorflow-extended-tfx.html).
#
# You will build end-to-end pipelines in future courses but for this one, you will only build up to the feature engineering part. Specifically, you will:
#
# * ingest data from a base directory with `ExampleGen`
# * compute the statistics of the training data with `StatisticsGen`
# * infer a schema with `SchemaGen`
# * detect anomalies in the evaluation data with `ExampleValidator`
# * preprocess the data into features suitable for model training with `Transform`
#
# If several steps mentioned above sound familiar, it's because the TFX components that deal with data validation and analysis (i.e. `StatisticsGen`, `SchemaGen`, `ExampleValidator`) uses [Tensorflow Data Validation (TFDV)](https://www.tensorflow.org/tfx/data_validation/get_started) under the hood. You're already familiar with this library from the exercises in Week 1 and for this week, you'll see how it fits within an ML pipeline.
#
# The components you will use are the orange boxes highlighted in the figure below:
#
# <img src='img/feature_eng_pipeline.png'>
#
#
# + [markdown] id="2GivNBNYjb3b"
# ## Setup
# + [markdown] id="N-ePgV0Lj68Q"
# ### Import packages
#
# Let's begin by importing the required packages and modules. In case you want to replicate this in your local workstation, we used *Tensorflow v2.6* and *TFX v1.3.0*.
# + id="YIqpWK9efviJ"
import tensorflow as tf
from tfx import v1 as tfx
from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext
from google.protobuf.json_format import MessageToDict
import os
import pprint
pp = pprint.PrettyPrinter()
# -
# ### Define paths
#
# You will define a few global variables to indicate paths in the local workspace.
# +
# location of the pipeline metadata store
_pipeline_root = './pipeline/'
# directory of the raw data files
_data_root = './data/census_data'
# path to the raw training data
_data_filepath = os.path.join(_data_root, 'adult.data')
# + [markdown] id="n2cMMAbSkGfX"
# ### Preview the dataset
#
# You will again be using the [Census Income dataset](https://archive.ics.uci.edu/ml/datasets/Adult) from the Week 1 ungraded lab so you can compare outputs when just using stand-alone TFDV and when using it under TFX. Just to remind, the data can be used to predict if an individual earns more than or less than 50k US Dollars annually. Here is the description of the features again:
#
#
# * **age**: continuous.
# * **workclass**: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.
# * **fnlwgt**: continuous.
# * **education**: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
# * **education-num**: continuous.
# * **marital-status**: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse.
# * **occupation**: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces.
# * **relationship**: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
# * **race**: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.
# * **sex**: Female, Male.
# * **capital-gain**: continuous.
# * **capital-loss**: continuous.
# * **hours-per-week**: continuous.
# * **native-country**: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
# + id="c5YPeLPFOXaD"
# preview the first few rows of the CSV file
# !head {_data_filepath}
# + [markdown] id="8ONIE_hdkPS4"
# ### Create the Interactive Context
#
# When pushing to production, you want to automate the pipeline execution using orchestrators such as [Apache Beam](https://beam.apache.org/) and [Kubeflow](https://www.kubeflow.org/). You will not be doing that just yet and will instead execute the pipeline from this notebook. When experimenting in a notebook environment, you will be *manually* executing the pipeline components (i.e. you are the orchestrator). For that, TFX provides the [Interactive Context](https://github.com/tensorflow/tfx/blob/master/tfx/orchestration/experimental/interactive/interactive_context.py) so you can step through each component and inspect its outputs.
# -
# You will initialize the `InteractiveContext` below. This will create a database in the `_pipeline_root` directory which the different components will use to save or get the state of the component executions. You will learn more about this in Week 3 when we discuss ML Metadata. For now, you can think of it as the data store that makes it possible for the different pipeline components to work together.
#
# *Note: You can configure the database to connect to but for this exercise, we will just use the default which is a newly created local sqlite file.* ***You will see the warning after running the cell below and you can safely ignore it.***
# + id="0Rh6K5sUf9dd"
# Initialize the InteractiveContext with a local sqlite file.
# If you leave `_pipeline_root` blank, then the db will be created in a temporary directory.
# You can safely ignore the warning about the missing config file.
context = InteractiveContext(pipeline_root=_pipeline_root)
# + [markdown] id="HdQWxfsVkzdJ"
# ## Run TFX components interactively
#
# With that, you can now run the pipeline interactively. You will see how to do that as you go through the different components below.
# + [markdown] id="L9fwt9gQk3BR"
# ### ExampleGen
#
# You will start the pipeline with the [ExampleGen](https://www.tensorflow.org/tfx/guide/examplegen) component. This will:
#
# * split the data into training and evaluation sets (by default: 2/3 train, 1/3 eval).
# * convert each data row into `tf.train.Example` format. This [protocol buffer](https://developers.google.com/protocol-buffers) is designed for Tensorflow operations and is used by the TFX components.
# * compress and save the data collection under the `_pipeline_root` directory for other components to access. These examples are stored in `TFRecord` format. This optimizes read and write operations within Tensorflow especially if you have a large collection of data.
#
# Its constructor takes the path to your data source/directory. In our case, this is the `_data_root` path. The component supports several data sources such as CSV, tf.Record, and BigQuery. Since our data is a CSV file, we will use [CsvExampleGen](https://www.tensorflow.org/tfx/api_docs/python/tfx/components/CsvExampleGen) to ingest the data.
#
# Run the cell below to instantiate `CsvExampleGen`.
# + id="PyXjuMt8f-9u"
# Instantiate ExampleGen with the input CSV dataset
example_gen = tfx.components.CsvExampleGen(input_base=_data_root)
# -
# You can execute the component by calling the `run()` method of the `InteractiveContext`.
# Execute the component
context.run(example_gen)
# + [markdown] id="OqCoZh7KPUm9"
# You will notice that an output cell showing the execution results is automatically shown. This metadata is recorded into the database created earlier. This allows you to keep track of your project runs. For example, if you run it again, you will notice the `.execution_id` incrementing.
#
# The output of the components are called *artifacts* and you can see an example by navigating through `.component.outputs > ['examples'] > Channel > ._artifacts > [0]` above. It shows information such as where the converted data is stored (`.uri`) and the splits generated (`.split_names`).
#
# You can also examine the output artifacts programmatically with the code below.
# + id="880KkTAkPeUg"
# get the artifact object
artifact = example_gen.outputs['examples'].get()[0]
# print split names and uri
print(f'split names: {artifact.split_names}')
print(f'artifact uri: {artifact.uri}')
# -
# If you're wondering , the `number` in `./pipeline/CsvExampleGen/examples/{number}` is the execution id associated with that dataset. If you restart the kernel of this workspace and re-run up to this cell, you will notice a new folder with a different id name created. This shows that TFX is keeping versions of your data so you can roll back if you want to investigate a particular execution.
# + [markdown] id="J6vcbW_wPqvl"
# As mentioned, the ingested data is stored in the directory shown in the `uri` field. It is also compressed using `gzip` and you can verify by running the cell below.
# +
# Get the URI of the output artifact representing the training examples
train_uri = os.path.join(artifact.uri, 'Split-train')
# See the contents of the `train` folder
# !ls {train_uri}
# -
# In a notebook environment, it may be useful to examine a few examples of the data especially if you're still experimenting. Since the data collection is saved in [TFRecord format](https://www.tensorflow.org/tutorials/load_data/tfrecord), you will need to use methods that work with that data type. You will need to unpack the individual examples from the `TFRecord` file and format it for printing. Let's do that in the following cells:
# + id="H4XIXjiCPwzQ"
# Get the list of files in this directory (all compressed TFRecord files)
tfrecord_filenames = [os.path.join(train_uri, name)
for name in os.listdir(train_uri)]
# Create a `TFRecordDataset` to read these files
dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
# -
# Define a helper function to get individual examples
def get_records(dataset, num_records):
    '''Extracts records from the given dataset.
    Args:
        dataset (TFRecordDataset): dataset saved by ExampleGen
        num_records (int): number of records to preview
    Returns:
        list: one Python dictionary per record, decoded from the
            serialized `tf.train.Example` protocol buffer message
    '''
    records = []
    # `take()` limits iteration to the first `num_records` elements
    for raw_record in dataset.take(num_records):
        # Parse the serialized bytes into an Example protocol buffer message
        example = tf.train.Example()
        example.ParseFromString(raw_record.numpy())
        # Convert the protocol buffer message to a plain Python dictionary
        records.append(MessageToDict(example))
    return records
# +
# Get 3 records from the dataset
sample_records = get_records(dataset, 3)
# Print the output
# (pp is presumably a pprint.PrettyPrinter created earlier in the notebook -- defined outside this chunk)
pp.pprint(sample_records)
# + [markdown] id="2gluYjccf-IP"
# Now that `ExampleGen` has finished ingesting the data, the next step is data analysis.
# + [markdown] id="csM6BFhtk5Aa"
# ### StatisticsGen
# The [StatisticsGen](https://www.tensorflow.org/tfx/guide/statsgen) component computes statistics over your dataset for data analysis, as well as for use in downstream components (i.e. next steps in the pipeline). As mentioned earlier, this component uses TFDV under the hood so its output will be familiar to you.
#
# `StatisticsGen` takes as input the dataset we just ingested using `CsvExampleGen`.
# + id="MAscCCYWgA-9"
# Instantiate StatisticsGen with the ExampleGen ingested dataset
statistics_gen = tfx.components.StatisticsGen(
    examples=example_gen.outputs['examples'])
# Execute the component
# (`context` is the interactive pipeline context created earlier, outside this chunk)
context.run(statistics_gen)
# -
# You can display the statistics with the `show()` method.
#
# *Note: You can safely ignore the warning shown when running the cell below.*
# Show the output statistics
# (renders the statistics visualization inline in the notebook)
context.show(statistics_gen.outputs['statistics'])
# + [markdown] id="HLKLTO9Nk60p"
# ### SchemaGen
#
# The [SchemaGen](https://www.tensorflow.org/tfx/guide/schemagen) component also uses TFDV to generate a schema based on your data statistics. As you've learned previously, a schema defines the expected bounds, types, and properties of the features in your dataset.
#
# `SchemaGen` will take as input the statistics that we generated with `StatisticsGen`, looking at the training split by default.
# + id="ygQvZ6hsiQ_J"
# Instantiate SchemaGen with the StatisticsGen ingested dataset
# (only the statistics are supplied; all other arguments keep their defaults)
schema_gen = tfx.components.SchemaGen(
    statistics=statistics_gen.outputs['statistics'],
    )
# Run the component
context.run(schema_gen)
# + [markdown] id="zi6TxTUKXM6b"
# You can then visualize the generated schema as a table.
# + id="Ec9vqDXpXeMb"
# Visualize the schema
# (rendered inline as a table)
context.show(schema_gen.outputs['schema'])
# -
# Let's now move to the next step in the pipeline and see if there are any anomalies in the data.
# + [markdown] id="V1qcUuO9k9f8"
# ### ExampleValidator
#
# The [ExampleValidator](https://www.tensorflow.org/tfx/guide/exampleval) component detects anomalies in your data based on the generated schema from the previous step. Like the previous two components, it also uses TFDV under the hood.
#
# `ExampleValidator` will take as input the statistics from `StatisticsGen` and the schema from `SchemaGen`. By default, it compares the statistics from the evaluation split to the schema from the training split.
# + id="XRlRUuGgiXks"
# Instantiate ExampleValidator with the StatisticsGen and SchemaGen ingested data
# (compares the computed statistics against the schema to flag anomalies)
example_validator = tfx.components.ExampleValidator(
    statistics=statistics_gen.outputs['statistics'],
    schema=schema_gen.outputs['schema'])
# Run the component.
context.run(example_validator)
# + [markdown] id="855mrHgJcoer"
# As with the previous component, you can also visualize the anomalies as a table.
# + id="TDyAAozQcrk3"
# Visualize the results
# (an empty table means no anomalies were detected)
context.show(example_validator.outputs['anomalies'])
# -
# With no anomalies detected, you can proceed to the next step in the pipeline.
# + [markdown] id="JPViEz5RlA36"
# ### Transform
# The [Transform](https://www.tensorflow.org/tfx/guide/transform) component performs feature engineering for both training and serving datasets. It uses the [TensorFlow Transform](https://www.tensorflow.org/tfx/transform/get_started) library introduced in the first ungraded lab of this week.
#
# `Transform` will take as input the data from `ExampleGen`, the schema from `SchemaGen`, as well as a module containing the preprocessing function.
#
# In this section, you will work on an example of user-defined Transform code. The pipeline needs to load this as a module so you need to use the magic command `%%writefile` to save the file to disk. Let's first define a few constants that group the data's attributes according to the transforms we will perform later. This file will also be saved locally.
# + id="PuNSiUKb4YJf"
# Set the constants module filename
# (the file is written below via the %%writefile magic)
_census_constants_module_file = 'census_constants.py'
# + id="HPjhXuIF4YJh"
# %%writefile {_census_constants_module_file}
# Features with string data types that will be converted to indices
CATEGORICAL_FEATURE_KEYS = [
    'education', 'marital-status', 'occupation', 'race', 'relationship', 'workclass', 'sex', 'native-country'
]
# Numerical features that are marked as continuous
NUMERIC_FEATURE_KEYS = ['fnlwgt', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']
# Feature that can be grouped into buckets
BUCKET_FEATURE_KEYS = ['age']
# Number of buckets used by tf.transform for encoding each bucket feature.
# (every key in BUCKET_FEATURE_KEYS needs an entry here)
FEATURE_BUCKET_COUNT = {'age': 4}
# Feature that the model will predict
LABEL_KEY = 'label'
# Utility function for renaming the feature
def transformed_name(key):
    """Return the post-transform name for raw feature `key` (appends '_xf')."""
    return '{}_xf'.format(key)
# + [markdown] id="Duj2Ax5z4YJl"
# Next, you will work on the module that contains `preprocessing_fn()`. As you've seen in the previous lab, this function defines how you will transform the raw data into features that your model can train on (i.e. the next step in the pipeline). You will use the [tft module functions](https://www.tensorflow.org/tfx/transform/api_docs/python/tft) to make these transformations.
#
# *Note: After completing the entire notebook, we encourage you to go back to this section and try different tft functions aside from the ones already provided below. You can also modify the grouping of the feature keys in the constants file if you want. For example, you may want to scale some features to `[0, 1]` while others are scaled to the z-score. This will be good practice for this week's assignment.*
# + id="4AJ9hBs94YJm"
# Set the transform module filename
# (written to disk below with %%writefile so the Transform component can load it)
_census_transform_module_file = 'census_transform.py'
# + id="MYmxxx9A4YJn"
# %%writefile {_census_transform_module_file}
import tensorflow as tf
import tensorflow_transform as tft
import census_constants
# Unpack the contents of the constants module
# (aliased to module-private underscore names for use inside preprocessing_fn)
_NUMERIC_FEATURE_KEYS = census_constants.NUMERIC_FEATURE_KEYS
_CATEGORICAL_FEATURE_KEYS = census_constants.CATEGORICAL_FEATURE_KEYS
_BUCKET_FEATURE_KEYS = census_constants.BUCKET_FEATURE_KEYS
_FEATURE_BUCKET_COUNT = census_constants.FEATURE_BUCKET_COUNT
_LABEL_KEY = census_constants.LABEL_KEY
_transformed_name = census_constants.transformed_name
# Define the transformations
def preprocessing_fn(inputs):
    """tf.transform's callback function for preprocessing inputs.
    Args:
        inputs: map from feature keys to raw not-yet-transformed features.
    Returns:
        Map from string feature key to transformed feature operations.
    """
    # Scale the numeric features to the range [0, 1]
    scaled = {
        _transformed_name(key): tft.scale_to_0_1(inputs[key])
        for key in _NUMERIC_FEATURE_KEYS
    }
    # Bucketize the bucket features using the configured bucket counts
    bucketized = {
        _transformed_name(key): tft.bucketize(inputs[key], _FEATURE_BUCKET_COUNT[key])
        for key in _BUCKET_FEATURE_KEYS
    }
    # Convert categorical strings to indices in a generated vocabulary
    indexed = {
        _transformed_name(key): tft.compute_and_apply_vocabulary(inputs[key])
        for key in _CATEGORICAL_FEATURE_KEYS
    }
    # The label strings are likewise converted to a vocabulary index
    label = {
        _transformed_name(_LABEL_KEY): tft.compute_and_apply_vocabulary(inputs[_LABEL_KEY])
    }
    return {**scaled, **bucketized, **indexed, **label}
# + [markdown] id="wgbmZr3sgbWW"
# You can now pass the training data, schema, and transform module to the `Transform` component. You can ignore the warning messages generated by Apache Beam regarding type hints.
# + id="jHfhth_GiZI9"
# Ignore TF warning messages
tf.get_logger().setLevel('ERROR')
# Instantiate the Transform component
# (module_file is made absolute -- presumably so the pipeline can locate the
# module regardless of the working directory; confirm if relocating the notebook)
transform = tfx.components.Transform(
    examples=example_gen.outputs['examples'],
    schema=schema_gen.outputs['schema'],
    module_file=os.path.abspath(_census_transform_module_file))
# Run the component
context.run(transform)
# + [markdown] id="fwAwb4rARRQ2"
# Let's examine the output artifacts of `Transform` (i.e. `.component.outputs` from the output cell above). This component produces several outputs:
#
# * `transform_graph` is the graph that can perform the preprocessing operations. This graph will be included during training and serving to ensure consistent transformations of incoming data.
# * `transformed_examples` points to the preprocessed training and evaluation data.
# * `updated_analyzer_cache` are stored calculations from previous runs.
# + [markdown] id="vyFkBd9AR1sy"
# Take a peek at the `transform_graph` artifact. It points to a directory containing three subdirectories.
# + id="5tRw4DneR3i7"
# Get the uri of the transform graph
transform_graph_uri = transform.outputs['transform_graph'].get()[0].uri
# List the subdirectories under the uri
# (expected: metadata, transformed_metadata, transform_fn -- described below)
os.listdir(transform_graph_uri)
# + [markdown] id="4fqV54CIR6Pu"
# * The `metadata` subdirectory contains the schema of the original data.
# * The `transformed_metadata` subdirectory contains the schema of the preprocessed data.
# * The `transform_fn` subdirectory contains the actual preprocessing graph.
#
# You can also take a look at the first three transformed examples using the helper function defined earlier.
# + id="pwbW2zPKR_S4"
# Get the URI of the output artifact representing the transformed examples
train_uri = os.path.join(transform.outputs['transformed_examples'].get()[0].uri, 'Split-train')
# Get the list of files in this directory (all compressed TFRecord files)
tfrecord_filenames = [os.path.join(train_uri, name)
                      for name in os.listdir(train_uri)]
# Create a `TFRecordDataset` to read these files
# (GZIP again matches the compression used when the records were written)
transformed_dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
# + id="mSDZ2rJC7NQW"
# Get 3 records from the dataset
sample_records_xf = get_records(transformed_dataset, 3)
# Print the output
pp.pprint(sample_records_xf)
# -
# **Congratulations!** You have now executed all the components in our pipeline. You will get hands-on practice as well with training and model evaluation in future courses but for now, we encourage you to try exploring the different components we just discussed. As mentioned earlier, a useful exercise for the upcoming assignment is to be familiar with using different `tft` functions in your transform module. Try exploring the [documentation](https://www.tensorflow.org/tfx/transform/api_docs/python/tft) and see what other functions you can use in the transform module. You can also do the optional challenge below for more practice.
#
# **Optional Challenge:** Using this notebook as reference, load the [Seoul Bike Sharing Demand Dataset](https://archive.ics.uci.edu/ml/datasets/Seoul+Bike+Sharing+Demand) and run it through the five stages of the pipeline discussed here. You will first go through the data ingestion and validation components then finally, you will study the dataset's features and transform it to a format that a model can consume. Once you're done, you can visit this [Discourse topic](https://community.deeplearning.ai/t/bike-sharing-dataset-in-c2-w2-lab-2-feature-engineering-pipeline/38979) where one of your mentors, Fabio, has shared his solution. Feel free to discuss and share your solution as well!
| c2-machine-learning-data-lifecycle-in-production/week2/C2_W2_Lab_2_Feature_Engineering_Pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Select the GPU before anything else initializes Theano
from theano.sandbox import cuda
cuda.use('gpu0')
# %matplotlib inline
# Re-import utils so local edits are picked up without restarting the kernel
from imp import reload
import utils; reload(utils)
from utils import *
# NOTE(review): a __future__ import must be the first statement of a module;
# this works cell-by-cell in a notebook but would fail if run as a plain script.
from __future__ import division, print_function
# ## Setup
# +
# Hard-coded dataset location; adjust for your machine
path = "/home/anand/store/datasets/fast.ai.data/dogscats/"
# Directory where precomputed features and model weights are cached
model_path = path + 'models/'
if not os.path.exists(model_path): os.mkdir(model_path)
batch_size=64
# -
# shuffle=False -- presumably so batch order stays aligned with the label and
# filename arrays returned by get_classes; confirm in utils.get_batches.
batches = get_batches(path+'train', shuffle=False, batch_size=batch_size)
val_batches = get_batches(path+'valid', shuffle=False, batch_size=batch_size)
(val_classes, trn_classes, val_labels, trn_labels,
    val_filenames, filenames, test_filenames) = get_classes(path)
# In this notebook we're going to create an ensemble of models and use their average as our predictions. For each ensemble, we're going to follow our usual fine-tuning steps:
#
# 1) Create a model that retrains just the last layer
# 2) Add this to a model containing all VGG layers except the last layer
# 3) Fine-tune just the dense layers of this model (pre-computing the convolutional layers)
# 4) Add data augmentation, fine-tuning the dense layers without pre-computation.
#
# So first, we need to create our VGG model and pre-compute the output of the conv layers:
model = Vgg16().model
# Split the VGG16 model at the last convolutional layer
conv_layers,fc_layers = split_at(model, Convolution2D)
conv_model = Sequential(conv_layers)
# Precompute conv-layer outputs for the fixed (unshuffled) train/valid batches
val_features = conv_model.predict_generator(val_batches, val_batches.nb_sample)
trn_features = conv_model.predict_generator(batches, batches.nb_sample)
save_array(model_path + 'train_convlayer_features.bc', trn_features)
save_array(model_path + 'valid_convlayer_features.bc', val_features)
# In the future we can just load these precomputed features:
trn_features = load_array(model_path+'train_convlayer_features.bc')
val_features = load_array(model_path+'valid_convlayer_features.bc')
# We can also save some time by pre-computing the training and validation arrays with the image decoding and resizing already done:
trn = get_data(path+'train')
val = get_data(path+'valid')
save_array(model_path+'train_data.bc', trn)
save_array(model_path+'valid_data.bc', val)
# In the future we can just load these resized images:
trn = load_array(model_path+'train_data.bc')
val = load_array(model_path+'valid_data.bc')
# Finally, we can precompute the output of all but the last dropout and dense layers, for creating the first stage of the model:
# (the two pops drop the final dense layer and the dropout preceding it)
model.pop()
model.pop()
ll_val_feat = model.predict_generator(val_batches, val_batches.nb_sample)
ll_feat = model.predict_generator(batches, batches.nb_sample)
save_array(model_path + 'train_ll_feat.bc', ll_feat)
save_array(model_path + 'valid_ll_feat.bc', ll_val_feat)
ll_feat = load_array(model_path+ 'train_ll_feat.bc')
ll_val_feat = load_array(model_path + 'valid_ll_feat.bc')
# ...and let's also grab the test data, for when we need to submit:
test = get_data(path+'test')
save_array(model_path+'test_data.bc', test)
test = load_array(model_path+'test_data.bc')
# ## Last layer
# The functions automate creating a model that trains the last layer from scratch, and then adds those new layers on to the main model.
def get_ll_layers():
    """Layers for the standalone last-layer classifier: batchnorm over the
    4096-d precomputed features, dropout, then a 2-way softmax."""
    layers = [BatchNormalization(input_shape=(4096,))]
    layers.append(Dropout(0.5))
    layers.append(Dense(2, activation='softmax'))
    return layers
def train_last_layer(i):
    """Train a fresh last-layer classifier and graft it onto a frozen VGG16.

    Reads precomputed features/labels from notebook globals (ll_feat,
    trn_labels, ll_val_feat, val_labels) and saves two weight files tagged
    with `i` (expected to be a string, e.g. '0'..'4').

    Returns the VGG16 model with the new head attached.
    """
    # Stage 1: train the small head alone on the precomputed last-layer features.
    ll_layers = get_ll_layers()
    ll_model = Sequential(ll_layers)
    ll_model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
    # NOTE(review): assigning a float to optimizer.lr may not update the
    # compiled learning rate on this Keras version -- confirm.
    ll_model.optimizer.lr=1e-5
    ll_model.fit(ll_feat, trn_labels, validation_data=(ll_val_feat, val_labels), nb_epoch=12)
    ll_model.optimizer.lr=1e-7
    ll_model.fit(ll_feat, trn_labels, validation_data=(ll_val_feat, val_labels), nb_epoch=1)
    ll_model.save_weights(model_path+'ll_bn' + i + '.h5')
    # Stage 2: rebuild VGG16, drop its last three layers, freeze the rest,
    # and append a fresh copy of the head.
    vgg = Vgg16()
    model = vgg.model
    model.pop(); model.pop(); model.pop()
    for layer in model.layers: layer.trainable=False
    model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
    ll_layers = get_ll_layers()
    for layer in ll_layers: model.add(layer)
    # Copy the trained head weights into the last three layers of the full model.
    for l1,l2 in zip(ll_model.layers, model.layers[-3:]):
        l2.set_weights(l1.get_weights())
    model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
    model.save_weights(model_path+'bn' + i + '.h5')
    return model
# ## Dense model
def get_conv_model(model):
    """Split `model` at its final convolutional layer.

    Returns (conv_model, fc_layers, last_conv_idx): a Sequential of all
    layers up to and including the last Convolution2D, the remaining
    layers, and the index of that last conv layer.
    """
    layers = model.layers
    # Exact type match (not isinstance), as in the original splitting logic
    conv_indices = [idx for idx, layer in enumerate(layers)
                    if type(layer) is Convolution2D]
    last_conv_idx = conv_indices[-1]
    fc_layers = layers[last_conv_idx + 1:]
    conv_model = Sequential(layers[:last_conv_idx + 1])
    return conv_model, fc_layers, last_conv_idx
def get_fc_layers(p, in_shape):
    """VGG-style fully connected head with batchnorm and dropout `p`,
    ending in a 2-way softmax. `in_shape` is the conv output shape."""
    def dense_block():
        # Fresh layer objects on every call -- layers cannot be shared
        return [Dense(4096, activation='relu'), BatchNormalization(), Dropout(p)]
    head = [MaxPooling2D(input_shape=in_shape), Flatten()]
    head += dense_block()
    head += dense_block()
    head.append(Dense(2, activation='softmax'))
    return head
def train_dense_layers(i, model):
    """Fine-tune the dense head of ensemble member `i` (a string tag).

    Stage 1 trains a standalone FC model on the precomputed conv features
    (globals trn_features/val_features); stage 2 grafts that head onto the
    frozen conv stack and fine-tunes with data augmentation on the raw
    images (globals trn/val).

    Side effects: saves '<model_path>no_dropout_bn<i>.h5' and
    '<model_path>aug<i>.h5'.
    """
    conv_model, fc_layers, last_conv_idx = get_conv_model(model)
    conv_shape = conv_model.output_shape[1:]
    # Stage 1: standalone dense head trained on precomputed conv features.
    fc_model = Sequential(get_fc_layers(0.5, conv_shape))
    # NOTE(review): this zips two differently structured layer lists (the new
    # head has BatchNormalization layers the VGG head lacks), so some pairs may
    # not have matching weight lists -- confirm this copy on the Keras in use.
    for l1,l2 in zip(fc_model.layers, fc_layers):
        weights = l2.get_weights()
        l1.set_weights(weights)
    fc_model.compile(optimizer=Adam(1e-5), loss='categorical_crossentropy',
                     metrics=['accuracy'])
    fc_model.fit(trn_features, trn_labels, nb_epoch=2,
         batch_size=batch_size, validation_data=(val_features, val_labels))
    # Stage 2: augmented training on the raw images.
    # BUG FIX: removed the invalid `width_zoom_range` keyword --
    # ImageDataGenerator has no such parameter (zoom is controlled by
    # `zoom_range` alone), so the original call raised a TypeError.
    gen = image.ImageDataGenerator(rotation_range=10, width_shift_range=0.05,
       zoom_range=0.05,
       channel_shift_range=10, height_shift_range=0.05, shear_range=0.05, horizontal_flip=True)
    batches = gen.flow(trn, trn_labels, batch_size=batch_size)
    val_batches = image.ImageDataGenerator().flow(val, val_labels,
                      shuffle=False, batch_size=batch_size)
    # Freeze the conv stack and graft a fresh copy of the head onto it.
    for layer in conv_model.layers: layer.trainable = False
    for layer in get_fc_layers(0.5, conv_shape): conv_model.add(layer)
    # Copy the stage-1 head weights into the grafted head (identical
    # architectures here, so the layer lists line up one-to-one).
    for l1,l2 in zip(conv_model.layers[last_conv_idx+1:], fc_model.layers):
        l1.set_weights(l2.get_weights())
    conv_model.compile(optimizer=Adam(1e-5), loss='categorical_crossentropy',
                       metrics=['accuracy'])
    conv_model.save_weights(model_path+'no_dropout_bn' + i + '.h5')
    conv_model.fit_generator(batches, samples_per_epoch=batches.N, nb_epoch=1,
                             validation_data=val_batches, nb_val_samples=val_batches.N)
    # Unfreeze the deeper layers and continue training.
    for layer in conv_model.layers[16:]: layer.trainable = True
    conv_model.fit_generator(batches, samples_per_epoch=batches.N, nb_epoch=8,
                             validation_data=val_batches, nb_val_samples=val_batches.N)
    # NOTE(review): assigning a float to optimizer.lr may not update the
    # compiled learning rate on this Keras version -- confirm.
    conv_model.optimizer.lr = 1e-7
    conv_model.fit_generator(batches, samples_per_epoch=batches.N, nb_epoch=10,
                             validation_data=val_batches, nb_val_samples=val_batches.N)
    conv_model.save_weights(model_path + 'aug' + i + '.h5')
# + [markdown] heading_collapsed=true
# ## Build ensemble
# + hidden=true
# Train five independent ensemble members; weight files are tagged '0'..'4'
for i in range(5):
    i = str(i)
    model = train_last_layer(i)
    train_dense_layers(i, model)
# -
# ## Combine ensemble and test
# A single fine-tuned VGG shell into which each member's weights will be loaded
ens_model = vgg_ft(2)
# All layers must be trainable so every saved weight tensor can be restored
for layer in ens_model.layers: layer.trainable=True
def get_ens_pred(arr, fname):
    """Run every saved ensemble member on `arr`.

    Loads weight files '<model_path><fname>0.h5' .. '<fname>4.h5' into the
    shared `ens_model` and returns the list of per-member predictions.
    """
    member_preds = []
    for idx in range(5):
        weight_file = model_path + fname + str(idx) + '.h5'
        ens_model.load_weights(weight_file)
        member_preds.append(ens_model.predict(arr, batch_size=batch_size))
    return member_preds
val_pred2 = get_ens_pred(val, 'aug')
# Average the five members' predictions into the ensemble prediction
val_avg_preds2 = np.stack(val_pred2).mean(axis=0)
# categorical_accuracy returns a symbolic tensor, hence the .eval()
categorical_accuracy(val_labels, val_avg_preds2).eval()
| dogscats-ensemble.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3-azureml
# kernelspec:
# display_name: Python 3.8.5 32-bit
# metadata:
# interpreter:
# hash: 177429bd1865e7f7a0dbecbac90518c0d9641b1102b2e6c0df4b82dc948b5cb2
# name: python3
# ---
# # Speech
#
# Increasingly, we expect to be able to communicate with artificial intelligence (AI) systems by talking to them, often with the expectation of a spoken response.
#
# 
#
# *Speech recognition* (an AI system interpreting spoken language) and *speech synthesis* (an AI system generating a spoken response) are the key components of a speech-enabled AI solution.
#
# ## Create a Cognitive Services resource
#
# To build software that can interpret audible speech and respond verbally, you can use the **Speech** cognitive service, which provides a simple way to transcribe spoken language into text and vice-versa.
#
# If you don't already have one, use the following steps to create a **Cognitive Services** resource in your Azure subscription:
#
# > **Note**: If you already have a Cognitive Services resource, just open its **Quick start** page in the Azure portal and copy its key and endpoint to the cell below. Otherwise, follow the steps below to create one.
#
# 1. In another browser tab, open the Azure portal at https://portal.azure.com, signing in with your Microsoft account.
# 2. Click the **+Create a resource** button, search for *Cognitive Services*, and create a **Cognitive Services** resource with the following settings:
# - **Subscription**: *Your Azure subscription*.
# - **Resource group**: *Select existing resource group with name AI900-deploymentID*.
# - **Region**: *Choose any available region*:
# - **Name**: *speech-deploymentID*.
# - **Pricing tier**: S0
# - **I confirm I have read and understood the notices**: Selected.
# 3. Wait for deployment to complete. Then go to your cognitive services resource, and on the **Overview** page, click the link to manage the keys for the service. You will need the key and location to connect to your cognitive services resource from client applications.
#
# ### Get the Key and Location for your Cognitive Services resource
#
# To use your cognitive services resource, client applications need its authentication key and location:
#
# 1. In the Azure portal, on the **Keys and Endpoint** page for your cognitive service resource, copy the **Key1** for your resource and paste it in the code below, replacing **YOUR_COG_KEY**.
# 2. Copy the **Location** for your resource and paste it in the code below, replacing **YOUR_COG_LOCATION**.
# >**Note**: Stay on the **Keys and Endpoint** page and copy the **Location** from this page (example: _westus_). Please _do not_ add spaces between words for the Location field.
# 3. Run the code below by clicking the **Run cell** (▷) button to the left of the cell.
# + gather={"logged": 1599695240794}
# Replace the placeholders with the Key1 and Location from the Azure portal
cog_key = 'YOUR_COG_KEY'
cog_location = 'YOUR_COG_LOCATION'
# NOTE(review): printing the raw key exposes a secret in the notebook output --
# consider masking it.
print('Ready to use cognitive services in {} using key {}'.format(cog_location, cog_key))
# -
# ## Speech recognition
#
# Suppose you want to build a home automation system that accepts spoken instructions, such as "turn the light on" or "turn the light off". Your application needs to be able to take the audio-based input (your spoken instruction), and interpret it by transcribing it to text that it can then parse and analyze.
#
# Now you're ready to transcribe some speech. The input can be from a **microphone** or an **audio file**.
#
# ### Speech Recognition with an audio file
#
# Run the cell below to see the Speech Recognition service in action with an **audio file**.
#
# +
import os
from playsound import playsound
from azure.cognitiveservices.speech import SpeechConfig, SpeechRecognizer, AudioConfig
# Get spoken command from audio file
file_name = 'light-on.wav'
audio_file = os.path.join('data', 'speech', file_name)
# Configure speech recognizer with the key and location set earlier
speech_config = SpeechConfig(cog_key, cog_location)
audio_config = AudioConfig(filename=audio_file) # Use file instead of default (microphone)
speech_recognizer = SpeechRecognizer(speech_config, audio_config)
# Use a one-time, synchronous call to transcribe the speech
speech = speech_recognizer.recognize_once()
# Show transcribed text from audio file
print(speech.text)
# -
# ## Speech synthesis
#
# So now you've seen how the Speech service can be used to transcribe speech into text; but what about the opposite? How can you convert text into speech?
#
# Well, let's assume your home automation system has interpreted a command to turn the light on. An appropriate response might be to acknowledge the command verbally (as well as actually performing the commanded task!)
# + gather={"logged": 1599695261170}
import os
import matplotlib.pyplot as plt
from PIL import Image
from azure.cognitiveservices.speech import SpeechConfig, SpeechSynthesizer, AudioConfig
# %matplotlib inline
# Get text to be spoken
response_text = 'Turning the light on.'
# Configure speech synthesis
speech_config = SpeechConfig(cog_key, cog_location)
speech_synthesizer = SpeechSynthesizer(speech_config)
# Transcribe text into speech
result = speech_synthesizer.speak_text(response_text)
# Display an appropriate image
# NOTE: the trailing period in response_text supplies the dot of '<text>.jpg';
# if the text does not end with '.', this produces a broken filename.
file_name = response_text.lower() + "jpg"
img = Image.open(os.path.join("data", "speech", file_name))
plt.axis('off')
plt. imshow(img)
# -
# Try changing the **response_text** variable to *Turning the light off.* (including the period at the end) and run the cell again to hear the result.
#
# ## Learn more
#
# You've seen a very simple example of using the Speech cognitive service in this notebook. You can learn more about [speech-to-text](https://docs.microsoft.com/azure/cognitive-services/speech-service/index-speech-to-text) and [text-to-speech](https://docs.microsoft.com/azure/cognitive-services/speech-service/index-text-to-speech) in the Speech service documentation.
| 08 - Speech.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Convolutional Neural Networks
# This notebook will guide you through the use of the `keras` package to train convolutional neural networks for handwritten digits classification. You are going to use the `mnist` dataset from LeCun et al. 1998.
# ## Loading the packages
# +
import numpy as np
from matplotlib import pyplot as pl
from keras.datasets import mnist
from keras.models import Model
from keras.layers.core import Dense, Dropout, Flatten
from keras.optimizers import RMSprop
from keras.utils import np_utils
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers import Input
from sklearn import metrics as me
from scipy import stats
# %matplotlib inline
# -
# First, create some useful functions
# +
def build_grid_of_images(array):
    """Tile a stack of images into a single square grid image.

    Args:
        array: 3-D array of shape (n, height, width); n must be a perfect
            square so the images can be laid out on a sqrt(n) x sqrt(n) grid.

    Returns:
        2-D array of shape (sqrt(n)*height, sqrt(n)*width) with image k at
        grid position (k // sqrt(n), k % sqrt(n)).
    """
    assert len(array.shape) == 3
    dim_0 = np.sqrt(array.shape[0])
    assert dim_0.is_integer()
    # BUG FIX: cast to int -- np.sqrt returns a float, and np.reshape
    # requires integral dimensions (floats raise TypeError on modern NumPy).
    dim_0 = int(dim_0)
    temp_out = np.reshape(array, (dim_0, dim_0, array.shape[1], array.shape[2]))
    # Move the grid-column axis next to the image-row axis so the final
    # reshape interleaves the images row by row.
    temp_out = np.rollaxis(temp_out, 1, 3)
    return np.reshape(temp_out, (dim_0*array.shape[1], dim_0*array.shape[2]))
#a = np.array(np.arange(36))
#print a
#b = np.reshape(a, (4,3,3))
#print b[0,:,:]
#print b[1,:,:]
#c = build_grid_of_images(b)
#print c
def plot_conv_layer_output(temp_out, title):
    """Show a conv layer's feature maps tiled into one grid image.

    Args:
        temp_out: 3-D array (n_maps, height, width); n_maps must be a
            perfect square (see build_grid_of_images).
        title: figure title.
    """
    temp_to_plot = build_grid_of_images(temp_out)
    pl.imshow(temp_to_plot, interpolation='nearest', cmap=pl.get_cmap('Greys'))
    ax = pl.gca()
    # Place ticks one map apart so the grid lines fall on map boundaries.
    # NOTE(review): both ranges use temp_to_plot.shape[0] and the steps mix
    # shape[1]/shape[2]; correct only for square maps -- confirm.
    ax.set_xticks(np.arange(-0.5, temp_to_plot.shape[0]+0.5, temp_out.shape[1]))
    ax.set_yticks(np.arange(-0.5, temp_to_plot.shape[0]+0.5, temp_out.shape[2]))
    pl.grid()
    # Hide every tick mark and label; only the grid lines remain visible.
    pl.tick_params(axis='both', which='both', bottom='off', top='off', left='off', right='off', labelbottom='off', labelleft='off')
    pl.title(title)
def plot_dense_layer_output(temp_out, title):
    """Bar-plot the first sample's activations from a dense layer output."""
    positions = np.arange(temp_out.shape[1]) - 0.4
    pl.bar(positions, temp_out[0, :])
    pl.xlim(-0.5, temp_out.shape[1])
    pl.grid()
    pl.title(title)
# -
# Load the `mnist` dataset and normalize in the range [0, 1]
# +
(X_train, y_train), (X_test, y_test) = mnist.load_data()
n_train, height, width = X_train.shape
n_test, _, _ = X_test.shape
# Add a trailing (channels-last) axis and scale pixel values to [0, 1]
X_train = X_train.reshape(n_train, height, width, 1).astype('float32')
X_test = X_test.reshape(n_test, height, width, 1).astype('float32')
X_train /= 255.0
X_test /= 255.0
n_classes = 10
print n_train, 'train samples'
print n_test, 'test samples'
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, n_classes)
Y_test = np_utils.to_categorical(y_test, n_classes)
# -
# Create the CNN and show its architecture
# +
# Functional-API CNN: three conv+maxpool stages, then two dense layers.
# (border_mode / input= / output= are Keras 1.x keyword names)
l0 = Input(shape=(height, width, 1), name='l0')
l1 = Convolution2D(9, 5, 5, border_mode='same', activation='relu', name='l1')(l0)
l1_mp = MaxPooling2D(pool_size=(2, 2), name='l1_mp')(l1)
l2 = Convolution2D(9, 5, 5, border_mode='same', activation='relu', name='l2')(l1_mp)
l2_mp = MaxPooling2D(pool_size=(2, 2), name='l2_mp')(l2)
l3 = Convolution2D(16, 3, 3, border_mode='same', activation='relu', name='l3')(l2_mp)
l3_mp = MaxPooling2D(pool_size=(2, 2), name='l3_mp')(l3)
flat = Flatten(name='flat')(l3_mp)
l4 = Dense(25, activation='relu', name='l4')(flat)
l5 = Dense(n_classes, activation='softmax', name='l5')(l4)
model = Model(input=l0, output=l5)
model.summary()
# -
# Define some constants and train the CNN
# +
batch_size = 128
n_epoch = 10
model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy'])
# NOTE(review): the test set doubles as the validation set here.
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=n_epoch, verbose=1, validation_data=(X_test, Y_test))
# -
# Show the performance of the model
# +
# Loss curves for the training and held-out data
pl.plot(history.history['loss'], label='Training')
pl.plot(history.history['val_loss'], label='Testing')
pl.legend()
pl.grid()
score = model.evaluate(X_test, Y_test, verbose=0)
# NOTE(review): under Python 2 (used elsewhere in this notebook) print(...)
# with two arguments prints a tuple, not two values -- confirm intent.
print('Test score:', score[0])
print('Test accuracy:', score[1])
# -
pred = model.predict_on_batch(X_test)
print pred
# Confusion matrix
# (predict_on_batch is called a second time; the result above could be reused)
pred = model.predict_on_batch(X_test)
# Take the most probable class per sample
pred = np.argmax(pred, axis=-1)
me.confusion_matrix(y_test, pred)
| Other/2.FromMLP_to_CNN/cnn/CNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_anaconda3)
# language: python
# name: conda_anaconda3
# ---
# +
## Imports
## NOTE: GOING TO HARDCODE THE PATH FOR THE TIME BEING, MUST BE CHANGED AS NEEDED
import numpy as np
import pandas as pd
from os.path import join as oj
import os
import pandas as pd
import sys
import inspect
import datetime
from scipy.stats import percentileofscore
# currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# parentdir = os.path.dirname(currentdir)
# Hard-coded project root (see the note at the top); adjust per machine
parentdir = "/home/ubuntu/new_uploader"
# Make the project root and its modeling package importable
sys.path.append(parentdir)
sys.path.append(parentdir + '/modeling')
import load_data
from fit_and_predict import add_preds
from functions import merge_data
from viz import viz_interactive
import numpy as np
import pandas as pd
from os.path import join as oj
import os
import pandas as pd
import sys
import matplotlib as plt
from scipy.stats import percentileofscore
from sklearn.metrics import mean_absolute_error
import statsmodels.api as sm
# CHANGE THIS
from exponential_modeling import *
from fit_and_predict import *
# -
## Toy Data:
df_hos = pd.read_csv(parentdir + "/data/CA_hospitalizations.csv")
# Keep only the columns needed for prediction
df_hos = df_hos[["County Name", "Most Recent Date", "COVID-19 Positive Patients"]]
df_hos = df_hos.rename(columns={"County Name": "CountyName", "Most Recent Date": "Date",
                                "COVID-19 Positive Patients": "hospitalizations"})
# Restrict to a single county so the series is one row per date
df_hos = df_hos[df_hos["CountyName"] == "Los Angeles"]
df_hos = df_hos[["Date", "hospitalizations"]]
df_hos.head()
# +
# Function to Train CLEP on your Hospital Data
# Default: 7 Days Prediction
# Method configs understood by fit_and_predict_ensemble; only
# shared_exponential and linear are passed to the ensemble in predict().
exponential = {'model_type':'exponential'}
shared_exponential = {'model_type':'shared_exponential'}
linear = {'model_type':'linear'}
advanced_model = {'model_type':'advanced_shared_model'}
def predict(df, k=7):
    """Predict hospitalizations ``k`` days ahead with the CLEP ensemble.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a ``hospitalizations`` column ordered by date.
    k : int, default 7
        Number of days ahead to predict.

    Returns
    -------
    The ensemble prediction for day ``k``, floored at 0.
    """
    # BUG FIX: the original built a throwaway df_h first and immediately
    # overwrote it; build the single-row frame expected by
    # fit_and_predict_ensemble (one "hospital" whose cell holds the series) once.
    hospitz = df["hospitalizations"].values
    df_h = pd.DataFrame({"Hospital": ["Hospital_Name"], "hospitalizations": [hospitz]})
    ensemble_prediction = fit_and_predict_ensemble(
        df_h,
        target_day=np.array([k]),
        outcome="hospitalizations",
        methods=[shared_exponential, linear],
        mode="predict_future",
        verbose=False,
    )["predicted_hospitalizations_ensemble_" + str(k)].values
    # Hospitalizations are counts; clip any negative model output to zero.
    ensemble_prediction = [max(x[0], 0) for x in ensemble_prediction]
    return ensemble_prediction[0]
# -
predict(df_hos, k=10)
| Model-Test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lesson 1: Hello World
# In this lesson, we will start with the very basics of Python using this Jupyter notebook. We can use Python in a variety of ways (at the terminal, using an [IDE](https://en.wikipedia.org/wiki/Integrated_development_environment), etc.). However, the notebook is a great way to play with Python with the support of lots of helpful notes. Let's start off getting to know Python by writing the classic first programmer's task, 'Hello World'.
print("Hello world")
# To get the output of the above program, select the cell and then click the play button to run the command using Python
# ## Exercise 1: Playing around with printing
# As you may notice in the code block above a Python keyword 'print' is highlighted in a color (green). We call 'print' a key word because this word has special meaning to Python. Python is considered one of the best languages for new coders to learn because compared to other programming languages it is much more intuitive. In the [C language](https://en.wikipedia.org/wiki/C_%28programming_language%29) for example, printing 'Hello World' would look something like this:
#
# \#include< stdio.h >
#
# main()
# {
# printf("Hello World");
#
#
# }
#
# Although learning Python (and learning it well) will take effort, you can see why it's considered simpler for a new learner. However Python *does* have rules you will have to follow. In the following exercises, let's see what some of these rules are.
# ### In the cell below, write the same program we used above to get the 'Hello World' statement
# **Do not** copy and paste! Once you have written the line of code, don't forget to hit 'run'
print("hello world")
# Did you get 'Hello World' - is it capitalized correctly? Is it exactly like the output of the example program?
# ### Below are a few questions and tasks using the 'Hello World' example. Try them out!
# * Does capitalization matter in Python?
# * Do single quotes ' or double quotes " matter in Python?
# * Does spacing matter in Python
#
# Play around with answering these questions (hopefully until you break something)
#
# **Tip**: You can reuse any of the empty code blocks. Just rewrite any code and hit play again to rerun the cell.
# ### Capitalization
# ### Quotes
# ### Spacing
# ## Exercise 2: Doing math with Python
#
# Another useful feature of Python is doing math:
2 + 2
# You can use any of several math operators:
#
# |Syntax|Math|
# |------|----|
# |( )|Parenthesis - think PEMDAS|
# |a\*\*b|exponentiation|
# |a\*b|multiplication|
# |a/b|division|
# |a+b|addition|
# |a-b|subtraction|
#
# There are a few other special math functions - see [Python basic math](https://en.wikibooks.org/wiki/Python_Programming/Basic_Math)
#
# Try some of the math functions below:
# Do some subtraction
# Use some parenthesis
# Try some division
# ## Lesson 2: Variables
# What makes Python a language is how it uses syntax to hold or express meaning. As mentioned before, Python lets you display messages to the user simply by using the word ``print`` followed by a message in ``"`` marks. Using the ``=`` sign - which is called the ``assignment operator`` - we can ask Python to store values as variables:
a = 3
b = 4
c = 5
# A 3-4-5 right triangle: by the Pythagorean theorem the two printed values match.
a_squared_plus_b_squared = a**2 + b**2
c_squared = c**2
print(a_squared_plus_b_squared)
print(c_squared)
# **Challenge:** Solve this equation showing your work using Python:
# How much 5M NaCl do you need to make 1L of 2.5M NaCl?<br>
# *hint* Concentration<sub>1</sub> * Volume<sub>1</sub> = Concentration<sub>2</sub> * Volume<sub>2</sub><br>
# *hint 2* solve for volume 1
# Besides numbers, variables can also hold text. When we put text inside quotation marks, we refer to it as a ``string``, which can be a sequence of letters, numbers, symbols, etc.
my_string = 'Hello World'
print(my_string)
# Create the variables and strings in the next codeblock to complete a mini autobiography:
# +
#Populate your variables before the print statements
print('My name is: ',my_name)
print('My favorite food is: ',my_fav_food)
print('The hobby I spend most of my time is: ', my_hobby)
print('If I wasn\'t here, I\'d probably be: ',my_somewhere_else)
print("If I wasn't using single quotes, I'd be using double quotes")
#tip, in the last print statement notice that we want to use the ' mark. We use the
# 'escape' character \ to tell Python to ignore that quote mark.
# -
# With math and strings, you can start to do lots of things in Python. More on this in the next lesson.
my_string = "'ello world "
print(my_string * 10)
print(my_string * 100)
# Want to try something else, keep going!
| notebooks/biocoding_2016_intro_python_01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Configuration
# _Initial steps to get the notebook ready to play nice with our repository. Do not delete this section._
# Code formatting with [black](https://pypi.org/project/nb-black/).
# %load_ext lab_black
import os
import glob
import time
import requests
import pandas as pd
from datetime import datetime
from dateutil import tz
from bs4 import BeautifulSoup
data_dir = os.path.join(os.path.abspath(""), "data")
# ## County Names
countyNames = [
"california",
"losangeles",
"orange",
"ventura",
"sanbernardino",
"riverside",
"sandiego",
"imperial",
"inyo",
"santabarbara",
"tulare",
"kings",
"kern",
"fresno",
"sanluisobispo",
"monterey",
"mono",
"madera",
"merced",
"mariposa",
"sanmateo",
"santaclara",
"sanfrancisco",
"sacramento",
"alameda",
"napa",
"contracosta",
"solano",
"marin",
"sonoma",
"santacruz",
"sanbenito",
"sanjoaquin",
"calaveras",
"tuolumne",
"stanislaus",
"mendocino",
"lake",
"humboldt",
"trinity",
"delnorte",
"siskiyou",
"amador",
"placer",
"yolo",
"eldorado",
"alpine",
"sutter",
"yuba",
"nevada",
"sierra",
"colusa",
"glenn",
"butte",
"plumas",
"shasta",
"modoc",
"lassen",
"tehama",
]
# ## Function to loop through each county to get the data
# +
# Accumulates every county's records; getData appends into this list.
data = []

def getData(i, verbose=False):
    """Fetch the cumulative equity JSON for county ``i`` and append its
    records to the module-level ``data`` list.

    Parameters
    ----------
    i : str
        Lower-case county slug (e.g. "losangeles") or "california".
    verbose : bool, default False
        When True, print the URL being fetched.
    """
    url = f"https://data.covid19.ca.gov/data/reviewed/equitydash/cumulative-{i}.json"
    # https://data.covid19.ca.gov/data/reviewed/equitydash/healthequity-orange.json
    if verbose:
        print(url)
    # Browser-like User-Agent: some endpoints reject default client UAs.
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"
    }
    request = requests.get(url, headers=headers)
    # Fail loudly on HTTP errors instead of crashing later on a bad JSON body.
    # (Also removed the dead module-level `tempData = {}`, which was always
    # shadowed by the local response value and never read.)
    request.raise_for_status()
    data.extend(request.json())
# -
for name in countyNames:
getData(name)
time.sleep(0.3)
# ## Get into DataFrame
# Combine all county records into one frame, filling missing fields with 0.
df = pd.DataFrame.from_dict(data).fillna(0)
# Sanity check: the scrape historically returns 1,593 rows.
try:
    assert df.shape[0] == 1593
except AssertionError:
    # BUG FIX: was `console.log(...)` (JavaScript), which raised NameError
    # inside a bare `except:`; print the warning instead and narrow the catch.
    print("Irregular number of rows")
# ## Get Last Modified Date
# +
url = f"https://data.covid19.ca.gov/data/reviewed/equitydash/cumulative-california.json"
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"
}
request = requests.get(url, headers=headers)
date_raw = request.headers.get("Last-Modified")
objDate = datetime.strptime(date_raw, "%a, %d %b %Y %H:%M:%S GMT")
# -
# ## Convert from UTC to PST
from_zone = tz.gettz("GMT")
to_zone = tz.gettz("US/Pacific")
# objDate was parsed as a naive GMT timestamp; attach the zone, then convert
# to Pacific time.
date_raw = objDate.replace(tzinfo=from_zone)
date = date_raw.astimezone(to_zone)
# ## Format Date
# ISO date string; used in the output CSV filename below.
date = date.strftime("%Y-%m-%d")
# ## Write to CSV
df.to_csv(os.path.join(data_dir, f"cdph-equity-{date}.csv"), index=False)
# Rebuild the combined timeseries from every dated per-day CSV in data_dir.
df_list = []
# sorted() makes the output row order deterministic -- glob.glob returns
# files in filesystem-dependent order.
for p in sorted(
    i for i in glob.glob(data_dir + "/*.csv") if not str(i).endswith("timeseries.csv")
):
    # Filenames end in "...-YYYY-MM-DD.csv"; slice out the 10-char date stamp.
    s = p[-14:-4]
    d = pd.to_datetime(s)
    df = pd.read_csv(p)
    df["date"] = d
    df_list.append(df)
big_df = pd.concat(df_list)
big_df.to_csv(os.path.join(data_dir, "timeseries.csv"), index=False)
| demographics-race-by-county/scrape.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-west-2:236514542706:image/datascience-1.0
# ---
# # Fairness and Explainability with SageMaker Clarify - Bring Your Own Container
# 1. [Overview](#Overview)
# 1. [Prerequisites and Data](#Prerequisites-and-Data)
# 1. [Initialize SageMaker](#Initialize-SageMaker)
# 1. [Download data](#Download-data)
# 1. [Loading the data: Adult Dataset](#Loading-the-data:-Adult-Dataset)
# 1. [Data inspection](#Data-inspection)
# 1. [Encode and Upload the Dataset](#Encode-and-Upload-the-Dataset)
# 1. [Samples for Inference](#Samples-for-Inference)
# 1. [Build Container](#Build-Container)
# 1. [Container Source Code](#Container-Source-Code)
# 1. [The Dockerfile](#The-Dockerfile)
# 1. [The train Script](#The-train-Script)
# 1. [The serve Script](#The-serve-Script)
# 1. [Local Debugging](#Local-Debugging)
# 1. [Build and Push](#Build-and-Push)
# 1. [Train Model](#Train-Model)
# 1. [Train](#Train)
# 1. [Deploy](#Deploy)
# 1. [Verification](#Verification)
# 1. [Amazon SageMaker Clarify](Amazon-SageMaker-Clarify)
# 1. [Detecting Bias](#Detecting-Bias)
# 1. [Writing DataConfig](#Writing-DataConfig)
# 1. [Writing ModelConfig](#Writing-ModelConfig)
# 1. [Writing BiasConfig](#Writing-BiasConfig)
# 1. [Writing ModelPredictedLabelConfig](#Writing-ModelPredictedLabelConfig)
# 1. [Pre-training Bias](#Pre-training-Bias)
# 1. [Post-training Bias](#Post-training-Bias)
# 1. [Viewing the Bias Report](#Viewing-the-Bias-Report)
# 1. [Explaining Predictions](#Explaining-Predictions)
# 1. [Viewing the Explainability Report](#Viewing-the-Explainability-Report)
# 1. [Clean Up](#Clean-Up)
# ## Overview
#
# Amazon SageMaker Clarify helps improve your machine learning models by detecting potential bias and helping explain how these models make predictions. The fairness and explainability functionality provided by SageMaker Clarify takes a step towards enabling AWS customers to build trustworthy and understandable machine learning models. The product comes with the tools to help you with the following tasks.
#
# * Measure biases that can occur during each stage of the ML lifecycle (data collection, model training and tuning, and monitoring of ML models deployed for inference).
# * Generate model governance reports targeting risk and compliance teams and external regulators.
# * Provide explanations of the data, models, and monitoring used to assess predictions.
#
# In order to compute post-training bias metrics and explainability, SageMaker Clarify needs to get inferences from the SageMaker model provided by the `model_name` parameter of Clarify [analysis configuration](https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-configure-processing-jobs.html#clarify-processing-job-configure-analysis) (or the same parameter of the [ModelConfig](https://sagemaker.readthedocs.io/en/stable/api/training/processing.html?highlight=Processor#sagemaker.clarify.ModelConfig) if you use [SageMakerClarifyProcessor](https://sagemaker.readthedocs.io/en/stable/api/training/processing.html#sagemaker.clarify.SageMakerClarifyProcessor) API). To accomplish this, the Clarify job creates an ephemeral endpoint with the model, known as a shadow endpoint. The model and the Clarify job should follow certain contracts so that they can work together smoothly.
#
# This sample notebook introduces key terms and concepts needed to understand SageMaker Clarify, and it walks you through an end-to-end data science workflow demonstrating how to **build your own model and container that can work seamlessly with your Clarify jobs**, use the model and SageMaker Clarify to measure bias, explain the importance of the various input features on the model's decision and then access the reports through SageMaker Studio if you have an instance set up.
# ## Prerequisites and Data
# ### Initialize SageMaker
# +
import numpy as np
import pandas as pd
import json
import os
import sagemaker
import urllib
session = sagemaker.Session()
bucket = session.default_bucket()
prefix = "sagemaker/DEMO-sagemaker-clarify-byoc"
role = sagemaker.get_execution_role()
account_id = role.split(":")[4]
region = session.boto_region_name
if region.startswith("cn-"):
uri_suffix = "amazonaws.com.cn"
arn_partition = "aws-cn"
else:
uri_suffix = "amazonaws.com"
arn_partition = "aws"
# -
# ### Download data
# Data Source: [https://archive.ics.uci.edu/ml/machine-learning-databases/adult/](https://archive.ics.uci.edu/ml/machine-learning-databases/adult/)
#
# Let's __download__ the data and save it in the local folder with the name adult.data and adult.test from UCI repository$^{[2]}$.
#
# $^{[2]}$<NAME>, and <NAME>. "[UCI Machine Learning Repository](http://archive.ics.uci.edu/ml)". Irvine, CA: University of California, School of Information and Computer Science (2017).
# +
adult_columns = [
"Age",
"Workclass",
"fnlwgt",
"Education",
"Education-Num",
"Marital Status",
"Occupation",
"Relationship",
"Ethnic group",
"Sex",
"Capital Gain",
"Capital Loss",
"Hours per week",
"Country",
"Target",
]
if not os.path.isfile("adult.data"):
urllib.request.urlretrieve(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data", "adult.data"
)
print("adult.data saved!")
else:
print("adult.data already on disk.")
if not os.path.isfile("adult.test"):
urllib.request.urlretrieve(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test", "adult.test"
)
print("adult.test saved!")
else:
print("adult.test already on disk.")
# -
# ### Loading the data: Adult Dataset
# From the UCI repository of machine learning datasets, this database contains 14 features concerning demographic characteristics of 45,222 rows (32,561 for training and 12,661 for testing). The task is to predict whether a person has a yearly income that is more or less than $50,000.
#
# Here are the features and their possible values:
# 1. **Age**: continuous.
# 1. **Workclass**: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.
# 1. **Fnlwgt**: continuous (the number of people the census takers believe that observation represents).
# 1. **Education**: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
# 1. **Education-num**: continuous.
# 1. **Marital-status**: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse.
# 1. **Occupation**: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces.
# 1. **Relationship**: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
# 1. **Ethnic group**: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.
# 1. **Sex**: Female, Male.
# * **Note**: this data is extracted from the 1994 Census and enforces a binary option on Sex
# 1. **Capital-gain**: continuous.
# 1. **Capital-loss**: continuous.
# 1. **Hours-per-week**: continuous.
# 1. **Native-country**: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
#
# Next, we specify our binary prediction task:
# 15. **Target**: <=50,000, >$50,000.
# +
training_data = pd.read_csv(
"adult.data", names=adult_columns, sep=r"\s*,\s*", engine="python", na_values="?"
).dropna()
testing_data = pd.read_csv(
"adult.test", names=adult_columns, sep=r"\s*,\s*", engine="python", na_values="?", skiprows=1
).dropna()
training_data.head()
# -
# ### Data inspection
# Plotting histograms for the distribution of the different features is a good way to visualize the data. Let's plot a few of the features that can be considered _sensitive_.
# Let's take a look specifically at the Sex feature of a census respondent. In the first plot we see that there are fewer Female respondents as a whole but especially in the positive outcomes, where they form ~$\frac{1}{7}$th of respondents.
training_data["Sex"].value_counts().sort_values().plot(kind="bar", title="Counts of Sex", rot=0)
training_data["Sex"].where(training_data["Target"] == ">50K").value_counts().sort_values().plot(
kind="bar", title="Counts of Sex earning >$50K", rot=0
)
# ### Encode and Upload the Dataset
# Here we encode the training and test data. Encoding input data is not necessary for SageMaker Clarify, but is necessary for the model.
# +
from sklearn import preprocessing
def number_encode_features(df):
    """Label-encode every object (string) column of ``df``.

    Parameters
    ----------
    df : pandas.DataFrame

    Returns
    -------
    (pandas.DataFrame, dict)
        A copy of ``df`` with object columns integer-encoded, and a dict
        mapping column name -> fitted sklearn LabelEncoder so the encoding
        can be inverted later.
    """
    result = df.copy()
    encoders = {}
    for column in result.columns:
        # BUG FIX: `np.object` was removed in NumPy 1.24; the builtin
        # `object` is the equivalent dtype to compare against.
        if result.dtypes[column] == object:
            encoders[column] = preprocessing.LabelEncoder()
            # Missing values become the literal category "None" before encoding.
            result[column] = encoders[column].fit_transform(result[column].fillna("None"))
    return result, encoders
training_data = pd.concat([training_data["Target"], training_data.drop(["Target"], axis=1)], axis=1)
training_data, _ = number_encode_features(training_data)
training_data.to_csv("train_data.csv", index=False, header=False)
testing_data, _ = number_encode_features(testing_data)
test_features = testing_data.drop(["Target"], axis=1)
test_target = testing_data["Target"]
test_features.to_csv("test_features.csv", index=False, header=False)
# -
# A quick note about our encoding: the "Female" Sex value has been encoded as 0 and "Male" as 1.
training_data.head()
# Lastly, let's upload the data to S3
# +
from sagemaker.s3 import S3Uploader
from sagemaker.inputs import TrainingInput
train_uri = S3Uploader.upload("train_data.csv", "s3://{}/{}".format(bucket, prefix))
train_input = TrainingInput(train_uri, content_type="csv")
test_uri = S3Uploader.upload("test_features.csv", "s3://{}/{}".format(bucket, prefix))
# -
# ### Samples for Inference
#
# Pick up some samples from the test dataset, later they will be used to test the real-time inference.
# +
sample = test_features.loc[0, :].values.tolist()
samples = test_features.loc[0:5, :].values.tolist()
def convert_to_csv_payload(samples):
    """Serialize a list of feature rows into a CSV request body, one row per line."""
    rows = []
    for row in samples:
        rows.append(",".join(str(value) for value in row))
    return "\n".join(rows)
def convert_to_jsonlines_payload(samples):
    """Serialize feature rows as JSON Lines: one {"features": [...]} object per line."""
    lines = (
        json.dumps({"features": row}, separators=(",", ":")) for row in samples
    )
    return "\n".join(lines)
command_parameters = [
["text/csv", convert_to_csv_payload([sample])],
["text/csv", convert_to_csv_payload(samples)], # for batch request
["application/jsonlines", convert_to_jsonlines_payload([sample])],
["application/jsonlines", convert_to_jsonlines_payload(samples)], # for batch request
]
# -
# ## Build Container
#
# This section introduces how to build your custom container. For simplicity, a single container is built to serve two purposes: it can be used by SageMaker Training job for training your custom model, as well as being deployed by SageMaker Hosting service for real-time inference.
# ### Container Source Code
#
# There are three source files in the container subfolder.
# #### The Dockerfile
#
# The Dockerfile describes the image that you want to build. You can think of it as describing the complete operating system installation of the system that you want to run. A Docker container running is quite a bit lighter than a full operating system, however, because it takes advantage of Linux on the host machine for the basic operations.
#
# The following Dockerfile starts from a [miniconda3 image](https://hub.docker.com/r/continuumio/miniconda3) and runs the normal tools to install `scikit-learn` and `pandas` for data science operations, and install `flask` for building a simple web application to serve real-time inference. Then it adds the code that implements the training algorithm and the real-time inference logic, and informs Docker that the container listens on the specified network ports at runtime.
# !echo
# !cat container/Dockerfile | sed 's/^/ /'
# #### The train Script
#
# The `train` script implements the training algorithm. It is packaged to docker image which will be pushed to ECR (Elastic Container Registry) under your account. When triggering a SageMaker training job, your requested SageMaker instance will pull that image from your ECR and execute it with the data you specified in an S3 URI.
#
# It is important to know how SageMaker runs your image. For training job, SageMaker runs your image like
#
# docker run <image> train
#
# This is why your image needs to have the executable `train` to start the model training process. See [Use Your Own Training Algorithms
# ](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html) for more explanations on how Amazon SageMaker interacts with a Docker container that runs your custom training algorithm.
#
# The following script does the follow steps in sequence,
#
# * Parses command line parameters. In training job environment, SageMaker downloads data files and save them to local directory `/opt/ml/input`. For example, if the training dataset channel specified to the fit() method on client side is `train`, then the training dataset will be saved to folder `/opt/ml/input/train`. The model output directory is always `/opt/ml/model`.
# * Load training dataset. Here assume that the data files are in CSV format, and the first column is the label column.
# * Train a [sklearn.linear_model.LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) estimator.
# * Dump the estimator's model to a model file.
#
# The script is built from scratch for demonstration purpose, so it has to take care of many details. For example, if you want to get [hyperparameters specified on client side](https://sagemaker.readthedocs.io/en/stable/api/training/estimators.html#sagemaker.estimator.Estimator.set_hyperparameters), then the script should be updated to read them from `/opt/ml/input/config/hyperparameters.json`. One option to get rid of the details and focus on algorithms is integrating [SageMaker Training Toolkit](https://github.com/aws/sagemaker-training-toolkit) to your image, the toolkit gives you tools to create SageMaker-compatible Docker containers, and has additional tools for letting you create Frameworks (SageMaker-compatible Docker containers that can run arbitrary Python or shell scripts).
# !echo
# !cat container/train | sed 's/^/ /'
# #### The serve Script
#
# The `serve` script implements the real-time inference logic. When SageMaker deploys your image to a real-time inference instance, it runs your image as,
#
# docker run <image> serve
#
# The script is supposed to set up a web server that responds to `/invocations` and `/ping` on port 8080. See [Use Your Own Inference Code with Hosting Services](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-inference-code.html) for more explanations on how Amazon SageMaker interacts with a Docker container that runs your own inference code for hosting services.
#
# The following script uses [flask](https://github.com/pallets/flask) to implement a simple web server,
#
# * At container startup, the script initializes an estimator using the model file provided by the client side deploy() method. The model directory and model file name are the same as in the `train` script.
# * Once started, the server is ready to serve inference requests. The logic resides in the `predict` method,
# * Input validation. The example container supports the same MIME types as Clarify job does, i.e., `text/csv` and `application/jsonlines`.
# * Parse payload. Clarify job may send **batch requests** to the container for better efficiency, i.e., the payload can have multiple lines and each is a sample. So, the method decodes request payload and then split lines, then loads the lines according to the content type. For JSONLines content, the method uses a key "features" to extract the list of features from a JSON line. The key shall be the same as the one defined in your Clarify job analysis configuration `predictor.content_template`. It is a **contract** between the Clarify job and the container, here you can change it to something else, like "attributes", but remember to update the `predictor.content_template` configuration accordingly.
# * Do prediction. The method gets the probability scores instead of binary labels, because scores are better for feature explainability.
# * Format output. For a **batch request**, Clarify job expects the same number of result lines as the number of samples in the request. So, the method encodes each prediction and then join them by line-break. For JSONLines accept type, the method uses two keys "predicted_label" and "score" to indicate the prediction. The keys shall be the same as your Clarify job analysis configuration `predictor.label` and `predictor.probability`, and they are used by the Clarify job to extract predictions from container response payload. The keys are **contracts** between the Clarify job and the container, here you can change them to something else, but remember to update the analysis configuration accordingly.
#
# Similarly, the script is built from scratch for demonstration purpose. In a real project, you can utilize [SageMaker Inference Toolkit](https://github.com/aws/sagemaker-inference-toolkit) which implements a model serving stack built on [Multi Model Server](https://github.com/awslabs/multi-model-server), and it can serve your own models or those you trained on SageMaker using Machine Learning frameworks with native SageMaker support.
# !echo
# !cat container/serve | sed 's/^/ /'
# ### Local Debugging
#
# This section has some tips for debugging the container code locally. Considering that image build, push and deployment take time to complete, it is important to first test the container code thoroughly locally to save time. (Although you can safely skip it in this exercise because the container code is already functional.)
#
# As an example, you can download the container folder and dataset files to your local machine, setup Python development environment and install necessary dependencies (found in the Dockerfile), then import the code to your favorite IDE for editing/debugging.
# The `train` script can be executed as,
#
# ```
# python train --train_dir <dataset folder> --model_dir <model folder>
# ```
#
# Upon successful execution, the script should generate a model file `model.joblib` to the model folder.
# And then the `serve` script can be executed as,
#
# ```
# python serve --model_dir <model folder>
# ```
#
# Upon successful execution, the script should be listening on local host port `8080` for inference requests. The following cell generates a few CURL commands to send inference requests (both CSV and JSONLines) to the port. You can copy&paste them to your local terminal for execution, to hit the port and trigger the inference code. For a single sample request, the command should output only one result, and for a batch request, the command should output the same number of results (lines) as the number of samples in the request.
print("\n")
for mime_type, payload in command_parameters:
command = f" curl -X POST -H 'Content-Type: {mime_type}' -H 'Accept: {mime_type}' -d ${repr(payload)} http://0.0.0.0:8080/invocations"
print(command)
print("\n")
# If you have Docker installed locally, you can build image like this (the -t option specifies image repository and tag),
#
# ```
# docker build container -t bring-your-own-container:latest
# ```
#
# Then run the image for training (the -v option maps a folder of your local machine to the docker container),
#
# ```
# docker run -v /Local/Machine/Folder:/BYOC bring-your-own-container:latest train --train_dir /BYOC/dataset --model_dir /BYOC/model
# ```
#
# And then run it for inferring (the -p option maps a local machine port to the docker container),
#
# ```
# docker run -v /Local/Machine/Folder:/BYOC -p 8080:8080 bring-your-own-container:latest serve --model_dir /BYOC/model
# ```
#
# The docker image can be pushed to ECR manually, see [Building your own algorithm container](https://github.com/aws/amazon-sagemaker-examples/blob/master/advanced_functionality/scikit_bring_your_own/scikit_bring_your_own.ipynb) for more details.
# ### Build and Push
#
# To avoid manual operations in your local development environment. This notebook will use [SageMaker Docker Build CLI](https://github.com/aws-samples/sagemaker-studio-image-build-cli) to automatically build and push the container to ECR for you. The tool uses ECR and AWS CodeBuild, so it requires that the role to execute the tool has the necessary policies and permissions attached. For simplicity, you can update the SageMaker Execution Role attached to this notebook with the required permissions.
role
# **Ensure that the role has the following permissions before you continue!**
# * Add or merge below policy to the Trust relationships of the role
#
# ```
# {
# "Version": "2012-10-17",
# "Statement": [
# {
# "Effect": "Allow",
# "Principal": {
# "Service": [
# "codebuild.amazonaws.com"
# ]
# },
# "Action": "sts:AssumeRole"
# }
# ]
# }
# ```
# * Add an inline policy to the role (execute the cell below to view the policy)
# + jupyter={"source_hidden": true}
from string import Template
template = Template(
"""{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"codebuild:DeleteProject",
"codebuild:CreateProject",
"codebuild:BatchGetBuilds",
"codebuild:StartBuild"
],
"Resource": "arn:$partition:codebuild:*:*:project/sagemaker-studio*"
},
{
"Effect": "Allow",
"Action": "logs:CreateLogStream",
"Resource": "arn:$partition:logs:*:*:log-group:/aws/codebuild/sagemaker-studio*"
},
{
"Effect": "Allow",
"Action": [
"logs:GetLogEvents",
"logs:PutLogEvents"
],
"Resource": "arn:$partition:logs:*:*:log-group:/aws/codebuild/sagemaker-studio*:log-stream:*"
},
{
"Effect": "Allow",
"Action": "logs:CreateLogGroup",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"ecr:CreateRepository",
"ecr:BatchGetImage",
"ecr:CompleteLayerUpload",
"ecr:DescribeImages",
"ecr:DescribeRepositories",
"ecr:UploadLayerPart",
"ecr:ListImages",
"ecr:InitiateLayerUpload",
"ecr:BatchCheckLayerAvailability",
"ecr:PutImage"
],
"Resource": "arn:$partition:ecr:*:*:repository/sagemaker-studio*"
},
{
"Effect": "Allow",
"Action": "ecr:GetAuthorizationToken",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"s3:DeleteObject",
"s3:PutObject"
],
"Resource": "arn:$partition:s3:::sagemaker-*/*"
},
{
"Effect": "Allow",
"Action": [
"s3:CreateBucket"
],
"Resource": "arn:$partition:s3:::sagemaker*"
},
{
"Effect": "Allow",
"Action": [
"iam:GetRole",
"iam:ListRoles"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "iam:PassRole",
"Resource": "arn:$partition:iam::*:role/*",
"Condition": {
"StringLikeIfExists": {
"iam:PassedToService": "codebuild.amazonaws.com"
}
}
}
]
}"""
)
permissions_policy = template.substitute(partition=arn_partition)
print(permissions_policy)
# -
# Once the permissions are attached to the role, install the tool by,
# !pip install sagemaker-studio-image-build --quiet
# Now define the ECR repository and tag, note that **the repository name must have the prefix sagemaker-studio** which is covered by above permissions.
byoc_repository = "sagemaker-studio-byoc"
byoc_tag = "latest"
# Then the build and push can be done by a single command,
# !sm-docker build container --repository $byoc_repository:$byoc_tag --no-logs
# The command should have pushed the image to below URI,
byoc_image_uri = "{}.dkr.ecr.{}.{}/{}:{}".format(
account_id, region, uri_suffix, byoc_repository, byoc_tag
)
print(f"Image URI: {byoc_image_uri}")
# ## Train Model
# ### Train
#
# Now you have a docker image that includes the logic of your model training, and the training data are available to SageMaker on S3. It is high time to train the model.
estimator = sagemaker.estimator.Estimator(
image_uri=byoc_image_uri,
role=role,
instance_count=1,
instance_type="ml.m5.xlarge",
sagemaker_session=session,
)
estimator.fit({"train": train_input}, logs=False)
# The trained model should have been uploaded to S3 as,
print(f"Model file: {estimator.model_data}")
# ### Deploy
#
# The model file should be deployed as a SageMaker Model which can be used in Clarify post-training bias analysis and feature explanation. The following code creates the model, and then deploys it to an inference host/endpoint for verification.
endpoint_name = "DEMO-clarify-byoc-endpoint"
model_name = "DEMO-clarify-byoc-model"
predictor = estimator.deploy(
initial_instance_count=1,
instance_type="ml.m5.xlarge",
endpoint_name=endpoint_name,
model_name=model_name,
)
# ### Verification
#
# A verification is necessary to make sure that the custom model and container follow the contracts with your Clarify jobs. The [AWS CLI](https://aws.amazon.com/cli/) tool is recommended for the test, it is preinstalled in SageMaker Studio and can be used to invoke the endpoint directly with raw payload, avoid intermediate processing steps in wrapper APIs like the [SageMaker Python SDK Predictor class](https://sagemaker.readthedocs.io/en/stable/api/inference/predictors.html).
#
# The following code generates a few AWS CLI commands to send inference requests to the endpoint, and also executes them in the notebook to get the results. You can copy&paste the commands to a Studio Terminal (File > New > Terminal), or to your local terminal, for execution and double-check the results. You can see, for a single sample request, the command outputs only one result, and for a batch request, the command outputs the same number of results (lines) as the number of samples in the request.
#
# Some tips:
# * If you use AWS CLI v2, then an additional parameter `--cli-binary-format raw-in-base64-out` should be added to the command. See [cli_binary_format](https://docs.aws.amazon.com/credref/latest/refdocs/setting-global-cli_binary_format.html#setting-cli_binary_format-alternatives) for the reason.
# * To send batch requests, add `$` before the payload (`--body`) string to unescape the line-break character ('\n').
# +
import subprocess
import re

# Detect the installed AWS CLI major version: v2 requires an extra
# --cli-binary-format flag so the JSON payload is sent as raw text
# instead of being base64-decoded (v1 does not understand the flag).
aws_cli_version = subprocess.run(["aws", "--version"], capture_output=True, text=True).stdout
# Raw string so "\d" is a regex digit class, not a (deprecated) string escape.
aws_cli_major_version = re.match(r"aws-cli/(\d+).+", aws_cli_version).group(1)
if aws_cli_major_version == "1":
    cli_binary_format = ""
else:
    # https://docs.aws.amazon.com/credref/latest/refdocs/setting-global-cli_binary_format.html
    cli_binary_format = "--cli-binary-format raw-in-base64-out"
# +
from string import Template

# `command_parameters` yields (MIME type, request payload) pairs for the
# verification calls. NOTE(review): it is defined in an earlier notebook
# cell outside this chunk — confirm its shape there.
for mime_type, payload in command_parameters:
    template = Template(
        f"aws sagemaker-runtime invoke-endpoint --endpoint-name {endpoint_name} --content-type {mime_type} --accept {mime_type} --body $payload {cli_binary_format} /dev/stderr 1>/dev/null"
    )
    # Printed form: repr-quoted payload prefixed with '$' so a shell
    # unescapes embedded '\n' (needed for batch/JSONLines requests).
    command = template.substitute(payload=f"${repr(payload)}")
    print(command)
    # Executed form: plain single-quoted payload for subprocess's shell.
    command = template.substitute(payload=f"'{payload}'")
    output = subprocess.run(command, shell=True, capture_output=True, text=True).stderr
    print(output)
    print("\n")
# -
# Once the verification is done, you can delete endpoint, but keep the model for Clarify jobs.
# Tear down the verification endpoint; the model itself is kept for Clarify.
predictor.delete_endpoint()
# ## Amazon SageMaker Clarify
#
# Now that you have your own model and container set up. Let's say hello to SageMaker Clarify!
# +
from sagemaker import clarify

# Processor that runs Clarify analysis jobs on a dedicated processing cluster.
clarify_processor = clarify.SageMakerClarifyProcessor(
    role=role, instance_count=1, instance_type="ml.m5.xlarge", sagemaker_session=session
)
# -
# There are three scenarios where Clarify handles data types, and they all support both CSV (`text/csv`) and JSONLines (`application/jsonlines`).
#
# * dataset type: the MIME type of the dataset and SHAP baseline.
# * content type: the MIME type of the shadow endpoint request payload
# * accept type: the MIME type of the shadow endpoint response payload
#
# The Clarify jobs in this notebook always use CSV for the dataset type, but you can choose the format for the other two. The following code chooses JSONLines for both, but it is fine to change one or both of them to CSV, because the custom container supports CSV and JSONLines as well.
# +
# Payload formats for the shadow endpoint; either may be switched to CSV.
content_type = "application/jsonlines"  # could be 'text/csv'
accept_type = "application/jsonlines"  # could be 'text/csv'

# JSONLines requests need a template telling Clarify where to splice the
# feature list into the payload; CSV requests need none.
content_template = None if content_type == "text/csv" else '{"features":$features}'

# Scores above this threshold are read as label 1 for bias analysis.
probability_threshold = 0.4

# JSONLines responses carry the score under the "score" key; CSV responses
# are the bare score, so no JSONPath is needed.
probability = None if accept_type == "text/csv" else "score"
# -
# ### Detecting Bias
#
# SageMaker Clarify helps you detect possible pre- and post-training biases using a variety of metrics.
#
# #### Writing DataConfig
#
# A [DataConfig](https://sagemaker.readthedocs.io/en/stable/api/training/processing.html#sagemaker.clarify.DataConfig) object communicates some basic information about data I/O to SageMaker Clarify. We specify where to find the input dataset, where to store the output, the target column (`label`), the header names, and the dataset type.
# S3 prefix where the bias analysis report will be written.
bias_report_output_path = "s3://{}/{}/clarify-bias".format(bucket, prefix)
bias_data_config = clarify.DataConfig(
    s3_data_input_path=train_uri,  # analyze the training dataset
    s3_output_path=bias_report_output_path,
    label="Target",  # name of the ground-truth column
    headers=training_data.columns.to_list(),
    dataset_type="text/csv",
)
# #### Writing ModelConfig
#
# A [ModelConfig](https://sagemaker.readthedocs.io/en/stable/api/training/processing.html#sagemaker.clarify.ModelConfig) object communicates information about your trained model. To avoid additional traffic to your production models, SageMaker Clarify sets up and tears down a dedicated endpoint when processing.
# * `instance_type` and `instance_count` specify your preferred instance type and instance count used to run your model on during SageMaker Clarify's processing. The testing dataset is small so a single standard instance is good enough to run this example. If you have a large complex dataset, you may want to use a better instance type to speed up, or add more instances to enable Spark parallelization.
# * `accept_type` denotes the endpoint response payload format, and `content_type` denotes the payload format of request to the endpoint.
# * `content_template` is used by SageMaker Clarify to compose the request payload if the content type is JSONLines. To be more specific, the placeholder `$features` will be replaced by the features list from samples. For example, the first sample of the test dataset is `25,2,226802,1,7,4,6,3,2,1,0,0,40,37`, so the corresponding request payload is `'{"features":[25,2,226802,1,7,4,6,3,2,1,0,0,40,37]}'`, which conforms to [SageMaker JSONLines dense format](https://docs.aws.amazon.com/sagemaker/latest/dg/cdf-inference.html#common-in-formats).
# Describes the model Clarify stands up as a temporary shadow endpoint.
model_config = clarify.ModelConfig(
    model_name=model_name,
    instance_type="ml.m5.xlarge",
    instance_count=1,
    accept_type=accept_type,  # response payload MIME type
    content_type=content_type,  # request payload MIME type
    content_template=content_template,  # only used for JSONLines requests
)
# #### Writing ModelPredictedLabelConfig
#
# A [ModelPredictedLabelConfig](https://sagemaker.readthedocs.io/en/stable/api/training/processing.html#sagemaker.clarify.ModelPredictedLabelConfig) provides information on the format of your predictions.
# * `probability` is used by SageMaker Clarify to locate the probability score in endpoint response if the accept type is JSONLines. In this case, the response payload for a single sample request looks like `'{"predicted_label": 0, "score": 0.026494730307781475}'`, so SageMaker Clarify can find the score `0.026494730307781475` by JSONPath `'score'`.
# * `probability_threshold` is used by SageMaker Clarify to convert the probability to binary labels for bias analysis. Prediction above the threshold is interpreted as label value 1 and below or equal as label value 0.
# How to read predictions: `probability` locates the score in the response,
# `probability_threshold` binarizes it for the bias metrics.
predictions_config = clarify.ModelPredictedLabelConfig(
    probability=probability, probability_threshold=probability_threshold
)
# #### Writing BiasConfig
# SageMaker Clarify also needs information on what the sensitive columns (`facets`) are, what the sensitive features (`facet_values_or_threshold`) may be, and what the desirable outcomes are (`label_values_or_threshold`).
# SageMaker Clarify can handle both categorical and continuous data for `facet_values_or_threshold` and for `label_values_or_threshold`. In this case we are using categorical data.
#
# We specify this information in the [BiasConfig](https://sagemaker.readthedocs.io/en/stable/api/training/processing.html#sagemaker.clarify.BiasConfig) API. Here that the positive outcome is earning >$50,000, Sex is a sensitive category, and Female respondents are the sensitive group. `group_name` is used to form subgroups for the measurement of Conditional Demographic Disparity in Labels (CDDL) and Conditional Demographic Disparity in Predicted Labels (CDDPL) with regard to Simpson’s paradox.
# Positive outcome is label 1 (>$50K), the sensitive facet is Sex == 0
# (Female), and Age subgroups condition the CDDL/CDDPL metrics.
bias_config = clarify.BiasConfig(
    label_values_or_threshold=[1], facet_name="Sex", facet_values_or_threshold=[0], group_name="Age"
)
# #### Pre-training Bias
# Bias can be present in your data before any model training occurs. Inspecting your data for bias before training begins can help detect any data collection gaps, inform your feature engineering, and help you understand what societal biases the data may reflect.
#
# Computing pre-training bias metrics does not require a trained model.
#
# #### Post-training Bias
# Computing post-training bias metrics does require a trained model.
#
# Unbiased training data (as determined by concepts of fairness measured by bias metric) may still result in biased model predictions after training. Whether this occurs depends on several factors including hyperparameter choices.
#
#
# You can run these options separately with `run_pre_training_bias()` and `run_post_training_bias()` or at the same time with `run_bias()` as shown below.
# Run pre- and post-training bias analysis in a single processing job.
clarify_processor.run_bias(
    data_config=bias_data_config,
    bias_config=bias_config,
    model_config=model_config,
    model_predicted_label_config=predictions_config,
    pre_training_methods="all",  # compute every pre-training metric
    post_training_methods="all",  # compute every post-training metric
)
# #### Viewing the Bias Report
# In Studio, you can view the results under the experiments tab.
#
# <img src="./recordings/bias_report.gif">
#
# Each bias metric has detailed explanations with examples that you can explore.
#
# <img src="./recordings/bias_detail.gif">
#
# You could also summarize the results in a handy table!
#
# <img src="./recordings/bias_report_chart.gif">
#
# If you're not a Studio user yet, you can access the bias report in pdf, html and ipynb formats in the following S3 bucket:
# S3 location of the generated bias report (pdf/html/ipynb formats).
bias_report_output_path
# For example, you can download a copy of the html report and view it in-place,
# !aws s3 cp {bias_report_output_path}/report.html ./bias-report.html
# +
import IPython
# Render the downloaded HTML report inline in the notebook.
IPython.display.HTML(filename="bias-report.html")
# -
# ### Explaining Predictions
# There are expanding business needs and legislative regulations that require explanations of _why_ a model made the decision it did. SageMaker Clarify uses SHAP to explain the contribution that each input feature makes to the final decision.
# Kernel SHAP algorithm requires a baseline (also known as background dataset). Baseline dataset type shall be the same as `dataset_type` of `DataConfig`, and baseline samples shall only include features. By definition, `baseline` should either be a S3 URI to the baseline dataset file, or an in-place list of samples. In this case we chose the latter, and put the first sample of the test dataset to the list.
# +
shap_config = clarify.SHAPConfig(
    baseline=[test_features.iloc[0].values.tolist()],  # single-sample SHAP baseline
    num_samples=15,  # Kernel SHAP synthetic sample count
    agg_method="mean_abs",  # aggregate |SHAP| values across instances
    save_local_shap_values=False,
)
# Separate S3 prefix for the explainability report.
explainability_output_path = "s3://{}/{}/clarify-explainability".format(bucket, prefix)
explainability_data_config = clarify.DataConfig(
    s3_data_input_path=train_uri,
    s3_output_path=explainability_output_path,
    label="Target",
    headers=training_data.columns.to_list(),
    dataset_type="text/csv",
)
# -
# Launch the Kernel SHAP feature-attribution job against the shadow endpoint.
clarify_processor.run_explainability(
    data_config=explainability_data_config,
    model_config=model_config,
    explainability_config=shap_config,
    model_scores=probability,  # JSONPath of the score in the response
)
# #### Viewing the Explainability Report
# As with the bias report, you can view the explainability report in Studio under the experiments tab
#
#
# <img src="./recordings/explainability_detail.gif">
#
# The Model Insights tab contains direct links to the report and model insights.
# If you're not a Studio user yet, as with the Bias Report, you can access this report at the following S3 bucket.
# S3 location of the generated explainability report.
explainability_output_path
# For example, you can download a copy of the html report and view it in-place,
# !aws s3 cp {explainability_output_path}/report.html ./explainability-report.html
# +
import IPython
# Render the downloaded HTML report inline in the notebook.
IPython.display.HTML(filename="explainability-report.html")
# -
# ### Clean Up
# Finally, don't forget to clean up the resources we set up and used for this demo!
# Delete the SageMaker Model now that all Clarify jobs are finished.
session.delete_model(model_name)
| sagemaker_processing/fairness_and_explainability/fairness_and_explainability_byoc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="7765UFHoyGx6"
# ##### Copyright 2019 The TensorFlow Authors.
# + cellView="form" colab={} colab_type="code" id="KVtTDrUNyL7x"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="xPYxZMrWyA0N"
# #How to train Boosted Trees models in TensorFlow
# + [markdown] colab_type="text" id="p_vOREjRx-Y0"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/tutorials/estimators/boosted_trees"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/estimators/boosted_trees.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/estimators/boosted_trees.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">View source on GitHub</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="dW3r7qVxzqN5"
# This tutorial is an end-to-end walkthrough of training a Gradient Boosting model using decision trees with the `tf.estimator` API. Boosted Trees models are among the most popular and effective machine learning approaches for both regression and classification. It is an ensemble technique that combines the predictions from several (think 10s, 100s or even 1000s) tree models.
#
# Boosted Trees models are popular with many machine learning practitioners as they can achieve impressive performance with minimal hyperparameter tuning.
# + [markdown] colab_type="text" id="eylrTPAN3rJV"
# ## Load the titanic dataset
# You will be using the titanic dataset, where the (rather morbid) goal is to predict passenger survival, given characteristics such as gender, age, class, etc.
# + colab={} colab_type="code" id="KuhAiPfZ3rJW"
from __future__ import absolute_import, division, print_function, unicode_literals
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
try:
# # %tensorflow_version only exists in Colab.
# %tensorflow_version 2.x
except Exception:
pass
import tensorflow.compat.v1 as tf
# Quiet TF logging and fix the graph-level seed for reproducible runs.
tf.logging.set_verbosity(tf.logging.ERROR)
tf.set_random_seed(123)
# Load dataset.
# The binary `survived` column is popped off as the label; the remaining
# columns become the feature DataFrames.
dftrain = pd.read_csv('https://storage.googleapis.com/tfbt/titanic_train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tfbt/titanic_eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
# + [markdown] colab_type="text" id="3ioodHdVJVdA"
# The dataset consists of a training set and an evaluation set:
#
# * `dftrain` and `y_train` are the *training set*—the data the model uses to learn.
# * The model is tested against the *eval set*, `dfeval`, and `y_eval`.
#
# For training you will use the following features:
#
#
# <table>
# <tr>
# <th>Feature Name</th>
# <th>Description</th>
# </tr>
# <tr>
# <td>sex</td>
# <td>Gender of passenger</td>
# </tr>
# <tr>
# <td>age</td>
# <td>Age of passenger</td>
# </tr>
# <tr>
# <td>n_siblings_spouses</td>
# <td># siblings and partners aboard</td>
# </tr>
# <tr>
# <td>parch</td>
# <td># of parents and children aboard</td>
# </tr>
# <tr>
# <td>fare</td>
# <td>Fare passenger paid.</td>
# </tr>
# <tr>
# <td>class</td>
# <td>Passenger's class on ship</td>
# </tr>
# <tr>
# <td>deck</td>
# <td>Which deck passenger was on</td>
# </tr>
# <tr>
# <td>embark_town</td>
# <td>Which town passenger embarked from</td>
# </tr>
# <tr>
# <td>alone</td>
# <td>If passenger was alone</td>
# </tr>
# </table>
# + [markdown] colab_type="text" id="AoPiWsJALr-k"
# ## Explore the data
# + [markdown] colab_type="text" id="slcat1yzmzw5"
# Let's first preview some of the data and create summary statistics on the training set.
# + colab={} colab_type="code" id="15PLelXBlxEW"
# Peek at the first rows of the training data.
dftrain.head()
# + colab={} colab_type="code" id="j2hiM4ETmqP0"
# Summary statistics (count/mean/std/quartiles) for the numeric columns.
dftrain.describe()
# + [markdown] colab_type="text" id="-IR0e8V-LyJ4"
# There are 627 and 264 examples in the training and evaluation sets, respectively.
# + colab={} colab_type="code" id="_1NwYqGwDjFf"
# Number of examples in the training and evaluation sets.
dftrain.shape[0], dfeval.shape[0]
# + [markdown] colab_type="text" id="28UFJ4KSMK3V"
# The majority of passengers are in their 20's and 30's.
# + colab={} colab_type="code" id="CaVDmZtuDfux"
# Age distribution of the training passengers.
dftrain.age.hist(bins=20)
plt.show()
# + [markdown] colab_type="text" id="1pifWiCoMbR5"
# There are approximately twice as many male passengers as female passengers aboard.
# + colab={} colab_type="code" id="-WazAq30MO5J"
# Passenger counts by sex.
dftrain.sex.value_counts().plot(kind='barh')
plt.show()
# + [markdown] colab_type="text" id="7_XkxrpmmVU_"
# The majority of passengers were in the "third" class.
# + colab={} colab_type="code" id="zZ3PvVy4l4gI"
# Passenger counts by ticket class.
(dftrain['class']
  .value_counts()
  .plot(kind='barh'))
plt.show()
# + [markdown] colab_type="text" id="HM5SlwlxmZMT"
# Most passengers embarked from Southampton.
# + colab={} colab_type="code" id="RVTSrdr4mZaC"
# Passenger counts by town of embarkation.
(dftrain['embark_town']
  .value_counts()
  .plot(kind='barh'))
plt.show()
# + [markdown] colab_type="text" id="aTn1niLPob3x"
# Females have a much higher chance of surviving vs. males. This will clearly be a predictive feature for the model.
# + colab={} colab_type="code" id="Eh3KW5oYkaNS"
# Mean survival rate grouped by sex (labels rejoined to the features first).
ax = (pd.concat([dftrain, y_train], axis=1)\
  .groupby('sex')
  .survived
  .mean()
  .plot(kind='barh'))
ax.set_xlabel('% survive')
plt.show()
# + [markdown] colab_type="text" id="krkRHuMp3rJn"
# ## Create feature columns and input functions
# The Gradient Boosting estimator can utilize both numeric and categorical features. Feature columns work with all TensorFlow estimators and their purpose is to define the features used for modeling. Additionally they provide some feature engineering capabilities like one-hot-encoding, normalization, and bucketization. In this tutorial, the fields in `CATEGORICAL_COLUMNS` are transformed from categorical columns to one-hot-encoded columns ([indicator column](https://www.tensorflow.org/api_docs/python/tf/feature_column/indicator_column)):
# + colab={} colab_type="code" id="upaNWxcF3rJn"
fc = tf.feature_column

# Column groups of the Titanic features.
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
                       'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']


def one_hot_cat_column(feature_name, vocab):
    """Wrap a vocabulary-list categorical column in a one-hot indicator."""
    categorical = fc.categorical_column_with_vocabulary_list(feature_name, vocab)
    return fc.indicator_column(categorical)


# One-hot encode every categorical column (vocabulary taken from the training
# data), then append the numeric columns as float32 features.
feature_columns = [
    one_hot_cat_column(name, dftrain[name].unique())
    for name in CATEGORICAL_COLUMNS
]
feature_columns.extend(
    fc.numeric_column(name, dtype=tf.float32) for name in NUMERIC_COLUMNS
)
# + [markdown] colab_type="text" id="74GNtFpStSAz"
# You can view the transformation that a feature column produces. For example, here is the output when using the `indicator_column` on a single example:
# + colab={} colab_type="code" id="Eaq79D9FtmF8"
# Demonstrate the indicator-column transform on a single example row.
example = dftrain.head(1)
class_fc = one_hot_cat_column('class', ('First', 'Second', 'Third'))
print('Feature value: "{}"'.format(example['class'].iloc[0]))
print('One-hot encoded: ', fc.input_layer(dict(example), [class_fc]).numpy())
# + [markdown] colab_type="text" id="YbCUn3nCusC3"
# Additionally, you can view all of the feature column transformations together:
# + colab={} colab_type="code" id="omIYcsVws3g0"
# Dense tensor produced by applying every feature column to the example.
fc.input_layer(dict(example), feature_columns).numpy()
# + [markdown] colab_type="text" id="-UOlROp33rJo"
# Next you need to create the input functions. These will specify how data will be read into our model for both training and inference. You will use the `from_tensor_slices` method in the [`tf.data`](https://www.tensorflow.org/api_docs/python/tf/data) API to read in data directly from Pandas. This is suitable for smaller, in-memory datasets. For larger datasets, the tf.data API supports a variety of file formats (including [csv](https://www.tensorflow.org/api_docs/python/tf/data/experimental/make_csv_dataset)) so that you can process datasets that do not fit in memory.
# + colab={} colab_type="code" id="9dquwCQB3rJp"
# Use entire batch since this is such a small dataset.
NUM_EXAMPLES = len(y_train)


def make_input_fn(X, y, n_epochs=None, shuffle=True):
    """Build a tf.estimator input_fn that feeds the whole dataset as one batch.

    Args:
        X: pandas DataFrame of features.
        y: labels; reshaped to a column vector for the estimator.
        n_epochs: passes over the data (None = repeat indefinitely for training).
        shuffle: whether to reshuffle the examples each epoch.
    """
    labels = np.expand_dims(y, axis=1)

    def input_fn():
        ds = tf.data.Dataset.from_tensor_slices((dict(X), labels))
        if shuffle:
            ds = ds.shuffle(NUM_EXAMPLES)
        # Cycle through the data n_epochs times (forever when None); in-memory
        # training uses a single batch containing every example.
        return ds.repeat(n_epochs).batch(NUM_EXAMPLES)

    return input_fn


# Training and evaluation input functions.
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)
# + [markdown] colab_type="text" id="HttfNNlN3rJr"
# ## Train and evaluate the model
#
# Below you will do the following steps:
#
# 1. Initialize the model, specifying the features and hyperparameters.
# 2. Feed the training data to the model using the `train_input_fn` and train the model using the `train` function.
# 3. You will assess model performance using the evaluation set—in this example, the `dfeval` DataFrame. You will verify that the predictions match the labels from the `y_eval` array.
#
# Before training a Boosted Trees model, let's first train a linear classifier (logistic regression model). It is best practice to start with simpler model to establish a benchmark.
# + colab={} colab_type="code" id="JPOGpmmq3rJr"
# Baseline: a linear classifier (logistic regression) over the same features.
linear_est = tf.estimator.LinearClassifier(feature_columns)
# Train model.
linear_est.train(train_input_fn, max_steps=100)
# Evaluation.
# `accuracy_baseline` is the accuracy of always predicting the majority class.
results = linear_est.evaluate(eval_input_fn)
print('Accuracy : ', results['accuracy'])
print('Dummy model: ', results['accuracy_baseline'])
# + [markdown] colab_type="text" id="BarkNXwA3rJu"
# Next let's train a Boosted Trees model. For boosted trees, regression (`BoostedTreesRegressor`) and classification (`BoostedTreesClassifier`) are supported, along with using any twice differentiable custom loss (`BoostedTreesEstimator`). Since the goal is to predict a class - survive or not survive, you will use the `BoostedTreesClassifier`.
#
#
#
# + colab={} colab_type="code" id="tgEzMtlw3rJu"
# Since data fits into memory, use entire dataset per layer. It will be faster.
# Above one batch is defined as the entire dataset.
n_batches = 1
# Gradient-boosted trees classifier built layer-by-layer from whole-dataset batches.
est = tf.estimator.BoostedTreesClassifier(feature_columns,
                                          n_batches_per_layer=n_batches)
# The model will stop training once the specified number of trees is built, not
# based on the number of steps.
est.train(train_input_fn, max_steps=100)
# Eval.
results = est.evaluate(eval_input_fn)
print('Accuracy : ', results['accuracy'])
print('Dummy model: ', results['accuracy_baseline'])
# + [markdown] colab_type="text" id="hEflwznXvuMP"
# Now you can use the trained model to make predictions on a passenger from the evaluation set. TensorFlow models are optimized to make predictions on a batch, or collection, of examples at once. Earlier, the `eval_input_fn` was defined using the entire evaluation set.
# + colab={} colab_type="code" id="6zmIjTr73rJ4"
# Predicted survival probabilities (class-1 column) for every eval example.
pred_dicts = list(est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
probs.plot(kind='hist', bins=20, title='predicted probabilities')
plt.show()
# + [markdown] colab_type="text" id="mBUaNN1BzJHG"
# Finally you can also look at the receiver operating characteristic (ROC) of the results, which will give us a better idea of the tradeoff between the true positive rate and false positive rate.
# + colab={} colab_type="code" id="NzxghvVz3rJ6"
from sklearn.metrics import roc_curve

# ROC curve: true-positive rate vs. false-positive rate over all thresholds.
fpr, tpr, _ = roc_curve(y_eval, probs)
plt.plot(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.xlim(0,)
plt.ylim(0,)
plt.show()
| site/en/r1/tutorials/estimators/boosted_trees.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ricardodeazambuja/MaskDetection-EdgeTPU-RPI0/blob/main/EdgeTPU_MaskDetection_Testing_(using_COCO_API).ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="2ROLBVz0W-b4"
# # Testing the FaceMask Detection on edgetpu using mobiledet ssdlite
# + colab={"base_uri": "https://localhost:8080/"} id="mGcoG6M55v_V" outputId="28447487-0414-41d8-d017-e3eb293d06ad"
# Mount Google Drive:
from google.colab import drive
# Make Drive contents (datasets, checkpoints) available under /content/drive.
drive.mount('/content/drive')
# + [markdown] id="m4spO9FV3R5m"
# # Datasets
#
# https://makeml.app/datasets/mask has only 853 images
#
# https://www.kaggle.com/ivandanilovich/medical-masks-dataset-images-tfrecords has 1148 (you need to log in to download it, so I have it in my drive)
#
# https://github.com/TheSSJ2612/Real-Time-Medical-Mask-Detection/releases/download/v0.1/Dataset.zip has 5521 images for each category (mask/no-mask), but without annotations
# + colab={"base_uri": "https://localhost:8080/"} id="hFcNqnifoi2G" outputId="10c55d31-efee-4089-dda0-073613df9a44"
# Download the Mask Dataset from MakeML: https://makeml.app/datasets/mask
# License: Public Domain Mark
# !mkdir /content/dataset
# %cd /content/dataset
# !wget https://arcraftimages.s3-accelerate.amazonaws.com/Datasets/Mask/MaskPascalVOC.zip
# !unzip MaskPascalVOC.zip > /dev/null 2>&1
# !rm -r __* # remove MacOS cache
# + colab={"base_uri": "https://localhost:8080/"} id="r0oF8h686ERd" outputId="86d9a02a-3fae-4c7e-fe6f-0d772679929c"
# ls /content/dataset
# + id="J7SjRCLr6RJX"
# !rm -rf MaskPascalVOC.zip
# + colab={"base_uri": "https://localhost:8080/"} id="cHKNYZTI6pp_" outputId="0db9da16-cdcd-4dbe-9155-1957aae7a46b"
# !ls /content/dataset/images | wc -l
# + colab={"base_uri": "https://localhost:8080/"} id="AxyTXrSx6yt7" outputId="77eebedf-f15f-4cc8-c076-ca68b98ab0a5"
# !ls /content/dataset/annotations | wc -l
# + colab={"base_uri": "https://localhost:8080/"} id="h7iv7gu0s2nM" outputId="9dc8abc4-3271-469a-f71e-f33fbe876aef"
# Lets create a mask_label_map.pbtxt file
# %%file mask_label_map.pbtxt
item {
id: 1
name: 'with_mask'
}
item {
id: 2
name: 'without_mask'
}
item {
id: 3
name: 'mask_weared_incorrect'
}
# + id="bFqSZ_qm9dtG"
import glob
import os
import shutil
import numpy as np
# + colab={"base_uri": "https://localhost:8080/"} id="YJvbq-zd9fCB" outputId="eb5958e3-6578-4e87-f020-466dcae1faea"
# !rm -rf "/content/dataset/train"
# !rm -rf "/content/dataset/test"
# Reproducible random train/test split of the image + annotation file pairs.
seed = 42
test_perc = 0.1

img_list = glob.glob("/content/dataset/images/*")
img_list_filenames = [i.split("/")[-1] for i in img_list]
ann_list = glob.glob("/content/dataset/annotations/*.xml")
ann_list_filenames = [i.split("/")[-1] for i in ann_list]

# Let's make sure we don't have files with the same name...
# ... and hope the different datasets are not reusing images!
img_list = sorted(set(img_list_filenames))
ann_list = sorted(set(ann_list_filenames))
assert len(img_list) == len(ann_list), "They should have the same length..."

rs = np.random.RandomState(seed)
rnd_indices = list(range(len(img_list)))
rs.shuffle(rnd_indices)

os.makedirs("/content/dataset/train", exist_ok=True)
os.makedirs("/content/dataset/test", exist_ok=True)

n_test = int(len(img_list) * test_perc)
print(f"Test set size: {n_test}")
print(f"Train set size: {len(img_list) - n_test}")

#
# Both datasets use annotation as the same name as the image
#
# BUG FIX: the shuffled `rnd_indices` were previously never used, so the
# "random" split was just the first 10% of the alphabetically sorted files and
# the seed had no effect; also `i <= n_test` copied one file more than the
# printed test-set size. Iterate the shuffled order and use a strict bound.
# Files keep their original sorted index as the numeric name stem so the
# image/annotation pairing (and the int() parsing downstream) still holds.
for pos, i in enumerate(rnd_indices):
    dest = "test" if pos < n_test else "train"
    ext = img_list[i].split('.')[-1]
    shutil.copy(os.path.join("/content/dataset/images/", img_list[i]),
                f"/content/dataset/{dest}/{i}.{ext}")
    shutil.copy(os.path.join("/content/dataset/annotations/", ann_list[i]),
                f"/content/dataset/{dest}/{i}.xml")
# + colab={"base_uri": "https://localhost:8080/"} id="YOB1poY6Love" outputId="9f12b23b-6819-466f-9cfc-d8639acc7250"
# ls /content/dataset/train | head -n 10
# + colab={"base_uri": "https://localhost:8080/"} id="qdF1oqYWuL4R" outputId="6eab4034-6c75-4374-cef4-afaa4d0ec3e6"
# !rm -rf /content/dataset/train_labels.csv
# !rm -rf /content/dataset/test_labels.csv
# This script will turn our xml files from each image to a single label file.
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
# Map the label names used by the different source datasets onto one
# canonical set of three classes.
label_translator = {"with_mask": "with_mask",
                    "without_mask": "without_mask",
                    "mask_weared_incorrect": "mask_weared_incorrect",
                    "mask": "with_mask",
                    "none": "without_mask",
                    "poor": "mask_weared_incorrect"}


def xml_to_csv(path):
    """Collect all Pascal-VOC XML annotations under `path` into a DataFrame.

    For every .jpg/.png image in `path` the same-named .xml file is parsed,
    producing one row per annotated object with columns
    [filename, width, height, class, xmin, ymin, xmax, ymax].
    """
    xml_list = []
    file_list = list(glob.glob(path + '/*.jpg')) + list(glob.glob(path + '/*.png'))
    for xml_file in file_list:
        tree = ET.parse(os.path.join(path, xml_file.split('/')[-1].split('.')[0] + ".xml"))
        root = tree.getroot()
        for member in root.findall('object'):
            # fix names in different datasets
            name = label_translator[member.find('name').text]
            # fix problems with bbox https://stackoverflow.com/a/64407725
            xmin = int(member.find('bndbox')[0].text)
            ymin = int(member.find('bndbox')[1].text)
            xmax = int(member.find('bndbox')[2].text)
            ymax = int(member.find('bndbox')[3].text)
            # BUG FIX: the previous sequential `xmin = np.min(...)` then
            # `xmax = np.max(...)` overwrote xmin before computing xmax, losing
            # the original value whenever the coordinates were swapped (the box
            # collapsed to zero width/height). Swap atomically instead.
            xmin, xmax = min(xmin, xmax), max(xmin, xmax)
            ymin, ymax = min(ymin, ymax), max(ymin, ymax)
            width = int(root.find('size')[0].text)
            height = int(root.find('size')[1].text)
            value = (xml_file.split('/')[-1],
                     width,
                     height,
                     name,
                     xmin,
                     ymin,
                     xmax,
                     ymax
                     )
            xml_list.append(value)
    column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
    xml_df = pd.DataFrame(xml_list, columns=column_name)
    return xml_df
def get_labels_from_image_dir():
    """Convert the per-image XML annotations of each split into one CSV file."""
    for split in ('test', 'train'):
        image_dir = '/content/dataset/' + split + '/'
        csv_path = '/content/dataset/' + split + '_labels.csv'
        # One row per annotated object, written without the index column.
        xml_to_csv(image_dir).to_csv(csv_path, index=None)


get_labels_from_image_dir()
print('Training labels:')
# !head -n 20 /content/dataset/train_labels.csv
print('\nTesting labels:')
# !head -n 20 /content/dataset/test_labels.csv
# + [markdown] id="kvRghjbb3JEB"
# ## Generate COCO json stuff... manually
# + id="3pkDAORi3IPe"
import pandas as pd
import json
def csv2cocojson(csv_filepath, ordered_category_names,
                 json_filepath="/content/dataset/cocoGT.json"):
    """Convert a label CSV into a COCO-format ground-truth dict.

    Args:
        csv_filepath: CSV with columns filename,width,height,class,xmin,ymin,xmax,ymax.
        ordered_category_names: class names; list position defines the
            1-based COCO category ids.
        json_filepath: where the COCO JSON is written. New parameter that
            defaults to the previously hard-coded location, so existing
            callers are unchanged.

    Returns:
        The COCO ground-truth dict (also dumped to `json_filepath`).
    """
    df = pd.read_csv(csv_filepath)
    # COCO category ids are 1-based and follow the given name order.
    category_translator = dict(zip(ordered_category_names, range(1, len(ordered_category_names) + 1)))
    categories = [{'id': cat_id, 'name': class_name} for class_name, cat_id in category_translator.items()]
    annotations = []
    images = {}
    # `row_id` (renamed from `id`, which shadowed the builtin) numbers the
    # annotations sequentially from 1.
    for row_id, row in df.iterrows():
        tmp_dict = {}
        tmp_dict['id'] = row_id + 1
        # Image ids come from the numeric filename stem ("42.png" -> 42),
        # as produced by the dataset-split step above.
        tmp_dict['image_id'] = int(row['filename'].split('.')[0])
        images[tmp_dict['image_id']] = {'width': int(row['width']),
                                        'height': int(row['height']),
                                        'id': tmp_dict['image_id'],
                                        'filename': row['filename']}
        x = int(row['xmin'])
        y = int(row['ymin'])
        xmax = int(row['xmax'])
        ymax = int(row['ymax'])
        # COCO bboxes are [x, y, width, height].
        w = abs(xmax - x)
        h = abs(ymax - y)
        tmp_dict['bbox'] = [x, y, w, h]
        tmp_dict['area'] = w * h
        tmp_dict['category_id'] = category_translator[row['class']]
        tmp_dict['segmentation'] = []
        tmp_dict['iscrowd'] = 0  # https://github.com/cocodataset/cocoapi/blob/8c9bcc3cf640524c4c20a9c40e89cb6a2f2fa0e9/PythonAPI/pycocotools/cocoeval.py#L109
        annotations.append(tmp_dict)
    coco_gt_dict = {
        'images': list(images.values()),
        'categories': categories,
        'annotations': annotations
    }
    with open(json_filepath, 'w') as f:
        json.dump(coco_gt_dict, f, indent=2)
    return coco_gt_dict
# + id="1bJktDA1E0cz"
CSV_FILEPATH = "/content/dataset/test_labels.csv"
# Category order fixes the COCO ids: with_mask=1, without_mask=2, mask_weared_incorrect=3.
ordered_category_names = ["with_mask", "without_mask", "mask_weared_incorrect"]
coco_gt_dict = csv2cocojson(CSV_FILEPATH, ordered_category_names)
# + colab={"base_uri": "https://localhost:8080/"} id="iVN62lDoChFN" outputId="e2e7be02-e351-422d-a32e-07c976dfd0b5"
# cat "/content/dataset/cocoGT.json" | head -n 10
# + [markdown] id="2RuNxlX53bBF"
# ### Randomly visualize the datasets to make sure they makes sense...
# + colab={"base_uri": "https://localhost:8080/", "height": 551} id="Ar-zE3bd3aMJ" outputId="bc888c35-0ca2-4e0c-b25e-893f88be4183"
from PIL import Image, ImageDraw
# Sanity-check the dataset: count per-class labels and draw one random annotation.
ds_type = "test"
with open('/content/dataset/'+ds_type+'_labels.csv' ,'r') as f:
    # filename,width,height,class,xmin,ymin,xmax,ymax
    csv_file = f.read().splitlines()[1:]
labels = {}  # class name -> number of annotations in this split
for csv_data in csv_file:
    filename, _, _, label, xmin,ymin,xmax, ymax = csv_data.split(',')
    if label in labels:
        labels[label]+=1
    else:
        labels[label]=1
# Pick one random annotation row and draw its box on the corresponding image.
sample_idx = np.random.randint(0, len(csv_file))
filename, _, _, label, xmin,ymin,xmax, ymax = csv_file[sample_idx].split(',')
img = Image.open('/content/dataset/'+ds_type+"/"+filename)
draw = ImageDraw.Draw(img)
draw.rectangle([(int(xmin), int(ymin)), (int(xmax), int(ymax))], width=3, outline='red')
# draw.text((int(xmin)+5, int(ymin)),label,fill='red')
print(labels)
print(label)
img.resize((500,500))
# + [markdown] id="hdppGnq96qFE"
# # Model path
# + id="WLAc8TvXE-AD"
# model_path = "/content/drive/MyDrive/Checkpoints/mask_full/tflite/output_model_5676/"
# Checkpoint directory containing the frozen graph and the tflite exports used below.
model_path = "/content/drive/MyDrive/Checkpoints/mask_full/tflite/output_model_18000/"
# + [markdown] id="8muTv5rE3grQ"
# # Prepare for testing... using the original model
# + colab={"base_uri": "https://localhost:8080/"} id="1kAR4ga5OyY0" outputId="66cd29bb-bfbf-42fe-fb20-24b7bfef8988"
# ls $model_path
# + id="q1pohWsjOUKP"
from PIL import Image
from PIL import ImageDraw
from IPython.display import display
def draw_bboxes(image, boxes, classes, scores, threshold=0.5, labels={1:'mask', 2: 'no_mask', 3:'poor_mask'}):
    """Draw detection boxes and score labels on a PIL image (mutates and returns it).

    :param image: PIL.Image drawn on in place
    :param boxes: normalized [ymin, xmin, ymax, xmax] boxes in 0..1
    :param classes: per-box class ids; must be keys of `labels`
    :param scores: per-box confidences in [0, 1]
    :param threshold: boxes with score <= threshold are skipped
    :param labels: class id -> display name (shared mutable default; treat as read-only)
    :return: the same image object, annotated
    """
    colors = ((128, 255, 102), (102, 255, 255), (232, 123, 212))
    colors = dict(zip(labels.keys(), colors))
    image_width, image_height = image.size
    draw = ImageDraw.Draw(image)
    # Visualization of the results of a detection.
    for i in range(len(boxes)):
        if scores[i] > threshold:
            # De-normalize and clamp the box to the image bounds.
            ymin = int(max(1, (boxes[i][0] * image_height)))
            xmin = int(max(1, (boxes[i][1] * image_width)))
            ymax = int(min(image_height, (boxes[i][2] * image_height)))
            xmax = int(min(image_width, (boxes[i][3] * image_width)))
            class_id = int(classes[i])
            draw.rectangle((xmin, ymin, xmax, ymax), width=3, outline=colors[class_id])
            text = f'{labels[class_id]} {scores[i]*100:1.0f}%'
            # The original passed width=2 here, but ImageDraw.text() has no `width`
            # parameter, so the kwarg had no effect and is dropped.
            draw.text((xmin+3, ymin-10), text, fill=colors[class_id])
    return image
# + colab={"base_uri": "https://localhost:8080/"} id="mhg-2OA-m-9n" outputId="3d4e47b7-5187-428c-dedb-84fc04e71b03"
# Do a Quick Evaluation on the graph model/
import numpy as np
import os
import glob
# The frozen graph was exported with TF1, so force TF1 graph-mode semantics.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior() # https://stackoverflow.com/a/58473143
PATH_TO_CKPT = model_path + 'frozen_inference_graph.pb'
class inference_for_single_image:
    """Run a frozen TF1 detection graph, keeping one tf.Session open across calls.

    A generator (`__run_inference`) holds the Session inside its `with` blocks;
    `__call__` drives it with a (continue-flag, image) send/yield handshake so the
    expensive Session setup happens once instead of per image. Call with
    `cont=False` to break the loop and close the Session.
    """
    def __init__(self, PATH_TO_CKPT):
        # Load the serialized GraphDef into a private Graph.
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        self.gen = self.__run_inference()
    def __call__(self, image, cont=True):
        # Handshake: advance the generator to its `cont = yield`, send the
        # continue flag, then (if continuing) send the image and return the
        # yielded output dict. When cont=False the generator ends and the
        # StopIteration from send() is absorbed here.
        next(self.gen)
        try:
            self.gen.send(cont)
        except StopIteration:
            return
        return self.gen.send(image)
    def __run_inference(self):
        with self.detection_graph.as_default():
            with tf.Session() as sess:
                # Get handles to input and output tensors
                ops = tf.get_default_graph().get_operations()
                all_tensor_names = {output.name for op in ops for output in op.outputs}
                tensor_dict = {}
                for key in ['num_detections', 'detection_boxes', 'detection_scores', 'detection_classes']:
                    tensor_name = key + ':0'
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
                image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
                while True:
                    cont = yield  # receives the continue flag from __call__
                    if not cont:
                        break  # exits the `with` blocks, closing the Session
                    # Run inference
                    image = yield  # receives the image from __call__
                    output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)})
                    # all outputs are float32 numpy arrays, so convert types as appropriate
                    output_dict['num_detections'] = int(output_dict['num_detections'][0])
                    output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
                    output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
                    output_dict['detection_scores'] = output_dict['detection_scores'][0]
                    yield output_dict
# Instantiate once so the TF Session is created a single time and reused below.
run_inference_for_single_image = inference_for_single_image(PATH_TO_CKPT)
# + id="BFqoDUMmzxag"
def results2cocojson(results, outputjson="/content/dataset/cocoRes.json"):
    """Convert detection results to COCO results-format json and write it to disk.

    :param results: iterable of (boxes, classes, scores, (img_width, img_height), img_id)
                    tuples, where boxes are normalized [ymin, xmin, ymax, xmax]
    :param outputjson: path of the json file to write
    :return: list of unique image ids present in the results
             (needed by COCOeval to average per-image metrics)
    """
    annotations = []
    # Running annotation id. The original restarted enumerate() at 0 for every
    # image, producing duplicate (and 0-based) ids across images.
    ann_id = 0
    for boxes, classes, scores, (img_width, img_height), img_id in results:
        for b, c, s in zip(boxes, classes, scores):
            ann_id += 1
            tmp_dict = {}
            tmp_dict['id'] = ann_id
            tmp_dict['image_id'] = img_id
            # De-normalize: boxes are [ymin, xmin, ymax, xmax] in 0..1.
            x = float(b[1] * img_width)
            y = float(b[0] * img_height)
            xmax = float(b[3] * img_width)
            ymax = float(b[2] * img_height)
            w = abs(xmax - x)
            h = abs(ymax - y)
            tmp_dict['bbox'] = [x, y, w, h]  # COCO bbox format: [x, y, width, height]
            tmp_dict['area'] = float(w * h)
            tmp_dict['category_id'] = int(c)
            tmp_dict['score'] = float(s)
            annotations.append(tmp_dict)
    # Save the annotations dictionary as a json file
    with open(outputjson, 'w') as f:
        json.dump(annotations, f, indent=2)
    # List of image_ids (necessary to calculate the averages)
    imgIds = list(set([i['image_id'] for i in annotations]))
    return imgIds
# + colab={"base_uri": "https://localhost:8080/"} id="eQ3Wil9byavA" outputId="3afa082c-18d1-4a91-e557-42555fda3c60"
PATH_TO_TEST_IMAGES_DIR = '/content/dataset/test'
# TEST_IMAGE_PATHS = [os.path.join(PATH_TO_TEST_IMAGES_DIR, fi) for fi in labels[LABEL]]
TEST_IMAGE_PATHS = list(glob.glob(PATH_TO_TEST_IMAGES_DIR + '/*.jpg'))+list(glob.glob(PATH_TO_TEST_IMAGES_DIR + '/*.png'))
# np.random.shuffle(TEST_IMAGE_PATHS)
# TEST_IMAGE_PATHS = TEST_IMAGE_PATHS[:3]
# Run the frozen TF1 graph over every test image, collecting raw detections.
results = []
for image_path in TEST_IMAGE_PATHS:
    print('Evaluating:', image_path)
    image = Image.open(image_path)
    filename = image.filename.split('/')[-1]
    image = image.convert('RGB')
    # Image id = numeric filename stem, matching the ground-truth json.
    image_id = int(filename.split('.')[0])
    img_width, img_height = image.size
    image_np = np.asarray(image)
    # The model has image resizing in the graph...
    # ... and the fake batch stuff is in the class already
    output_dict = run_inference_for_single_image(image_np)
    boxes, classes, scores = output_dict['detection_boxes'], output_dict['detection_classes'], output_dict['detection_scores']
    results.append((boxes, classes, scores, image.size, image_id))
    # display(draw_bboxes(image, boxes, classes, scores))
# cont=False shuts down the generator and closes the TF Session.
run_inference_for_single_image([], cont=False)
# + id="trrqZV0J7EHM"
# Save the annotations dictionary as a json file
# returning a list of image_ids (necessary to calculate the averages)
imgIds = results2cocojson(results, "/content/dataset/cocoRes.json")
# + colab={"base_uri": "https://localhost:8080/"} id="N08CHkEBR9-U" outputId="ddb119bf-bb87-4b19-d5bb-a0535bab0c88"
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
# Evaluate the frozen-graph detections against the ground truth with the COCO API.
cocoGt=COCO("/content/dataset/cocoGT.json")
cocoDt=cocoGt.loadRes("/content/dataset/cocoRes.json")
cocoEval = COCOeval(cocoGt,cocoDt,"bbox")
cocoEval.params.imgIds = imgIds
# Area lower bound is 0 (COCO default uses the same ranges).
cocoEval.params.areaRng = [
    [0 ** 2, 1e5 ** 2], # all - objects that have area between 1 sq pixels and 1e10 (10_000_000_000) sq pixels
    [0 ** 2, 32 ** 2], # small - objects that have area between 1 sq pixels and 32^2 (1024) sq pixels
    [32 ** 2, 96 ** 2], # medium - objects that have area between 32^2 (1024) sq pixels and 96^2 (9216) sq pixels
    [96 ** 2, 1e5 ** 2] # large - objects that have area between 96^2 (9216) sq pixels and 1e10 (10_000_000_000) sq pixels
]
cocoEval.params.areaRngLbl = ['all', 'small', 'medium', 'large'] # labels for areaRng
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
# + colab={"base_uri": "https://localhost:8080/"} id="pc38hvZFCTbD" outputId="64b811bf-7ed2-4b71-e726-4d1a87641af4"
# ls $model_path
# + [markdown] id="u77CRNbI5nsh"
# # Test tflite models
# + colab={"base_uri": "https://localhost:8080/"} id="isTD5FWZ0K6_" outputId="e9cb8dde-7947-41b6-d75c-ad129ac99b33"
# Now we install tflite_runtime package to evaluate the model (Python 3.7 => cp37).
# !pip3 install https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp37-cp37m-linux_x86_64.whl
# + id="KA__iI2R63UF"
# Import the lightweight TFLite runtime; warn (but keep going) if it is missing.
try:
    import tflite_runtime.interpreter as tflite
except ModuleNotFoundError:
    print("Did you install the TFLite Runtime? \
https://github.com/ricardodeazambuja/libedgetpu-rpi0/releases/tag/rpi0_tflite_edgetpu")
# Shared-library name of the Edge TPU delegate (looked up by load_delegate below).
EDGETPU_SHARED_LIB = 'libedgetpu.so.1'
# + id="CG7KOKB96zKf"
def load_model(model_file):
    """Create a TFLite interpreter for `model_file` and allocate its tensors.

    Model files whose name contains "edgetpu" are loaded with the Edge TPU
    delegate; everything else runs on the CPU.

    :return: (interpreter, input_details, output_details)
    """
    if "edgetpu" in model_file:
        print("Using Edge TPU...")
        #
        # EdgeTPU Accelerator
        #
        device = []  # I have only one USB accelerator...
        delegate_opts = {'device': device[0]} if device else {}
        delegate = tflite.load_delegate(EDGETPU_SHARED_LIB, delegate_opts)
        tflite_interpreter = tflite.Interpreter(model_path=model_file,
                                                experimental_delegates=[delegate])
    else:
        print("Using CPU...")
        tflite_interpreter = tflite.Interpreter(model_path=model_file)
    tflite_interpreter.allocate_tensors()
    return (tflite_interpreter,
            tflite_interpreter.get_input_details(),
            tflite_interpreter.get_output_details())
# + id="Qy7eCtYj60Ps"
class inference_for_single_image_tflite:
    """Thin wrapper around a TFLite interpreter for single-image detection."""
    def __init__(self, path_to_model):
        # load_model decides between CPU and Edge TPU and allocates tensors.
        interpreter, input_details, output_details = load_model(path_to_model)
        self.interpreter = interpreter
        self.input_details = input_details
        self.output_details = output_details
        # Input tensor shape is [batch, height, width, channels].
        input_shape = self.input_details[0]['shape']
        self.width = input_shape[2]
        self.height = input_shape[1]
    def __call__(self, image_np_expanded):
        """Run inference on one batched image; return every output tensor with the batch axis stripped."""
        self.interpreter.set_tensor(self.input_details[0]['index'], image_np_expanded)
        self.interpreter.invoke()
        outputs = []
        for detail in self.output_details:
            outputs.append(self.interpreter.get_tensor(detail['index'])[0])
        return outputs
# + id="Kyqv1NI1BL8o"
from PIL import Image
from PIL import ImageDraw
from IPython.display import display
def process_bboxes(image, boxes, classes, scores, threshold=0.5,
                   labels={1:'mask', 2: 'no_mask', 3:'poor_mask'},
                   crop=False):
    """Draw detections on `image`, or crop out the first detection.

    :param image: PIL.Image (drawn on in place when crop=False)
    :param boxes: normalized [ymin, xmin, ymax, xmax] boxes in 0..1
    :param classes: per-box class ids; must be keys of `labels`
    :param scores: per-box confidences in [0, 1]
    :param threshold: minimum score (strict > when drawing, >= when cropping,
                      preserved from the original implementation)
    :param labels: class id -> display name (shared mutable default; treat as read-only)
    :param crop: if True, return a crop of the FIRST box at/above threshold
    :return: annotated image, a cropped image, or None (crop=True, nothing found)
    """
    colors = ((128, 255, 102), (102, 255, 255), (232, 123, 212))
    colors = dict(zip(labels.keys(), colors))
    image_width, image_height = image.size
    if not crop:
        draw = ImageDraw.Draw(image)
        # Visualization of the results of a detection.
        for i in range(len(boxes)):
            if scores[i] > threshold:
                ymin = int(max(1, (boxes[i][0] * image_height)))
                xmin = int(max(1, (boxes[i][1] * image_width)))
                ymax = int(min(image_height, (boxes[i][2] * image_height)))
                xmax = int(min(image_width, (boxes[i][3] * image_width)))
                class_id = int(classes[i])
                draw.rectangle((xmin, ymin, xmax, ymax), width=3, outline=colors[class_id])
                text = f'{labels[class_id]} {scores[i]*100:1.2f}%'
                # The original passed width=2 to draw.text(); ImageDraw.text has
                # no such parameter, so the kwarg did nothing and is dropped.
                draw.text((xmin+3, ymin-10), text, fill=colors[class_id])
        return image
    else:
        for i in range(len(boxes)):
            if scores[i] >= threshold:
                ymin = int(max(1, (boxes[i][0] * image_height)))
                xmin = int(max(1, (boxes[i][1] * image_width)))
                ymax = int(min(image_height, (boxes[i][2] * image_height)))
                xmax = int(min(image_width, (boxes[i][3] * image_width)))
                return image.crop((xmin, ymin, xmax, ymax))
        return None  # explicit: no detection met the threshold (was an implicit fall-through)
# + colab={"base_uri": "https://localhost:8080/"} id="I-gjQdMeBIMz" outputId="05c60883-cba5-4819-fc6e-c40866ead76d"
PATH_TO_TFLITE = model_path + 'ssdlite_mobiledet_mask.tflite'
run_inference_for_single_image_tflite_ssd = inference_for_single_image_tflite(PATH_TO_TFLITE)
# + colab={"base_uri": "https://localhost:8080/"} id="3SnBaIBZ9aoo" outputId="553d2301-fc4e-44b4-b838-ac742c26072f"
run_inference_for_single_image_tflite_ssd.input_details
# + id="2CK4XGX9DfXj"
# # !wget --no-clobber https://images.canadagoose.com/image/upload/w_1333,c_scale,f_auto,q_auto:best/v1601577550/product-image/5558U_1073.jpg -O mask_example.jpg
# img_filename = "mask_example.jpg"
img_filename = "poor_mask_test.png"
# + id="u8Pw4J2iBIfC"
import numpy as np
# Single-image smoke test of the tflite model.
image = Image.open(img_filename)
image = image.convert('RGB')
image_width, image_height = image.size
# tflite will not have the image resizing in the model
input_width = run_inference_for_single_image_tflite_ssd.width
input_height = run_inference_for_single_image_tflite_ssd.height
resized_image = image.resize((input_width, input_height))
np_image = np.asarray(resized_image)
# Add the batch dimension expected by the interpreter.
image_np_expanded = np.expand_dims(np_image, axis=0)
# Actual detection.
boxes, classes, scores, _ = run_inference_for_single_image_tflite_ssd(image_np_expanded)
classes = [i+1 for i in classes] # class indices here start at 0
# + colab={"base_uri": "https://localhost:8080/"} id="gRGq244HlqMf" outputId="fc54e9b5-8dd4-4f02-a7a1-5e6ef10f2417"
classes
# + colab={"base_uri": "https://localhost:8080/"} id="y93iJv-ynz6j" outputId="2147e500-5309-413e-ee44-259a7bcc710e"
scores
# + id="ZagH7OqlBIm7"
resized_image
# + id="3-yg9voYD2lh"
process_bboxes(resized_image.copy(), boxes, classes, scores, threshold=0.5)
# + id="0N3DTXB4Cd_y"
# Now we do evaluation on the tflite model.
from tflite_runtime.interpreter import Interpreter
from tflite_runtime.interpreter import load_delegate
PATH_TO_TFLITE = model_path + 'ssdlite_mobiledet_mask.tflite'
class inference_for_single_image_tflite:
    """Minimal TFLite SSD wrapper returning (boxes, classes, scores) for one image."""
    def __init__(self, path_to_model):
        self.interpreter = Interpreter(path_to_model)
        self.interpreter.allocate_tensors()
        self.interpreter.invoke()  # warmup
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()
        # Input tensor shape is [batch, height, width, channels].
        shape = self.input_details[0]['shape']
        self.width = shape[2]
        self.height = shape[1]
    def __call__(self, image_np_expanded):
        """Run detection and strip the batch dimension from each output tensor."""
        self.interpreter.set_tensor(self.input_details[0]['index'], image_np_expanded)
        self.interpreter.invoke()
        get_tensor = self.interpreter.get_tensor
        boxes = get_tensor(self.output_details[0]['index'])[0]
        classes = get_tensor(self.output_details[1]['index'])[0]
        scores = get_tensor(self.output_details[2]['index'])[0]
        return boxes, classes, scores
# Instantiate the interpreter-based wrapper (this class shadows the earlier one of the same name).
run_inference_for_single_image_tflite = inference_for_single_image_tflite(PATH_TO_TFLITE)
# + colab={"base_uri": "https://localhost:8080/"} id="xxgUtz9iDRad" outputId="b064daf7-4f5f-438a-f128-6d51fc92110b"
# Evaluate the tflite model on the same test set as the frozen graph.
results_tflite = []
for image_path in TEST_IMAGE_PATHS:
    print('Evaluating:', image_path)
    image = Image.open(image_path)
    filename = image.filename.split('/')[-1]
    image = image.convert('RGB')
    image_id = int(filename.split('.')[0])
    image_width, image_height = image.size
    # tflite will not have the image resizing in the model
    input_width = run_inference_for_single_image_tflite.width
    input_height = run_inference_for_single_image_tflite.height
    if image_width!=input_width or image_height!=input_height:
        resized_image = image.resize((input_width, input_height))
        np_image = np.asarray(resized_image)
    else:
        np_image = np.asarray(image)
    image_np_expanded = np.expand_dims(np_image, axis=0)
    # Actual detection.
    boxes, classes, scores = run_inference_for_single_image_tflite(image_np_expanded)
    classes = [i+1 for i in classes] # class indices here start at 0
    # Boxes are normalized, so the ORIGINAL image size is stored for de-normalization.
    results_tflite.append((boxes, classes, scores, image.size, image_id))
    # display(draw_bboxes(image, boxes, classes, scores, labels={1:'mask', 2: 'no_mask', 3:'poor_mask'}))
# + id="VFI3uhw0KJiH"
# Save the annotations dictionary as a json file
# returning a list of image_ids (necessary to calculate the averages)
imgIds = results2cocojson(results_tflite, "/content/dataset/cocoRes_tflite.json")
# + colab={"base_uri": "https://localhost:8080/"} id="U2TXl4rgKP_f" outputId="66633930-52e9-48f2-ce74-414fcc48c7a3"
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
# Same COCO evaluation as for the frozen graph, now on the tflite detections.
cocoGt=COCO("/content/dataset/cocoGT.json")
cocoDt=cocoGt.loadRes("/content/dataset/cocoRes_tflite.json")
cocoEval = COCOeval(cocoGt,cocoDt,"bbox")
cocoEval.params.imgIds = imgIds
cocoEval.params.areaRng = [
    [0 ** 2, 1e5 ** 2], # all - objects that have area between 1 sq pixels and 1e10 (10_000_000_000) sq pixels
    [0 ** 2, 32 ** 2], # small - objects that have area between 1 sq pixels and 32^2 (1024) sq pixels
    [32 ** 2, 96 ** 2], # medium - objects that have area between 32^2 (1024) sq pixels and 96^2 (9216) sq pixels
    [96 ** 2, 1e5 ** 2] # large - objects that have area between 96^2 (9216) sq pixels and 1e10 (10_000_000_000) sq pixels
]
cocoEval.params.areaRngLbl = ['all', 'small', 'medium', 'large'] # labels for areaRng
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
# + colab={"base_uri": "https://localhost:8080/"} id="VYuDvA8aUkVx" outputId="ade9cb35-c291-4792-b531-744a101364d9"
# !pip install git+git://github.com/ricardodeazambuja/colab_utils.git
# + id="80lZ9OUlUqNg"
from colab_utils import webcam2numpy
# https://github.com/ricardodeazambuja/colab_utils/blob/master/Testing_colab_utils.ipynb
# + id="k85vAmjlVDii"
# Capture an image from your webcam
img_np = webcam2numpy()
# + id="KT9amdWGVd_M"
# we need to use PIL to more easily convert...
image = Image.fromarray(img_np)
image_width, image_height = image.size
# The model only accepts RGB (3 layers)
image = image.convert('RGB')
# tflite will not have the image resizing in the model
# so we check the model for input size
input_width = run_inference_for_single_image_tflite.width
input_height = run_inference_for_single_image_tflite.height
# resize the image
resized_image = image.resize((input_width, input_height))
# transform it into numpy array
np_image = np.asarray(resized_image)
# add the extra dimension (remember the batches...)
image_np_expanded = np.expand_dims(np_image, axis=0)
# Actual detection.
boxes, classes, scores = run_inference_for_single_image_tflite(image_np_expanded)
# Visualization of the results of a detection.
display(process_bboxes(image, boxes, classes, scores, labels={0:'mask', 1: 'no_mask', 2:'poor_mask', 3:'???'}))
# + colab={"base_uri": "https://localhost:8080/"} id="BQE-bXg7-Y6Q" outputId="179b0506-8a05-48c7-fb63-cd73200230e2"
classes, scores
# + [markdown] id="RcUJ8rHHg-Uj"
# Testing with images saved by the raspicam v2 and uploaded here:
# + id="9PGTJJCYVgiU"
# we need to use PIL to more easily convert...
# image = Image.open("mask_test.png")
# image = Image.open("nomask_far_test.png")
# image = Image.open("nomask_test_colab.png")
image = Image.open("poor_mask_test.png")
image_width, image_height = image.size
# The model only accepts RGB (3 layers)
image = image.convert('RGB')
# tflite will not have the image resizing in the model
# so we check the model for input size
input_width = run_inference_for_single_image_tflite.width
input_height = run_inference_for_single_image_tflite.height
# resize the image
resized_image = image.resize((input_width, input_height))
# transform it into numpy array
np_image = np.asarray(resized_image)
# add the extra dimension (remember the batches...)
image_np_expanded = np.expand_dims(np_image, axis=0)
# Actual detection.
boxes, classes, scores = run_inference_for_single_image_tflite(image_np_expanded)
# Visualization of the results of a detection.
# NOTE(review): classes are unshifted here, hence the 0-indexed labels dict.
display(draw_bboxes(image, boxes, classes, scores, labels={0:'mask', 1: 'no_mask', 2:'poor_mask'}))
# + colab={"base_uri": "https://localhost:8080/"} id="VZ_XEh7-8ehH" outputId="3bffcbf0-d4f6-415e-e720-4bdb1e93a700"
scores
# + colab={"base_uri": "https://localhost:8080/"} id="c8a85NY68jsc" outputId="671878ae-7a25-4a41-a6a2-a331e02ba83b"
classes
# + id="QceqdxhnftbP"
# we need to use PIL to more easily convert...
image = Image.open("nomask_test.png")
image_width, image_height = image.size
# The model only accepts RGB (3 layers)
image = image.convert('RGB')
# tflite will not have the image resizing in the model
# so we check the model for input size
input_width = run_inference_for_single_image_tflite.width
input_height = run_inference_for_single_image_tflite.height
# resize the image
resized_image = image.resize((input_width, input_height))
# transform it into numpy array
np_image = np.asarray(resized_image)
# add the extra dimension (remember the batches...)
image_np_expanded = np.expand_dims(np_image, axis=0)
# Actual detection.
boxes, classes, scores = run_inference_for_single_image_tflite(image_np_expanded)
# Visualization of the results of a detection.
# NOTE(review): classes are unshifted here, hence the 0-indexed labels dict.
display(draw_bboxes(image, boxes, classes, scores, labels={0:'mask', 1: 'no_mask', 2:'poor_mask'}))
# + colab={"base_uri": "https://localhost:8080/"} id="IG6yp5xCg8AH" outputId="0f1b87f7-e389-4146-a117-f2af9164b6e3"
scores
# + colab={"base_uri": "https://localhost:8080/"} id="PcNwfZ4M4zJP" outputId="b0bfe47b-2f8e-4a62-bfd9-cf709e6cd34d"
classes
| EdgeTPU_MaskDetection_Testing_(using_COCO_API).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:root] *
# language: python
# name: conda-root-py
# ---
# # Dask Distributed Runtime
#
# <img src="../_static/images/dask_horizontal.svg" width="300"> <br>
#
# ```{admonition} Quick Links
# * [Running Dask Over MLRun](./dask-mlrun.ipynb)
# * [Pipelines Using Dask, Kubeflow and MLRun](./dask-pipeline.ipynb)
# ```
#
# ## Dask Overview
# Source: [Dask docs](https://docs.dask.org/en/latest/)<br>
# Dask is a flexible library for parallel computing in Python.
#
# Dask is composed of two parts:
#
# 1. **Dynamic task scheduling** optimized for computation. This is similar to Airflow, Luigi, Celery, or Make, but optimized for interactive computational workloads.
# 2. **“Big Data” collections** like parallel arrays, dataframes, and lists that extend common interfaces like NumPy, Pandas, or Python iterators to larger-than-memory or distributed environments. These parallel collections run on top of dynamic task schedulers.
#
# **Dask emphasizes the following virtues:**
#
# * **Familiar**: Provides parallelized NumPy array and Pandas DataFrame objects<br>
# * **Flexible**: Provides a task scheduling interface for more custom workloads and integration with other projects.<br>
# * **Native**: Enables distributed computing in pure Python with access to the PyData stack.<br>
# * **Fast**: Operates with low overhead, low latency, and minimal serialization necessary for fast numerical algorithms<br>
# * **Scales up**: Runs resiliently on clusters with 1000s of cores<br>
# * **Scales down**: Trivial to set up and run on a laptop in a single process<br>
# * **Responsive**: Designed with interactive computing in mind, it provides rapid feedback and diagnostics to aid humans<br>
# Dask collections and schedulers
#
# <img src="../_static/images/dask-overview.svg" width="800"><br>
#
# ## Dask DataFrame mimics Pandas
#
# ```python
# # Pandas                               # Dask
# import pandas as pd                    # import dask.dataframe as dd
# df = pd.read_csv('2015-01-01.csv')     # df = dd.read_csv('2015-*-*.csv')
# df.groupby(df.user_id).value.mean()    # df.groupby(df.user_id).value.mean().compute()
# ```
# ## Dask Array mimics NumPy - documentation
#
# ```python
# # NumPy                                # Dask
# import numpy as np                     # import dask.array as da
# f = h5py.File('myfile.hdf5')           # f = h5py.File('myfile.hdf5')
# x = np.array(f['/small-data'])         # x = da.from_array(f['/big-data'],
#                                        #                   chunks=(1000, 1000))
# x - x.mean(axis=1)                     # x - x.mean(axis=1).compute()
# ```
#
# Dask Bag mimics iterators, Toolz, and PySpark - documentation
#
# ```python
# import dask.bag as db
# b = db.read_text('2015-*-*.json.gz').map(json.loads)
# b.pluck('name').frequencies().topk(10, lambda pair: pair[1]).compute()
# ```
#
# Dask Delayed mimics for loops and wraps custom code - documentation
#
# ```python
# from dask import delayed
# L = []
# for fn in filenames: # Use for loops to build up computation
# data = delayed(load)(fn) # Delay execution of function
# L.append(delayed(process)(data)) # Build connections between variables
#
# result = delayed(summarize)(L)
# result.compute()
# ```
#
# The concurrent.futures interface provides general submission of custom tasks: - documentation
#
# ```python
# from dask.distributed import Client
# client = Client('scheduler:port')
#
# futures = []
# for fn in filenames:
# future = client.submit(load, fn)
# futures.append(future)
#
# summary = client.submit(summarize, futures)
# summary.result()
# ```
#
# ## Dask.distributed<br>
# `Dask.distributed` is a lightweight library for distributed computing in Python. It extends both the concurrent.futures and dask APIs to moderate sized clusters.<br>
#
# <img src="../_static/images/dask_dist.png" width="800"><br>
#
# ### Motivation
# Distributed serves to complement the existing PyData analysis stack. In particular it meets the following needs:
#
# * **Low latency**: Each task suffers about 1ms of overhead. A small computation and network roundtrip can complete in less than 10ms.<br>
# * **Peer-to-peer data sharing**: Workers communicate with each other to share data. This removes central bottlenecks for data transfer.<br>
# * **Complex Scheduling**: Supports complex workflows (not just map/filter/reduce) which are necessary for sophisticated algorithms used in nd-arrays, machine learning, image processing, and statistics.<br>
# * **Pure Python**: Built in Python using well-known technologies. This eases installation, improves efficiency (for Python users), and simplifies debugging.<br>
# * **Data Locality**: Scheduling algorithms cleverly execute computations where data lives. This minimizes network traffic and improves efficiency.<br>
# * **Familiar APIs**: Compatible with the concurrent.futures API in the Python standard library. Compatible with dask API for parallel algorithms<br>
# * **Easy Setup**: As a Pure Python package distributed is pip installable and easy to set up on your own cluster.<br>
#
# ### Architecture
# `Dask.distributed` is a centrally managed, distributed, dynamic task scheduler. The central `dask-scheduler` process coordinates the actions of several `dask-worker` processes spread across multiple machines and the concurrent requests of several clients.
#
# The scheduler is asynchronous and event driven, simultaneously responding to requests for computation from multiple clients and tracking the progress of multiple workers. The event-driven and asynchronous nature makes it flexible to concurrently handle a variety of workloads coming from multiple users at the same time while also handling a fluid worker population with failures and additions. Workers communicate amongst each other for bulk data transfer over TCP.
#
# Internally the scheduler tracks all work as a constantly changing directed acyclic graph of tasks. A task is a Python function operating on Python objects, which can be the results of other tasks. This graph of tasks grows as users submit more computations, fills out as workers complete tasks, and shrinks as users leave or become disinterested in previous results.
#
# Users interact by connecting a local Python session to the scheduler and submitting work, either by individual calls to the simple interface `client.submit(function, *args, **kwargs)` or by using the large data collections and parallel algorithms of the parent dask library. The collections in the dask library like `dask.array` and `dask.dataframe` provide easy access to sophisticated algorithms and familiar APIs like NumPy and Pandas, while the simple `client.submit` interface provides users with custom control when they want to break out of canned “big data” abstractions and submit fully custom workloads.
# ## ~5X Faster with Dask
#
# A short example that demonstrates the power of Dask. In this notebook we will perform the following:
# * Generate random text files
# * Process each file by sorting and counting its content
# * Compare run times
# ### Generate Random Text Files
# +
import random
import string
import os
from collections import Counter
from dask.distributed import Client
import warnings
warnings.filterwarnings('ignore')
# -
def generate_big_random_letters(filename, size):
    """
    Write `size` random ASCII letters to a file.

    :param filename: path of the file to create (overwritten if it exists)
    :param size: number of characters to write (equals bytes, since letters are ASCII)
    :return: None
    """
    # random.choices draws all characters in a single call instead of the
    # original Python-level loop of random.choice (same distribution).
    chars = ''.join(random.choices(string.ascii_letters, k=size))
    with open(filename, 'w') as f:
        f.write(chars)
# +
# Generate the benchmark corpus: 100 files x 10 MB each (~1 GB of disk).
PATH = '/User/howto/dask/random_files'
SIZE = 10000000
for i in range(100):
    generate_big_random_letters(filename = PATH + '/file_' + str(i) + '.txt',
                                size = SIZE)
# -
# ### Set Function for Benchmark
def count_letters(path):
    """
    Count the characters in a text file.

    :param path: path to file
    :return: number of characters in the file
    """
    # Use a context manager so the handle is always closed (the original
    # leaked it). The original also sorted the data before counting, but
    # len(sorted(data)) == len(data), so the O(n log n) sort contributed
    # nothing to the result and is dropped.
    with open(path, "r") as file:
        data = file.read()
    return len(data)
def process_files(path):
    """
    Count the letters of every file in a folder.

    :param path: path to folder with files
    :return: list of per-file character counts. (The original stored this in a
             dead local `l` and returned None via `return print(...)`; returning
             the list is backward compatible because existing callers ignore the
             return value.)
    """
    num_list = []
    for file in os.listdir(path):
        num_list.append(count_letters(os.path.join(path, file)))
    print("done!")  # keep the original completion message
    return num_list
# ### Sort & Count Number of Letters with Python
# %%time
PATH = '/User/howto/dask/random_files/'
process_files(PATH)
# ### Sort & Count Number of Letters with Dask
# get the dask client address
client = Client()
# list all files in folder
files = [PATH + x for x in os.listdir(PATH)]
# %%time
# run the count_letter function on a list of files while using multiple workers
a = client.map(count_letters, files)
# %%time
# gather results
l = client.gather(a)
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Additional Topics
#
# ```{toctree}
# :maxdepth: 2
# dask-mlrun
# dask-pipeline
# ```
#
| docs/runtimes/dask-overview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.11 64-bit (''base'': conda)'
# name: python3
# ---
# # RANS from DNS
# Our goal here, in general, is to find new valid equations that describe fluid flow.
# We will try to find new closures for the steady RANS equations based on direct numerical
# simulation (DNS) of a boundary layer.
#
#
# ## Steady RANS equations with generic Reynolds stress effects
#
# $$
# (\vec{U} \cdot \nabla) \vec{U}
# + \frac{1}{\rho} \nabla P
# - \nu \nabla^2 \vec{U}
# = \mathbf{R},
# $$
#
# where in this case $\mathbf{R}$ is simply the effects of the Reynolds stresses (i.e., the opposite of the gradient), not the Reynolds stresses themselves.
#
# Some ideas for what $\mathbf{R}$ could be:
#
# $$
# \mathbf{R} = A\nabla P^2 + B\nabla K + C \nabla \times \vec{U}
# + D\nabla(\nabla \times \vec{U})^2
# + E \vec{U}
# $$
#
#
# ## Algorithm
#
# 1. Pick terms (in addition to non-Reynolds stress Navier--Stokes terms).
# 2. Create a random list of points in space that is at least as large as the number
# of terms.
# 3. At each point, acquire all data for all terms for all times.
# 4. Average data at each point for all times.
# 5. Solve for coefficients using a linear model.
# ## Terms
#
# $$
# U \frac{\partial U}{\partial x}
# + V \frac{\partial U}{\partial y} + W \frac{\partial U}{\partial z}
# + \frac{1}{\rho}\frac{\partial P}{\partial x}
# - \nu \left(
# \frac{\partial^2 U}{\partial x^2}
# + \frac{\partial^2 U}{\partial y^2}
# + \frac{\partial^2 U}{\partial z^2}
# \right)
# $$
#
# $$
# =
# A \left( \frac{\partial U}{\partial x} \right)^2
# + B \left( \frac{\partial U}{\partial y} \right)^2
# + C \left( \frac{\partial U}{\partial z} \right)^2
# + D \left( \frac{\partial P}{\partial x} \right)^2
# + E \frac{\partial^2 P}{\partial x^2}
# + F U \frac{\partial P}{\partial x}
# $$
#
# $$
# U \frac{\partial V}{\partial x}
# + V \frac{\partial V}{\partial y}
# + W \frac{\partial V}{\partial z}
# + \frac{1}{\rho}\frac{\partial P}{\partial y}
# - \nu \left(
# \frac{\partial^2 V}{\partial x^2}
# + \frac{\partial^2 V}{\partial y^2}
# + \frac{\partial^2 V}{\partial z^2}
# \right)
# $$
#
# $$
# =
# A \left( \frac{\partial V}{\partial x} \right)^2
# + B \left( \frac{\partial V}{\partial y} \right)^2
# + C \left( \frac{\partial V}{\partial z} \right)^2
# + D \left( \frac{\partial P}{\partial y} \right)^2
# + E \frac{\partial^2 P}{\partial y^2}
# + F V \frac{\partial P}{\partial y}
# $$
# ## Terms in index notation
#
# To be general and consistent, since we don't have any x- or z-variation
#
# $$
# \frac{\partial U_i}{\partial t} + U_j \frac{\partial U_i}{\partial x_j}
# + \frac{1}{\rho}\frac{\partial P}{\partial x_i}
# - \nu \frac{\partial ^2 U_i}{\partial x_j x_j}
# =
# A \frac{(\partial U_i)^2}{\partial x_j \partial x_j}
# + B \frac{\partial U_j U_j}{\partial x_i}
# + C \frac{\partial P^2}{\partial x_i}
# + D \left( \frac{\partial P}{\partial x_i} \right)^2
# + E U_j \frac{\partial P}{\partial x_j}
# $$
# What these coefficients describe:
#
# * $A$: The square of the velocity gradient
# * $B$: The gradient of kinetic energy
# * $C$: The gradient of squared pressure
# * $D$: The square of the pressure gradient
#
# Other possible quantities
# * Absolute distance from a solid boundary
# +
# %load_ext autoreload
# %autoreload 2
import matplotlib
matplotlib.use("nbAgg")
import h5py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import seaborn
import jhtdb
seaborn.set()
# %matplotlib inline
# -
# ## Using data from pyJHTDB
#
# 1. Pick a bunch of points randomly throughout the domain, at least more than the number of terms we want to test.
# 2. Add points in each direction for computing spatial derivatives.
# 3. Get $\vec{u}$, $p$, and their gradients for all time at all points in the list.
# 4. Calculate terms based on mean values.
# 5. Use a regression model to determine coefficients on each term.
# 6. Repeat this process to ensure the coefficients don't change?
# 7. Run a RANS simulation with this new model and check the results against the mean profiles.
df = jhtdb.read_stats()
# Let's check continuity
div = df["dudx"] + df["dvdy"] + df["dwdz"]
div.describe()
# The check above gives us an idea on how accurate these gradient calculations
# are.
df.columns
# +
ax = (
df.loc[df.index.get_level_values("x")[-1000]]
.reset_index()
.plot(x="u", y="y", legend=False, ylabel="$U$", xlabel="$y$")
)
# +
# Check the momentum equation
from jhtdb import nu, rho
# Mean-flow terms of the x-momentum (RANS) equation, without the
# Reynolds-stress contribution; see the equations in the markdown above.
momx_no_res = (
    df.u * df.dudx
    + df.v * df.dudy
    + df.w * df.dudz
    + (1 / rho) * df.dpdx
    - nu * (df.d2udx2 + df.d2udy2 + df.d2udz2)
)
# Adding back the finite-difference Reynolds-stress divergence terms
# d(u'u')/dx + d(u'v')/dy should close the balance (statistics near zero).
momx = momx_no_res + (df.duudx_fd + df.duvdy_fd)
momx.dropna().describe()
# -
# ## Check the $y$-component of the momentum equation
#
# $$
# \frac{\partial V}{\partial t}
# + U \frac{\partial V}{\partial x}
# + V \frac{\partial V}{\partial y}
# + W \frac{\partial V}{\partial z}
# = - \frac{1}{\rho} \frac{\partial P}{\partial y}
# + \nu \left(
# \frac{\partial^2 V}{\partial x^2}
# + \frac{\partial^2 V}{\partial y^2}
# + \frac{\partial^2 V}{\partial z^2}
# \right)
# $$
#
# $$
# - \left(
# \frac{\partial \overline{u'v'}}{\partial x}
# + \frac{\partial \overline{v'v'}}{\partial y}
# + \frac{\partial \overline{v'w'}}{\partial z}
# \right
# )
# $$
# +
# Check the momentum equation
from jhtdb import nu, rho
# Mean-flow terms of the y-momentum (RANS) equation, without the
# Reynolds-stress contribution. Mirrors the x-momentum cell above.
momy_no_res = (
    df.u * df.dvdx
    + df.v * df.dvdy
    + df.w * df.dvdz
    + (1 / rho) * df.dpdy
    # Fixed: the z second-derivative previously used the w-component
    # (d2wdz2); the y-momentum viscous term needs d2vdz2.
    - nu * (df.d2vdx2 + df.d2vdy2 + df.d2vdz2)
)
# Residual should be balanced by -(d(u'v')/dx + d(v'v')/dy), per the
# equation in the markdown above.
# NOTE(review): `df.duvdy_fd` looks like it should be the d(v'v')/dy
# column (e.g. `dvvdy_fd`) — confirm against the jhtdb column naming.
momy = momy_no_res + (df.duvdx + df.duvdy_fd)
momy_no_res.dropna().describe()
# -
momy_no_res.plot.hist()
# +
# Let's see how large the Reynold's stress residual is at different locations
df1 = df.dropna().reset_index()
df1.plot.scatter(
x="x", y="y", color=momy_no_res.dropna().values, cmap="viridis"
)
# +
# TODO: Compute a bunch of quantities and add to the data
# Mean kinetic energy
# Squared gradients
# Gradients multiplied by each other
# Gradients multiplied by mean values
# +
# Compute the Reynolds stress residual as a target for an ML model
# Solve a linear regression for the coefficients of all derived terms
# Throw out terms with coefficients below a threshold
# +
# TODO: Write as a RANS model for OpenFOAM and solve this same problem there
# First run a baseline case with a high Re kOmegaSST model
# How to handle wall functions?
# +
# TODO: Check mean flow from OpenFOAM simulation matches DNS
| notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.5
# language: python
# name: python3
# ---
1 + 1
2 + 2
# # My first IBM Watson Project
#
# Build "night" names from the first three letters of each day name.
# Fixed: the original list omitted 'Saturday', leaving only six days.
daysofweek = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']
nightsofweek = [s[0:3] + "night" for s in daysofweek]
print(nightsofweek)
| MyPythonNotebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Open Addressing - Double hashing
# Given:
# - Hashing function H1 = **(2 * Key) + 3**
# - Hashing function H2 = **(3 * Key) + 1**
# - Hash Table size = M = 10
#
# - To use Division Method for Hashing.
#
# - When collision occurs we will insert **Key** at first free place from **(U + V * i) % 10** where i = {0 to (M-1)}
# +
def DoubleHashing(arr, M):
    """Insert the keys of `arr` into a size-M hash table using double hashing.

    Primary hash H1(key) = (2*key + 3) mod M; on a collision, probing is
    delegated to `get_location_DoubleHashing` (secondary hash
    H2(key) = (3*key + 1) mod M). Keys that cannot be placed are dropped.

    Returns (probe_table, hash_table): probes used per filled slot and the
    final table contents (None marks an empty slot).
    """
    hash_table = [None for i in range(M)]
    probe_table = [1 for i in range(M)]
    for key in arr:
        # Fixed: use the table size M instead of a hard-coded 10 so any
        # table size works (M=10 preserves the original behavior).
        hash_value = (2 * key + 3) % M
        if hash_table[hash_value] is None:
            hash_table[hash_value] = key
            # Fixed: a direct placement always takes exactly one probe.
            # Previously this recorded a stale `probe` value left over from
            # an earlier key's collision resolution (possibly even the -1
            # failure code).
            probe_table[hash_value] = 1
        else:
            probe, index = get_location_DoubleHashing(key, hash_value, hash_table)
            if index != -1:
                hash_table[index] = key
                probe_table[index] = probe
    return probe_table, hash_table

def get_location_DoubleHashing(key, hash_value, hash_table):
    """Find a free slot for `key` via double hashing.

    Probes (U + V*i) % M for i in 0..M-1, where U is the primary hash and
    V = (3*key + 1) % M. Returns (probe_count, index) on success or
    (-1, -1) when no free slot is reachable (e.g. when V == 0, every probe
    lands on the occupied slot U).
    """
    M = len(hash_table)
    probe = 1
    U = hash_value
    V = (3 * key + 1) % M
    for i in range(M):
        probe += 1
        new_loc = (U + V * i) % M
        if hash_table[new_loc] is None:
            return probe, new_loc
    return -1, -1
def display_probes_HashTable(probe_table, hash_table):
    """Print each slot's index, key, and probe count ('-' / 'X' for an empty slot)."""
    M = len(hash_table)
    print("Index | Key | Probe")
    for i in range(M):
        # Fixed idiom: compare against None with `is`, not `==`.
        if hash_table[i] is None:
            key = "-"
            probe = "X"
        else:
            key = hash_table[i]
            probe = probe_table[i]
        print(i," | ", key, " | ", probe)
# -
# Demo: build the table from a list of keys, then show probes per slot.
arr = [3, 2, 9, 6, 11, 13, 7, 12]
M = 10  # hash table size; note H2(13) % 10 == 0, so key 13 can never be placed
probe_table, hash_table = DoubleHashing(arr, M)
print(hash_table)
print()
display_probes_HashTable(probe_table, hash_table)
| DataStructures/Primary_DataStructures/Hash_Tables/.ipynb_checkpoints/Open_Addressing_Double_Hashing-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorwatch as tw
# from tensorwatch import utils
# utils.set_debug_verbosity(5)
r = tw.render()
r.show()
r2=tw.render('map(lambda x:math.sqrt(x.sum), l)', cell=r.cell)
r3=tw.render('map(lambda x:math.sqrt(x.sum), l)', renderer=r2)
r3=tw.render('map(lambda x:math.sqrt(x.sum), l)', cell=r.cell, only_summary=True)
| notebooks/archive/text_sum_log.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="RvwFrkzSPbw7" colab_type="text"
# # PyTorch
#
# In this notebook, we'll learn the basics of [PyTorch](https://pytorch.org), which is a machine learning library used to build dynamic neural networks. We'll learn about the basics, like creating and using Tensors.
# + [markdown] id="0aqN-ffaP4t1" colab_type="text"
# <div align="left">
# <a href="https://github.com/madewithml/basics/blob/master/notebooks/06_PyTorch.ipynb" role="button"><img class="notebook-badge-image" src="https://img.shields.io/static/v1?label=&message=View%20On%20GitHub&color=586069&logo=github&labelColor=2f363d"></a>
# <a href="https://colab.research.google.com/github/madewithml/basics/blob/master/notebooks/06_PyTorch.ipynb"><img class="notebook-badge-image" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
# </div>
# + [markdown] id="SFa_PSr2tvaC" colab_type="text"
# # Set seeds
# + id="eLAkqoRKtyFD" colab_type="code" colab={}
import numpy as np
import torch
# + id="l9krh147uJOV" colab_type="code" colab={}
SEED = 1234
# + id="1uLEnBgft22Y" colab_type="code" outputId="81fb9f46-9b61-4858-a591-aee25b32f74b" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Set seed for reproducibility
np.random.seed(seed=SEED)
torch.manual_seed(SEED)
# + [markdown] id="08AUKP9xu8YQ" colab_type="text"
# # Basics
# + id="o3jBRfYZuNqF" colab_type="code" outputId="f40c3f56-ef66-45c4-e1cc-140f5267a44c" colab={"base_uri": "https://localhost:8080/", "height": 102}
# Creating a random tensor
x = torch.randn(2, 3) # normal distribution (rand(2,3) -> uniform distribution)
print(f"Type: {x.type()}")
print(f"Size: {x.shape}")
print(f"Values: \n{x}")
# + id="Pho5A7JluNvj" colab_type="code" outputId="2904d1ef-5e23-4fd2-d8ac-e90d9c2b415b" colab={"base_uri": "https://localhost:8080/", "height": 85}
# Zero and Ones tensor
x = torch.zeros(2, 3)
print (x)
x = torch.ones(2, 3)
print (x)
# + id="UTecl1RduNtL" colab_type="code" outputId="4980a540-a66a-4e98-a9e8-0849827efb58" colab={"base_uri": "https://localhost:8080/", "height": 85}
# List → Tensor
x = torch.Tensor([[1, 2, 3],[4, 5, 6]])
print(f"Size: {x.shape}")
print(f"Values: \n{x}")
# + id="2OQTnxWOuNnY" colab_type="code" outputId="9b172f99-a46b-4311-9367-0f6894ff2666" colab={"base_uri": "https://localhost:8080/", "height": 85}
# NumPy array → Tensor
x = torch.Tensor(np.random.rand(2, 3))
print(f"Size: {x.shape}")
print(f"Values: \n{x}")
# + id="8K2kWrkZuilf" colab_type="code" outputId="8049a7a6-c565-4c16-a6a0-27fa89a58751" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Changing tensor type
x = torch.Tensor(3, 4)
print(f"Type: {x.type()}")
x = x.long()
print(f"Type: {x.type()}")
# + [markdown] id="6LxCmxqFu6sq" colab_type="text"
# # Operations
# + id="yfYLm_1Buixy" colab_type="code" outputId="12a99436-64ac-42e1-89be-409f23adf757" colab={"base_uri": "https://localhost:8080/", "height": 85}
# Addition
x = torch.randn(2, 3)
y = torch.randn(2, 3)
z = x + y
print(f"Size: {z.shape}")
print(f"Values: \n{z}")
# + id="22abfI18uiuw" colab_type="code" outputId="b82398cd-cc4d-4d8a-9255-03d0fa49ef7a" colab={"base_uri": "https://localhost:8080/", "height": 85}
# Dot product
x = torch.randn(2, 3)
y = torch.randn(3, 2)
z = torch.mm(x, y)
print(f"Size: {z.shape}")
print(f"Values: \n{z}")
# + id="p1ztJNrruiqv" colab_type="code" outputId="b7c10009-0881-4c14-c643-ba5003853948" colab={"base_uri": "https://localhost:8080/", "height": 170}
# Transpose
x = torch.randn(2, 3)
print(f"Size: {x.shape}")
print(f"Values: \n{x}")
y = torch.t(x)
print(f"Size: {y.shape}")
print(f"Values: \n{y}")
# + id="zoLDryFYuioF" colab_type="code" outputId="8c3668f3-b521-48fc-8e22-51977cddc17a" colab={"base_uri": "https://localhost:8080/", "height": 102}
# Reshape
x = torch.randn(2, 3)
z = x.view(3, 2)
print(f"Size: {z.shape}")
print(f"Values: \n{z}")
# + id="2fdNmFu3vlE7" colab_type="code" outputId="2083c766-8960-43e4-a4f9-d389e7046db2" colab={"base_uri": "https://localhost:8080/", "height": 612}
# Dangers of reshaping (unintended consequences)
x = torch.tensor([
[[1,1,1,1], [2,2,2,2], [3,3,3,3]],
[[10,10,10,10], [20,20,20,20], [30,30,30,30]]
])
print(f"Size: {x.shape}")
print(f"x: \n{x}\n")
a = x.view(x.size(1), -1)
print(f"\nSize: {a.shape}")
print(f"a: \n{a}\n")
b = x.transpose(0,1).contiguous()
print(f"\nSize: {b.shape}")
print(f"b: \n{b}\n")
c = b.view(b.size(0), -1)
print(f"\nSize: {c.shape}")
print(f"c: \n{c}")
# + id="HcW6i9xJwU2Q" colab_type="code" outputId="0cb8f6f0-4f62-4424-8554-b47fcf09e49e" colab={"base_uri": "https://localhost:8080/", "height": 136}
# Dimensional operations
x = torch.randn(2, 3)
print(f"Values: \n{x}")
y = torch.sum(x, dim=0) # add each row's value for every column
print(f"Values: \n{y}")
z = torch.sum(x, dim=1) # add each columns's value for every row
print(f"Values: \n{z}")
# + [markdown] id="kqxljkudzH0M" colab_type="text"
# # Indexing, Splicing and Joining
# + id="Q8-w1Cb3wsj0" colab_type="code" outputId="ae1ad2f2-2c89-496f-9cba-221d7bec8995" colab={"base_uri": "https://localhost:8080/", "height": 153}
x = torch.randn(3, 4)
print (f"x: \n{x}")
print (f"x[:1]: \n{x[:1]}")
print (f"x[:1, 1:3]: \n{x[:1, 1:3]}")
# + id="jBGk_740wsm3" colab_type="code" outputId="9dfafe5a-4e2b-47d1-b1f3-c49278a21195" colab={"base_uri": "https://localhost:8080/", "height": 153}
# Select with dimensional indices
x = torch.randn(2, 3)
print(f"Values: \n{x}")
col_indices = torch.LongTensor([0, 2])
chosen = torch.index_select(x, dim=1, index=col_indices) # values from column 0 & 2
print(f"Values: \n{chosen}")
row_indices = torch.LongTensor([0, 1])
col_indices = torch.LongTensor([0, 2])
chosen = x[row_indices, col_indices] # values from (0, 0) & (1, 2)
print(f"Values: \n{chosen}")
# + id="UI_hboLNwsqQ" colab_type="code" outputId="24125e4f-ac96-49df-e0ea-69d35027de38" colab={"base_uri": "https://localhost:8080/", "height": 153}
# Concatenation
x = torch.randn(2, 3)
print(f"Values: \n{x}")
y = torch.cat([x, x], dim=0) # stack by rows (dim=1 to stack by columns)
print(f"Values: \n{y}")
# + [markdown] id="lK1OQUYL1bE3" colab_type="text"
# # Gradients
# + id="9Ft6PAeW0WCe" colab_type="code" outputId="6d56d9cc-a1df-4afd-9aa7-446e835f7bbd" colab={"base_uri": "https://localhost:8080/", "height": 153}
# Tensors with gradient bookkeeping
x = torch.rand(3, 4, requires_grad=True)
y = 3*x + 2
z = y.mean()
z.backward() # z has to be scalar
print(f"x: \n{x}")
print(f"x.grad: \n{x.grad}")
# + [markdown] id="VF5Q5kfs1rXZ" colab_type="text"
# * $ y = 3x + 2 $
# * $ z = \sum{y}/N $
# * $ \frac{\partial(z)}{\partial(x)} = \frac{\partial(z)}{\partial(y)} \frac{\partial(y)}{\partial(x)} = \frac{1}{N} * 3 = \frac{1}{12} * 3 = 0.25 $
# + [markdown] id="kseQSKj72H8S" colab_type="text"
# # CUDA tensors
# + id="ZE-ZyECv0WOX" colab_type="code" outputId="d497625f-a7d0-4150-a6f6-8bd6d64e3b37" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Is CUDA available?
print (torch.cuda.is_available())
# + [markdown] id="n5sWo3Yv2MxO" colab_type="text"
# If False (CUDA is not available), let's change that by following these steps: Go to *Runtime* > *Change runtime type* > Change *Hardware accelertor* to *GPU* > Click *Save*
# + id="ewamITzX2W-B" colab_type="code" colab={}
import torch
# + id="IwsrvGad2NDO" colab_type="code" outputId="9770c0f0-40bc-45ce-8725-d9842eab1158" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Is CUDA available now?
print (torch.cuda.is_available())
# + id="50ewrqUVCRHg" colab_type="code" outputId="e72f5517-dc4e-4304-9618-eb04168c95a2" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Set device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print (device)
# + id="s12ivWJZCLq7" colab_type="code" outputId="ac5dde7a-fd72-4c80-c6c1-ad2cd19b912b" colab={"base_uri": "https://localhost:8080/", "height": 51}
x = torch.rand(2,3)
print (x.is_cuda)
x = torch.rand(2,3).to(device) # Tensor is stored on the GPU (when CUDA is available)
print (x.is_cuda)
# + [markdown] id="xr1Vsnq7CLpB" colab_type="text"
# ---
# Share and discover ML projects at <a href="https://madewithml.com/">Made With ML</a>.
#
# <div align="left">
# <a class="ai-header-badge" target="_blank" href="https://github.com/madewithml/basics"><img src="https://img.shields.io/github/stars/madewithml/basics.svg?style=social&label=Star"></a>
# <a class="ai-header-badge" target="_blank" href="https://www.linkedin.com/company/madewithml"><img src="https://img.shields.io/badge/style--5eba00.svg?label=LinkedIn&logo=linkedin&style=social"></a>
# <a class="ai-header-badge" target="_blank" href="https://twitter.com/madewithml"><img src="https://img.shields.io/twitter/follow/madewithml.svg?label=Follow&style=social"></a>
# </div>
#
| notebooks/06_PyTorch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Demo: PCA Eigen-Faces
#
# In this demo, we will illustrate how to compute the PCA for a simple data set and use it as a visualization tool.
# ## Loading the data
#
# We begin by loading the standard packages.
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
# %matplotlib inline
# We will demonstrate PCA on a face dataset taken from news articles about 10 years ago. The full data set has thousands of faces, but we will use a small subset here. Since this data set is widely used, it is available as part of the `sklearn` package.
# We first download the data. This is large and can take several minutes.
from sklearn.datasets import fetch_lfw_people
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# Once the data is loaded, we can get see the dimensions
# +
# Get images
n_samples, h, w = lfw_people.images.shape
npix = h*w
# Data in 2D form
X = lfw_people.data
n_features = X.shape[1]
# Labels of images
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Image size = {0:d} x {1:d} = {2:d} pixels".format(h,w,npix))
print("Number faces = {0:d}".format(n_samples))
print("Number classes = {0:d}".format(n_classes))
# -
# ## Plotting the Faces
#
# We will plot a few faces to look at the dataset.
# +
def plt_face(x, h=50, w=37):
    """Show a flattened face vector `x` as an h-by-w grayscale image with no axis ticks.

    Generalized: the image height and width were hard-coded locals; they are
    now parameters defaulting to the LFW (resize=0.4) shape of 50 x 37, so the
    helper also works for other image sizes.
    """
    plt.imshow(x.reshape((h, w)), cmap=plt.cm.gray)
    plt.xticks([])
    plt.yticks([])
I = np.random.permutation(n_samples)
plt.figure(figsize=(10,20))
nplt = 4;
for i in range(nplt):
ind = I[i]
plt.subplot(1,nplt,i+1)
plt_face(X[ind])
plt.title(target_names[y[ind]])
# -
# ## Compute the PCA
#
# First, we subtract the mean from the data set.
npix = h*w
Xmean = np.mean(X,0)
Xs = X - Xmean[None,:]
# Then, we compute an SVD. Note that `np.linalg.svd` returns the transposed matrix `V.T` (often written `Vh`), not `V` itself. Also, you should pass `full_matrices=False` to get the *economy* SVD.
U,S,V = np.linalg.svd(Xs, full_matrices=False)
# We can plot the proportion of variance as a function of the number of PCs
# +
lam = S**2
PoV = np.cumsum(lam)/np.sum(lam)
plt.plot(PoV)
plt.grid()
plt.axis([1,n_samples,0, 1.1])
plt.xlabel('Number of PCs', fontsize=16)
plt.ylabel('PoV', fontsize=16)
# -
nsvd = 20
Xhat = (U[:,:nsvd]*S[None,:nsvd]).dot(V[:nsvd,:])
Xhat = Xhat + Xmean[None,:]
Xhat.shape
#show_face(Xhat[i,:], target_names[y[i]])
# +
nplt = 2 # number of faces to plot
ds = [0,5,10,20,100] # number of SVD approximations
# Select random faces
inds = np.random.permutation(n_samples)
inds = inds[:nplt]
nd = len(ds)
# Set figure size
plt.figure(figsize=(1.8 * (nd+1), 2.4 * nplt))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
# Loop over figures
iplt = 0
for ind in inds:
for d in ds:
plt.subplot(nplt,nd+1,iplt+1)
Xhati = (U[ind,:d]*S[None,:d]).dot(V[:d,:]) + Xmean
plt_face(Xhati)
plt.title('d={0:d}'.format(d))
iplt += 1
# Plot the true face
plt.subplot(nplt,nd+1,iplt+1)
plt_face(X[ind,:])
plt.title('Full')
iplt += 1
# -
# Finally, we plot the PCs
nplt = 6
plt.figure(figsize=(10, 20))
for i in range(nplt):
plt.subplot(1,nplt,i+1)
Vi = V[i,:]
plt_face(V[i,:])
plt.title('i={0:d}'.format(i))
| pca/eigen_face.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Quantum
# language: python
# name: quantum
# ---
# ## _*The EOH (Evolution of Hamiltonian) Algorithm*_
#
# This notebook demonstrates how to use the `Qiskit Aqua` library to invoke the EOH algorithm and process the result.
#
# Further information may be found for the algorithms in the online [Aqua documentation](https://qiskit.org/documentation/aqua/algorithms.html).
#
# For this particular demonstration, we illustrate the `EOH` algorithm. First, two `Operator` instances we created are randomly generated Hamiltonians.
# +
import numpy as np
from qiskit_aqua.operator import Operator
num_qubits = 2
temp = np.random.random((2 ** num_qubits, 2 ** num_qubits))
qubitOp = Operator(matrix=temp + temp.T)
temp = np.random.random((2 ** num_qubits, 2 ** num_qubits))
evoOp = Operator(matrix=temp + temp.T)
# -
# For EOH, we would like to evolve some initial state (e.g. the uniform superposition state) with `evoOp` and do a measurement using `qubitOp`. Below, we illustrate how such an example dynamics process can be easily prepared.
# +
from qiskit_aqua.input import get_input_instance
params = {
'problem': {
'name': 'eoh'
},
'algorithm': {
'name': 'EOH',
'num_time_slices': 1
},
'initial_state': {
'name': 'CUSTOM',
'state': 'uniform'
},
'backend': {
'name': 'statevector_simulator'
}
}
algo_input = get_input_instance('EnergyInput')
algo_input.qubit_op = qubitOp
algo_input.add_aux_op(evoOp)
# -
# With all the necessary pieces prepared, we can then proceed to run the algorithm and examine the result.
# +
from qiskit_aqua import run_algorithm
ret = run_algorithm(params, algo_input)
print('The result is\n{}'.format(ret))
# -
| community/aqua/general/eoh.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Background checks
#
# This notebook tests the sampling of the background. We simulate only the background and we then fit it, leaving the tilt and normalization parameters free to vary.
# cd ..
import make
# +
from pathlib import Path
import logging
import warnings
import click
import multiprocessing
from itertools import repeat
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.convolution import Tophat2DKernel
from astropy.coordinates import SkyCoord
from astropy.table import Table
from gammapy.data import GTI, Observation, EventList
from gammapy.datasets import MapDataset, MapDatasetEventSampler
from gammapy.estimators import LiMaMapEstimator as lima
from gammapy.maps import MapAxis, WcsGeom, Map
from gammapy.irf import EnergyDispersion2D, load_cta_irfs
from gammapy.makers import MapDatasetMaker
from gammapy.estimators import ImageProfile, ImageProfileEstimator
from gammapy.modeling import Fit
from gammapy.modeling.models import Models
from gammapy.utils.table import table_from_row_data
from regions import CircleSkyRegion
from gammapy.modeling.models import Model, Models, SkyModel, PowerLawSpectralModel, PointSpatialModel
import copy
from gammapy.maps import MapCoord
from gammapy.modeling.models import BackgroundModel, ConstantTemporalModel, LightCurveTemplateTemporalModel
from gammapy.utils.random import get_random_state
import yaml
from astropy.time import Time
from gammapy.utils.random import InverseCDFSampler
# -
BASE_PATH = Path("make.py").parent
model="point-pwl-time"
LIVETIME = 8 * u.hr
filename_dataset = make.get_filename_dataset(LIVETIME)
filename_model = BASE_PATH / f"models/{model}.yaml"
# ### SIMULATE THE EVENTS
make.simulate_events(filename_model,filename_dataset,100)
# ### COMPARE EVENT TIMES WITH THE SIMULATED LIGHT CURVE
model = 'point-pwl-time'
obsid=0
filename_events = (BASE_PATH / f"data/models/{model}/events_{LIVETIME.value:.0f}{LIVETIME.unit}_{obsid:04d}.fits.gz")
events = EventList.read(filename_events)
src = events.table[np.where(events.table['MC_ID'] == 2)]
len(src)
times = src['TIME']
# +
# curve = np.histogram(times,bins=np.logspace(2,4.5,30))
curve = np.histogram(times, bins=np.linspace(0,3e4,60))
rate = curve[0]/np.linspace(0,3e4,60)[:-1]
plt.plot(curve[1][:-1],rate)
plt.plot(time,norm)
# plt.loglog()
# -
def f(x, a, tau, c):
    """Offset exponential decay c + a * exp(-x / tau); model for the rate fit."""
    decay = np.exp(-x / tau)
    return a * decay + c
# +
from scipy.optimize import curve_fit

# Fit the offset-exponential model f to the binned event rate.
# The slices trim the first bin (rate was divided by t=0 there) and the
# sparsely populated tail bins.
init_vals = [1, 2000, 0]  # initial guesses for a, tau, c
best_vals, covar = curve_fit(
    f,
    curve[1][1:-4],
    rate[1:-3],
    p0=init_vals,
    sigma=((curve[0][1:-3]) ** 0.5 / np.linspace(0, 3e4, 60)[1:-4]),
)
print("############")
# Fixed: the messages used the invalid escape sequence '\-' ("+\-"),
# which printed literally; the intended separator is "+/-".
print(f"This is the norm: {best_vals[0]} +/- {covar[0,0]**0.5}")
print(f"This is the tau: {best_vals[1]} +/- {covar[1,1]**0.5}")
print(f"This is the constant: {best_vals[2]} +/- {covar[2,2]**0.5}")
print("############")
# -
plt.errorbar(curve[1][:-1],rate, yerr=((curve[0])**0.5/np.linspace(0,3e4,60)[1:]))
plt.plot(curve[1][:-1], f(curve[1][:-1], *best_vals))
# #### Check metadata
class EventListChecker(Checker):
    """Event list checker.

    Data format specification: ref:`gadf:iact-events`

    Each ``check_*`` method is a generator yielding one record dict
    (``level`` / ``obs_id`` / ``msg``) per problem found.

    Parameters
    ----------
    event_list : `~gammapy.data.EventList`
        Event list
    """

    # Fixed: removed a stray duplicate ``__init__(self, Checker)`` that was
    # dead code (it was shadowed by the real ``__init__`` defined below).
    # NOTE(review): this cell references names that are not imported here
    # (``collections``, ``Angle``, ``Quantity``, ``Unit``,
    # ``angular_separation``) — add those imports before running it.

    # Mapping of check name -> method name, for a driver that runs them all.
    CHECKS = {
        "meta": "check_meta",
        "columns": "check_columns",
        "times": "check_times",
        "coordinates_galactic": "check_coordinates_galactic",
        "coordinates_altaz": "check_coordinates_altaz",
    }

    # Tolerances used by the time and coordinate consistency checks.
    accuracy = {"angle": Angle("1 arcsec"), "time": Quantity(1, "microsecond")}

    # https://gamma-astro-data-formats.readthedocs.io/en/latest/events/events.html#mandatory-header-keywords
    meta_required = [
        "HDUCLASS",
        "HDUDOC",
        "HDUVERS",
        "HDUCLAS1",
        "OBS_ID",
        "TSTART",
        "TSTOP",
        "ONTIME",
        "LIVETIME",
        "DEADC",
        "RA_PNT",
        "DEC_PNT",
        # TODO: what to do about these?
        # They are currently listed as required in the spec,
        # but I think we should just require ICRS and those
        # are irrelevant, should not be used.
        # 'RADECSYS',
        # 'EQUINOX',
        "ORIGIN",
        "TELESCOP",
        "INSTRUME",
        "CREATOR",
        # https://gamma-astro-data-formats.readthedocs.io/en/latest/general/time.html#time-formats
        "MJDREFI",
        "MJDREFF",
        "TIMEUNIT",
        "TIMESYS",
        "TIMEREF",
        # https://gamma-astro-data-formats.readthedocs.io/en/latest/general/coordinates.html#coords-location
        "GEOLON",
        "GEOLAT",
        "ALTITUDE",
    ]

    _col = collections.namedtuple("col", ["name", "unit"])
    columns_required = [
        _col(name="EVENT_ID", unit=""),
        _col(name="TIME", unit="s"),
        _col(name="RA", unit="deg"),
        _col(name="DEC", unit="deg"),
        _col(name="ENERGY", unit="TeV"),
    ]

    def __init__(self, event_list):
        self.event_list = event_list

    def _record(self, level="info", msg=None):
        """Build one check-result record tagged with this list's OBS_ID."""
        obs_id = self.event_list.table.meta["OBS_ID"]
        return {"level": level, "obs_id": obs_id, "msg": msg}

    def check_meta(self):
        """Yield an error record if any mandatory header keyword is missing."""
        meta_missing = sorted(set(self.meta_required) - set(self.event_list.table.meta))
        if meta_missing:
            yield self._record(
                level="error", msg=f"Missing meta keys: {meta_missing!r}"
            )

    def check_columns(self):
        """Yield error records for an empty table, missing columns, or wrong units."""
        t = self.event_list.table
        if len(t) == 0:
            yield self._record(level="error", msg="Events table has zero rows")
        for name, unit in self.columns_required:
            if name not in t.colnames:
                yield self._record(level="error", msg=f"Missing table column: {name!r}")
            else:
                if Unit(unit) != (t[name].unit or ""):
                    yield self._record(
                        level="error", msg=f"Invalid unit for column: {name!r}"
                    )

    def check_times(self):
        """Yield error records for events outside the GTI or not time-ordered."""
        dt = (self.event_list.time - self.event_list.observation_time_start).sec
        if dt.min() < self.accuracy["time"].to_value("s"):
            yield self._record(level="error", msg="Event times before obs start time")

        dt = (self.event_list.time - self.event_list.observation_time_end).sec
        if dt.max() > self.accuracy["time"].to_value("s"):
            yield self._record(level="error", msg="Event times after the obs end time")

        if np.min(np.diff(dt)) <= 0:
            yield self._record(level="error", msg="Events are not time-ordered.")

    def check_coordinates_galactic(self):
        """Check if RA / DEC matches GLON / GLAT."""
        t = self.event_list.table
        if "GLON" not in t.colnames:
            # Galactic coordinates are optional; nothing to cross-check.
            return

        galactic = SkyCoord(t["GLON"], t["GLAT"], unit="deg", frame="galactic")
        separation = self.event_list.radec.separation(galactic).to("arcsec")
        if separation.max() > self.accuracy["angle"]:
            yield self._record(
                level="error", msg="GLON / GLAT not consistent with RA / DEC"
            )

    def check_coordinates_altaz(self):
        """Check if ALT / AZ matches RA / DEC."""
        t = self.event_list.table
        if "AZ" not in t.colnames:
            # Horizontal coordinates are optional; nothing to cross-check.
            return

        altaz_astropy = self.event_list.altaz
        separation = angular_separation(
            altaz_astropy.data.lon,
            altaz_astropy.data.lat,
            t["AZ"].quantity,
            t["ALT"].quantity,
        )
        if separation.max() > self.accuracy["angle"]:
            yield self._record(
                level="error", msg="ALT / AZ not consistent with RA / DEC"
            )
from gammapy.data import EventList, EventListBase
r = EventListBase(filename_events)
evt = r.read(filename_events)
tab = EventList.read(filename_events)
tab.check()
# ### TOOLS
# #### Create the skymodel
spectral_model = PowerLawSpectralModel(index=2,amplitude="1e-12 TeV-1 cm-2 s-1", reference='1 TeV')
spatial_model = PointSpatialModel(lon_0= "0 deg", lat_0="0 deg", frame='galactic')
sky_model = SkyModel(spectral_model=spectral_model, spatial_model=spatial_model)
print(sky_model.temporal_model)
# +
def rate(x, c="2e3 s"):
c = u.Quantity(c)
return np.exp(-x / c)
time = np.arange(3e4) * u.s
norm = rate(time)
plt.plot(time,norm)
# plt.loglog()
# plt.ylim(1e-1,1.1)
# -
# +
table = Table()
table["TIME"] = time
table["NORM"] = norm
table.meta = dict(MJDREFI=55197.0, MJDREFF=0, TIMEUNIT="s")
temporal_model = LightCurveTemplateTemporalModel(table)
filename = BASE_PATH / "models/lc.fits"
temporal_model.write(path=filename, overwrite=True)
# -
sky_model = SkyModel(spectral_model=spectral_model, spatial_model=spatial_model, temporal_model=temporal_model)
models = Models([sky_model])
file_model = (BASE_PATH / "models/point-pwl-time.yaml")
models.write(file_model, overwrite=True)
# #### TEST
filename_dataset = BASE_PATH / "data/dataset_8h.fits.gz"
dataset = MapDataset.read(filename_dataset)
filename_model = BASE_PATH / "models/point-pwl-time.yaml"
models = Models.read(filename_model)
dataset.models.extend(models)
model=dataset.models[1]
evaluator = dataset.evaluators.get(model.name)
evaluator = copy.deepcopy(evaluator)
evaluator.model.apply_irf["psf"] = False
evaluator.model.apply_irf["edisp"] = False
npred = evaluator.compute_npred()
temporal_model = dataset.models[1].temporal_model
print(temporal_model)
# +
obsid=0
irfs = load_cta_irfs(make.IRF_FILE)
observation = Observation.create(
obs_id=obsid, pointing=make.POINTING, livetime=make.LIVETIME, irfs=irfs
)
sampler = MapDatasetEventSampler(random_state=0)
# +
n_events = sampler.random_state.poisson(np.sum(npred.data))
coords = npred.sample_coord(n_events=n_events, random_state=sampler.random_state)
# +
table = Table()
try:
energy = coords["energy_true"]
except KeyError:
energy = coords["energy"]
table["ENERGY_TRUE"] = energy
table["RA_TRUE"] = coords.skycoord.icrs.ra.to("deg")
table["DEC_TRUE"] = coords.skycoord.icrs.dec.to("deg")
# -
time_start, time_stop, time_ref = (dataset.gti.time_start, dataset.gti.time_stop, dataset.gti.time_ref)
time = temporal_model.sample_time(
n_events=n_events, t_min=time_start, t_max=time_stop, t_delta='100 s', random_state=sampler.random_state
)
table["TIME"] = u.Quantity(((time.mjd - time_ref.mjd) * u.day).to(u.s)).to("s")
min(table["TIME"]), max(table["TIME"])/500
lc = plt.hist(np.sort(table["TIME"]), bins=np.logspace(0,4.5,50))
# plt.loglog()
np.logspace(0,4.5,50)
plt.plot(lc[1][1:],lc[0]/np.logspace(0,4.5,50)[1:])
plt.loglog()
# +
t_delta='1 s'
time_unit = getattr(u, temporal_model.table.meta["TIMEUNIT"])
t_min = Time(dataset.gti.time_start)
t_max = Time(dataset.gti.time_stop)
t_delta = u.Quantity(t_delta)
random_state = get_random_state(0)
ontime = u.Quantity((t_max - t_min).sec, "s")
t_stop = ontime.to_value(time_unit)
# -
# TODO: the separate time unit handling is unfortunate, but the quantity support for np.arange and np.interp
# is still incomplete, refactor once we change to recent numpy and astropy versions
t_step = t_delta.to_value(time_unit)
t = np.arange(0, t_stop, t_step)
pdf = temporal_model.evaluate(t)
sampler = InverseCDFSampler(pdf=pdf, random_state=random_state)
time_pix = sampler.sample(n_events)[0]
time = np.interp(time_pix, np.arange(len(t)), t) * time_unit
tempo = t_min+time
a=plt.hist(tempo.value)
IRF_FILE = "/Users/fabio/LAVORO/CTA/ACDC_ASTRI/ACDC1.1/ACDC1.1_ma9a/caldb/data/cta/prod3b/bcf/Astri_50h_June/irf_file.fits"
irfs = load_cta_irfs(IRF_FILE)
aeff=irfs['aeff']
aeff.plot_energy_dependence()
plt.loglog()
plt.xlim(0.1,200)
plt.legend(loc='lower right')
aeff.plot()
plt.xlim(0.1,200)
plt.savefig("/Users/fabio/Articles/MNRAS/ACDC/JoHEA/REVISION/ASTRI_eff.png", dpi=300)
plt.clf()
plt.close()
| validation/event-sampling/checks/time_sampling_check.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %matplotlib inline
# +
# stdlib imports
import json
import warnings
# third party imports
import matplotlib.pyplot as plt
import matplotlib
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.ticker as mticker
from IPython.display import Image
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import cartopy.feature as cfeature
import numpy as np
import pandas as pd
from scipy import stats
# -
# # Anatomy of a Matplotlib Plot
#
# You will notice in the figure below various annotated elements. We can programmatically control all of these using matplotlib functions.
# Show the pre-annotated reference figure (annotation was added manually).
Image(filename='sample_figure_annotated.png')
# ## Code
#
# The code below demonstrates how the plot above was made (minus the annotation, which was done manually).
#
# Note that we're saving the figure to the PDF (vector) format. Most of the time, you should be fairly safe in assuming that you can save to the following formats:
# - png Portable Network Graphics, a small and loss-less raster format
# - pdf Portable Document Format, a vector format
# - ps Postscript, the original vector format, works well with Illustrator and the like
# - eps Encapsulated Postscript, an extension of postscript
# - svg Scalable Vector Graphics XML-based vector format.
#
# Note that vector formats are scalable without losing resolution, whereas raster formats are composed of pixels that you will see clearly as you zoom in on the figure. PNG is small and well suited for web pages, for example.
# +
f = plt.figure(figsize=(20,10))
# Synthetic demo signal: a sine wave scaled to +/-100.
x = np.arange(1,101)
y = np.sin(x/4) * 100
line2d = plt.plot(x,y)
xlabel = plt.xlabel('Time', fontsize=18);
ylabel = plt.ylabel('Amplitude', fontsize=18);
title = plt.title('Response over Time', fontsize=18);
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.legend(['Instrument Response'], fontsize=18);
plt.tight_layout()
# Note here that we can save the figure to PDF
plt.savefig('sample_figure.pdf', dpi=300);
# -
# # Colors
#
# As matplotlib documentation says:
#
# `You can specify colors in many weird and wonderful ways.`
#
# Let's make one plot showing the different ways to specify color
# +
f = plt.figure(figsize=(20,10))
x = np.arange(1,101)
# Four sine waves with different periods, one per color-specification style.
y1 = np.sin(x/4) * 100
y2 = np.sin(x/3) * 100
y3 = np.sin(x/5) * 100
y4 = np.sin(x/2) * 100
line1 = plt.plot(x,y1, color='b') # color abbreviation
line2 = plt.plot(x,y2, color='black') # color name
line3 = plt.plot(x,y3, color='#eb34db') # hex (HTML) specification
line4 = plt.plot(x,y4, color=(0,1,0)) # RGB or RGBA tuples from 0-1
# -
# ## Wait... What's RGBA?
#
# You can optionally specify a fourth color value called `alpha`, which sets the transparency of the thing being plotted. An alpha value of 1 is fully opaque, and an alpha value of 0 is completely transparent.
# +
# Three 5x5 squares side by side, filled with decreasing opacity.
x1 = [0,5,5,0]
y1 = [0,0,5,5]
x2 = [10,15,15,10]
y2 = [0,0,5,5]
x3 = [20,25,25,20]
y3 = [0,0,5,5]
fig = plt.figure(figsize=(15,8))
f1 = plt.fill(x1,y1,color='blue', alpha=0.75)
f2 = plt.fill(x2,y2,color='blue', alpha=0.50)
f3 = plt.fill(x3,y3,color='blue', alpha=0.25)
t1 = plt.text(1.5,5.5,'alpha=0.75', fontweight='bold')
t2 = plt.text(11.5,5.5,'alpha=0.5', fontweight='bold')
t3 = plt.text(21.5,5.5,'alpha=0.25', fontweight='bold')
plt.axis('equal');
# -
# ### Quiz 1
#
# **Plot x versus y1 and y2 as shown in the line plot above and add a transparent box between 40-60 on the x-axis that spans the length of the y-axis.**
# <details>
# <summary>SEE SOLUTION</summary>
#
# ```
# # Define x and y data (can copy from above) & plot
# x = np.arange(1,101)
# y1 = np.sin(x/4) * 100
# y2 = np.sin(x/3) * 100
# f = plt.figure(figsize=(20,10))
# plt.plot(x,y1,x,y2)
#
# # add transparent box
# x_box = [40,60,60,40]
# y_box = [-105,-105,105,105]
# plt.fill(x_box,y_box,color='red', alpha=0.25)
#
# # Bonus: set range for y axis:
# axes = plt.gca()
# axes.set_ylim([-105,105])
#
# ```
# </details>
# # Different Types of Plots
# ## Read in Data
#
# Let's use pandas (subject of a future tutorial) to read in 4 days of US earthquake data obtained from ComCat.
# +
# Read in the data, using the first row as columns, parse the "time" and "updated" columns as DateTime objects.
data = pd.read_csv('eq_query.csv', header=0, parse_dates=['time', 'updated'])
# How many earthquakes are in this data set?
print(f'{len(data)} events read.')
# Display the first five events
data.head()
# -
# ## Histogram of Magnitudes
fig = plt.figure(figsize=(16,8))
# 80 bins across the catalog's magnitude range.
n, bins, patches = plt.hist(data['mag'], 80, facecolor='b', alpha=0.75)
xlabel = plt.xlabel('Earthquake Magnitude')
ylabel = plt.ylabel('Number of Earthquakes')
# Build a title from the first and last event timestamps.
start_date = data['time'].min()
end_date = data['time'].max()
start_str = start_date.strftime('%b %d, %Y')
end_str = end_date.strftime('%b %d, %Y')
title_string = f'Earthquake Frequency from {start_str} to {end_str}'
title = plt.title(title_string)
# ## Magnitude Error Bars
data = data.sort_values('time')
# First 100 events; elapsed time is converted from a timedelta to hours.
data_short = data.iloc[0:100].copy()
data_short['elapsed'] = data_short['time'] - data_short['time'].min()
data_short['elapsed'] = data_short['elapsed'] / np.timedelta64(3600, 's')
fig = plt.figure(figsize=(16,8))
error = plt.errorbar(data_short['elapsed'], data_short['mag'], yerr=data_short['magError'], capsize=5, fmt='b.')
timestr = data_short['time'].min().strftime('%Y-%m-%d %H:%M:%S')
xlabel = plt.xlabel(f'Elapsed Time since {timestr} (hours)')
ylabel = plt.ylabel('Magnitude')
title = plt.title('Magnitude with errorbars')
# ## Filled Polygons & Continuous Error Bars
# ### Quiz 2
#
# **Use Pandas to read in Wasatch Fault Zone slip rate data (NSHM2023-WasatchFaultSlipRates.csv) & sort by latitude.** This data set is Hatem et al., 2021, Earthquake geology inputs for the National Seismic Hazard Model (NSHM) 2023, version 1.0: U.S. Geological Survey data release, https://doi.org/10.5066/P918XCUU.
# <details>
# <summary>SEE SOLUTION</summary>
#
# ```
# # Read in the data, using the first row as columns
# slipRates = pd.read_csv('NSHM2023-WasatchFaultSlipRates.csv', header=0)
# slipRates = slipRates.sort_values('lat')
#
# # How many slip rates are there?
# print(f'{len(slipRates)} slip rates read.')
#
# # Display the first five events
# slipRates.head()
# ```
# </details>
# Plot error bars with upper & lower slip rates, and use a filled polygon to plot continuous error bars
# NOTE(review): `slipRates` is only defined in the Quiz 2 solution above;
# this cell fails unless that solution cell has been run first.
fig, ax = plt.subplots(1,1,figsize=(16,8))
ax.plot(slipRates['SlipRateID'], slipRates['PrefRate'], '.')
error = plt.errorbar(slipRates['SlipRateID'], slipRates['PrefRate'], yerr=(slipRates['LowRate'],slipRates['HighRate']), capsize=4, elinewidth=0.25, fmt='b.')
ax.fill_between(slipRates['SlipRateID'], slipRates['PrefRate'] - slipRates['LowRate'], slipRates['PrefRate'] + slipRates['HighRate'], alpha=0.2)
xlabel = plt.xlabel('Slip Rate ID')
ylabel = plt.ylabel('Slip Rate (mm/yr)')
title = plt.title('Slip Rate with Continuous Error Bars')
# Rotate x axis labels
plt.xticks(rotation=45,ha='right');
# ## Data as an Image
#
# We can plot gridded or raster data in matplotlib using `plt.imshow()`. Let's first load in the [2014 10% probability of exceedance for 1 second spectral acceleration from the National Seismic Hazard Map](https://www.sciencebase.gov/catalog/item/5db9be62e4b06957974eb5ca). We'll determine the number of rows and columns by counting number of unique latitudes and longitudes. By inspecting the data, we can determine that the resolution of the data in X and Y is 0.05 degrees. Finally, we'll get the min/max values of the data for use later on.
# Columns of the .dat file: longitude, latitude, probability value.
grid = np.loadtxt('2014_1hz10pct50yrs.dat')
nrows = len(np.unique(grid[:,1])) # number of unique latitudes
ncols = len(np.unique(grid[:,0])) # number of unique longitudes
# Reshape the flat value column into a (lat, lon) raster.
prob = np.reshape(grid[:,2],(nrows, ncols))
# from inspection of the data...
dx = 0.05
dy = 0.05
# Geographic bounding box of the grid.
xmin = grid[:,0].min()
ymax = grid[:,1].max()
xmax = grid[:,0].max()
ymin = grid[:,1].min()
# +
# Keep the figure aspect ratio equal to the grid aspect ratio.
figwidth = 20
figheight= nrows/ncols * figwidth
fig = plt.figure(figsize=(figwidth,figheight))
ax = plt.gca()
# let's reset the dynamic range of the data so we can see more of the variation in the Midwest.
dmean = prob.mean()
dstd = prob.std()
# Clip the color range to mean +/- 3 standard deviations.
dmin = dmean - dstd*3
dmax = dmean + dstd*3
img = plt.imshow(prob, vmin=dmin, vmax=dmax, interpolation='none')
# Attach a colorbar axis of matching height to the right of the image.
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="3%", pad=0.05)
cbar = plt.colorbar(img, cax=cax)
plt.sca(ax)
tstring = plt.title('2014 SA(1.0sec)')
xlabel = plt.xlabel('Pixel columns')
ylabel = plt.ylabel('Pixel rows')
# -
# ## ColorMaps
#
# There are many colormaps provided by matplotlib - the default one above is `viridis`. This is a good choice because it has a number of desirable properties:
# - Colorful
# - Pretty
# - Sequential
# - Perceptually uniform (even in black and white)
# - Accessible to colorblind viewers
#
# In case you DON'T think this is pretty, you have many other choices. (You can also make your own).
#
#
# *Note: You can reverse any colormap by appending "_r" to the name*
figwidth = 20
figheight= 10
fig, axes = plt.subplots(nrows=2, ncols=2,figsize=(figwidth,figheight))
# 2x2 grid of colormap names, one per subplot.
colormaps = [['plasma', 'cividis'], ['Greens_r', 'Reds_r']]
# let's reset the dynamic range of the data so we can see more of the variation in the Midwest.
dmean = prob.mean()
dstd = prob.std()
dmin = dmean - dstd*3
dmax = dmean + dstd*3
for irow in range(0,len(colormaps)):
    crow = colormaps[irow]
    for icol in range(0,len(crow)):
        ax = axes[irow][icol]
        cmap = colormaps[irow][icol]
        plt.sca(ax)
        img = plt.imshow(prob, vmin=dmin, vmax=dmax, interpolation='none', cmap=cmap)
        tstring = plt.title(f'{cmap} ColorMap')
# # Making Maps with Cartopy
#
# Cartopy is a mapping library based on matplotlib, which supports all of the same features, but adds the ability to draw projected data and make nice looking maps.
#
# A GeoAxes is a subclass of the matplotlib Axes class, which has a *projection*. If you add data to this GeoAxes by using one of the standard matplotlib plotting commands (plot(), scatter(), imshow(), etc.) then you must also supply a *transform* keyword to that function telling the GeoAxes what projection the data you are plotting is in.
#
# Note that *PlateCarree* is the projection we use to denote latitudes and longitudes (no projection).
# ## Scatter Plot
# +
fig = plt.figure(figsize=(16, 8))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.Mercator())
# make the map global rather than have it zoom in to
# the extents of any plotted data
ax.set_global()
# ax.stock_img()
ax.coastlines()
# set extent to just the US
# NOTE(review): this overwrites the grid extents (xmin/xmax/ymin/ymax)
# computed from the hazard grid earlier; the later image cell reuses these
# values as the image extent — confirm they match the grid coverage.
ymin, ymax = [24.6, 50] # Latitude
xmin, xmax = [-125, -65] # Longitude
ax.set_extent((xmin, xmax, ymin, ymax))
# Linear marker-size scaling: size = 99 * magnitude + 10.
m = 99
b = 10
sizes = m*data['mag'] + b
points = plt.scatter(x=data['longitude'], y=data['latitude'],
                     s=sizes,
                     c="red",
                     edgecolor='k',
                     transform=ccrs.PlateCarree()) ## Important
# -
# ### Quiz 3
#
# **Can you use cfeature (Cartopy Features) to add country borders, rivers, lakes, and state borders to the figure above?** [Check out this link for some hints](https://scitools.org.uk/cartopy/docs/v0.16/matplotlib/feature_interface.html). Also, check out [Tutorial 3 from Matlab to Python](https://mybinder.org/v2/gh/khaynie-usgs/matlab_to_python.git/main?filepath=matlab_to_python.ipynb) for some more examples.
# <details>
# <summary>SEE SOLUTION</summary>
#
# ```
# # Add the following to the cell above:
# ax.add_feature(cfeature.BORDERS, linestyle=':')
# ax.add_feature(cfeature.RIVERS)
# ax.add_feature(cfeature.LAKES)
# ax.add_feature(cfeature.STATES, edgecolor='gray')
# ```
# </details>
# ## Image (Bonus Subplot!)
# +
# Draw the hazard raster on a map (top panel) with the earthquake scatter
# overlaid, then sample the raster along a line of constant latitude and
# plot that transect (bottom panel).
warnings.filterwarnings("ignore")
figwidth = 20
# Fixed height for the two stacked panels.  (A grid-aspect-derived height
# was previously computed here and immediately overwritten — removed.)
figheight = 20
# use cartopy to draw this data on a map
# image extent is xmin, xmax, ymin, ymax
img_extent = (xmin, xmax, ymin, ymax)
fig = plt.figure(figsize=(figwidth, figheight))
geoaxes = plt.subplot(2, 1, 1, projection=ccrs.PlateCarree())
# geoaxes.stock_img()
img = geoaxes.imshow(prob, vmin=dmin, vmax=dmax, origin='upper', extent=img_extent, transform=ccrs.PlateCarree())
geoaxes.coastlines(color='black', lw=2)
title = plt.title('2014 SA(1.0sec)')
# draw gridlines on the plot
gl = geoaxes.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
                       linewidth=2, color='black', alpha=0.5, linestyle='--')
gl.top_labels = False
gl.right_labels = False
gl.xlines = True
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
# Linear marker-size scaling: size = 99 * magnitude + 10.
m = 99
b = 10
sizes = m*data['mag'] + b
points = plt.scatter(x=data['longitude'], y=data['latitude'],
                     s=sizes,
                     c="red",
                     edgecolor='k',
                     transform=ccrs.PlateCarree()) ## Important
# draw sampling line across peak in the midwest
lat = 36.0
lons = np.arange(-92, -88, 0.05)
lats = [lat] * len(lons)
geoaxes.plot(lons, lats, 'b', transform=ccrs.PlateCarree())
geoaxes.set_extent((xmin, xmax, ymin, ymax))
# convert those lat/lon values to row/col
cols = [int(c) for c in (lons - xmin) / dx]
row = int((ymax - lat) / dy)
pdata = prob[row, cols]
# draw a second plot with the sampled probability on the y axis
pltaxes = plt.subplot(2, 1, 2)
line2d = pltaxes.plot(lons, pdata, 'b')
tstring = plt.title('Transect Across New Madrid Fault')
# -
# # Resources
#
# - Matplotlib pyplot documentation: https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.html
# - Matplotlib colormaps: https://matplotlib.org/stable/gallery/color/colormap_reference.html
# - Cartopy Tutorial: https://coderzcolumn.com/tutorials/data-science/cartopy-basic-maps-scatter-map-bubble-map-and-connection-map#6
| python_plotting_introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:nairobi_ambulance] *
# language: python
# name: conda-env-nairobi_ambulance-py
# ---
# +
import math
import pandas as pd
import geopandas as gpd
import numpy as np
import matplotlib.pyplot as plt
import h3 # h3 bins from uber
# -
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import sys
sys.path.append('../Scripts')
import capstone_functions as cf
# # Prediction models
# ## A. Identifying frequency outliers
# **Idea:** Take historical data but cut off "frequency outliers" which occurred only once in the whole data set.
# ### Overall function
# End-to-end pipeline for prediction variant A (drop frequency outliers).
df_predictions_a = cf.rta_prediction_pipeline(type_of_pred="a", frequency_cutoff=1)
# ### Separate function steps
# Load data and create temporal features and hex bins.
df_raw = cf.create_crash_df()
df = cf.create_temporal_features(df_raw)
df = cf.assign_hex_bin(df)
# Create empty data frame for all hex bin / time window combinations
df_pred_template = cf.create_pred_template(df)
# Create data frame for all RTA's with the respective hex bin / time window combinations
df_tw_hex = cf.rta_per_time_window_hex_bin(df)
# Merge the RTA's onto the empty data frame
df_merged = cf.fill_overall_df(df_pred_template, df_tw_hex)
df_merged.head()
# Generate list of frequency outliers
# (combinations occurring <= frequency_cutoff times in the whole data set)
list_freq_outliers = cf.generate_outlier_list(df_merged, frequency_cutoff=1)
# Filter overall data frame to exclude frequency outliers
df_pred_a = cf.filter_df_for_pred_a(df, list_freq_outliers)
# Create latitude and longitude
df_pred_a_lat_long = cf.convert_h3_to_lat_lon(df_pred_a)
# ### Output
# Overview of output file
df_pred_a_lat_long.head()
df_pred_a_lat_long.shape
# Export to csv
cf.export_df_to_csv(df_pred_a_lat_long,path_file='../Inputs/predictions_for_clustering_a.csv')
# ***
# ## B. Using RTA frequency as a prediction measure
# **Idea**: For each hex bin, use the frequencies (sum of occurrences, not the magnitude) for each time window as a prediction value
# ### Overall function
df_predictions_b = cf.rta_prediction_pipeline(type_of_pred="b", frequency_cutoff=1)
# ### Separate function steps
# Reduce RTA magnitude to 1 to predict based solely on frequency
df_pred_b = cf.filter_df_for_pred_b(df_merged, list_freq_outliers)
# Clean up data frame
df_pred_b_clean = cf.clean_pred_b(df_pred_b)
# Create latitude and longitude
df_pred_b_lat_long = cf.convert_h3_to_lat_lon(df_pred_b_clean)
# ### Output
df_pred_b_lat_long.head()
df_pred_b_lat_long.shape
# Export to csv
cf.export_df_to_csv(df_pred_b_lat_long,path_file='../Inputs/predictions_for_clustering_b.csv')
# ***
# ## C. Using weather data to predict RTA occurrence (yes/no?) per time window and hex_bin class
# **Idea**: Adds weather data (data per day) to B and fits a regression model on this weather data for all hex bins.
# ### Overall function
df_predictions_c = cf.rta_prediction_pipeline(type_of_pred="c", frequency_cutoff=1)
# ### Generate predictions
# Import weather data
df_weather = pd.read_csv('../Inputs/Weather_Nairobi_Daily_GFS.csv', parse_dates=['Date'])
# Prediction for first half year of 2019 (rounded to whole accident counts)
predicted_rta_first_half_2019 = cf.predict_accidents_on_weather_first_half_2019(df_raw, df_weather)
predicted_rta_first_half_2019_round = [int(round(i, 0)) for i in predicted_rta_first_half_2019]
sum(predicted_rta_first_half_2019_round)
# Prediction for second half year of 2019
predicted_rta = cf.predict_accidents_on_weather(df_raw, df_weather)
predicted_rta_round = [int(round(i, 0)) for i in predicted_rta]
sum(predicted_rta_round)
# ### Separate function steps
# Create sample basis for each weekday
df_samples = cf.create_samples(df_merged, list_freq_outliers)
# Generate overall prediction based on RTA's per day as predicted by daily weather and the sampling base
df_pred_c = cf.generate_predictions_first_half_2019(df_samples, predicted_rta_round)
# Clean up data frame
df_pred_c_clean = cf.reduce_to_time_windows(df_pred_c)
# Create latitude and longitude
df_pred_c_lat_long = cf.convert_h3_to_lat_lon(df_pred_c_clean)
# ### Output
df_pred_c_lat_long.head()
df_pred_c_lat_long.shape
# Export to csv
cf.export_df_to_csv(df_pred_c_lat_long,path_file='../Inputs/predictions_for_clustering_c.csv')
# ***
| Notebooks/Prediction_Output.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.4.3
# language: julia
# name: julia-0.4
# ---
# +
# Load the ultrasound nerve segmentation training TIFFs (images and their
# "_mask" companions) into one flat matrix, one row per file.
using Images, DataFrames, Colors, HDF5, DecisionTree, Mocha
# NOTE(review): Mocha is already loaded by the line above, so setting this
# environment variable here may be too late to enable the native extension
# — confirm the intended ordering.
ENV["MOCHA_USE_NATIVE_EXT"] = "true"
using Mocha
backend = CPUBackend()
init(backend)
wd="/media/u01/analytics/scoring/k/USNS"
ssub = readtable("$(wd)/sample_submission.csv")
tm = readtable("$(wd)/train_masks.csv")
#Load Train
flist = readdir("$(wd)/train/")
numfiles=length(flist)
# Row layout: [subject, image number, is_mask flag, 243600 flattened pixels]
# (243600 = 420x580, the image size implied by the reshape below).
train = zeros(numfiles, 243600+3)
row=0
for f in flist
    #println(f)
    # Filenames look like "<subject>_<imagenum>[_mask].tif".
    subject=float(split(replace(f,".tif",""),"_")[1])
    inum=float(split(replace(f,".tif",""),"_")[2])
    isMask=contains(f,"_mask") ? 1.0 : 0.0
    row=row+1
    fp="$(wd)/train/$(f)"
    if isfile(fp)
        img = imread(fp)
        train[row, 4:end] = reshape(img, 1, 243600)
        train[row,1]=subject
        train[row,2]=inum
        train[row,3]=isMask
        #println(f," ~ ", typeof(img)," ~ ",length(img))
        #println(f,"~",subject,"~",inum,"~",isMask)
    end
end
#http://stackoverflow.com/questions/30926050/how-to-save-an-nxnxn-array-or-matrix-into-a-file-in-julia-or-python
# Scratch demo: round-trip a random 3x3x3 array through a JLD (HDF5) file.
Pkg.add("HDF5")
using HDF5, JLD
r = rand(3, 3, 3)
save("data.jld", "data", r)
load("data.jld")["data"]
# ---- Mocha -----
# AlexNet-style classification network assembled as a Mocha layer vector:
# data -> crop -> conv1..conv5 (with pooling/LRN) -> fc6..fc8 -> softmax
# over 1000 classes.  Weights are not loaded here; this only builds the net.
img_width, img_height, img_channels = (256, 256, 3)
crop_size = (227, 227)
batch_size = 1 # could be larger if you want to classify a bunch of images at a time
layers = [ MemoryDataLayer(name="data",
               tops=[:data],
               batch_size=batch_size,
               # Scale pixels to 0-255, then subtract the ILSVRC12 mean image.
               transformers=[(:data, DataTransformers.Scale(scale=255)),
                             (:data, DataTransformers.SubMean(mean_file="model/ilsvrc12_mean.hdf5"))
                            ],
               data = Array[zeros(img_width, img_height, img_channels, batch_size)]
               )
           # Center-crop 256x256 inputs down to the 227x227 network input.
           CropLayer(name="crop",
               tops=[:cropped],
               bottoms=[:data],
               crop_size=crop_size
               )
           ConvolutionLayer(name="conv1",
               tops=[:conv1], bottoms=[:cropped],
               kernel=(11,11),
               stride=(4,4),
               n_filter=96,
               neuron=Neurons.ReLU()
               )
           PoolingLayer(name="pool1",
               tops=[:pool1],
               bottoms=[:conv1],
               kernel=(3,3),
               stride=(2,2),
               pooling=Pooling.Max()
               )
           # Local response normalization after pool1.
           LRNLayer(name="norm1",
               tops=[:norm1],
               bottoms=[:pool1],
               kernel=5,
               scale=0.0001,
               power=0.75
               )
           ConvolutionLayer(name="conv2",
               tops=[:conv2],
               bottoms=[:norm1],
               kernel=(5,5),
               pad=(2,2),
               n_filter=256,
               n_group=2,
               neuron=Neurons.ReLU()
               )
           PoolingLayer(name="pool2",
               tops=[:pool2],
               bottoms=[:conv2],
               kernel=(3,3),
               stride=(2,2),
               pooling=Pooling.Max()
               )
           LRNLayer(name="norm2",
               tops=[:norm2],
               bottoms=[:pool2],
               kernel=5,
               scale=0.0001,
               power=0.75)
           ConvolutionLayer(name="conv3",
               tops=[:conv3],
               bottoms=[:norm2],
               kernel=(3,3),
               pad=(1,1),
               n_filter=384,
               neuron=Neurons.ReLU()
               )
           ConvolutionLayer(name="conv4",
               tops=[:conv4],
               bottoms=[:conv3],
               kernel=(3,3),
               pad=(1,1),
               n_filter=384,
               n_group=2,
               neuron=Neurons.ReLU())
           ConvolutionLayer(name="conv5",
               tops=[:conv5],
               bottoms=[:conv4],
               kernel=(3,3),
               pad=(1,1),
               n_filter=256,
               n_group=2,
               neuron=Neurons.ReLU()
               )
           PoolingLayer(name="pool5",
               tops=[:pool5],
               bottoms=[:conv5],
               kernel=(3,3),
               stride=(2,2),
               pooling=Pooling.Max()
               )
           InnerProductLayer(name="fc6",
               tops=[:fc6],
               bottoms=[:pool5],
               output_dim=4096,
               neuron=Neurons.ReLU()
               )
           InnerProductLayer(name="fc7",
               tops=[:fc7],
               bottoms=[:fc6],
               output_dim=4096,
               neuron=Neurons.ReLU())
           # Final 1000-way class scores, turned into probabilities by softmax.
           InnerProductLayer(name="fc8",
               tops=[:fc8],
               bottoms=[:fc7],
               output_dim=1000
               )
           SoftmaxLayer(name="prob",
               tops=[:prob],
               bottoms=[:fc8]
               )
         ]
net = Net("imagenet", backend, layers)
println(net)
| src/old/nb/Overlay_Nerve_Visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Load station metadata and keep only stations whose (old or new) code
# appears as a column of the measurement frame `dane` (loaded in an
# earlier cell — TODO confirm).
# BUG FIX: the spreadsheet was read into `stations`, but every following
# line reads the undefined name `stacje`; read it under the name used.
stacje = pd.read_excel("../input/Metadane_wer20160914.xlsx")
nazywStacji = set(dane.columns.values)
stacje = stacje.set_index("Nr")
stacje[(stacje["Stary Kod stacji"]).isin(nazywStacji) | (stacje["Kod stacji"]).isin(nazywStacji)]
interesujaceStacje = stacje[(stacje["Stary Kod stacji"]).isin(nazywStacji) | (stacje["Kod stacji"]).isin(nazywStacji)]
interesujaceStacje
interesujaceStacje.shape
# WGS84 longitude/latitude columns of the stations of interest.
interesujaceStacje[[u'WGS84 \u03bb E',u'WGS84 \u03c6 N']]
wspolrzedne = interesujaceStacje[[u'WGS84 \u03bb E',u'WGS84 \u03c6 N']].values
wspolrzedne[:,1]
import matplotlib.pyplot as plt
# Quick look at the station coordinates (lon on x, lat on y).
plt.scatter(wspolrzedne[:,0], wspolrzedne[:,1])
import folium
# Base map centered on Poland.
map_osm = folium.Map(location=[52.069167, 19.480556], zoom_start=6)
map_osm
interesujaceStacje.index
# Add one marker per station (Python 2 print statement).
for index, row in interesujaceStacje.iterrows():
    print row['Nazwa stacji']
    folium.Marker([row[u'WGS84 \u03c6 N'], row[u'WGS84 \u03bb E']], popup=row['Nazwa stacji']).add_to(map_osm)
map_osm
# Measurements for a single timestamp, melted to long form and joined
# with station coordinates for spatial interpolation.
jeden_dzien = dane[dane.index == "2000-06-12 08:00:00"]
do_interpolacji = pd.melt(jeden_dzien)
do_interpolacji.rename(columns={"variable":"<NAME> stacji"}, inplace=True)
final = do_interpolacji.merge(interesujaceStacje[[u'WGS84 \u03bb E',u'WGS84 \u03c6 N', "<NAME> stacji"]])
x = final[u'WGS84 \u03bb E'].values
y = final[u'WGS84 \u03c6 N'].values
z = final[u'value'].values
x
import numpy as np
from scipy.interpolate import griddata
# +
# Interpolation grid: a regular lattice augmented with the station
# coordinates themselves, then sorted.
xi = np.linspace(x.min(),x.max(),100)
yi = np.linspace(y.min(),y.max(),200)
xi = np.append(xi,x)
xi.sort()
yi = np.append(yi,y)
yi.sort()
zi = griddata((x, y), z, (xi[None,:], yi[:,None]), method='linear')
# -
(x,y), z
zi
# contour the gridded data, plotting dots at the randomly spaced data points.
CS = plt.contour(xi,yi,zi)
CS = plt.contourf(xi,yi,zi)
plt.colorbar() # draw colorbar
# plot data points.
plt.scatter(x,y,marker='o',c='b',s=5)
plt.show()
from folium import plugins
# Much finer grid (~0.001 degree resolution) for the heatmap layer.
nxpoints = (x.max() - x.min()) / .001
nypoints = (y.max() - y.min()) / .001
xi = np.linspace(x.min(),x.max(),int(nxpoints))
yi = np.linspace(y.min(),y.max(),int(nypoints))
zi = griddata((x, y), z, (xi[None,:], yi[:,None]), method='linear')
print(xi.shape)
print(yi.shape)
# Spot-check a single grid node for NaN (outside the convex hull).
np.isnan(zi[4502,5000])
from tqdm import tqdm
# Flatten the interpolated grid into (x, y, z) triples for the heatmap,
# skipping nodes where griddata produced NaN (points outside the convex
# hull of the stations).
xlist = []
ylist = []
zlist = []
for xelement in tqdm(range(xi.shape[0])):
    for yelement in range(yi.shape[0]):
        # zi is indexed (row=y, col=x).
        if not np.isnan(zi[yelement, xelement]):
            xlist.append(xi[xelement])
            ylist.append(yi[yelement])
            zlist.append(zi[yelement, xelement])
dataForHeatmap = pd.DataFrame({"x":xlist, "y":ylist, "z":zlist})
dataForHeatmap
# +
#plugins.HeatMap(zip(ylist, xlist, zlist)).add_to(map_osm)
# +
#map_osm
# -
# NOTE(review): stray expression — `file` is the Python 2 builtin; this
# looks like a leftover scratch cell.
file
# List the bare filenames of the yearly input spreadsheets.
[ basename(wholeFilename) for wholeFilename in glob.glob("../input/2*.xlsx") ]
| workspace/scrapbook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + tags=["remove-input"]
from datascience import *
# %matplotlib inline
# Base path for the textbook's data files.
path_data = '../data/'
import matplotlib.pyplot as plots
plots.style.use('fivethirtyeight')
import numpy as np
# -
# # The Method of Least Squares
# We have developed the equation of the regression line that runs through a football shaped scatter plot. But not all scatter plots are football shaped, not even linear ones. Does every scatter plot have a "best" line that goes through it? If so, can we still use the formulas for the slope and intercept developed in the previous section, or do we need new ones?
#
# To address these questions, we need a reasonable definition of "best". Recall that the purpose of the line is to *predict* or *estimate* values of $y$, given values of $x$. Estimates typically aren't perfect. Each one is off the true value by an *error*. A reasonable criterion for a line to be the "best" is for it to have the smallest possible overall error among all straight lines.
#
# In this section we will make this criterion precise and see if we can identify the best straight line under the criterion.
# + tags=["remove-input"]
def standard_units(any_numbers):
    "Convert any array of numbers to standard units."
    deviations = any_numbers - np.mean(any_numbers)
    return deviations / np.std(any_numbers)
def correlation(t, x, y):
    """Correlation coefficient of columns x and y of table t."""
    x_standard = standard_units(t.column(x))
    y_standard = standard_units(t.column(y))
    return np.mean(x_standard * y_standard)
def slope(table, x, y):
    """Slope of the regression line predicting column y from column x."""
    sd_ratio = np.std(table.column(y)) / np.std(table.column(x))
    return correlation(table, x, y) * sd_ratio
def intercept(table, x, y):
    """Intercept of the regression line predicting column y from column x."""
    mean_x = np.mean(table.column(x))
    mean_y = np.mean(table.column(y))
    return mean_y - slope(table, x, y) * mean_x
def fit(table, x, y):
    """Return the height of the regression line at each x value."""
    line_slope = slope(table, x, y)
    line_intercept = intercept(table, x, y)
    return line_slope * table.column(x) + line_intercept
# -
# Our first example is a dataset that has one row for every chapter of the novel "Little Women." The goal is to estimate the number of characters (that is, letters, spaces, punctuation marks, and so on) based on the number of periods. Recall that we attempted to do this in the very first lecture of this course.
little_women = Table.read_table(path_data + 'little_women.csv')
little_women = little_women.move_to_start('Periods')
little_women.show(3)
little_women.scatter('Periods', 'Characters')
# To explore the data, we will need to use the functions `correlation`, `slope`, `intercept`, and `fit` defined in the previous section.
correlation(little_women, 'Periods', 'Characters')
# The scatter plot is remarkably close to linear, and the correlation is more than 0.92.
# ## Error in Estimation
#
# The graph below shows the scatter plot and line that we developed in the previous section. We don't yet know if that's the best among all lines. We first have to say precisely what "best" means.
lw_with_predictions = little_women.with_column('Linear Prediction', fit(little_women, 'Periods', 'Characters'))
lw_with_predictions.scatter('Periods')
# Corresponding to each point on the scatter plot, there is an error of prediction calculated as the actual value minus the predicted value. It is the vertical distance between the point and the line, with a negative sign if the point is below the line.
actual = lw_with_predictions.column('Characters')
predicted = lw_with_predictions.column('Linear Prediction')
# Prediction errors: positive above the line, negative below.
errors = actual - predicted
lw_with_predictions.with_column('Error', errors)
# We can use `slope` and `intercept` to calculate the slope and intercept of the fitted line. The graph below shows the line (in light blue). The errors corresponding to four of the points are shown in red. There is nothing special about those four points. They were just chosen for clarity of the display. The function `lw_errors` takes a slope and an intercept (in that order) as its arguments and draws the figure.
lw_reg_slope = slope(little_women, 'Periods', 'Characters')
lw_reg_intercept = intercept(little_women, 'Periods', 'Characters')
# + tags=["remove-input"]
# Four hand-picked (periods, characters) points whose errors are drawn in red.
sample = [[131, 14431], [231, 20558], [392, 40935], [157, 23524]]
def lw_errors(slope, intercept):
    """Scatter the data and draw the line y = slope*x + intercept,
    with red vertical error segments at the four sample points.
    Note: the parameters shadow the `slope`/`intercept` functions."""
    little_women.scatter('Periods', 'Characters')
    xlims = np.array([50, 450])
    plots.plot(xlims, slope * xlims + intercept, lw=2)
    for x, y in sample:
        plots.plot([x, x], [y, slope * x + intercept], color='r', lw=2)
# -
print('Slope of Regression Line: ', np.round(lw_reg_slope), 'characters per period')
print('Intercept of Regression Line:', np.round(lw_reg_intercept), 'characters')
lw_errors(lw_reg_slope, lw_reg_intercept)
# Had we used a different line to create our estimates, the errors would have been different. The graph below shows how big the errors would be if we were to use another line for estimation. The second graph shows large errors obtained by using a line that is downright silly.
lw_errors(50, 10000)
lw_errors(-100, 50000)
# ## Root Mean Squared Error
#
# What we need now is one overall measure of the rough size of the errors. You will recognize the approach to creating this – it's exactly the way we developed the SD.
#
# If you use any arbitrary line to calculate your estimates, then some of your errors are likely to be positive and others negative. To avoid cancellation when measuring the rough size of the errors, we will take the mean of the squared errors rather than the mean of the errors themselves.
#
# The mean squared error of estimation is a measure of roughly how big the squared errors are, but as we have noted earlier, its units are hard to interpret. Taking the square root yields the root mean square error (rmse), which is in the same units as the variable being predicted and therefore much easier to understand.
# ## Minimizing the Root Mean Squared Error
#
# Our observations so far can be summarized as follows.
#
# - To get estimates of $y$ based on $x$, you can use any line you want.
# - Every line has a root mean squared error of estimation.
# - "Better" lines have smaller errors.
#
# Is there a "best" line? That is, is there a line that minimizes the root mean squared error among all lines?
#
# To answer this question, we will start by defining a function `lw_rmse` to compute the root mean squared error of any line through the Little Women scatter diagram. The function takes the slope and the intercept (in that order) as its arguments.
def lw_rmse(slope, intercept):
    """Draw the line with its error segments and print its root mean
    squared error on the Little Women data."""
    lw_errors(slope, intercept)
    periods = little_women.column('Periods')
    characters = little_women.column('Characters')
    residuals = characters - (slope * periods + intercept)
    print("Root mean squared error:", np.mean(residuals ** 2) ** 0.5)
# RMSE of two deliberately bad lines, a near-optimal one, and the regression line.
lw_rmse(50, 10000)
lw_rmse(-100, 50000)
# Bad lines have big values of rmse, as expected. But the rmse is much smaller if we choose a slope and intercept close to those of the regression line.
lw_rmse(90, 4000)
# Here is the root mean squared error corresponding to the regression line. By a remarkable fact of mathematics, no other line can beat this one.
#
# - **The regression line is the unique straight line that minimizes the mean squared error of estimation among all straight lines.**
lw_rmse(lw_reg_slope, lw_reg_intercept)
# The proof of this statement requires abstract mathematics that is beyond the scope of this course. On the other hand, we do have a powerful tool – Python – that performs large numerical computations with ease. So we can use Python to confirm that the regression line minimizes the mean squared error.
# ### Numerical Optimization ###
# First note that a line that minimizes the root mean squared error is also a line that minimizes the squared error. The square root makes no difference to the minimization. So we will save ourselves a step of computation and just minimize the mean squared error (mse).
#
# We are trying to predict the number of characters ($y$) based on the number of periods ($x$) in chapters of Little Women. If we use the line
#
# $$
# \mbox{prediction} ~=~ ax + b
# $$
#
# it will have an mse that depends on the slope $a$ and the intercept $b$. The function `lw_mse` takes the slope and intercept as its arguments and returns the corresponding mse.
def lw_mse(any_slope, any_intercept):
    """Return the mean squared error of the line
    y = any_slope * x + any_intercept on the little_women table,
    where x is the 'Periods' column and y is 'Characters'.
    """
    xs = little_women.column('Periods')
    ys = little_women.column('Characters')
    residuals = ys - (any_slope * xs + any_intercept)
    return np.mean(residuals ** 2)
# Let's check that `lw_mse` gets the right answer for the root mean squared error of the regression line. Remember that `lw_mse` returns the mean squared error, so we have to take the square root to get the rmse.
lw_mse(lw_reg_slope, lw_reg_intercept)**0.5
# That's the same as the value we got by using `lw_rmse` earlier:
lw_rmse(lw_reg_slope, lw_reg_intercept)
# You can confirm that `lw_mse` returns the correct value for other slopes and intercepts too. For example, here is the rmse of the extremely bad line that we tried earlier.
lw_mse(-100, 50000)**0.5
# And here is the rmse for a line that is close to the regression line.
lw_mse(90, 4000)**0.5
# If we experiment with different values, we can find a low-error slope and intercept through trial and error, but that would take a while. Fortunately, there is a Python function that does all the trial and error for us.
#
# The `minimize` function can be used to find the arguments of a function for which the function returns its minimum value. Python uses a similar trial-and-error approach, following the changes that lead to incrementally lower output values.
#
# The argument of `minimize` is a function that itself takes numerical arguments and returns a numerical value. For example, the function `lw_mse` takes a numerical slope and intercept as its arguments and returns the corresponding mse.
#
# The call `minimize(lw_mse)` returns an array consisting of the slope and the intercept that minimize the mse. These minimizing values are excellent approximations arrived at by intelligent trial-and-error, not exact values based on formulas.
# NOTE(review): `minimize` is presumably the datascience package's helper,
# imported in an earlier cell — confirm.
best = minimize(lw_mse)
best
# These values are the same as the values we calculated earlier by using the `slope` and `intercept` functions. We see small deviations due to the inexact nature of `minimize`, but the values are essentially the same.
print("slope from formula: ", lw_reg_slope)
print("slope from minimize: ", best.item(0))
print("intercept from formula: ", lw_reg_intercept)
print("intercept from minimize: ", best.item(1))
# ## The Least Squares Line
#
# Therefore, we have found not only that the regression line minimizes mean squared error, but also that minimizing mean squared error gives us the regression line. The regression line is the only line that minimizes mean squared error.
#
# That is why the regression line is sometimes called the "least squares line."
| Mathematics/Statistics/Statistics and Probability Python Notebooks/Computational and Inferential Thinking - The Foundations of Data Science (book)/Notebooks - by chapter/15. Prediction, Correlation, Least Squares/3. Method_of_Least_Squares.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import scipy
from sklearn.model_selection import ParameterGrid
from sklearn.manifold import Isomap
import time
from tqdm import tqdm
import librosa
from librosa import cqt
from librosa.core import amplitude_to_db
from librosa.display import specshow
import os
import glob
# -
# Root of the SOL sample dataset (hard-coded local path).
data_dir = '/Users/sripathisridhar/googleDrive/projects/helix/helix2019_data/SOL'
# ## Data management
# +
# Every .wav one directory level below data_dir, in sorted order.
# NOTE(review): '**' without recursive=True matches exactly one path
# component, i.e. the instrument subdirectories — confirm that is intended.
file_paths = sorted(glob.glob(os.path.join(data_dir, '**', '*.wav')))
file_names = [os.path.basename(file_path) for file_path in file_paths]
# -
# ## Feature extraction
hop_size = 512  # hop length (samples) shared by the CQT and RMS computations
q = 24          # CQT bins per octave
# +
# For each audio file keep one feature vector: the CQT power column at
# the frame with the highest RMS energy, keyed by file basename.
features_dict = {}
feature_key = ''
for file_path in tqdm(file_paths, disable=False):
    # Read audio at its native sampling rate
    waveform, sample_rate = librosa.load(file_path, sr=None)
    # Constant-Q transform spanning 7 octaves at q bins/octave
    coefficients = cqt(
        y=waveform,
        sr=sample_rate,
        hop_length=hop_size,
        bins_per_octave=q,
        n_bins=q * 7,
        sparsity=1e-6,
    )
    scalogram = np.abs(coefficients) ** 2
    # Locate the frame with maximum RMS value
    frame_rms = librosa.feature.rms(y=waveform, hop_length=hop_size)
    loudest_frame = np.argmax(frame_rms)
    feature = scalogram[:, loudest_frame]
    # Stash in the dict under the file's basename
    file_name = os.path.basename(file_path)
    feature_key = f'{file_name}'
    features_dict[feature_key] = feature
# -
# Optional on-disk cache of the extracted features (disabled):
# with h5py.File('fig2_SOL_temp.h5', 'w') as h5_file:
#     for key in features_dict.keys():
#         h5_file[key] = features_dict[key]
# ## Settings list
# +
# Hyperparameter grid (a single combination here). 'all' is a wildcard:
# it is rewritten to '' below so substring matching accepts every key.
grid = {
    'Q': [24],
    'k': [3],
    'comp': ['log'],
    'instr': ['all'],
    'dyn': ['all']
}
settings = list(ParameterGrid(grid))
for setting in settings:
    for field in ('instr', 'dyn'):
        if setting[field] == 'all':
            setting[field] = ''
# -
# ## Isomap embedding
# +
batch_str = []
CQT_OCTAVES = 7  # the extracted features span 7 octaves (n_bins = q*7 above)
features_keys = list(features_dict.keys())
for setting in settings:
    q = setting['Q']  # bins per octave for this setting
    # Batch process and store in a folder
    # Empty strings (from the 'all' wildcard) match every key.
    batch_str = [setting['instr'], setting['dyn']]
    batch_features = []
    for feature_key in features_keys:
        # Get features that match setting
        if all(x in feature_key for x in batch_str):
            batch_features.append(features_dict[feature_key])
    # (bins, n_matching_files) feature matrix
    batch_features = np.stack(batch_features, axis=1)
    # Isomap parameters
    hop_size = 512  # NOTE(review): unused in the rest of this loop
    compression = 'log'  # NOTE(review): unused; dB conversion is hard-coded below
    features = amplitude_to_db(batch_features)
    n_neighbors = setting['k']
    n_dimensions = 3
    n_octaves = 3
    # Prune feature matrix: keep n_octaves of bins starting one octave
    # (+q bins) above the first bin whose relative std exceeds 0.1.
    bin_low = np.where((np.std(features, axis=1) / np.std(features)) > 0.1)[0][0] + q
    bin_high = bin_low + n_octaves*q
    X = features[bin_low:bin_high, :]
    # Z-score Standardization- improves contrast in correlation matrix
    mus = np.mean(X, axis=1)
    sigmas = np.std(X, axis=1)
    X_std = (X - mus[:, np.newaxis]) / (1e-6 + sigmas[:, np.newaxis]) # 1e-6 to avoid runtime division by zero
    # Pearson correlation matrix
    rho_std = np.dot(X_std, X_std.T) / X_std.shape[1]
    # Isomap embedding
    # NOTE(review): the embedding is fit on the correlation matrix rows,
    # so each point represents one CQT bin.
    isomap = Isomap(n_components= n_dimensions, n_neighbors= n_neighbors)
    coords = isomap.fit_transform(rho_std)
    # Get note value
    freqs= librosa.cqt_frequencies(q*CQT_OCTAVES, fmin=librosa.note_to_hz('C1'), bins_per_octave=q) #librosa CQT default fmin is C1
    chroma_list= librosa.core.hz_to_note(freqs[bin_low:bin_high])
    # Repeat each note label reps = q//12 times (bins per semitone).
    notes = []
    reps = q//12
    for chroma in chroma_list:
        for i in range(reps):
            notes.append(chroma)
# -
# ## Isomap and Pearson correlation matrix
# +
curr_fig= plt.figure(figsize=(5.5, 2.75))
ax= curr_fig.add_subplot(121)
ax.axis('off')
import colorcet as cc  # NOTE(review): mid-cell third-party import; consider moving to the top imports
# One color per bin within an octave: q evenly spaced samples from a
# 256-entry cyclic colormap.
subsampled_color_ids = np.floor(np.linspace(0, 256, q, endpoint=False)).astype('int')
color_list= [cc.cyclic_mygbm_30_95_c78[i] for i in subsampled_color_ids]
# Plot embedding with color
# Points one octave apart share a color (index i % q).
for i in range(coords.shape[0]):
    plt.scatter(coords[i, 0], coords[i, 1], color= color_list[i%q], s=30.0)
plt.plot(coords[:, 0], coords[:, 1], color='black', linewidth=0.2)
# Plot Pearson correlation matrix
rho_frequencies = freqs[bin_low:bin_high]
freq_ticklabels = ['A2', 'A3', 'A4']
freq_ticks = librosa.core.note_to_hz(freq_ticklabels)
tick_bins = []
tick_labels= []
# Map each tick note to the nearest CQT bin index.
for i,freq_tick in enumerate(freq_ticks):
    tick_bin = np.argmin(np.abs(rho_frequencies-freq_tick))
    tick_bins.append(tick_bin)
    tick_labels.append(freq_ticklabels[i])
plt.figure(figsize=(2.5,2.5))
plt.imshow(np.abs(rho_std), cmap='magma_r')
plt.xticks(tick_bins)
plt.gca().set_xticklabels(freq_ticklabels)
# plt.xlabel('Log-frequency (octaves)')
plt.yticks(tick_bins)
plt.gca().set_yticklabels(freq_ticklabels)
# plt.ylabel('Log-frequency (octaves)')
plt.gca().invert_yaxis()
plt.clim(0, 1)
# -
# ## Shortest path distance matrix
# +
# Dense adjacency matrix of the k-NN graph underlying the Isomap fit.
A = isomap.nbrs_.kneighbors_graph().toarray()
plt.figure(figsize=(2.5, 2.5))
# Geodesic (shortest-path) distances computed during the Isomap fit.
specshow(isomap.dist_matrix_, cmap='magma_r')
# -
# ### Trumpet scalogram
# +
# Load a single trumpet sample (filename suggests ordinario C4, pp) and
# plot its power CQT in dB.
wav_name= 'TpC-ord-C4-pp.wav'
tpc_dir= 'TpC-ord'
wav_path= os.path.join(data_dir, tpc_dir, wav_name)
wav_raw, sr= librosa.core.load(wav_path, sr=None)
# wav_trim,_= librosa.effects.trim(y=wav_raw)
# shift=0 makes np.roll a no-op copy: trimming is disabled, so
# wav_trim holds the untrimmed audio.
wav_trim= np.roll(wav_raw, shift=0)
# Compute spectrogram
scalogram= librosa.core.cqt(y=wav_trim,
                bins_per_octave=24,
                hop_length=512,
                n_bins=24*7,
                sr=sr,
                sparsity=1e-7)
scalogram= np.abs(scalogram)**2
# Second plot with log magnitude scaling
plt.figure(figsize=(2.5,2.5))
# plt.imshow(scalogram, cmap='magma_r', origin='lower')
librosa.display.specshow(librosa.amplitude_to_db(scalogram), cmap='magma_r')
# plt.ylim((50,167))
# plt.xlim((0,500))
# plt.clim(0,1)
# plt.colorbar()
# -
# ### Harp scalogram
# +
# Load a single harp sample and plot its power CQT in dB.
wav_name= 'Hp-ord-D3-ff.wav'
hp_dir= 'Hp-ord'
wav_path= os.path.join(data_dir, hp_dir, wav_name)
wav_raw, sr= librosa.core.load(wav_path, sr=None)
# wav_trim,_= librosa.effects.trim(y=wav_raw)
# FIX: this cell previously fed the stale `wav_trim` left over from the
# trumpet cell into cqt() (its own wav_trim assignments were commented
# out), so the "harp" scalogram actually showed the trumpet recording.
# Rebind wav_trim to the harp audio just loaded.
wav_trim= wav_raw
# Compute spectrogram (power CQT, 7 octaves, 24 bins/octave)
scalogram= librosa.core.cqt(y=wav_trim,
                bins_per_octave=24,
                hop_length=512,
                n_bins=24*7,
                sr=sr,
                sparsity=1e-7)
scalogram= np.abs(scalogram)**2
plt.figure(figsize=(2.5,2.5))
librosa.display.specshow(librosa.amplitude_to_db(scalogram), cmap='magma_r')
plt.ylim((50,167))
plt.xlim((0,500))
# plt.clim(0,1)
# plt.colorbar()
# -
# ### Trumpet data matrix
# +
# Select trumpet (TpC) fortissimo features, sorted by filename, and
# stack them into a (bins, files) matrix for display.
batch_str=['TpC','ff']
features_keys= features_dict.keys()
data_keys = sorted(
    key for key in features_dict.keys()
    if all(token in key for token in batch_str)
)
data_mat = np.stack([features_dict[key] for key in data_keys], axis=1)
from librosa import cqt_frequencies
from librosa.core import note_to_hz
# Bin center frequencies: 3 octaves at 24 bins/octave starting from C1.
freqs = cqt_frequencies(n_bins=24*3, bins_per_octave=24, fmin=note_to_hz('C1'))
plt.figure(figsize=(2.5, 2.5))
specshow(librosa.amplitude_to_db(data_mat), cmap='magma_r')
# -
# ### Harp data matrix
# +
# Select harp (Hp) fortissimo features, sorted by filename, and stack
# them into a (bins, files) matrix for display.
batch_str=['Hp','ff']
features_keys= features_dict.keys()
data_keys = sorted(
    key for key in features_dict.keys()
    if all(token in key for token in batch_str)
)
data_mat = np.stack([features_dict[key] for key in data_keys], axis=1)
from librosa import cqt_frequencies
from librosa.core import note_to_hz
# Bin center frequencies: 3 octaves at 24 bins/octave starting from C1.
freqs = cqt_frequencies(n_bins=24*3, bins_per_octave=24, fmin=note_to_hz('C1'))
plt.figure(figsize=(2.5, 2.5))
specshow(librosa.amplitude_to_db(data_mat), cmap='magma_r')
# -
| Fig2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from splinter import Browser
from bs4 import BeautifulSoup
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
import json
import time
from IPython.display import Image, display
import pymongo
from datetime import datetime
# Launch a visible Chrome instance driven by splinter; webdriver_manager
# downloads/locates the matching chromedriver binary.
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
# # NASA Mars News
url = "https://redplanetscience.com/"
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, "lxml")
news = soup.find("div", {"id": "news"})
# +
# First row in the news container = latest article.
# NOTE(review): {"class", "row"} is a SET literal, probably meant
# {"class": "row"}; BeautifulSoup appears to accept it as a class filter
# here (same pattern below) — confirm and normalize.
rows = news.find_all("div", {"class", "row"})
first_row = rows[0]
news_title = first_row.find("div", {"class", "content_title"}).text
news_paragraph = first_row.find("div", {"class": "article_teaser_body"}).text
print(news_title)
print(news_paragraph)
# -
# # JPL Mars Space Images - Featured Image
url = "https://spaceimages-mars.com/"
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, "lxml")
image = soup.find("img", {"class": "headerimage"})
# Build an absolute URL from the page's relative img src.
# NOTE(review): assumes image["src"] is relative without a leading
# slash — confirm against the live page.
featured_image_url = url + image["src"]
print(featured_image_url)
display(Image(featured_image_url))
# # Mars Facts
url = "https://galaxyfacts-mars.com/"
browser.visit(url)
html = browser.html
# Parse every <table> on the page into a list of DataFrames.
dfs = pd.read_html(html)
len(dfs)
dfs[0]
dfs[1]
# Keep the second table.
# NOTE(review): the table index depends on the live page layout — confirm.
facts = dfs[1]
# facts = facts.set_index(0).T
# facts.columns = [x.strip(":") for x in facts.columns]
facts
# +
# facts_json = json.loads(facts.to_json(orient="records"))
# facts_json
# -
# Serialize the facts table to an HTML snippet for embedding in a page.
facts_html = facts.to_html()
facts_html
# # <NAME>
# Visit each hemisphere's detail page and collect its full-size image
# URL and title.
url = "https://marshemispheres.com/"
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, "lxml")
hemis = soup.find_all("div", {"class":"item"})
len(hemis)
hemi_info = []
for item in hemis:
    detail_url = url + item.find("a", {"class": "itemLink"})["href"]
    browser.visit(detail_url)
    time.sleep(1)  # give the detail page a moment to load
    html = browser.html
    soup = BeautifulSoup(html, "lxml")
    img_link = url + soup.find("img", {"class", "wide-image"})["src"]
    img_title = soup.find("h2", {"class", "title"}).text
    hemi_info.append({"img_url": img_link, "title": img_title})
hemi_info
# All scraping done — release the browser/driver.
browser.quit()
# # Combine
# +
# Bundle every scraped artifact into one document for storage.
final_data = {
    "news_title": news_title,
    "news_paragraph": news_paragraph,
    "featured_image_url": featured_image_url,
    "mars_facts": facts_html,
    "hemispheres": hemi_info,
    "last_updated": datetime.utcnow()
}
print(final_data)
# -
# # MongoDB
# +
import pymongo  # NOTE(review): redundant — pymongo is already imported at the top of the file
# Create connection variable
conn = 'mongodb://localhost:27017'
# Pass connection to the pymongo instance.
client = pymongo.MongoClient(conn)
# Connect to a database. Will create one if not already available.
db = client.mars_scrape
# -
# Persist the combined scrape document.
db.mars_data.insert_one(final_data)
# Fetch the most recently stored document (cursor limited to 1).
# NOTE(review): this rebinds `facts`, shadowing the DataFrame above.
facts = db.mars_data.find({}, sort=[('last_updated', pymongo.DESCENDING )], limit=1)
facts[0]
| webscraping.ipynb |