code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Defining ground truth
# +
# standard
import pickle
from datetime import timedelta
from functools import partial
# third party
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# first party
from config import Config
from data_containers import LocationSeries
from deconvolution import deconvolution
# +
# Load the delay-distribution kernel and the convolved ground-truth signals
# pickled by earlier pipeline steps, as of the configured ground-truth date.
as_of = Config.ground_truth_date
kernel_file = "../data/naive_delay_distributions/uncensored_delay_distribution.p"
# Use context managers so file handles are closed deterministically
# (the original `pickle.load(open(...))` pattern leaked the handles).
with open(kernel_file, "rb") as f:
    kernel_dict = pickle.load(f)
convolved_truth_indicator = Config.ground_truth_indicator
convolved_response_prefix = (
    f'../data/jhu-csse_confirmed_incidence_prop/'
    f'{convolved_truth_indicator.source}_{convolved_truth_indicator.signal}'
)
with open(f'{convolved_response_prefix}_{as_of}.p', 'rb') as f:
    convolved_ground_truth = pickle.load(f)
# +
# Cross-validated trend-filtering deconvolution, pre-configured so it can be
# applied per location below. `natural=False` turns off natural boundary
# conditions in the trend-filtering fit; the gamma grid is pinned to 0, so
# only lambda is cross-validated.
tf = partial(
deconvolution.deconvolve_tf_cv,
k=3,
fit_func=partial(deconvolution.deconvolve_tf, natural=False),
# lambda grid: 10 log-spaced values in [10, ~3162] plus a few large values
# to allow very smooth fits
lam_cv_grid=np.r_[np.logspace(1, 3.5, 10), [5000, 8000, 15000]],
gam_cv_grid=np.array([0.]),
verbose=False,
)
# Estimation window: from the first date with data up to as_of minus the
# indicator's reporting lag (more recent dates are not yet fully reported).
start_date = Config.first_data_date
end_date = as_of - timedelta(convolved_truth_indicator.lag)
full_dates = pd.date_range(start_date, end_date)
# Deconvolve each location's convolved signal to estimate its ground-truth
# series, then pickle the whole dict of LocationSeries keyed by geo_value.
ground_truths = {}
for loc, data in convolved_ground_truth.items():
print(data.geo_value, data.geo_type)
# Fill gaps with last-observation-carried-forward over the full window.
signal = data.get_data_range(start_date, end_date, 'locf')
out = tf(
y=np.array(signal),
x=np.arange(1, len(signal)+1),
kernel_dict=kernel_dict,
as_of_date=as_of)
# We only store estimates up till t-2, because the convolutional reporting distribution
# is not supported on 0.
ground_truths[data.geo_value] = LocationSeries(
data.geo_value, data.geo_type, dict(zip(full_dates[:-1], out[:-1])))
with open(f'../data/tf_ground_truths.p', 'wb') as f:
pickle.dump(ground_truths, f, protocol=pickle.HIGHEST_PROTOCOL)
# -
# How many locations were deconvolved?
len(ground_truths.keys())
# Sanity-check plot for one state (NY): raw convolved cases as gray dots
# versus the trend-filtered deconvolved estimate as a dashed line.
plt.figure(figsize=(12, 5))
plt.scatter(convolved_ground_truth[('jhu-csse', 'confirmed_incidence_prop', 'state', 'ny')].dates,
convolved_ground_truth[('jhu-csse', 'confirmed_incidence_prop', 'state', 'ny')].values,
color='gray', s=0.3, label='cases')
plt.plot(ground_truths['ny'].dates, ground_truths['ny'].values, ls='--', label='tf')
plt.legend()
plt.show()
|
code/01_generate_ground_truth.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import napari
from skimage.io import imread
from skimage.morphology import disk, ball
from skimage.filters.rank import gradient
# Load the demo image and inspect its dimensions.
image = imread('blobs.tif')
image.shape
# +
# Build a napari viewer showing the raw image and its local gradient
# (rank-filter gradient over a radius-5 disk) as additive color overlays.
viewer = napari.Viewer()
viewer.add_image(image, colormap='green', blending='additive')
viewer.add_image(gradient(image, disk(5)), name='gradient', colormap='magenta', blending='additive')
# Path annotation between two points; the plot-profile widget samples along it.
viewer.add_shapes([[ 100,80], [140, 150]], shape_type='path', edge_color='cyan', edge_width=3)
# -
from napari_plot_profile import PlotProfile
# Dock the profile widget so intensities along the path are plotted interactively.
profiler = PlotProfile(viewer)
viewer.window.add_dock_widget(profiler, area='right')
viewer.camera.zoom = 3
napari.utils.nbscreenshot(viewer)
# NOTE(review): presumably returns the sampled intensity values shown in the
# plot -- it is a private method, so confirm against napari_plot_profile.
profiler._list_values()
napari.utils.nbscreenshot(viewer)
|
docs/demo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Importing packages
from pyLMS import *
import matplotlib.pyplot as plt
# # Loading different files
# Load three .mat exports through the pyLMS wrapper: a time-domain
# accelerometer trace, a power spectral density, and a (complex) spectrum.
accelerometer = pyLMS('data/accelerometro.mat')
psd = pyLMS('data/psd.mat')
spectrum = pyLMS('data/spectrum.mat')
# # Plotting data
# +
# Missing dependency in the original: `np.abs` is used below but numpy was
# never imported explicitly (it only worked if `from pyLMS import *`
# happened to re-export it). Import it here so the cell is self-contained.
import numpy as np

fig, axes = plt.subplots(3, 1)
# One panel per dataset: (axis, dataset, y-transform). The spectrum is
# complex-valued, so we plot its magnitude.
panels = [
    (axes[0], accelerometer, lambda y: y),
    (axes[1], spectrum, np.abs),
    (axes[2], psd, lambda y: y),
]
for ax, ds, transform in panels:
    ax.plot(ds['signals']['x'], transform(ds['signals']['y']))
    ax.set_xlabel(ds['magnitudes']['x'] + ' [' + ds['units']['x'] + ']')
    ax.set_ylabel(ds['magnitudes']['y'] + ' [' + ds['units']['y'] + ']')
    ax.grid()
plt.tight_layout()
# -
|
Example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
import numpy as np
from keras.models import Model
from keras.layers import Input
from keras.layers.embeddings import Embedding
from keras import backend as K
def format_decimal(arr, places=6):
    """Round each value in *arr* to *places* decimal places.

    Uses the scale-round-unscale trick so the output prints compactly.
    Returns a new list; the input iterable is not modified.
    """
    scale = 10 ** places
    formatted = []
    for value in arr:
        formatted.append(round(value * scale) / scale)
    return formatted
# ### Embedding
# **[embeddings.Embedding.0] input_dim 5, output_dim 3, input_length=7, mask_zero=False, dropout=0.**
# +
# Fixture generator for the Embedding layer with masking and dropout off.
# NOTE(review): `dropout` on Embedding and `Model(input=..., output=...)`
# are Keras 1.x signatures -- this notebook targets that pinned version.
input_dim = 5
output_dim = 3
input_length = 7
data_in_shape = (input_length,)
emb = Embedding(input_dim, output_dim, input_length=input_length, mask_zero=False, dropout=0.)
layer_0 = Input(shape=data_in_shape)
layer_1 = emb(layer_0)
model = Model(input=layer_0, output=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
np.random.seed(1200 + i)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
# NOTE(review): randint's upper bound is exclusive, so indices only reach
# input_dim - 2; index input_dim - 1 is never exercised -- confirm intended.
data_in = np.random.randint(0, input_dim - 1, data_in_shape)
print('')
print('in shape:', data_in_shape)
print('in:', data_in.ravel().tolist())
result = model.predict(np.array([data_in]))
print('out shape:', result[0].shape)
print('out:', format_decimal(result[0].ravel().tolist()))
# -
# **[embeddings.Embedding.1] input_dim 20, output_dim 5, input_length=10, mask_zero=True, dropout=0.**
# +
# Same fixture recipe as the previous cell, but with mask_zero=True so
# index 0 is reserved as the padding/mask value (Keras 1.x API).
input_dim = 20
output_dim = 5
input_length = 10
data_in_shape = (input_length,)
emb = Embedding(input_dim, output_dim, input_length=input_length, mask_zero=True, dropout=0.)
layer_0 = Input(shape=data_in_shape)
layer_1 = emb(layer_0)
model = Model(input=layer_0, output=layer_1)
# set weights to random (use seed for reproducibility)
# A distinct seed base (1210) keeps this cell's weights independent of the others.
weights = []
for i, w in enumerate(model.get_weights()):
np.random.seed(1210 + i)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
# Upper bound is exclusive, so sampled indices are in [0, input_dim - 2].
data_in = np.random.randint(0, input_dim - 1, data_in_shape)
print('')
print('in shape:', data_in_shape)
print('in:', data_in.ravel().tolist())
result = model.predict(np.array([data_in]))
print('out shape:', result[0].shape)
print('out:', format_decimal(result[0].ravel().tolist()))
# -
# **[embeddings.Embedding.2] input_dim 33, output_dim 2, input_length=5, mask_zero=False, dropout=0.5**
# +
# Same fixture recipe with dropout=0.5 on the embedding.
# NOTE(review): presumably dropout is inactive during model.predict, making
# the printed output deterministic -- confirm for the pinned Keras version.
input_dim = 33
output_dim = 2
input_length = 5
data_in_shape = (input_length,)
emb = Embedding(input_dim, output_dim, input_length=input_length, mask_zero=False, dropout=0.5)
layer_0 = Input(shape=data_in_shape)
layer_1 = emb(layer_0)
model = Model(input=layer_0, output=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
np.random.seed(1220 + i)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
# Upper bound is exclusive, so sampled indices are in [0, input_dim - 2].
data_in = np.random.randint(0, input_dim - 1, data_in_shape)
print('')
print('in shape:', data_in_shape)
print('in:', data_in.ravel().tolist())
result = model.predict(np.array([data_in]))
print('out shape:', result[0].shape)
print('out:', format_decimal(result[0].ravel().tolist()))
# -
|
notebooks/layers/embedding/Embedding.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: solaris
# language: python
# name: python3
# ---
# # Tiling imagery and labels using the `solaris` Python API
#
# This tutorial will walk you through an example case of using the `solaris` Python API to tile one of the SpaceNet cities - in this case, Rio de Janeiro. We'll assume that you have already [installed solaris](https://solaris.readthedocs.io/en/master/installation.html).
#
# __First__, download and extract two files from the `spacenet-dataset` AWS S3 bucket:
#
# 1. Imagery: https://s3.amazonaws.com/spacenet-dataset/AOIs/AOI_1_Rio/PS-RGB/PS-RGB_mosaic_013022223133.tif
# 2. Vector labels: https://spacenet-dataset.s3.amazonaws.com/AOIs/AOI_1_Rio/srcData/buildingLabels/Rio_Buildings_Public_AOI_v2.geojson
#
# Move both of these files to your working directory or alter the paths below to point to the files at the downloaded location.
#
# As you're getting started, your directory should have the following in it:
#
# - A directory named 3band which contains the imagery files
# - A directory named geojson which contains two files: Rio_Buildings_Public_AOI_v2.geojson and Rio_OUTLINE_Public_AOI.geojson (we only need the first of those two).
#
# Feel free to open up the imagery/vector labels in QGIS or another browser and explore to see what you're looking at.
# ## Tiling the imagery
#
# For this working example we'll tile into 500-by-500-pixel chips beginning at the top left corner. Note that you can also tile based on the metric units covered by an image - for example, we could specify 250 meter-by-250 meter chips (which is the same size in this case). See the documentation for `sol.tile.raster_tile.RasterTiler()` for more details.
#
# Initialize the `RasterTiler` object:
# +
import solaris as sol
import os
# The tiler is configured once and can then tile any number of images with
# the same chip size and output settings.
raster_tiler = sol.tile.raster_tile.RasterTiler(dest_dir='rio_chips', # the directory to save images to
src_tile_size=(500, 500), # the size of the output chips
verbose=True)
# -
# This object can be re-used with the same parameters for multiple images if desired. This way, you can tile multiple images collected over the same geography with the same settings. There are additional arguments that you can provide (for example, the destination coordinate reference system).
#
# To tile the imagery, pass the image file to the tiler's `tile()` method, which returns the CRS of the source raster for vector tiling:
# NOTE(review): hard-coded absolute path from the tutorial author's machine --
# point this at your local copy of the downloaded mosaic GeoTIFF.
raster_bounds_crs = raster_tiler.tile('/Users/nweir/code/cosmiq_repos/solaris/PS-RGB_mosaic_013022223133.tif')
# This should throw a few warnings/errors about the input file, which you can ignore. You'll create 1600 files in your "rio_chips" subdirectory, one for each 500x500 tile. The filenames are in the format `[src-filename]\_[longitude]\_[latitude].tif`. Reprojection takes a while, so be patient.
#
# Once that process finishes, we'll use these auto-generated tile boundaries, which are stored in `raster_tiler`, to create vector tiles.
# These bounds are in the format `[left, bottom, right, top]` in the input file CRS. The following line prints the first set of bounds (there are 1600 in the list):
print(raster_tiler.tile_bounds[0])
# `raster_tiler.tile_bounds` is passed as an argument into the `VectorTiler` instance.
vector_tiler = sol.tile.vector_tile.VectorTiler(dest_dir='rio_labels',
verbose=True)
# Tile the building labels using the exact raster tile boundaries (and their
# CRS) so image chips and label chips line up one-to-one.
vector_tiler.tile('/Users/nweir/code/cosmiq_repos/solaris/Rio_Buildings_Public_AOI_v2.geojson',
tile_bounds=raster_tiler.tile_bounds,
tile_bounds_crs=raster_bounds_crs)
#
#
# ...And you're done! Simple as that. For more details, check out the [tiling API docs](https://solaris.readthedocs.io/en/master/api.html).
|
docs/tutorials/notebooks/api_tiling_tutorial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Example of Logistic regression
# ## Predict student admission based on exams result
# Data is taken from [Andrew Ng's CS229 course on Machine Learning at Stanford](http://cs229.stanford.edu/).
import pandas as pd
# Exam scores plus admission outcome. The file has no header row, so the
# column names are supplied explicitly.
data = pd.read_csv("datasets/ex2data1.txt", header=None,
names=['Exam1', 'Exam2', 'Admitted'])
data.head()
# Historical data from previous students: each student has two exams scores associated and the final admission result (1=yes, 0= no).
# Let's plot the points in a chart (green means admitted, red not admitted).
import matplotlib.pyplot as plt
# %matplotlib inline
# Color per data point: green = admitted (1), red = not admitted (0).
colours = ['red' if i==0 else 'green' for i in data.Admitted]
fig,ax = plt.subplots()
ax.scatter(data.Exam1, data.Exam2, c=colours)
ax.grid(True)
ax.set_xlabel("Exam 1 score")
ax.set_ylabel("Exam 2 score")
fig.suptitle("Student admission vs. past two exams")
# If the score of the first or the second exam was too low, it might be not enough to be admitted. You need a good balance.
# Let's try to quantify it.
# ## The sigmoid function
# Logistic regression uses a special function to model how the probability of the event "Admitted" P(y=1) is affected by our variables (the exams score).
# This function is the sigmoid function:
# $$ g(z) = \frac{1}{1 + e^{-z}}$$
import numpy as np
def sigmoid(z):
    """Element-wise logistic sigmoid, 1 / (1 + exp(-z)).

    Accepts a scalar, vector or matrix; numpy broadcasting handles all
    shapes, so the result has the same shape as the input.
    """
    return np.reciprocal(1.0 + np.exp(np.negative(z)))
# Let's plot it:
# Sample the sigmoid on [-10, 10) and plot its characteristic S-curve.
x = np.arange(-10., 10., 0.2)
sig = sigmoid(x)
fig,ax = plt.subplots()
ax.plot(x,sig)
ax.grid(True)
ax.set_xlabel("x")
ax.set_ylabel("Sigmoid(x)")
fig.suptitle("The sigmoid function")
# Unit tests:
# Smoke checks: a scalar input and a vector input both evaluate.
sigmoid(1)
sigmoid(np.array([2,3]))
# ## Logistic Response function: cost and gradient
# This is the logistic function to model our admission:
#
# $P(y=1) = \frac{1}{1 + e^{-(\beta_{0} + \beta_{1} \cdot x_{1} + ... + \beta_{n} \cdot x_{n}) }} $
# where y is the admission result (0 or 1) and x are the exams scores.
# We have in our example x1 and x2 (two exams).
# Our next step is to find the correct beta parameters for the model.
# And we will do it by using our historical data as a training set, like we did for the linear regression, using a gradient descent algorithm (see [the blog post](https://mashimo.wordpress.com/) for details).
# The algorithm will find the optimal beta parameters that **minimise** the cost. We need to define a function to calculate the cost and the gradient:
def getCostGradient(beta, X, y):
    """Cost and gradient of logistic regression at parameters *beta*.

    Arguments:
        beta: parameter vector (array of float)
        X: design matrix, one row per training example
        y: 0/1 labels, one per training example

    Returns:
        (cost, gradient) -- the scalar cross-entropy cost averaged over the
        training set, and its gradient with the same shape as beta.
    """
    # Drop a possible singleton column from y so the products below stay
    # element-wise instead of broadcasting to a matrix.
    labels = np.squeeze(y)
    n_samples = len(labels)

    # Model predictions for all examples at once.
    predictions = sigmoid(np.dot(X, beta))

    # Average negative log-likelihood (cross-entropy).
    log_likelihood = labels * np.log(predictions) + (1 - labels) * np.log(1 - predictions)
    cost = -np.sum(log_likelihood) / n_samples

    # Gradient of the cost with respect to beta.
    residual = predictions - labels
    gradient = np.dot(residual, X) / n_samples
    return (cost, gradient)
# Unit test:
getCostGradient(np.array([-1,0.2]), np.array([[1,34], [1,35]]), np.array([0,1]))
# ## Split data into X (training data) and y (target variable)
# Column count *before* the intercept column is added (captured on purpose:
# after the insert the frame has cols+1 columns).
cols = data.shape[1]
cols
# add the intercept
data.insert(0, 'Ones', 1)
# 0:cols now selects [Ones, Exam1, Exam2]; column `cols` is Admitted.
X = data.iloc[:,0:cols] # the first columns but the last are X
X = np.array(X.values)
y = data.iloc[:,cols:cols+1] # last column is the y
y = np.array(y.values)
initialBeta = np.zeros(cols) # could be random also
# what is the cost given these initial beta parameters?
getCostGradient(initialBeta, X, y)
# Initial cost is 0.69
# ## Fit the beta parameters
# To find the optimal beta parameters we use a highly tuned function (*minimize*) from the package *SciPy*.
# We need to provide the cost and the gradient function, the input data and which method to use (we use the classic Newton). The argument Jac=True tells that cost and gradient are together in the same function.
import scipy.optimize as opt
# jac=True tells minimize that the objective returns (cost, gradient) as a
# single tuple, so no separate gradient callback is needed.
result = opt.minimize(fun = getCostGradient, x0 = initialBeta, args = (X, y),
method = 'Newton-CG',jac = True)
result.message
optimalBeta = result.x
# and here we have our final beta parameters:
optimalBeta
# $$P(y=1) = \frac{1}{1 + e^{25.17 - 0.21 \cdot x_{1} - 0.20 \cdot x_{2} }} $$
# ## Plot the decision boundary
# We can use these beta parameters to plot the decision boundary on the training data.
# We only need two points to plot a line, so we choose two endpoints: the min and the max among the X training data (we add a small margin of 2 to have a longer line in the plot, looks better).
# Two endpoints spanning the range of X's column 2, padded by 2 points so
# the plotted boundary line extends past the data cloud.
plot_x = np.array([min(X[:,2])-2, max(X[:,2])+2])
plot_x
# The boundary lies where the P(y=1) = P(y=0) = 0.5
# which means that beta * X shall be zero
# Solve beta0 + beta1*x1 + beta2*x2 = 0 for x2 at the two endpoints.
plot_y = (-1./optimalBeta[2]) * (optimalBeta[1] * plot_x + optimalBeta[0])
plot_y
fig,ax = plt.subplots()
ax.scatter(data.Exam1, data.Exam2, c=colours)
ax.plot(plot_x, plot_y)
ax.grid(True)
ax.set_xlabel("Exam 1 score")
ax.set_ylabel("Exam 2 score")
fig.suptitle("Student admission vs. past two exams")
# The blue line is our decision boundary: when your exams score lie below the line then probably (that is the prediction) you will not be admitted to University. If they lie above, probably you will.
# As you can see, the boundary is not predicting perfectly on the training historical data. It's a model. Not perfect but useful.
# What we can do is to measure its accuracy.
# ## Accuracy
def predict(beta, X):
    """Classify each row of X as 1 or 0 by thresholding P(y=1) at 0.5."""
    scores = sigmoid(np.dot(X, beta))
    labels = []
    for p in scores:
        labels.append(1 if p >= 0.5 else 0)
    return labels
predictions = predict(optimalBeta, X)
# A prediction counts as correct when it matches the observed label.
correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0))
else 0 for (a, b) in zip(predictions, y)]
# Bug fix: the original computed `sum(correct) % len(correct)` -- a modulo,
# which only coincidentally equals the percentage when there are exactly
# 100 samples, and yields 0 at 100% accuracy. Use the ratio instead.
accuracy = sum(correct) / len(correct) * 100
print ('accuracy = {0}%'.format(accuracy) )
# Just for fun, let's say that my scores are 40 in the first exam and 78 in the second one:
# Feature vector for a hypothetical student; the leading 1. multiplies the
# intercept term, matching the 'Ones' column that was added to X.
myExams = np.array([1., 40., 78.])
sigmoid(np.dot(myExams, optimalBeta))
# Uh oh, looks like my probability of being admitted to University is only 23% ...
|
01-Regression/LogisticRegression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_pytorch_p36
# language: python
# name: conda_pytorch_p36
# ---
# + [markdown] nbpresent={"id": "62d4851b-e85e-419e-901a-d5c03db59166"}
# # Population Segmentation with SageMaker
#
# In this notebook, you'll employ two, unsupervised learning algorithms to do **population segmentation**. Population segmentation aims to find natural groupings in population data that reveal some feature-level similarities between different regions in the US.
#
# Using **principal component analysis** (PCA) you will reduce the dimensionality of the original census data. Then, you'll use **k-means clustering** to assign each US county to a particular cluster based on where a county lies in component space. How each cluster is arranged in component space can tell you which US counties are most similar and what demographic traits define that similarity; this information is most often used to inform targeted, marketing campaigns that want to appeal to a specific group of people. This cluster information is also useful for learning more about a population by revealing patterns between regions that you otherwise may not have noticed.
#
# ### US Census Data
#
# You'll be using data collected by the [US Census](https://en.wikipedia.org/wiki/United_States_Census), which aims to count the US population, recording demographic traits about labor, age, population, and so on, for each county in the US. The bulk of this notebook was taken from an existing SageMaker example notebook and [blog post](https://aws.amazon.com/blogs/machine-learning/analyze-us-census-data-for-population-segmentation-using-amazon-sagemaker/), and I've broken it down further into demonstrations and exercises for you to complete.
#
# ### Machine Learning Workflow
#
# To implement population segmentation, you'll go through a number of steps:
# * Data loading and exploration
# * Data cleaning and pre-processing
# * Dimensionality reduction with PCA
# * Feature engineering and data transformation
# * Clustering transformed data with k-means
# * Extracting trained model attributes and visualizing k clusters
#
# These tasks make up a complete, machine learning workflow from data loading and cleaning to model deployment. Each exercise is designed to give you practice with part of the machine learning workflow, and to demonstrate how to use SageMaker tools, such as built-in data management with S3 and built-in algorithms.
#
# ---
# -
# First, import the relevant libraries into this SageMaker notebook.
# + nbpresent={"id": "41d6f28b-3c7e-4d68-a8cb-4e063ec6fe27"}
# data managing and display libs
import pandas as pd
import numpy as np
import os
import io
import matplotlib.pyplot as plt
import matplotlib
# %matplotlib inline
# -
# sagemaker libraries
import boto3
import sagemaker
# ## Loading the Data from Amazon S3
#
# This particular dataset is already in an Amazon S3 bucket; you can load the data by pointing to this bucket and getting a data file by name.
#
# > You can interact with S3 using a `boto3` client.
# boto3 client to get S3 data
# Low-level S3 client; credentials come from the notebook instance's IAM role.
s3_client = boto3.client('s3')
# S3 bucket name
# Public bucket hosting the census CSV used throughout this notebook.
bucket_name='aws-ml-blog-sagemaker-census-segmentation'
# Take a look at the contents of this bucket; get a list of objects that are contained within the bucket and print out the names of the objects. You should see that there is one file, 'Census_Data_for_SageMaker.csv'.
# +
# get a list of objects in the bucket
obj_list=s3_client.list_objects(Bucket=bucket_name)
# Collect the object keys; a comprehension replaces the original
# append-in-a-loop (same result, less ceremony).
# NOTE(review): list_objects returns at most 1000 keys per call -- fine
# here, since this bucket is known to hold a single file.
files = [contents['Key'] for contents in obj_list['Contents']]
print(files)
# +
# there is one file --> one key
# The bucket holds exactly one object, so its first key is the census CSV.
file_name=files[0]
print(file_name)
# -
# Retrieve the data file from the bucket with a call to `client.get_object()`.
# +
# get an S3 object by passing in the bucket and file name
data_object = s3_client.get_object(Bucket=bucket_name, Key=file_name)
# what info does the object contain?
# `display` renders the response dict (metadata plus a streaming Body).
display(data_object)
# -
# information is in the "Body" of the object
# .read() pulls the entire payload into memory as raw bytes.
data_body = data_object["Body"].read()
print('Data type: ', type(data_body))
# This is a `bytes` datatype, which you can read it in using [io.BytesIO(file)](https://docs.python.org/3/library/io.html#binary-i-o).
# + nbpresent={"id": "97a46770-dbe0-40ea-b454-b15bdec20f53"}
# read in bytes data
# Wrap the raw bytes in a file-like object so pandas can parse it directly.
data_stream = io.BytesIO(data_body)
# create a dataframe
counties_df = pd.read_csv(data_stream, header=0, delimiter=",")
counties_df.head()
# + [markdown] nbpresent={"id": "c2f7177c-9a56-46a7-8e51-53c1ccdac759"}
# ## Exploratory Data Analysis (EDA)
#
# Now that you've loaded in the data, it is time to clean it up, explore it, and pre-process it. Data exploration is one of the most important parts of the machine learning workflow because it allows you to notice any initial patterns in data distribution and features that may inform how you proceed with modeling and clustering the data.
#
# ### EXERCISE: Explore data & drop any incomplete rows of data
#
# When you first explore the data, it is good to know what you are working with. How many data points and features are you starting with, and what kind of information can you get at a first glance? In this notebook, you're required to use complete data points to train a model. So, your first exercise will be to investigate the shape of this data and implement a simple, data cleaning step: dropping any incomplete rows of data.
#
# You should be able to answer the **question**: How many data points and features are in the original, provided dataset? (And how many points are left after dropping any incomplete rows?)
# +
# print out stats about data
# rows = data, cols = features
print('(orig) rows, cols: ', counties_df.shape)
# drop any incomplete data
# axis=0 removes any row (county) containing at least one missing value.
clean_counties_df = counties_df.dropna(axis=0)
print('(clean) rows, cols: ', clean_counties_df.shape)
# + [markdown] nbpresent={"id": "fdd10c00-53ba-405d-8622-fbfeac17d3bb"}
# ### EXERCISE: Create a new DataFrame, indexed by 'State-County'
#
# Eventually, you'll want to feed these features into a machine learning model. Machine learning models need numerical data to learn from and not categorical data like strings (State, County). So, you'll reformat this data such that it is indexed by region and you'll also drop any features that are not useful for clustering.
#
# To complete this task, perform the following steps, using your *clean* DataFrame, generated above:
# 1. Combine the descriptive columns, 'State' and 'County', into one, new categorical column, 'State-County'.
# 2. Index the data by this unique State-County name.
# 3. After doing this, drop the old State and County columns and the CensusId column, which does not give us any meaningful demographic information.
#
# After completing this task, you should have a DataFrame with 'State-County' as the index, and 34 columns of numerical data for each county. You should get a resultant DataFrame that looks like the following (truncated for display purposes):
# ```
# TotalPop Men Women Hispanic ...
#
# Alabama-Autauga 55221 26745 28476 2.6 ...
# Alabama-Baldwin 195121 95314 99807 4.5 ...
# Alabama-Barbour 26932 14497 12435 4.6 ...
# ...
#
# ```
# -
# index data by 'State-County'
# Build a unique, human-readable index (e.g. "Alabama-Autauga") so the
# remaining numeric features can stand alone as model input.
clean_counties_df.index=clean_counties_df['State'] + "-" + clean_counties_df['County']
clean_counties_df.head()
# drop the old State and County columns, and the CensusId column
# clean df should be modified or created anew
# These are categorical/identifier columns with no clustering signal.
drop=["CensusId" , "State" , "County"]
clean_counties_df = clean_counties_df.drop(columns=drop)
clean_counties_df.head()
# Now, what features do you have to work with?
# features
features_list = clean_counties_df.columns.values
print('Features: \n', features_list)
# ## Visualizing the Data
#
# In general, you can see that features come in a variety of ranges, mostly percentages from 0-100, and counts that are integer values in a large range. Let's visualize the data in some of our feature columns and see what the distribution, over all counties, looks like.
#
# The below cell displays **histograms**, which show the distribution of data points over discrete feature ranges. The x-axis represents the different bins; each bin is defined by a specific range of values that a feature can take, say between the values 0-5 and 5-10, and so on. The y-axis is the frequency of occurrence or the number of county data points that fall into each bin. I find it helpful to use the y-axis values for relative comparisons between different features.
#
# Below, I'm plotting a histogram comparing methods of commuting to work over all of the counties. I just copied these feature names from the list of column names, printed above. I also know that all of these features are represented as percentages (%) in the original data, so the x-axes of these plots will be comparable.
# + nbpresent={"id": "7e847244-7b42-490f-8945-46e234a3af75"}
# transportation (to work)
transport_list = ['Drive', 'Carpool', 'Transit', 'Walk', 'OtherTransp']
n_bins = 50 # can decrease to get a wider bin (or vice versa)
# One small histogram figure per commuting-method feature.
for column_name in transport_list:
    # Bug fix: plt.subplots() returns a (figure, axes) tuple; the original
    # assigned the whole tuple to `ax` and then immediately clobbered it
    # with the return of plt.hist. Unpack properly and draw on the Axes.
    fig, ax = plt.subplots(figsize=(6, 3))
    ax.hist(clean_counties_df[column_name], bins=n_bins)
    ax.set_title("Histogram of " + column_name, fontsize=12)
    plt.show()
# -
# ### EXERCISE: Create histograms of your own
#
# Commute transportation method is just one category of features. If you take a look at the 34 features, you can see data on profession, race, income, and more. Display a set of histograms that interest you!
#
# +
# create a list of features that you want to compare or examine
# employment types
my_list = ['PrivateWork', 'PublicWork', 'SelfEmployed', 'FamilyWork', 'Unemployment']
n_bins = 30 # define n_bins
# histogram creation code is similar to above
for column_name in my_list:
    # Bug fix: unpack the (figure, axes) tuple from plt.subplots() instead
    # of the original's misleading `ax = plt.subplots(...)` assignment,
    # and draw directly on the Axes object.
    fig, ax = plt.subplots(figsize=(6, 3))
    ax.hist(clean_counties_df[column_name], bins=n_bins)
    ax.set_title("Histogram of " + column_name, fontsize=12)
    plt.show()
# -
# ### EXERCISE: Normalize the data
#
# You need to standardize the scale of the numerical columns in order to consistently compare the values of different features. You can use a [MinMaxScaler](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html) to transform the numerical values so that they all fall between 0 and 1.
# +
# scale numerical features into a normalized range, 0-1
from sklearn.preprocessing import MinMaxScaler
scaler=MinMaxScaler()
# store them in this dataframe
counties_scaled=pd.DataFrame(scaler.fit_transform(clean_counties_df.astype(float)))
# get same features and State-County indices
counties_scaled.columns=clean_counties_df.columns
counties_scaled.index=clean_counties_df.index
counties_scaled.head()
# -
counties_scaled.describe()
# ---
# # Data Modeling
#
#
# Now, the data is ready to be fed into a machine learning model!
#
# Each data point has 34 features, which means the data is 34-dimensional. Clustering algorithms rely on finding clusters in n-dimensional feature space. For higher dimensions, an algorithm like k-means has a difficult time figuring out which features are most important, and the result is, often, noisier clusters.
#
# Some dimensions are not as important as others. For example, if every county in our dataset has the same rate of unemployment, then that particular feature doesn’t give us any distinguishing information; it will not help to separate counties into different groups because its value doesn’t *vary* between counties.
#
# > Instead, we really want to find the features that help to separate and group data. We want to find features that cause the **most variance** in the dataset!
#
# So, before I cluster this data, I’ll want to take a dimensionality reduction step. My aim will be to form a smaller set of features that will better help to separate our data. The technique I’ll use is called PCA or **principal component analysis**
#
# ## Dimensionality Reduction
#
# PCA attempts to reduce the number of features within a dataset while retaining the “principal components”, which are defined as *weighted*, linear combinations of existing features that are designed to be linearly independent and account for the largest possible variability in the data! You can think of this method as taking many features and combining similar or redundant features together to form a new, smaller feature set.
#
# We can reduce dimensionality with the built-in SageMaker model for PCA.
# ### Roles and Buckets
#
# > To create a model, you'll first need to specify an IAM role, and to save the model attributes, you'll need to store them in an S3 bucket.
#
# The `get_execution_role` function retrieves the IAM role you created at the time you created your notebook instance. Roles are essentially used to manage permissions and you can read more about that [in this documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). For now, know that we have a FullAccess notebook, which allowed us to access and download the census data stored in S3.
#
# You must specify a bucket name for an S3 bucket in your account where you want SageMaker model parameters to be stored. Note that the bucket must be in the same region as this notebook. You can get a default S3 bucket, which automatically creates a bucket for you and in your region, by storing the current SageMaker session and calling `session.default_bucket()`.
# +
from sagemaker import get_execution_role
# NOTE(review): `sagemaker.Session()` relies on `import sagemaker` having run
# in an earlier cell — only `get_execution_role` is imported here; verify.
session = sagemaker.Session() # store the current SageMaker session
# get IAM role (the role attached to this notebook instance; governs S3/API permissions)
role = get_execution_role()
print(role)
# -
# get default bucket (auto-created in this notebook's region if it doesn't exist)
bucket_name = session.default_bucket()
print(bucket_name)
print()
# ## Define a PCA Model
#
# To create a PCA model, I'll use the built-in SageMaker resource. A SageMaker estimator requires a number of parameters to be specified; these define the type of training instance to use and the model hyperparameters. A PCA model requires the following constructor arguments:
#
# * role: The IAM role, which was specified, above.
# * train_instance_count: The number of training instances (typically, 1).
# * train_instance_type: The type of SageMaker instance for training.
# * num_components: An integer that defines the number of PCA components to produce.
# * sagemaker_session: The session used to train on SageMaker.
#
# Documentation on the PCA model can be found [here](http://sagemaker.readthedocs.io/en/latest/pca.html).
#
# Below, I first specify where to save the model training data, the `output_path`.
# +
# define location to store model artifacts (training writes model.tar.gz under this S3 prefix)
prefix = 'counties'
output_path='s3://{}/{}/'.format(bucket_name, prefix)
print('Training artifacts will be uploaded to: {}'.format(output_path))
# +
# define a PCA model
from sagemaker import PCA
# this is current features - 1
# you'll select only a portion of these to use, later
N_COMPONENTS=33
# NOTE(review): train_instance_count/train_instance_type were renamed to
# instance_count/instance_type in SageMaker Python SDK v2 — confirm SDK version.
pca_SM = PCA(role=role,
             train_instance_count=1,
             train_instance_type='ml.c4.xlarge',
             output_path=output_path, # specified, above
             num_components=N_COMPONENTS,
             sagemaker_session=session)
# -
# ### Convert data into a RecordSet format
#
# Next, prepare the data for a built-in model by converting the DataFrame to a numpy array of float values.
#
# The *record_set* function in the SageMaker PCA model converts a numpy array into a **RecordSet** format that is the required format for the training input data. This is a requirement for _all_ of SageMaker's built-in models. The use of this data type is one of the reasons that allows training of models within Amazon SageMaker to perform faster, especially for large datasets.
# +
# convert df to np array; built-in SageMaker algorithms require float32 input
train_data_np = counties_scaled.values.astype('float32')
# convert to RecordSet format, the required training-input type for built-in models
formatted_train_data = pca_SM.record_set(train_data_np)
# -
# ## Train the model
#
# Call the fit function on the PCA model, passing in our formatted, training data. This spins up a training instance to perform the training job.
#
# Note that it takes the longest to launch the specified training instance; the fitting itself doesn't take much time.
# +
# %%time
# train the PCA model on the formatted data
# (spins up the training instance; instance launch dominates the wall time)
pca_SM.fit(formatted_train_data)
# -
# ## Accessing the PCA Model Attributes
#
# After the model is trained, we can access the underlying model parameters.
#
# ### Unzip the Model Details
#
# Now that the training job is complete, you can find the job under **Jobs** in the **Training** subsection in the Amazon SageMaker console. You can find the job name listed in the training jobs. Use that job name in the following code to specify which model to examine.
#
# Model artifacts are stored in S3 as a TAR file; a compressed file in the output path we specified + 'output/model.tar.gz'. The artifacts stored here can be used to deploy a trained model.
# +
# Get the name of the training job, it's suggested that you copy-paste
# from the notebook or from a specific job in the AWS console
training_job_name='pca-2019-03-07-22-53-18-299'
# where the model is saved, by default: <prefix>/<job-name>/output/model.tar.gz
# NOTE(review): assumes `os` and `boto3` were imported in an earlier cell — verify
model_key = os.path.join(prefix, training_job_name, 'output/model.tar.gz')
print(model_key)
# download and unzip model
boto3.resource('s3').Bucket(bucket_name).download_file(model_key, 'model.tar.gz')
# unzipping as model_algo-1 (the tar extracts a zlib-compressed 'model_algo-1' file)
os.system('tar -zxvf model.tar.gz')
os.system('unzip model_algo-1')
# -
# ### MXNet Array
#
# Many of the Amazon SageMaker algorithms use MXNet for computational speed, including PCA, and so the model artifacts are stored as an array. After the model is unzipped and decompressed, we can load the array using MXNet.
#
# You can take a look at the MXNet [documentation, here](https://aws.amazon.com/mxnet/).
# +
import mxnet as mx
# loading the unzipped artifacts: a dict of MXNet NDArrays ('mean', 'v', 's')
pca_model_params = mx.ndarray.load('model_algo-1')
# what are the params
print(pca_model_params)
# -
# ## PCA Model Attributes
#
# Three types of model attributes are contained within the PCA model.
#
# * **mean**: The mean that was subtracted from a component in order to center it.
# * **v**: The makeup of the principal components; (same as ‘components_’ in an sklearn PCA model).
# * **s**: The singular values of the components for the PCA transformation. This does not exactly give the % variance from the original feature space, but can give the % variance from the projected feature space.
#
# We are only interested in v and s.
#
# From s, we can get an approximation of the data variance that is covered in the first `n` principal components. The approximate explained variance is given by the formula: the sum of squared s values for all top n components over the sum over squared s values for _all_ components:
#
# \begin{equation*}
# \frac{\sum_{n}^{ } s_n^2}{\sum s^2}
# \end{equation*}
#
# From v, we can learn more about the combinations of original features that make up each principal component.
#
# get selected params as DataFrames:
# s = singular values (ordered smallest-to-largest, top component in the LAST row)
# v = component makeups (one column per component, same ordering)
s=pd.DataFrame(pca_model_params['s'].asnumpy())
v=pd.DataFrame(pca_model_params['v'].asnumpy())
# ## Data Variance
#
# Our current PCA model creates 33 principal components, but when we create new dimensionality-reduced training data, we'll only select a few, top n components to use. To decide how many top components to include, it's helpful to look at how much **data variance** the components capture. For our original, high-dimensional data, 34 features captured 100% of our data variance. If we discard some of these higher dimensions, we will lower the amount of variance we can capture.
#
# ### Tradeoff: dimensionality vs. data variance
#
# As an illustrative example, say we have original data in three dimensions. So, three dimensions capture 100% of our data variance; these dimensions cover the entire spread of our data. The below images are taken from the PhD thesis, [“Approaches to analyse and interpret biological profile data”](https://publishup.uni-potsdam.de/opus4-ubp/frontdoor/index/index/docId/696) by Matthias Scholz, (2006, University of Potsdam, Germany).
#
# <img src='notebook_ims/3d_original_data.png' width=35% />
#
# Now, you may also note that most of this data seems related; it falls close to a 2D plane, and just by looking at the spread of the data, we can visualize that the original, three dimensions have some correlation. So, we can instead choose to create two new dimensions, made up of linear combinations of the original, three dimensions. These dimensions are represented by the two axes/lines, centered in the data.
#
# <img src='notebook_ims/pca_2d_dim_reduction.png' width=70% />
#
# If we project this in a new, 2D space, we can see that we still capture most of the original data variance using *just* two dimensions. There is a tradeoff between the amount of variance we can capture and the number of component-dimensions we use to represent our data.
#
# When we select the top n components to use in a new data model, we'll typically want to include enough components to capture about 80-90% of the original data variance. In this project, we are looking at generalizing over a lot of data and we'll aim for about 80% coverage.
# **Note**: The _top_ principal components, with the largest s values, are actually at the end of the s DataFrame. Let's print out the s values for the top n, principal components.
# +
# looking at top 5 components; the top components sit in the last rows of s
n_principal_components = 5
start_idx = N_COMPONENTS - n_principal_components # 33-n
# print a selection of s
print(s.iloc[start_idx:, :])
# -
# ### EXERCISE: Calculate the explained variance
#
# In creating new training data, you'll want to choose the top n principal components that account for at least 80% data variance.
#
# Complete a function, `explained_variance` that takes in the entire array `s` and a number of top principal components to consider. Then return the approximate, explained variance for those top n components.
#
# For example, to calculate the explained variance for the top 5 components, calculate s squared for *each* of the top 5 components, add those up and normalize by the sum of *all* squared s values, according to this formula:
#
# \begin{equation*}
# \frac{\sum_{5}^{ } s_n^2}{\sum s^2}
# \end{equation*}
#
# > Using this function, you should be able to answer the **question**: What is the smallest number of principal components that captures at least 80% of the total variance in the dataset?
# Calculate the explained variance for the top n principal components
# you may assume you have access to the global var N_COMPONENTS
def explained_variance(s, n_top_components):
    '''Calculates the approx. data variance that n_top_components captures.

    :param s: A dataframe of singular values for top components;
         the top value is in the last row.
    :param n_top_components: An integer, the number of top components to use.
    :return: The expected data variance covered by the n_top_components.'''
    # Components are ordered smallest-to-largest, so the top n live in the
    # last n rows. len(s) equals N_COMPONENTS here, so computing the start
    # index from s itself removes the dependency on the global variable.
    start_idx = len(s) - n_top_components
    # ratio of the sum of squared top-n singular values to the sum of ALL squared values
    exp_variance = np.square(s.iloc[start_idx:, :]).sum() / np.square(s).sum()
    return exp_variance.iloc[0]
# ### Test Cell
#
# Test out your own code by seeing how it responds to different inputs; does it return a reasonable value for the single, top component? What about for the top 5 components?
# +
# test cell: 7 components is the smallest number capturing >= 80% variance
n_top_components = 7 # select a value for the number of top components
# calculate the explained variance
exp_variance = explained_variance(s, n_top_components)
print('Explained variance: ', exp_variance)
# -
# As an example, you should see that the top principal component accounts for about 32% of our data variance! Next, you may be wondering what makes up this (and other components); what linear combination of features make these components so influential in describing the spread of our data?
#
# Below, let's take a look at our original features and use that as a reference.
# original feature names, used to label the component-makeup plots below
features_list = counties_scaled.columns.values
print('Features: \n', features_list)
# ## Component Makeup
#
# We can now examine the makeup of each PCA component based on **the weightings of the original features that are included in the component**. The following code shows the feature-level makeup of the first component.
#
# Note that the components are again ordered from smallest to largest and so I am getting the correct rows by calling N_COMPONENTS-1 to get the top, 1, component.
# +
import seaborn as sns
def display_component(v, features_list, component_num, n_weights=10):
    '''Plot the largest-magnitude feature weights that make up one PCA component.

    :param v: A dataframe of component makeups, one column per component,
        ordered smallest-to-largest (top component in the last column).
    :param features_list: The original feature names, in column order.
    :param component_num: Which top component to display (1 = top component).
    :param n_weights: How many of the largest-magnitude weights to show.'''
    # components are ordered smallest-to-largest, so index from the end;
    # v.shape[1] is the total component count (avoids the global N_COMPONENTS)
    col_idx = v.shape[1] - component_num

    # get the list of weights from that column of v
    weights = np.squeeze(v.iloc[:, col_idx].values)

    # match weights to features in counties_scaled dataframe, using list comprehension
    comps = pd.DataFrame(list(zip(weights, features_list)),
                         columns=['weights', 'features'])

    # we'll want to sort by the largest n_weights;
    # weights can be neg/pos so sort by magnitude
    comps['abs_weights'] = comps['weights'].abs()
    sorted_weight_data = comps.sort_values('abs_weights', ascending=False).head(n_weights)

    # display using seaborn; plt.subplots returns a (fig, ax) tuple — unpack it
    # (the original assigned the tuple itself to `ax`)
    fig, ax = plt.subplots(figsize=(10, 6))
    sns.barplot(data=sorted_weight_data,
                x="weights",
                y="features",
                palette="Blues_d",
                ax=ax)
    ax.set_title("PCA Component Makeup, Component #" + str(component_num))
    plt.show()
# -
# display makeup of the component selected by `num`
# (num=2 shows the SECOND-largest component, not the first)
num=2
display_component(v, counties_scaled.columns.values, component_num=num, n_weights=10)
# # Deploying the PCA Model
#
# We can now deploy this model and use it to make "predictions". Instead of seeing what happens with some test data, we'll actually want to pass our training data into the deployed endpoint to create principal components for each data point.
#
# Run the cell below to deploy/host this model on an instance_type that we specify.
# %%time
# this takes a little while, around 7mins (endpoint provisioning dominates)
pca_predictor = pca_SM.deploy(initial_instance_count=1,
                              instance_type='ml.t2.medium')
# We can pass the original, numpy dataset to the model and transform the data using the model we created. Then we can take the largest n components to reduce the dimensionality of our data.
# pass np train data to the PCA model; returns one Record per data point
train_pca = pca_predictor.predict(train_data_np)
# check out the first item in the produced training features
data_idx = 0
print(train_pca[data_idx])
# ### EXERCISE: Create a transformed DataFrame
#
# For each of our data points, get the top n component values from the list of component data points, returned by our predictor above, and put those into a new DataFrame.
#
# You should end up with a DataFrame that looks something like the following:
# ```
# c_1 c_2 c_3 c_4 c_5 ...
# Alabama-Autauga -0.060274 0.160527 -0.088356 0.120480 -0.010824 ...
# Alabama-Baldwin -0.149684 0.185969 -0.145743 -0.023092 -0.068677 ...
# Alabama-Barbour 0.506202 0.296662 0.146258 0.297829 0.093111 ...
# ...
# ```
# create dimensionality-reduced data
def create_transformed_df(train_pca, counties_scaled, n_top_components):
    ''' Return a dataframe of data points with component features.
        The dataframe should be indexed by State-County and contain component values.
        :param train_pca: A list of pca training data, returned by a PCA model.
        :param counties_scaled: A dataframe of normalized, original features.
        :param n_top_components: An integer, the number of top components to use.
        :return: A dataframe, indexed by State-County, with n_top_component values as columns.
    '''
    # collect the component values for every transformed data point;
    # building a list of rows and constructing the frame once replaces the
    # DataFrame.append-in-a-loop pattern (deprecated and removed in pandas 2.0)
    rows = [list(data.label['projection'].float32_tensor.values)
            for data in train_pca]

    # index by county, just like counties_scaled
    counties_transformed = pd.DataFrame(rows, index=counties_scaled.index)

    # keep only the top n components; components are ordered
    # smallest-to-largest, so the top n are the LAST n columns
    # (column count equals N_COMPONENTS — derived from the data, not the global)
    start_idx = counties_transformed.shape[1] - n_top_components
    counties_transformed = counties_transformed.iloc[:, start_idx:]

    # reverse the column order so the top component comes first
    return counties_transformed.iloc[:, ::-1]
# Now we can create a dataset where each county is described by the top n principal components that we analyzed earlier. Each of these components is a linear combination of the original feature space. We can interpret each of these components by analyzing the makeup of the component, shown previously.
# +
# specify top n
top_n = 7
# call your function and create a new dataframe
counties_transformed = create_transformed_df(train_pca, counties_scaled, n_top_components=top_n)
# add descriptive columns (list length must match top_n)
PCA_list=['c_1', 'c_2', 'c_3', 'c_4', 'c_5', 'c_6', 'c_7']
counties_transformed.columns=PCA_list
# print result
counties_transformed.head()
# -
# ### Delete the Endpoint!
#
# Now that we've deployed the model and created our new, transformed training data, we no longer need the PCA endpoint.
#
# As a clean up step, you should always delete your endpoints after you are done using them (and if you do not plan to deploy them to a website, for example).
# delete predictor endpoint to stop accruing hosting charges
session.delete_endpoint(pca_predictor.endpoint)
# ---
# # Population Segmentation
#
# Now, you’ll use the unsupervised clustering algorithm, k-means, to segment counties using their PCA attributes, which are in the transformed DataFrame we just created. K-means is a clustering algorithm that identifies clusters of similar data points based on their component makeup. Since we have ~3000 counties and 34 attributes in the original dataset, the large feature space may have made it difficult to cluster the counties effectively. Instead, we have reduced the feature space to 7 PCA components, and we’ll cluster on this transformed dataset.
# ### EXERCISE: Define a k-means model
#
# Your task will be to instantiate a k-means model. A `KMeans` estimator requires a number of parameters to be instantiated, which allow us to specify the type of training instance to use, and the model hyperparameters.
#
# You can read about the required parameters, in the [`KMeans` documentation](https://sagemaker.readthedocs.io/en/stable/kmeans.html); note that not all of the possible parameters are required.
#
# ### Choosing a "Good" K
#
# One method for choosing a "good" k, is to choose based on empirical data. A bad k would be one so *high* that only one or two very close data points are near it, and another bad k would be one so *low* that data points are really far away from the centers.
#
# You want to select a k such that data points in a single cluster are close together but that there are enough clusters to effectively separate the data. You can approximate this separation by measuring how close your data points are to each cluster center; the average centroid distance between cluster points and a centroid. After trying several values for k, the centroid distance typically reaches some "elbow"; it stops decreasing at a sharp rate and this indicates a good value of k. The graph below indicates the average centroid distance for value of k between 5 and 12.
#
# <img src='notebook_ims/elbow_graph.png' width=50% />
#
# A distance elbow can be seen around 8, where the centroid distance starts to decrease at a slower rate. This indicates that there is enough separation to distinguish the data points in each cluster, but also that you included enough clusters so that the data points aren’t *extremely* far away from each cluster.
# +
# define a KMeans estimator; k=8 chosen from the elbow in average centroid distance
from sagemaker import KMeans
NUM_CLUSTERS = 8
# NOTE(review): train_instance_count/train_instance_type were renamed to
# instance_count/instance_type in SageMaker Python SDK v2 — confirm SDK version.
kmeans = KMeans(role=role,
                train_instance_count=1,
                train_instance_type='ml.c4.xlarge',
                output_path=output_path, # using the same output path as was defined, earlier
                k=NUM_CLUSTERS)
# -
# ### EXERCISE: Create formatted, k-means training data
#
# Just as before, you should convert the `counties_transformed` df into a numpy array and then into a RecordSet. This is the required format for passing training data into a `KMeans` model.
# convert the transformed dataframe into record_set data (float32 required)
kmeans_train_data_np = counties_transformed.values.astype('float32')
kmeans_formatted_data = kmeans.record_set(kmeans_train_data_np)
# ### EXERCISE: Train the k-means model
#
# Pass in the formatted training data and train the k-means model.
# %%time
# train kmeans on the 7-component PCA data
kmeans.fit(kmeans_formatted_data)
# ### EXERCISE: Deploy the k-means model
#
# Deploy the trained model to create a `kmeans_predictor`.
#
# %%time
# deploy the model to create a predictor (provisions a hosting endpoint)
kmeans_predictor = kmeans.deploy(initial_instance_count=1,
                                 instance_type='ml.t2.medium')
# ### EXERCISE: Pass in the training data and assign predicted cluster labels
#
# After deploying the model, you can pass in the k-means training data, as a numpy array, and get resultant, predicted cluster labels for each data point.
# get the predicted clusters for all the kmeans training data
# (one Record per county, holding its closest cluster and distance)
cluster_info=kmeans_predictor.predict(kmeans_train_data_np)
# ## Exploring the resultant clusters
#
# The resulting predictions should give you information about the cluster that each data point belongs to.
#
# You should be able to answer the **question**: which cluster does a given data point belong to?
# +
# print cluster info (closest cluster + distance) for the first data point
data_idx = 0
print('County is: ', counties_transformed.index[data_idx])
print()
print(cluster_info[data_idx])
# -
# ### Visualize the distribution of data over clusters
#
# Get the cluster labels for each of our data points (counties) and visualize the distribution of points over each cluster.
# extract each county's cluster id (a float) from its prediction Record
cluster_labels = [c.label['closest_cluster'].float32_tensor.values[0] for c in cluster_info]
# +
# count up the points in each cluster (value_counts sorts by frequency)
cluster_df = pd.DataFrame(cluster_labels)[0].value_counts()
print(cluster_df)
# +
# another method of visualizing the distribution
# display a histogram of cluster counts
# NOTE(review): plt.subplots returns a (fig, ax) tuple; assigning it to `ax`
# is a misuse, but harmless since `ax` is immediately reassigned below.
ax =plt.subplots(figsize=(6,3))
ax = plt.hist(cluster_labels, bins=8, range=(-0.5, 7.5), color='blue', rwidth=0.5)
title="Histogram of Cluster Counts"
plt.title(title, fontsize=12)
plt.show()
# -
# Now, you may be wondering, what do each of these clusters tell us about these data points? To improve explainability, we need to access the underlying model to get the cluster centers. These centers will help describe which features characterize each cluster.
# ### Delete the Endpoint!
#
# Now that you've deployed the k-means model and extracted the cluster labels for each data point, you no longer need the k-means endpoint.
# delete kmeans endpoint to stop accruing hosting charges
session.delete_endpoint(kmeans_predictor.endpoint)
# ---
# # Model Attributes & Explainability
#
# Explaining the result of the modeling is an important step in making use of our analysis. By combining PCA and k-means, and the information contained in the model attributes within a SageMaker trained model, you can learn about a population and remark on some patterns you've found, based on the data.
# ### EXERCISE: Access the k-means model attributes
#
# Extract the k-means model attributes from where they are saved as a TAR file in an S3 bucket.
#
# You'll need to access the model by the k-means training job name, and then unzip the file into `model_algo-1`. Then you can load that file using MXNet, as before.
# +
# download and unzip the kmeans model file
# copy-paste the job name from the AWS console Training jobs list
kmeans_job_name = 'kmeans-2019-03-08-00-37-22-788'
model_key = os.path.join(prefix, kmeans_job_name, 'output/model.tar.gz')
# download the model file
boto3.resource('s3').Bucket(bucket_name).download_file(model_key, 'model.tar.gz')
os.system('tar -zxvf model.tar.gz')
os.system('unzip model_algo-1')
# +
# get the trained kmeans params using mxnet
kmeans_model_params = mx.ndarray.load('model_algo-1')
print(kmeans_model_params)
# -
# There is only 1 set of model parameters contained within the k-means model: the cluster centroid locations in PCA-transformed, component space.
#
# * **centroids**: The location of the centers of each cluster in component space, identified by the k-means algorithm.
#
# +
# get all the centroids: rows = clusters, columns = PCA components
cluster_centroids=pd.DataFrame(kmeans_model_params[0].asnumpy())
cluster_centroids.columns=counties_transformed.columns
display(cluster_centroids)
# -
# ### Visualizing Centroids in Component Space
#
# You can't visualize 7-dimensional centroids in space, but you can plot a heatmap of the centroids and their location in the transformed feature space.
#
# This gives you insight into what characteristics define each cluster. Often with unsupervised learning, results are hard to interpret. This is one way to make use of the results of PCA + clustering techniques, together. Since you were able to examine the makeup of each PCA component, you can understand what each centroid represents in terms of the PCA components.
# generate a heatmap in component space, using the seaborn library
# (transposed so clusters run along the x-axis, components down the y-axis)
plt.figure(figsize = (12,9))
ax = sns.heatmap(cluster_centroids.T, cmap = 'YlGnBu')
ax.set_xlabel("Cluster")
plt.yticks(fontsize = 16)
plt.xticks(fontsize = 16)
ax.set_title("Attribute Value by Centroid")
plt.show()
# If you've forgotten what each component corresponds to at an original-feature-level, that's okay! You can use the previously defined `display_component` function to see the feature-level makeup.
# what do each of these components mean again?
# let's use the display function, from above, on the 4th-top component
component_num=4
display_component(v, counties_scaled.columns.values, component_num=component_num)
# ### Natural Groupings
#
# You can also map the cluster labels back to each individual county and examine which counties are naturally grouped together.
# +
# add a 'labels' column to the dataframe (mutates counties_transformed in place)
counties_transformed['labels']=list(map(int, cluster_labels))
# sort by cluster label 0-6
sorted_counties = counties_transformed.sort_values('labels', ascending=True)
# view some pts in cluster 0
sorted_counties.head(20)
# -
# You can also examine one of the clusters in more detail, like cluster 1, for example. A quick glance at the location of the centroid in component space (the heatmap) tells us that it has the highest value for the `comp_6` attribute. You can now see which counties fit that description.
# get all counties with label == 1 (boolean-mask row selection)
cluster=counties_transformed[counties_transformed['labels']==1]
cluster.head()
# ## Final Cleanup!
#
# * Double check that you have deleted all your endpoints.
# * I'd also suggest manually deleting your S3 bucket, models, and endpoint configurations directly from your AWS console.
#
# You can find thorough cleanup instructions, [in the documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-cleanup.html).
# ---
# # Conclusion
#
# You have just walked through a machine learning workflow for unsupervised learning, specifically, for clustering a dataset using k-means after reducing the dimensionality using PCA. By accessing the underlying models created within SageMaker, you were able to improve the explainability of your model and draw insights from the resultant clusters.
#
# Using these techniques, you have been able to better understand the essential characteristics of different counties in the US and segment them into similar groups, accordingly.
|
Population_Segmentation/Pop_Segmentation_Solution.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#import jax.numpy as np
#from jax import pmap
import numpy as np
from maxnorm.maxnorm_completion import *
from maxnorm.tenalg import *
from maxnorm.graphs import *
import sparse
from itertools import product
import networkx as nx
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# %load_ext autoreload
# -
# create random, low-rank tensor
t = 5           # tensor order (number of modes)
n = 30          # dimension of each mode
r = 3           # CP rank of the ground-truth tensor
#delta = 0.1
const = 24      # oversampling constant for the sample-count heuristic
sigma = 0.005   # additive noise level
ndata = const * r * t * n * np.log10(n)
# random Khatri-Rao factors, rescaled so the tensor has unit-order HS norm
U = kr_random(n, t, r, rvs='unif')
U = kr_rescale(U, np.sqrt(n**t), 'hs')
norm_true = np.sqrt(kr_dot(U, U) / n**t)
#U = [np.random.randn(n, r) for i in range(t)]
print("n data: %.2e" % ndata)
print("n data ** t/2: %.2e" % int(const * r * n**(t/2) * np.log10(n)))
print("true norm: %.2e" % norm_true)
# upper bound on the max-qnorm of the true factorization
qnorm_true = max_qnorm_ub(U)
print(qnorm_true)
print(r**(t/2))
# observation pattern derived from a 6-regular expander graph on n nodes
expander = nx.random_regular_graph(6, n)
#expander = nx.chordal_cycle_graph(n)
observation_mask = obs_mask_expander(expander, t)
#observation_mask = obs_mask_iid(tuple([n for i in range(t)]), ndata * n**(-t))
# +
from run_sweep_iid import generate_data
# sample noisy observations of the true tensor on the expander mask
data = generate_data(observation_mask, U, sigma)
# RMSE of the TRUE factors against the noisy data (the noise floor)
clean_data_rmse = np.sqrt(loss(U, data) / data.nnz)
print(data.nnz)
print(ndata)
print(n**t)
print("%0.1e%%" % (float(data.nnz) / n**t * 100))
# -
# exploratory checks on the sampled data
np.sqrt(data.sum()**2 / data.nnz)
data.coords
# +
# constraint slack for the max-norm fit, set proportional to the noise level
delta = 1.5 * sigma
#np.sqrt(np.sum((data.data - clean_data)**2)) * 2 / np.sqrt(data.nnz) * 1.5
print("rms of data: %f" % clean_data_rmse)
print("delta parameter: %f" % delta)
print("rmse of U true: %f" % np.sqrt(loss(U, data) / data.nnz))
# +
# from tensorly.contrib.sparse.decomposition import parafac
# mask = data != 0
# core, factors = parafac(data, r, mask=mask, init='random', verbose=True, tol=1e-3, n_iter_max=8)
# scale_mat = np.diag(core.todense()**(1/t))
# U = [factors[i].todense() @ scale_mat for i in range(t)]
#Unew = [np.hstack((U, np.random.randn(n,r**(t-1)))) for U in Unew2]
# -
# %autoreload
# warm-start stage: alternating-minimization completion at inflated rank 2*r^t
Unew1, cost_arr = tensor_completion_alt_min(data, 2 * r**t,
                                            #sgd=True, sgd_batch_size=2000,
                                            #U0 = Unew2,
                                            init='svdrand',
                                            tol=1e-10, max_iter=10*n, verbosity=2)
# +
def print_factor_norms(U):
    """Print the squared Frobenius and the 2->infinity norm of each factor in U."""
    fro_squared = [np.linalg.norm(factor, 'fro') ** 2 for factor in U]
    two_to_inf = [np.max(np.linalg.norm(factor, axis=1)) for factor in U]
    print("fro: " + str(fro_squared))
    print("2-inf: " + str(two_to_inf))
print_factor_norms(kr_balance_factors(U))
# -
# scratch arithmetic left from exploration
12**4
#Uinit = kr_rescale(Unew1, np.sqrt(np.product(data.shape) * data.sum() ** 2 / data.nnz), 'hs')
# rebalance the alt-min factors before using them as a warm start
Uinit = kr_balance_factors(Unew1)
[np.linalg.norm(Ui, axis=1) for Ui in Uinit]
# %autoreload
# main stage: max-norm constrained completion, warm-started from the alt-min factors
Unew2, cost_arr = tensor_completion_maxnorm(data, 4 * r**t, 0.1 * np.sqrt(data.nnz), epsilon=1e-2,
                                            #sgd=True, sgd_batch_size=2000,
                                            U0 = kr_balance_factors(Unew1),
                                            init='svdrand',
                                            kappa=100, beta=1,
                                            tol=1e-10, inner_tol=1e-12, max_iter=10*n, inner_max_iter=10,
                                            verbosity=2, inner_line_iter=40,
                                            rebalance=True)
# qnorm upper bound of the recovered factors (compare against qnorm_true)
qnorm_max = max_qnorm_ub(Unew2)
print(qnorm_max)
Unew1
# spectrum of the expander's adjacency matrix; the second-largest eigenvalue
# controls the expansion quality used in the error bound below
expander_eigs = np.abs(nx.linalg.adjacency_spectrum(expander))
expander_eigs.sort()
print(expander_eigs[-10:])
# theoretical generalization-error bound in terms of the qnorms and the spectral gap
bound = 2**(2*t-4) * 1.8**(t-1) * (qnorm_max + qnorm_true)**2 * ((1 + expander_eigs[-2]/expander_eigs[-1])**(t-1) - 1)
print(bound)
print(bound / n**t)
#print("loss of U parafac: %f" % np.sqrt(loss(U,data) / data.nnz))
print("loss of U true: %.3e" % np.sqrt(loss(U, data) / data.nnz))
#print("loss of U alt-min: %.3e" % np.sqrt(loss(Unew1, data) / data.nnz))
print("loss of U max: %.3e" % np.sqrt(loss(Unew2, data) / data.nnz))
# +
import matplotlib.pyplot as plt
# optimization cost per iterate, log scale
plt.semilogy(cost_arr)
plt.xlabel('iterate')
plt.ylabel('cost')
# ## Generalization error
# +
def gen_err(Upred, Utrue):
    """Relative error of the recovered tensor: ||pred - true||_F / ||true||_F,
    computed from Khatri-Rao inner products without forming the full tensors."""
    true_energy = kr_dot(Utrue, Utrue)
    # ||A - B||^2 = <A, A> + <B, B> - 2 <A, B>
    err_energy = kr_dot(Upred, Upred) - 2 * kr_dot(Upred, Utrue) + true_energy
    return np.sqrt(err_energy / true_energy)
def mse_gen_err(Upred, Utrue):
    """Per-entry error of the recovered tensor over all n^t entries.

    NOTE(review): despite the name, this returns a root-mean-square error
    (sqrt of the mean squared error), matching the original implementation.
    """
    true_energy = kr_dot(Utrue, Utrue)
    # ||A - B||^2 = <A, A> + <B, B> - 2 <A, B>
    err_energy = kr_dot(Upred, Upred) - 2 * kr_dot(Upred, Utrue) + true_energy
    n_entries = Upred[0].shape[0] ** len(Upred)
    return np.sqrt(err_energy / n_entries)
# report both error measures for the max-norm recovery
print("relative RMSE max: %1.4e" % gen_err(Unew2, U))
print("MSE max: %1.4e" % mse_gen_err(Unew2, U))
# +
from mpl_toolkits.axes_grid1 import make_axes_locatable
# visualize the RECOVERED factor matrices, one panel per mode, with colorbars
fig, axs = plt.subplots(1,t, figsize=(6, 20))
for i in range(t):
    im = axs[i].imshow(Unew2[i])
    divider = make_axes_locatable(axs[i])
    cax = divider.append_axes('right', size='5%', pad=0.05)
    fig.colorbar(im, cax=cax, orientation='vertical')
    if i > 0:
        axs[i].set_yticks([])
# +
from mpl_toolkits.axes_grid1 import make_axes_locatable
# visualize the TRUE factor matrices for comparison with the recovery above
fig, axs = plt.subplots(1,t, figsize=(6, 20))
for i in range(t):
    im = axs[i].imshow(U[i])
    divider = make_axes_locatable(axs[i])
    cax = divider.append_axes('right', size='5%', pad=0.05)
    fig.colorbar(im, cax=cax, orientation='vertical')
    if i > 0:
        axs[i].set_yticks([])
# -
|
Test tensor completion.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to Linear Algebra
#
# This is a tutorial designed to introduce you to the basics of linear algebra.
# Linear algebra is a branch of mathematics dedicated to studying the properties of matrices and vectors,
# which are used extensively in quantum computing to represent quantum states and operations on them.
# This tutorial doesn't come close to covering the full breadth of the topic, but it should be enough to get you comfortable with the main concepts of linear algebra used in quantum computing.
#
# This tutorial assumes familiarity with complex numbers; if you need a review of this topic, we recommend that you complete the [Complex Arithmetic](../ComplexArithmetic/ComplexArithmetic.ipynb) tutorial before tackling this one.
#
# This tutorial covers the following topics:
# * Matrices and vectors
# * Basic matrix operations
# * Operations and properties of complex matrices
# * Inner and outer vector products
# * Tensor product
# * Eigenvalues and eigenvectors
#
# If you need to look up some formulas quickly, you can find them in [this cheatsheet](https://github.com/microsoft/QuantumKatas/blob/master/quickref/qsharp-quick-reference.pdf).
# This notebook has several tasks that require you to write Python code to test your understanding of the concepts. If you are not familiar with Python, [here](https://docs.python.org/3/tutorial/index.html) is a good introductory tutorial for it.
#
# > The exercises use Python's built-in representation of complex numbers. Most of the operations (addition, multiplication, etc.) work as you expect them to. Here are a few notes on Python-specific syntax:
# >
# > * If `z` is a complex number, `z.real` is the real component, and `z.imag` is the coefficient of the imaginary component.
# > * To represent an imaginary number, put `j` after a real number: $3.14i$ would be `3.14j`.
# > * To represent a complex number, simply add a real number and an imaginary number.
# > * The built-in function `abs` computes the modulus of a complex number.
# >
# > You can find more information in the [official documentation](https://docs.python.org/3/library/cmath.html).
#
# Let's start by importing some useful mathematical functions and constants, and setting up a few things necessary for testing the exercises. **Do not skip this step.**
#
# Click the cell with code below this block of text and press `Ctrl+Enter` (`⌘+Enter` on Mac).
# +
# Run this cell using Ctrl+Enter (⌘+Enter on Mac).
from testing import exercise, create_empty_matrix
from typing import List
import math, cmath
# A matrix is represented as a list of rows, each row a list of complex entries.
Matrix = List[List[complex]]
# -
# # Part I. Matrices and Basic Operations
#
# ## Matrices and Vectors
#
# A **matrix** is a set of numbers arranged in a rectangular grid. Here is a $2$ by $2$ matrix:
#
# $$A =
# \begin{bmatrix} 1 & 2 \\ 3 & 4 \end{bmatrix}$$
#
# $A_{i,j}$ refers to the element in row $i$ and column $j$ of matrix $A$ (all indices are 0-based). In the above example, $A_{0,1} = 2$.
#
# An $n \times m$ matrix will have $n$ rows and $m$ columns, like so:
#
# $$\begin{bmatrix}
# x_{0,0} & x_{0,1} & \dotsb & x_{0,m-1} \\
# x_{1,0} & x_{1,1} & \dotsb & x_{1,m-1} \\
# \vdots & \vdots & \ddots & \vdots \\
# x_{n-1,0} & x_{n-1,1} & \dotsb & x_{n-1,m-1}
# \end{bmatrix}$$
#
# A $1 \times 1$ matrix is equivalent to a scalar:
#
# $$\begin{bmatrix} 3 \end{bmatrix} = 3$$
#
# Quantum computing uses complex-valued matrices: the elements of a matrix can be complex numbers. This, for example, is a valid complex-valued matrix:
#
# $$\begin{bmatrix}
# 1 & i \\
# -2i & 3 + 4i
# \end{bmatrix}$$
#
# Finally, a **vector** is an $n \times 1$ matrix. Here, for example, is a $3 \times 1$ vector:
#
# $$V = \begin{bmatrix} 1 \\ 2i \\ 3 + 4i \end{bmatrix}$$
#
# Since vectors always have a width of $1$, vector elements are sometimes written using only one index. In the above example, $V_0 = 1$ and $V_1 = 2i$.
# ## Matrix Addition
#
# The easiest matrix operation is **matrix addition**. Matrix addition works between two matrices of the same size, and adds each number from the first matrix to the number in the same position in the second matrix:
#
# $$\begin{bmatrix}
# x_{0,0} & x_{0,1} & \dotsb & x_{0,m-1} \\
# x_{1,0} & x_{1,1} & \dotsb & x_{1,m-1} \\
# \vdots & \vdots & \ddots & \vdots \\
# x_{n-1,0} & x_{n-1,1} & \dotsb & x_{n-1,m-1}
# \end{bmatrix}
# +
# \begin{bmatrix}
# y_{0,0} & y_{0,1} & \dotsb & y_{0,m-1} \\
# y_{1,0} & y_{1,1} & \dotsb & y_{1,m-1} \\
# \vdots & \vdots & \ddots & \vdots \\
# y_{n-1,0} & y_{n-1,1} & \dotsb & y_{n-1,m-1}
# \end{bmatrix}
# =
# \begin{bmatrix}
# x_{0,0} + y_{0,0} & x_{0,1} + y_{0,1} & \dotsb & x_{0,m-1} + y_{0,m-1} \\
# x_{1,0} + y_{1,0} & x_{1,1} + y_{1,1} & \dotsb & x_{1,m-1} + y_{1,m-1} \\
# \vdots & \vdots & \ddots & \vdots \\
# x_{n-1,0} + y_{n-1,0} & x_{n-1,1} + y_{n-1,1} & \dotsb & x_{n-1,m-1} + y_{n-1,m-1}
# \end{bmatrix}$$
#
# Similarly, we can compute $A - B$ by subtracting elements of $B$ from corresponding elements of $A$.
#
# Matrix addition has the following properties:
#
# * Commutativity: $A + B = B + A$
# * Associativity: $(A + B) + C = A + (B + C)$
# ### <span style="color:blue">Exercise 1</span>: Matrix addition.
#
# **Inputs:**
#
# 1. An $n \times m$ matrix $A$, represented as a two-dimensional list.
# 2. An $n \times m$ matrix $B$, represented as a two-dimensional list.
#
# **Output:** Return the sum of the matrices $A + B$ - an $n \times m$ matrix, represented as a two-dimensional list.
#
# > When representing matrices as lists, each sub-list represents a row.
# >
# > For example, list `[[1, 2], [3, 4]]` represents the following matrix:
# >
# > $$\begin{bmatrix}
# 1 & 2 \\
# 3 & 4
# \end{bmatrix}$$
#
# Fill in the missing code and run the cell below to test your work.
#
# <br/>
# <details>
# <summary><b>Need a hint? Click here</b></summary>
# A video explanation can be found <a href="https://www.youtube.com/watch?v=WR9qCSXJlyY">here</a>.
# </details>
@exercise
def matrix_add(a : Matrix, b : Matrix) -> Matrix:
    """Return the element-wise sum A + B of two equally-sized matrices."""
    # Pair matching rows of the two matrices, then matching entries within
    # each pair of rows; every pair of entries sums to one output entry.
    return [[x + y for (x, y) in zip(row_a, row_b)]
            for (row_a, row_b) in zip(a, b)]
# *Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-1:-Matrix-addition.).*
# ## Scalar Multiplication
#
# The next matrix operation is **scalar multiplication** - multiplying the entire matrix by a scalar (real or complex number):
#
# $$a \cdot
# \begin{bmatrix}
# x_{0,0} & x_{0,1} & \dotsb & x_{0,m-1} \\
# x_{1,0} & x_{1,1} & \dotsb & x_{1,m-1} \\
# \vdots & \vdots & \ddots & \vdots \\
# x_{n-1,0} & x_{n-1,1} & \dotsb & x_{n-1,m-1}
# \end{bmatrix}
# =
# \begin{bmatrix}
# a \cdot x_{0,0} & a \cdot x_{0,1} & \dotsb & a \cdot x_{0,m-1} \\
# a \cdot x_{1,0} & a \cdot x_{1,1} & \dotsb & a \cdot x_{1,m-1} \\
# \vdots & \vdots & \ddots & \vdots \\
# a \cdot x_{n-1,0} & a \cdot x_{n-1,1} & \dotsb & a \cdot x_{n-1,m-1}
# \end{bmatrix}$$
#
# Scalar multiplication has the following properties:
#
# * Associativity: $x \cdot (yA) = (x \cdot y)A$
# * Distributivity over matrix addition: $x(A + B) = xA + xB$
# * Distributivity over scalar addition: $(x + y)A = xA + yA$
# ### <span style="color:blue">Exercise 2</span>: Scalar multiplication.
#
# **Inputs:**
#
# 1. A scalar $x$.
# 2. An $n \times m$ matrix $A$.
#
# **Output:** Return the $n \times m$ matrix $x \cdot A$.
#
# <br/>
# <details>
# <summary><b>Need a hint? Click here</b></summary>
# A video explanation can be found <a href="https://www.youtube.com/watch?v=TbaltFbJ3wE">here</a>.
# </details>
@exercise
def scalar_mult(x : complex, a : Matrix) -> Matrix:
    """Return the matrix a scaled entry-by-entry by the scalar x."""
    scaled = []
    for row in a:
        scaled.append([x * entry for entry in row])
    return scaled
# *Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-2:-Scalar-multiplication.).*
# ## Matrix Multiplication
#
# **Matrix multiplication** is a very important and somewhat unusual operation. The unusual thing about it is that neither its operands nor its output are the same size: an $n \times m$ matrix multiplied by an $m \times k$ matrix results in an $n \times k$ matrix.
# That is, for matrix multiplication to be applicable, the number of columns in the first matrix must equal the number of rows in the second matrix.
#
# Here is how matrix product is calculated: if we are calculating $AB = C$, then
#
# $$C_{i,j} = A_{i,0} \cdot B_{0,j} + A_{i,1} \cdot B_{1,j} + \dotsb + A_{i,m-1} \cdot B_{m-1,j} = \sum_{t = 0}^{m-1} A_{i,t} \cdot B_{t,j}$$
#
# Here is a small example:
#
# $$\begin{bmatrix}
# \color{blue} 1 & \color{blue} 2 & \color{blue} 3 \\
# \color{red} 4 & \color{red} 5 & \color{red} 6
# \end{bmatrix}
# \begin{bmatrix}
# 1 \\
# 2 \\
# 3
# \end{bmatrix}
# =
# \begin{bmatrix}
# (\color{blue} 1 \cdot 1) + (\color{blue} 2 \cdot 2) + (\color{blue} 3 \cdot 3) \\
# (\color{red} 4 \cdot 1) + (\color{red} 5 \cdot 2) + (\color{red} 6 \cdot 3)
# \end{bmatrix}
# =
# \begin{bmatrix}
# 14 \\
# 32
# \end{bmatrix}$$
# Matrix multiplication has the following properties:
#
# * Associativity: $A(BC) = (AB)C$
# * Distributivity over matrix addition: $A(B + C) = AB + AC$ and $(A + B)C = AC + BC$
# * Associativity with scalar multiplication: $xAB = x(AB) = A(xB)$
#
# > Note that matrix multiplication is **not commutative:** $AB$ rarely equals $BA$.
#
# Another very important property of matrix multiplication is that a matrix multiplied by a vector produces another vector.
#
# An **identity matrix** $I_n$ is a special $n \times n$ matrix which has $1$s on the main diagonal, and $0$s everywhere else:
#
# $$I_n =
# \begin{bmatrix}
# 1 & 0 & \dotsb & 0 \\
# 0 & 1 & \dotsb & 0 \\
# \vdots & \vdots & \ddots & \vdots \\
# 0 & 0 & \dotsb & 1
# \end{bmatrix}$$
#
# What makes it special is that multiplying any matrix (of compatible size) by $I_n$ returns the original matrix. To put it another way, if $A$ is an $n \times m$ matrix:
#
# $$AI_m = I_nA = A$$
#
# This is why $I_n$ is called an identity matrix - it acts as a **multiplicative identity**. In other words, it is the matrix equivalent of the number $1$.
# ### <span style="color:blue">Exercise 3</span>: Matrix multiplication.
#
# **Inputs:**
#
# 1. An $n \times m$ matrix $A$.
# 2. An $m \times k$ matrix $B$.
#
# **Output:** Return the $n \times k$ matrix equal to the matrix product $AB$.
#
# <br/>
# <details>
# <summary><strong>Need a hint? Click here</strong></summary>
# To solve this exercise, you will need 3 <code>for</code> loops: one to go over $n$ rows of the output matrix, one to go over $k$ columns, and one to add up $m$ products that form each element of the output:
# <pre>
# <code>
# for i in range(n):
# for j in range(k):
# sum = 0
# for t in range(m):
# sum = sum + ...
# c[i][j] = sum
# </code>
# </pre>
# A video explanation can be found <a href="https://www.youtube.com/watch?v=OMA2Mwo0aZg">here</a>.
# </details>
@exercise
def matrix_mult(a : Matrix, b : Matrix) -> Matrix:
    """Return the matrix product AB of an n x m matrix and an m x k matrix.

    Element (i, j) of the result is the sum over t of A[i][t] * B[t][j].
    """
    n = len(a)     # rows of A
    m = len(a[0])  # columns of A == rows of B
    k = len(b[0])  # columns of B
    # Fix: the summation index was previously also named `k`, shadowing the
    # column count `k = len(b[0])` inside the generator expression. The code
    # only worked because of comprehension scoping subtleties; a distinct
    # index `t` removes the hazard.
    return [[sum(a[i][t] * b[t][j] for t in range(m)) for j in range(k)]
            for i in range(n)]
# *Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-3:-Matrix-multiplication.).*
# ## Inverse Matrices
#
# A square $n \times n$ matrix $A$ is **invertible** if it has an inverse $n \times n$ matrix $A^{-1}$ with the following property:
#
# $$AA^{-1} = A^{-1}A = I_n$$
#
# In other words, $A^{-1}$ acts as the **multiplicative inverse** of $A$.
#
# Another, equivalent definition highlights what makes this an interesting property. For any matrices $B$ and $C$ of compatible sizes:
#
# $$A^{-1}(AB) = A(A^{-1}B) = B \\
# (CA)A^{-1} = (CA^{-1})A = C$$
#
# A square matrix has a property called the **determinant**, with the determinant of matrix $A$ being written as $|A|$. A matrix is invertible if and only if its determinant isn't equal to $0$.
#
# For a $2 \times 2$ matrix $A$, the determinant is defined as $|A| = (A_{0,0} \cdot A_{1,1}) - (A_{0,1} \cdot A_{1,0})$.
#
# For larger matrices, the determinant is defined through determinants of sub-matrices. You can learn more from [Wikipedia](https://en.wikipedia.org/wiki/Determinant) or from [Wolfram MathWorld](http://mathworld.wolfram.com/Determinant.html).
# ### <span style="color:blue">Exercise 4</span>: Matrix Inversion.
#
# **Input:** An invertible $2 \times 2$ matrix $A$.
#
# **Output:** Return the inverse of $A$, a $2 \times 2$ matrix $A^{-1}$.
#
# <br/>
# <details>
# <summary><strong>Need a hint? Click here</strong></summary>
# Try to come up with a general method of doing it by hand first. If you get stuck, you may find <a href="https://en.wikipedia.org/wiki/Invertible_matrix#Inversion_of_2_%C3%97_2_matrices">this Wikipedia article</a> useful. For this exercise, $|A|$ is guaranteed to be non-zero. <br>
# A video explanation can be found <a href="https://www.youtube.com/watch?v=01c12NaUQDw">here</a>.
# </details>
@exercise
def matrix_inverse(a : Matrix) -> Matrix:
    """Return the inverse of an invertible 2x2 matrix a.

    Uses the adjugate formula: A^{-1} = (1 / |A|) * [[s, -q], [-r, p]],
    where |A| = ps - qr is the determinant.
    """
    det = a[0][0] * a[1][1] - a[0][1] * a[1][0]
    factor = 1 / det
    return [[factor * a[1][1], factor * (-a[0][1])],
            [factor * (-a[1][0]), factor * a[0][0]]]
# *Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-4:-Matrix-Inversion.).*
# ## Transpose
#
# The **transpose** operation, denoted as $A^T$, is essentially a reflection of the matrix across the diagonal: $(A^T)_{i,j} = A_{j,i}$.
#
# Given an $n \times m$ matrix $A$, its transpose is the $m \times n$ matrix $A^T$, such that if:
#
# $$A =
# \begin{bmatrix}
# x_{0,0} & x_{0,1} & \dotsb & x_{0,m-1} \\
# x_{1,0} & x_{1,1} & \dotsb & x_{1,m-1} \\
# \vdots & \vdots & \ddots & \vdots \\
# x_{n-1,0} & x_{n-1,1} & \dotsb & x_{n-1,m-1}
# \end{bmatrix}$$
#
# then:
#
# $$A^T =
# \begin{bmatrix}
# x_{0,0} & x_{1,0} & \dotsb & x_{n-1,0} \\
# x_{0,1} & x_{1,1} & \dotsb & x_{n-1,1} \\
# \vdots & \vdots & \ddots & \vdots \\
# x_{0,m-1} & x_{1,m-1} & \dotsb & x_{n-1,m-1}
# \end{bmatrix}$$
#
# For example:
#
# $$\begin{bmatrix}
# 1 & 2 \\
# 3 & 4 \\
# 5 & 6
# \end{bmatrix}^T
# =
# \begin{bmatrix}
# 1 & 3 & 5 \\
# 2 & 4 & 6
# \end{bmatrix}$$
#
# A **symmetric** matrix is a square matrix which equals its own transpose: $A = A^T$. To put it another way, it has reflection symmetry (hence the name) across the main diagonal. For example, the following matrix is symmetric:
#
# $$\begin{bmatrix}
# 1 & 2 & 3 \\
# 2 & 4 & 5 \\
# 3 & 5 & 6
# \end{bmatrix}$$
#
# The transpose of a matrix product is equal to the product of transposed matrices, taken in reverse order:
#
# $$(AB)^T = B^TA^T$$
# ### <span style="color:blue">Exercise 5</span>: Transpose.
#
# **Input:** An $n \times m$ matrix $A$.
#
# **Output:** Return an $m \times n$ matrix $A^T$, the transpose of $A$.
#
# <br/>
# <details>
# <summary><b>Need a hint? Click here</b></summary>
# A video explanation can be found <a href="https://www.youtube.com/watch?v=TZrKrNVhbjI">here</a>.
# </details>
@exercise
def transpose(a : Matrix) -> Matrix:
    """Return A^T, the m x n transpose of the n x m matrix a."""
    # zip(*a) iterates the columns of a; convert each tuple back to a list.
    return [list(column) for column in zip(*a)]
# *Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-5:-Transpose.).*
# ## Conjugate
#
# The next important single-matrix operation is the **matrix conjugate**, denoted as $\overline{A}$. This, as the name might suggest, involves taking the [complex conjugate](../ComplexArithmetic/ComplexArithmetic.ipynb#Complex-Conjugate) of every element of the matrix: if
#
# $$A =
# \begin{bmatrix}
# x_{0,0} & x_{0,1} & \dotsb & x_{0,m-1} \\
# x_{1,0} & x_{1,1} & \dotsb & x_{1,m-1} \\
# \vdots & \vdots & \ddots & \vdots \\
# x_{n-1,0} & x_{n-1,1} & \dotsb & x_{n-1,m-1}
# \end{bmatrix}$$
#
# Then:
#
# $$\overline{A} =
# \begin{bmatrix}
# \overline{x}_{0,0} & \overline{x}_{0,1} & \dotsb & \overline{x}_{0,m-1} \\
# \overline{x}_{1,0} & \overline{x}_{1,1} & \dotsb & \overline{x}_{1,m-1} \\
# \vdots & \vdots & \ddots & \vdots \\
# \overline{x}_{n-1,0} & \overline{x}_{n-1,1} & \dotsb & \overline{x}_{n-1,m-1}
# \end{bmatrix}$$
#
# The conjugate of a matrix product equals to the product of conjugates of the matrices:
#
# $$\overline{AB} = (\overline{A})(\overline{B})$$
# ### <span style="color:blue">Exercise 6</span>: Conjugate.
#
# **Input:** An $n \times m$ matrix $A$.
#
# **Output:** Return an $n \times m$ matrix $\overline{A}$, the conjugate of $A$.
#
# > As a reminder, you can get the real and imaginary components of complex number `z` using `z.real` and `z.imag`, respectively.
# <details>
# <summary><b>Need a hint? Click here</b></summary>
# To calculate the conjugate of a matrix take the conjugate of each element, check the <a href="../ComplexArithmetic/ComplexArithmetic.ipynb#Exercise-4:-Complex-conjugate.">complex arithmetic tutorial</a> to see how to calculate the conjugate of a complex number.
# </details>
@exercise
def conjugate(a : Matrix) -> Matrix:
    """Return the matrix conjugate: every entry replaced by its complex conjugate."""
    result = []
    for row in a:
        result.append([entry.conjugate() for entry in row])
    return result
# *Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-6:-Conjugate.).*
# ## Adjoint
#
# The final important single-matrix operation is a combination of the above two. The **conjugate transpose**, also called the **adjoint** of matrix $A$, is defined as $A^\dagger = \overline{(A^T)} = (\overline{A})^T$.
#
# A matrix is known as **Hermitian** or **self-adjoint** if it equals its own adjoint: $A = A^\dagger$. For example, the following matrix is Hermitian:
#
# $$\begin{bmatrix}
# 1 & i \\
# -i & 2
# \end{bmatrix}$$
#
# The adjoint of a matrix product can be calculated as follows:
#
# $$(AB)^\dagger = B^\dagger A^\dagger$$
# ### <span style="color:blue">Exercise 7</span>: Adjoint.
#
# **Input:** An $n \times m$ matrix $A$.
#
# **Output:** Return an $m \times n$ matrix $A^\dagger$, the adjoint of $A$.
#
# > Don't forget, you can re-use functions you've written previously.
@exercise
def adjoint(a : Matrix) -> Matrix:
    """Return the adjoint (conjugate transpose) A†: (A†)_{i,j} = conj(A_{j,i})."""
    rows = len(a)
    cols = len(a[0])
    # Swap the indices (transpose) and conjugate each entry in a single pass.
    return [[a[i][j].conjugate() for i in range(rows)] for j in range(cols)]
# *Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-7:-Adjoint.).*
# ## Unitary Matrices
#
# **Unitary matrices** are very important for quantum computing. A matrix is unitary when it is invertible, and its inverse is equal to its adjoint: $U^{-1} = U^\dagger$. That is, an $n \times n$ square matrix $U$ is unitary if and only if $UU^\dagger = U^\dagger U = I_n$.
#
# For example, the following matrix is unitary:
#
# $$\begin{bmatrix}
# \frac{1}{\sqrt{2}} & \frac{1}{\sqrt{2}} \\
# \frac{i}{\sqrt{2}} & \frac{-i}{\sqrt{2}} \\
# \end{bmatrix}$$
# ### <span style="color:blue">Exercise 8</span>: Unitary Verification.
#
# **Input:** An $n \times n$ matrix $A$.
#
# **Output:** Check if the matrix is unitary and return `True` if it is, or `False` if it isn't.
#
# > Because of inaccuracy when dealing with floating point numbers on a computer (rounding errors), you won't always get the exact result you are expecting from a long series of calculations. To get around this, the `pytest` library provides a function `approx` which can be used to check if two numbers are "close enough:" `a == approx(b)`.
#
# <br/>
# <details>
# <summary><strong>Need a hint? Click here</strong></summary>
# Keep in mind, you have only implemented matrix inverses for $2 \times 2$ matrices, and this exercise may give you larger inputs. There is a way to solve this without taking the inverse.
# </details>
# +
from pytest import approx
@exercise
def is_matrix_unitary(a : Matrix) -> bool:
    """Check whether a is unitary, i.e. whether a·a† equals the identity."""
    n = len(a)
    product = matrix_mult(a, adjoint(a))
    # Every diagonal entry of a·a† must be (approximately) 1 and every
    # off-diagonal entry (approximately) 0; bail out on the first mismatch.
    for i in range(n):
        for j in range(n):
            expected = 1 if i == j else 0
            if approx(product[i][j]) != expected:
                return False
    return True
# -
# *Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-8:-Unitary-Verification.).*
# ## Next Steps
#
# Congratulations! At this point, you should understand enough linear algebra to be able to get started with the tutorials on [the concept of qubit](../Qubit/Qubit.ipynb) and on [single-qubit quantum gates](../SingleQubitGates/SingleQubitGates.ipynb). The next section covers more advanced matrix operations that help explain the properties of qubits and quantum gates.
# # Part II. Advanced Operations
#
# ## Inner Product
#
# The **inner product** is yet another important matrix operation that is only applied to vectors. Given two vectors $V$ and $W$ of the same size, their inner product $\langle V , W \rangle$ is defined as a product of matrices $V^\dagger$ and $W$:
#
# $$\langle V , W \rangle = V^\dagger W$$
#
# Let's break this down so it's a bit easier to understand. A $1 \times n$ matrix (the adjoint of an $n \times 1$ vector) multiplied by an $n \times 1$ vector results in a $1 \times 1$ matrix (which is equivalent to a scalar). The result of an inner product is that scalar.
#
# To put it another way, to calculate the inner product of two vectors, take the corresponding elements $V_k$ and $W_k$, multiply the complex conjugate of $V_k$ by $W_k$, and add up those products:
#
# $$\langle V , W \rangle = \sum_{k=0}^{n-1}\overline{V_k}W_k$$
#
# Here is a simple example:
#
# $$\langle
# \begin{bmatrix}
# -6 \\
# 9i
# \end{bmatrix}
# ,
# \begin{bmatrix}
# 3 \\
# -8
# \end{bmatrix}
# \rangle =
# \begin{bmatrix}
# -6 \\
# 9i
# \end{bmatrix}^\dagger
# \begin{bmatrix}
# 3 \\
# -8
# \end{bmatrix}
# =
# \begin{bmatrix} -6 & -9i \end{bmatrix}
# \begin{bmatrix}
# 3 \\
# -8
# \end{bmatrix}
# = (-6) \cdot (3) + (-9i) \cdot (-8) = -18 + 72i$$
# If you are familiar with the **dot product**, you will notice that it is equivalent to inner product for real-numbered vectors.
#
# > We use our definition for these tutorials because it matches the notation used in quantum computing. You might encounter other sources which define the inner product a little differently: $\langle V , W \rangle = W^\dagger V = V^T\overline{W}$, in contrast to the $V^\dagger W$ that we use. These definitions are almost equivalent, with some differences in the scalar multiplication by a complex number.
#
# An immediate application for the inner product is computing the **vector norm**. The norm of vector $V$ is defined as $||V|| = \sqrt{\langle V , V \rangle}$. This condenses the vector down to a single non-negative real value. If the vector represents coordinates in space, the norm happens to be the length of the vector. A vector is called **normalized** if its norm is equal to $1$.
#
# The inner product has the following properties:
#
# * Distributivity over addition: $\langle V + W , X \rangle = \langle V , X \rangle + \langle W , X \rangle$ and $\langle V , W + X \rangle = \langle V , W \rangle + \langle V , X \rangle$
# * Partial associativity with scalar multiplication: $x \cdot \langle V , W \rangle = \langle \overline{x}V , W \rangle = \langle V , xW \rangle$
# * Skew symmetry: $\langle V , W \rangle = \overline{\langle W , V \rangle}$
# * Multiplying a vector by a unitary matrix **preserves the vector's inner product with itself** (and therefore the vector's norm): $\langle UV , UV \rangle = \langle V , V \rangle$
#
# > Note that just like matrix multiplication, the inner product is **not commutative**: $\langle V , W \rangle$ won't always equal $\langle W , V \rangle$.
# ### <span style="color:blue">Exercise 9</span>: Inner product.
#
# **Inputs:**
#
# 1. An $n \times 1$ vector $V$.
# 2. An $n \times 1$ vector $W$.
#
# **Output:** Return a complex number - the inner product $\langle V , W \rangle$.
#
# <br/>
# <details>
# <summary><b>Need a hint? Click here</b></summary>
# A video explanation can be found <a href="https://www.youtube.com/watch?v=FCmH4MqbFGs">here</a>.
# </details>
@exercise
def inner_prod(v : Matrix, w : Matrix) -> complex:
    """Return the inner product ⟨V, W⟩ = V†W = Σ_k conj(V_k)·W_k."""
    total = 0
    for row_v, row_w in zip(v, w):
        total += row_v[0].conjugate() * row_w[0]
    return total
# *Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-9:-Inner-product.).*
# ### <span style="color:blue">Exercise 10</span>: Normalized vectors.
#
# **Input:** A non-zero $n \times 1$ vector $V$.
#
# **Output:** Return an $n \times 1$ vector $\frac{V}{||V||}$ - the normalized version of the vector $V$.
#
# <br/>
# <details>
# <summary><strong>Need a hint? Click here</strong></summary>
# You might need the square root function to solve this exercise. As a reminder, <a href=https://docs.python.org/3/library/math.html#math.sqrt>Python's square root function</a> is available in the <code>math</code> library.<br>
# A video explanation can be found <a href="https://www.youtube.com/watch?v=7fn03DIW3Ak">here</a>. Note that when this method is used with complex vectors, you should take the modulus of the complex number for the division.
# </details>
@exercise
def normalize(v : Matrix) -> Matrix:
    """Return v / ||v||, the unit-norm version of v, where ||v||² = ⟨v, v⟩."""
    norm_squared = 0
    for row in v:
        norm_squared += row[0].conjugate() * row[0]
    # cmath.sqrt keeps the scalar complex, matching the original behavior
    # of dividing by a complex norm.
    factor = 1 / cmath.sqrt(norm_squared)
    return [[factor * row[0]] for row in v]
# *Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-10:-Normalized-vectors.).*
# ## Outer Product
#
# The **outer product** of two vectors $V$ and $W$ is defined as $VW^\dagger$. That is, the outer product of an $n \times 1$ vector and an $m \times 1$ vector is an $n \times m$ matrix. If we denote the outer product of $V$ and $W$ as $X$, then $X_{i,j} = V_i \cdot \overline{W_j}$.
#
# Here is a simple example:
# outer product of $\begin{bmatrix} -3i \\ 9 \end{bmatrix}$ and $\begin{bmatrix} 9i \\ 2 \\ 7 \end{bmatrix}$ is:
#
# $$\begin{bmatrix} \color{blue} {-3i} \\ \color{blue} 9 \end{bmatrix}
# \begin{bmatrix} \color{red} {9i} \\ \color{red} 2 \\ \color{red} 7 \end{bmatrix}^\dagger
# =
# \begin{bmatrix} \color{blue} {-3i} \\ \color{blue} 9 \end{bmatrix}
# \begin{bmatrix} \color{red} {-9i} & \color{red} 2 & \color{red} 7 \end{bmatrix}
# =
# \begin{bmatrix}
# \color{blue} {-3i} \cdot \color{red} {(-9i)} & \color{blue} {-3i} \cdot \color{red} 2 & \color{blue} {-3i} \cdot \color{red} 7 \\
# \color{blue} 9 \cdot \color{red} {(-9i)} & \color{blue} 9 \cdot \color{red} 2 & \color{blue} 9 \cdot \color{red} 7
# \end{bmatrix}
# =
# \begin{bmatrix}
# -27 & -6i & -21i \\
# -81i & 18 & 63
# \end{bmatrix}$$
# ### <span style="color:blue">Exercise 11</span>: Outer product.
#
# **Inputs:**
#
# 1. An $n \times 1$ vector $V$.
# 2. An $m \times 1$ vector $W$.
#
# **Output:** Return an $n \times m$ matrix that represents the outer product of $V$ and $W$.
@exercise
def outer_prod(v : Matrix, w : Matrix) -> Matrix:
    """Return the outer product VW†: an n x m matrix with X_{i,j} = V_i · conj(W_j)."""
    return [[row_v[0] * row_w[0].conjugate() for row_w in w] for row_v in v]
# *Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-11:-Outer-product.).*
# ## Tensor Product
#
# The **tensor product** is a different way of multiplying matrices. Rather than multiplying rows by columns, the tensor product multiplies the second matrix by every element of the first matrix.
#
# Given $n \times m$ matrix $A$ and $k \times l$ matrix $B$, their tensor product $A \otimes B$ is an $(n \cdot k) \times (m \cdot l)$ matrix defined as follows:
#
# $$A \otimes B =
# \begin{bmatrix}
# A_{0,0} \cdot B & A_{0,1} \cdot B & \dotsb & A_{0,m-1} \cdot B \\
# A_{1,0} \cdot B & A_{1,1} \cdot B & \dotsb & A_{1,m-1} \cdot B \\
# \vdots & \vdots & \ddots & \vdots \\
# A_{n-1,0} \cdot B & A_{n-1,1} \cdot B & \dotsb & A_{n-1,m-1} \cdot B
# \end{bmatrix}
# =
# \begin{bmatrix}
# A_{0,0} \cdot \color{red} {\begin{bmatrix}B_{0,0} & \dotsb & B_{0,l-1} \\ \vdots & \ddots & \vdots \\ B_{k-1,0} & \dotsb & B_{k-1,l-1} \end{bmatrix}} & \dotsb &
# A_{0,m-1} \cdot \color{blue} {\begin{bmatrix}B_{0,0} & \dotsb & B_{0,l-1} \\ \vdots & \ddots & \vdots \\ B_{k-1,0} & \dotsb & B_{k-1,l-1} \end{bmatrix}} \\
# \vdots & \ddots & \vdots \\
# A_{n-1,0} \cdot \color{blue} {\begin{bmatrix}B_{0,0} & \dotsb & B_{0,l-1} \\ \vdots & \ddots & \vdots \\ B_{k-1,0} & \dotsb & B_{k-1,l-1} \end{bmatrix}} & \dotsb &
# A_{n-1,m-1} \cdot \color{red} {\begin{bmatrix}B_{0,0} & \dotsb & B_{0,l-1} \\ \vdots & \ddots & \vdots \\ B_{k-1,0} & \dotsb & B_{k-1,l-1} \end{bmatrix}}
# \end{bmatrix}
# = \\
# =
# \begin{bmatrix}
# A_{0,0} \cdot \color{red} {B_{0,0}} & \dotsb & A_{0,0} \cdot \color{red} {B_{0,l-1}} & \dotsb & A_{0,m-1} \cdot \color{blue} {B_{0,0}} & \dotsb & A_{0,m-1} \cdot \color{blue} {B_{0,l-1}} \\
# \vdots & \ddots & \vdots & \dotsb & \vdots & \ddots & \vdots \\
# A_{0,0} \cdot \color{red} {B_{k-1,0}} & \dotsb & A_{0,0} \cdot \color{red} {B_{k-1,l-1}} & \dotsb & A_{0,m-1} \cdot \color{blue} {B_{k-1,0}} & \dotsb & A_{0,m-1} \cdot \color{blue} {B_{k-1,l-1}} \\
# \vdots & \vdots & \vdots & \ddots & \vdots & \vdots & \vdots \\
# A_{n-1,0} \cdot \color{blue} {B_{0,0}} & \dotsb & A_{n-1,0} \cdot \color{blue} {B_{0,l-1}} & \dotsb & A_{n-1,m-1} \cdot \color{red} {B_{0,0}} & \dotsb & A_{n-1,m-1} \cdot \color{red} {B_{0,l-1}} \\
# \vdots & \ddots & \vdots & \dotsb & \vdots & \ddots & \vdots \\
# A_{n-1,0} \cdot \color{blue} {B_{k-1,0}} & \dotsb & A_{n-1,0} \cdot \color{blue} {B_{k-1,l-1}} & \dotsb & A_{n-1,m-1} \cdot \color{red} {B_{k-1,0}} & \dotsb & A_{n-1,m-1} \cdot \color{red} {B_{k-1,l-1}}
# \end{bmatrix}$$
#
# Here is a simple example:
#
# $$\begin{bmatrix} 1 & 2 \\ 3 & 4 \end{bmatrix} \otimes \begin{bmatrix} 5 & 6 \\ 7 & 8 \end{bmatrix} =
# \begin{bmatrix}
# 1 \cdot \begin{bmatrix} 5 & 6 \\ 7 & 8 \end{bmatrix} & 2 \cdot \begin{bmatrix} 5 & 6 \\ 7 & 8 \end{bmatrix} \\
# 3 \cdot \begin{bmatrix} 5 & 6 \\ 7 & 8 \end{bmatrix} & 4 \cdot \begin{bmatrix} 5 & 6 \\ 7 & 8 \end{bmatrix}
# \end{bmatrix}
# =
# \begin{bmatrix}
# 1 \cdot 5 & 1 \cdot 6 & 2 \cdot 5 & 2 \cdot 6 \\
# 1 \cdot 7 & 1 \cdot 8 & 2 \cdot 7 & 2 \cdot 8 \\
# 3 \cdot 5 & 3 \cdot 6 & 4 \cdot 5 & 4 \cdot 6 \\
# 3 \cdot 7 & 3 \cdot 8 & 4 \cdot 7 & 4 \cdot 8
# \end{bmatrix}
# =
# \begin{bmatrix}
# 5 & 6 & 10 & 12 \\
# 7 & 8 & 14 & 16 \\
# 15 & 18 & 20 & 24 \\
# 21 & 24 & 28 & 32
# \end{bmatrix}$$
#
# Notice that the tensor product of two vectors is another vector: if $V$ is an $n \times 1$ vector, and $W$ is an $m \times 1$ vector, $V \otimes W$ is an $(n \cdot m) \times 1$ vector.
# The tensor product has the following properties:
#
# * Distributivity over addition: $(A + B) \otimes C = A \otimes C + B \otimes C$, $A \otimes (B + C) = A \otimes B + A \otimes C$
# * Associativity with scalar multiplication: $x(A \otimes B) = (xA) \otimes B = A \otimes (xB)$
# * Mixed-product property (relation with matrix multiplication): $(A \otimes B) (C \otimes D) = (AC) \otimes (BD)$
# ### <span style="color:blue">Exercise 12</span>*: Tensor Product.
#
# **Inputs:**
#
# 1. An $n \times m$ matrix $A$.
# 2. A $k \times l$ matrix $B$.
#
# **Output:** Return an $(n \cdot k) \times (m \cdot l)$ matrix $A \otimes B$, the tensor product of $A$ and $B$.
@exercise
def tensor_product(a : Matrix, b : Matrix) -> Matrix:
    """Return the tensor (Kronecker) product A ⊗ B.

    Each entry of A scales a full copy of B; the copies are laid out in a
    grid matching A, giving an (n·k) x (m·l) matrix.
    """
    product = []
    for row_a in a:
        for row_b in b:
            # One output row: walk A's row, and for each entry emit a
            # scaled copy of the corresponding row of B.
            product.append([x * y for x in row_a for y in row_b])
    return product
# *Can't come up with a solution? See the explained solution in the* <i><a href="./Workbook_LinearAlgebra.ipynb#Exercise-12*:-Tensor-Product.">Linear Algebra Workbook</a></i>.
# ## Next Steps
#
# At this point, you know enough to complete the tutorials on [the concept of qubit](../Qubit/Qubit.ipynb), [single-qubit gates](../SingleQubitGates/SingleQubitGates.ipynb), [multi-qubit systems](../MultiQubitSystems/MultiQubitSystems.ipynb), and [multi-qubit gates](../MultiQubitGates/MultiQubitGates.ipynb).
# The last part of this tutorial is a brief introduction to eigenvalues and eigenvectors, which are used for more advanced topics in quantum computing.
# Feel free to move on to the next tutorials, and come back here once you encounter eigenvalues and eigenvectors elsewhere.
# # Part III: Eigenvalues and Eigenvectors
#
# Consider the following example of multiplying a matrix by a vector:
#
# $$\begin{bmatrix}
# 1 & -3 & 3 \\
# 3 & -5 & 3 \\
# 6 & -6 & 4
# \end{bmatrix}
# \begin{bmatrix}
# 1 \\
# 1 \\
# 2
# \end{bmatrix}
# =
# \begin{bmatrix}
# 4 \\
# 4 \\
# 8
# \end{bmatrix}$$
#
# Notice that the resulting vector is just the initial vector multiplied by a scalar (in this case 4). This behavior is so noteworthy that it is described using a special set of terms.
#
# Given a nonzero $n \times n$ matrix $A$, a nonzero vector $V$, and a scalar $x$, if $AV = xV$, then $x$ is an **eigenvalue** of $A$, and $V$ is an **eigenvector** of $A$ corresponding to that eigenvalue.
#
# The properties of eigenvalues and eigenvectors are used extensively in quantum computing. You can learn more about eigenvalues, eigenvectors, and their properties at [Wolfram MathWorld](http://mathworld.wolfram.com/Eigenvector.html) or on [Wikipedia](https://en.wikipedia.org/wiki/Eigenvalues_and_eigenvectors).
# ### <span style="color:blue">Exercise 13</span>: Finding an eigenvalue.
#
# **Inputs:**
#
# 1. An $n \times n$ matrix $A$.
# 2. An eigenvector $V$ of matrix $A$.
#
# **Output:** Return a real number - the eigenvalue of $A$ that is associated with the given eigenvector.
#
# > Note that in this task the matrices are real-valued.
#
# <br/>
# <details>
# <summary><strong>Need a hint? Click here</strong></summary>
# Multiply the matrix by the vector, then divide the elements of the result by the elements of the original vector. Don't forget though, some elements of the vector may be $0$.
# </details>
@exercise
def find_eigenvalue(a : Matrix, v : Matrix) -> float:
    """Return the eigenvalue x of a such that a·v = x·v for the given eigenvector v."""
    av = matrix_mult(a, v)
    # Any coordinate where v is nonzero yields the ratio (Av)_i / v_i = x;
    # zero coordinates are skipped to avoid division by zero.
    for row_av, row_v in zip(av, v):
        if row_v[0] != 0:
            return row_av[0] / row_v[0]
# *Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-13:-Finding-an-eigenvalue.).*
# ### <span style="color:blue">Exercise 14</span>**: Finding an eigenvector.
#
# **Inputs:**
#
# 1. A $2 \times 2$ matrix $A$.
# 2. An eigenvalue $x$ of matrix $A$.
#
#
# **Output:** Return any non-zero eigenvector of $A$ that is associated with $x$.
#
# <br/>
# <details>
# <summary><strong>Need a hint? Click here</strong></summary>
# A matrix and an eigenvalue will have multiple eigenvectors (infinitely many, in fact), but you only need to find one.<br/>
# Try treating the elements of the vector as variables in a system of two equations. Watch out for division by $0$!
# </details>
@exercise
def find_eigenvector(a : Matrix, x : float) -> Matrix:
    """Return a nonzero eigenvector of the 2x2 matrix a for eigenvalue x.

    Solve (A - xI)·w = 0 for A = [[p, q], [r, s]]. The vector [-q, p - x]
    annihilates the first row, and — because det(A - xI) = 0 — the second
    row as well. If it is the zero vector, the second row gives
    [s - x, -r]; if both rows vanish, A = xI and any vector works.
    """
    p, q = a[0]
    r, s = a[1]
    if q != 0 or p != x:
        return [[-q], [p - x]]
    if s != x or r != 0:
        return [[s - x], [-r]]
    return [[1], [0]]
# *Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-14**:-Finding-an-eigenvector.).*
|
tutorials/LinearAlgebra/LinearAlgebra.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py38_tensorflow
# language: python
# name: conda-env-py38_tensorflow-py
# ---
# # Notebook for Predicting WFQC
# Import the modules
import pandas as pd
from datetime import datetime, timedelta
import glob
import os
import wave
import pylab
#from matplotlib import plt
import matplotlib.pyplot as plt
from joblib import Parallel, delayed
import multiprocessing
import gc
import random
from obspy.core import read, UTCDateTime
# Directories setup
current_dir = "/home/roberto/notebooks/WFQCMODEL/"  # notebook project root
data_dir = current_dir + "Data/"  # all outputs live under the project's Data folder
seism_dir = '/home/roberto/notebooks/DATAFIL/'  # raw seismic waveform archive
output_spectrogram_dir = data_dir + "Extracted_Spectrogram_Full_Analysis/"
# Create the spectrogram output folder on first run.
if not os.path.exists(output_spectrogram_dir):
    os.makedirs(output_spectrogram_dir)
# Archive subdirectories named with exactly four characters
# (presumably station/event codes — TODO confirm).
seis_filenames = glob.glob(seism_dir + '/????')
# Preparing the DataFrame: one row per *.vel waveform file found in the
# archive, recording a running index, the file name, and its parent folder.
# (The original also computed os.path.basename of each directory into an
# unused variable `new2`; that dead code is removed here.)
new_list = []
i = 1
for subs in seis_filenames:
    velfil = glob.glob(subs + '/*.vel')
    for file in velfil:
        sub = os.path.basename(file)  # waveform file name
        # parent (four-character) directory holding the file
        last = os.path.basename(os.path.dirname(file))
        new_list.append([i, sub, last])
        i = i + 1
df = pd.DataFrame(new_list, columns=['Num', 'Waveform', 'Sub'])
# Plot a spectrogram using ObsPy
# +
def graph_spectrogram(wav_file, sub, serial):
    """Save a PNG spectrogram of the first trace of a waveform file.

    wav_file -- path to a waveform file readable by obspy's read()
    sub      -- parent folder name (accepted but currently unused here)
    serial   -- prefix used to build the output PNG file name
    Writes <output_spectrogram_dir>/<serial>-<start time>-<second>.png.
    """
    # fig=pyplot.figure(num=None, figsize=(19, 12))
    # ax = pyplot.axes()
    # ax.set_axis_off()
    st = read(wav_file)
    tr=st[0]
    # Normalize amplitudes so color scaling is comparable across files.
    tr.normalize()
    fig = plt.figure()
    #ax = plt.Axes(fig,[0.,0.,.8,.8])
    ax = plt.Axes(fig,[0.1, 0.1, 0.7, 0.6])
    ax.set_axis_off()
    fig.add_axes(ax)
    ax.set_axis_off()
    # Encode the trace start time into the output file name.
    begin_TimeStamp=tr.stats.starttime
    audio_begin_TimeStamp=str(begin_TimeStamp.year)+'-'+str(begin_TimeStamp.month)+'-'+str(begin_TimeStamp.day)+'T'+str(begin_TimeStamp.hour)+'-'+str(begin_TimeStamp.minute)
    start_second=str(begin_TimeStamp.second)
    filename=output_spectrogram_dir + serial + '-' + audio_begin_TimeStamp + '-' + str(start_second) + '.png'
    # print(filename)
    # Draw the spectrogram into the prepared (axis-less) axes, then save.
    fig = tr.spectrogram(show=False,axes=ax)
    # # filename=output_spectrogram_dir+"AAA"+".png"
    plt.savefig(filename)
    plt.close()
# -
def generate_spectrogram(i):
    """Create the spectrogram PNG for row i of the global DataFrame df.

    Returns whatever graph_spectrogram returns (None), or None when the
    row fails — individual bad files are skipped so one corrupt waveform
    does not abort the whole parallel batch.
    """
    try:
        Filenam = df.loc[i, 'Waveform']
        Sub = df.loc[i, 'Sub']
        Fileall = seism_dir + Sub + '/' + Filenam
        # File names look like "<serial>.<component>.vel".
        serial, comp, ty = Filenam.split('.')
        serial = serial + '_' + comp + '_' + Sub + '_'
        # print([serial,Sub,Fileall])
        return graph_spectrogram(Fileall, Sub, serial)
    except Exception:
        # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
        # restrict the deliberate best-effort skip to ordinary errors.
        pass
# Parallel multiprocess for creating spectrograms in the Output_spectrogram folder
num_cores = multiprocessing.cpu_count()
# NOTE(review): re-assigns the same value already set in the setup cell — redundant.
output_spectrogram_dir = data_dir + "Extracted_Spectrogram_Full_Analysis/"
# One task per DataFrame row; failed rows yield None (see generate_spectrogram).
spectrograms = Parallel(n_jobs=num_cores)(delayed(generate_spectrogram)(i) for i in (range(len(df))))
|
notebooks/Extract_SPECTROGRAMS.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Results 2 - Powercell - More Capacity
#
# * Results from various models
# +
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.metrics import auc
from tensorflow.keras.models import load_model
from RESULTS_pcell_basic_HELPERS import Helpers
from RESULTS_pcell_basic_HELPERS2 import Helpers as Helpers2
helpers = Helpers()
helpers2 = Helpers2()
# Load models and get scores
# inference() returns (score curve, baseline curve, AUC); BH appears to be a
# buy-and-hold baseline — TODO confirm. Checkpoints 0-1 use Helpers' data
# pipeline, checkpoints 2-3 use Helpers2's.
filenames = ['model_2249', 'model_3599', 'run_2_model_2199', '3_layers_model_5399']
a_score, BH, a_auc = helpers.inference(load_model(filenames[0]))
b_score, _, b_auc = helpers.inference(load_model(filenames[1]))
c_score, _, c_auc = helpers2.inference(load_model(filenames[2]))
d_score, _, d_auc = helpers2.inference(load_model(filenames[3]))
scores = [a_score, b_score, c_score, d_score]
aucs = [a_auc, b_auc, c_auc, d_auc]
# Plot every model's score curve against the BH baseline.
_, _, train, test = helpers.get_data()
test_len = 98  # length of the test window — TODO confirm against len(test)
tick_step = test_len // 5
ticks_test = list(range(0, test_len, tick_step))
dates_test = [test['date'].iloc[pos] for pos in ticks_test]
sns.set_style('whitegrid')
plt.figure(figsize=(14, 8))
for curve, label in zip(scores, filenames):
    plt.plot(curve, linewidth=4, label=label)
plt.plot(BH, 'k', linewidth=4, label='BH')
plt.legend(loc='best')
plt.xticks(ticks_test, dates_test)
plt.show()
# AUC summary: integrate each score curve over the test index range.
x = np.arange(len(BH))
print('A R E A - U N D E R - C U R V E - S C O R E')
for name, value in zip(filenames, aucs):
    print(name, int(value))
print('BH', int(auc(x, BH)))
# -
# Re-plot a subset of the models (indices 0, 2 and 3) against the baseline.
plt.figure(figsize=(14, 8))
for score, name in zip([a_score, c_score, d_score], [filenames[0], filenames[2], filenames[3]]):
    plt.plot(score, linewidth=5, label=name)
plt.plot(BH, 'k', linewidth=5, label='BH')
plt.legend(loc='best')
plt.xticks(ticks_test, dates_test)
plt.show()
print('A R E A - U N D E R - C U R V E - S C O R E')
# NOTE(review): this prints AUCs for the first two models (indices 0 and 1),
# which does not match the subset plotted above (0, 2, 3) — confirm intent.
for value, name in zip(aucs[:2], filenames[:2]):
    print(name, int(value))
print('BH', int(auc(x, BH)))
|
notebooks/results_4_powercell.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tree.ctutils as ctu
from tree import treeutils
import numpy as np
import pickle
# Calculate merger event parameters
def find_merger(atree, idx=None, aexp_min=0.0):
    """
    Find merger events along the main progenitor trunk of a tree.

    atree    -- structured tree array (fields include 'id' and 'aexp')
    idx      -- tree id of the starting halo; defaults to the first row
    aexp_min -- stop walking back once the expansion factor drops below this
    Returns a list of step counters (0 = first step back in time) at which
    the main progenitor had more than one progenitor, i.e. a merger.
    """
    if idx is None:  # identity test for the sentinel, not `== None`
        idx = atree['id'][0]
    nprg = 1
    merger_list = []
    i = 0
    while nprg > 0:
        # Step to the main (most massive) progenitor.
        idx = ctu.get_progenitors(atree, idx, main=True)[0]
        ind = np.where(atree['id'] == idx)[0]
        if atree['aexp'][ind] < aexp_min:
            break
        nprg = ctu.get_npr(atree, idx)
        if nprg > 1:
            merger_list.append(i)
        i += 1
    return merger_list
def merger_mass_ratio(atree, idx=None):
    """
    Return progenitor mass ratios of the merger event ending at node `idx`,
    normalized by the most massive progenitor (1.0 = main progenitor).

    Returns 0 (and prints a message) when `idx` has fewer than two
    progenitors, i.e. is not a merger.
    """
    if idx is None:
        idx = atree['id'][0]
    prgs = ctu.get_progenitors(atree, idx)
    # only for mergers
    if len(prgs) > 1:
        i_prgs = np.concatenate([np.where(atree['id'] == i)[0] for i in prgs])
        mass_prgs = atree['m'][i_prgs]
        # The original appended the entire mass column once per progenitor
        # and then fell through without returning anything; index per
        # progenitor and return the normalized ratios instead.
        return mass_prgs / max(mass_prgs)
    print("This is not a merger")
    return 0
def merger_properties_main_prg(atree, idx):
    """
    Return the masses of all direct progenitors of the tree node `idx`.

    atree -- structured tree array (fields 'id', 'desc_id', 'm', ...)
    idx   -- tree id of the descendant node
    Returns atree['m'] for every row whose desc_id equals idx
    (unnormalized; the commented-out line below would normalize them).
    """
    #prgs = get_progenitors(atree, idx)
    #if len(prgs) > 1:
    #    i_prgs = [np.where(atree['id'] == i)[0] for i in prgs]
    i_prgs = np.where(atree['desc_id'] == idx)[0]
    print(i_prgs)  # debug output, left in place
    id_prgs = atree['id'][i_prgs]  # NOTE(review): computed but unused
    mass_prgs = atree['m'][i_prgs]
    #mass_prgs_norm = mass_prgs / sum(mass_prgs)
    return mass_prgs
def load_tree(wdir, is_gal=False, no_dump=False):
    """Load a consistent-trees catalog for one cluster.

    Tries the cached "extended_tree.pickle" first; if that is missing or
    unreadable, loads the raw tree_0_0_0.dat, shifts 'nout' so the final
    snapshot is 187, and augments the tree data.

    wdir    -- cluster working directory (trailing slash expected)
    is_gal  -- load the galaxy (GalaxyMaker) tree instead of the halo tree
    no_dump -- skip the nout fix / augmentation of a freshly loaded tree
    """
    import pickle
    from tree import treemodule
    import tree.ctutils as ctu
    if is_gal:
        # Galaxy tree
        tree_path = 'GalaxyMaker/Trees/'
    else:
        # halo tree
        tree_path = 'halo/Trees/'
    try:
        # `with` closes the handle (the original leaked it); `except Exception`
        # replaces a bare `except:` that would also swallow KeyboardInterrupt.
        with open(wdir + tree_path + "extended_tree.pickle", "rb") as f:
            alltrees = pickle.load(f)
        print("Loaded an extended tree")
    except Exception:
        alltrees = treemodule.CTree()
        alltrees.load(filename=wdir + tree_path + 'tree_0_0_0.dat')
        if not no_dump:
            # Fix nout -----------------------------------------------------
            nout_max = alltrees.data['nout'].max()
            # 187 = final snapshot number of this simulation — TODO confirm.
            alltrees.data['nout'] += 187 - nout_max
            print("------ NOUT fixed")
            alltrees.data = ctu.augment_tree(alltrees.data, wdir, is_gal=is_gal)
            print("------ tree data extended")
    return alltrees
# +
import utils.match as mtc
import matplotlib.pyplot as plt
import pandas as pd
is_gal = True  # analyzing galaxy (GalaxyMaker) trees rather than halo trees
# Load tree
wdir = '/home/hoseung/Work/data/05427/'  # cluster 05427 working directory
alltrees = load_tree(wdir, is_gal=True)
# +
#fig, axs = plt.subplots(11) # 11 subplots in a row]
# Try to do it without catalog.
#catalog = pickle.load(open(wdir + '/catalog_GM/' + 'catalog187.pickle', 'rb'))
#allgals = catalog['id']
nout_fi = 187  # final snapshot number
# Tree rows belonging to the final snapshot.
ft = alltrees.data[alltrees.data['nout'] == nout_fi]
# IDs of all sufficiently massive galaxies (m > 1e10; mass units per tree catalog — TODO confirm).
allgals = ft['id'][ft['m'] > 1e10]
# +
#fig.add_subplot(222) # == fig.add_subplot(2,2,2)
# Plot lambda_R evolution with merger events marked, one panel per galaxy.
fig, axs = plt.subplots(2,2)
axr = axs.ravel()
gals = allgals[2:6]  # four sample galaxies to fill the 2x2 grid
#gals = [42]
verbose = False
cal_lambda = False # requires catalog.pickle
# NOTE(review): with cal_lambda=False, L_r_evol stays all zeros, so the
# `sum(L_r_evol > 0) < 1` guard below skips every galaxy and nothing is drawn.
for iax, gal in enumerate(gals):
    if verbose: print("analyzing merger events of galaxy ", gal)
    # Convert halo id to tree id
    #idx = ctu.id_to_idx(alltrees.data, gal, 187)
    idx = gal
    # full tree of a galaxy
    atree = ctu.extract_a_tree(alltrees.data, idx)
    # main progenitor tree
    main = ctu.extract_main_tree(alltrees.data, idx)
    if len(main) < 50:
        continue
    x_nout = main['nout'].flatten()
    # all halo ids of main prgs
    main_ids = main['Orig_halo_id']
    # lambda evolution history
    L_r_evol = np.zeros(len(x_nout))
    mass_ratios_single = np.zeros(len(x_nout))
    for i, nout in enumerate(x_nout):
        if nout < 37:
            break
        snout = str(nout)
        if cal_lambda:
            # Look up this snapshot's catalog entry for the main progenitor.
            catalog = pickle.load(open(wdir + '/catalog_GM/' + 'catalog' + snout + '.pickle', 'rb'))
            ii = catalog['id'] == main_ids[i]
            if sum(ii) > 0:
                this_gal = catalog[catalog['id'] == main_ids[i]]
                #gal_history.append(this_gal)
                #L_r_evol[i] = gal_history[i][0]['lambda_r']
                L_r_evol[i] = this_gal['lambda_r'][0]
            else:
                L_r_evol[i]=0
        # merger ratio
        i_prgs = np.where(atree['desc_id'] == main['id'][i])[0]
        # multiple prgs = merger
        if len(i_prgs) > 1:
            if verbose: print(" {} mergers at nout = {} or {}".format(len(i_prgs), nout, main['nout'][i]))
            id_prgs = atree['id'][i_prgs]
            mass_prgs = atree['m'][i_prgs]
            m_r = mass_prgs / max(mass_prgs)
            if verbose:
                print(" Mass ratios : ", m_r)
            # Largest satellite-to-primary mass ratio of this event.
            mass_ratios_single[i] = max([mass_prgs[1:] / max(mass_prgs)][0])
        else:
            mass_ratios_single[i] = 0
    if sum(L_r_evol > 0) < 1:
        # If the galaxy is not identified in any catalog, skip.
        continue
    #if len(gal_history) < 1:
    ax = axr[iax]
    ax.set_ylim([0,1.0])
    ax.plot(x_nout, L_r_evol)#[::-1])
    # NOTE(review): `gal_history` is never defined (only commented out above),
    # so this line raises NameError if reached — confirm intended mstar source.
    ax.set_title("ID: {0}, $ M_* $: {1:0.1e}".format(str(gal), gal_history[0]['mstar'][0]))
    i_merger = mass_ratios_single > 0
    if sum(i_merger) > 0:
        # Because of the way df.groupby works, sort in increasing order.
        merger_ratio_bins = [1/100, 1/10, 1/3] # 3:1, 10:1, 100:1 - Major, minor, tiny
        df = pd.DataFrame(data=dict(x=x_nout[i_merger],
                                    y=L_r_evol[i_merger] + 0.3,
                                    q=mass_ratios_single[i_merger]))
        grouped = df.groupby(np.digitize(df.q, merger_ratio_bins))
        sizes = [100 * np.sqrt(i) for i in merger_ratio_bins]
        labels = ["Tiny", "Minor", "Major"]
        for i, (name, group) in enumerate(grouped):
            ax.scatter(group.x, group.y, s=sizes[i], alpha=0.2 * (1+i), marker='v', label=labels[i], color='red')
        ax.legend()
        # print mass ratio only for major mergers
        try:
            a = grouped.get_group(len(merger_ratio_bins) -1) # Last bin = Major Merger
            for x, y, mr in zip(a.x, a.y, a.q):
                ax.annotate("{:.1f}".format(mr),
                            xy=(x, y), xycoords='data',
                            xytext=(0, 5), textcoords='offset points')
        except:
            # There is no Major Merger group.
            pass
plt.show()
# -
main_ids
catalog['id']
# ## Last merger of galaxies
#
# 1) divide fast/slow rotators
# 2) find last merger (mass ratio > 10:1)
# 3) plot rotation paramter Vs mass ratio of last merger..?
# +
#Last merger
# For every galaxy in six clusters, find its most recent merger with mass
# ratio above 1:10 and scatter that epoch against the galaxy's final lambda_r.
import matplotlib.pyplot as plt
nout_ini = 100 # recent merger = nout = 140 or later.
nout_fi = 187
# Load tree
is_gal = True
fig, ax = plt.subplots(1)
# all catalogs
verbose=False
clusters = ['39990', '36415', '10002', '05427', '36413', '01605']
for cluster in clusters:
    wdir = '/home/hoseung/Work/data/' + cluster + '/'
    alltrees = load_tree(wdir, is_gal=True)
    catalog = pickle.load(open(wdir + '/catalog_GM/' + 'catalog187.pickle', 'rb'))
    print("Cluster ",cluster)
    for cat in catalog:
        gal = cat['id']
        if verbose: print("analyzing merger events of galaxy ", gal)
        # Convert halo id to tree id
        #idx = id2idx(alltrees.data, gal, 187)
        idx = cat['idx']
        # full tree of a galaxy
        atree = ctu.extract_a_tree(alltrees.data, idx)
        # main progenitor tree
        main = ctu.extract_main_tree(alltrees.data, idx)
        x_nout = main['nout'].flatten()
        x_nout = x_nout[x_nout > nout_ini]  # only consider recent snapshots
        mass_ratios_single = np.zeros(len(x_nout))
        for i, nout in enumerate(x_nout):
            # merger ratio
            i_prgs = np.where(atree['desc_id'] == main['id'][i])[0]
            # multiple prgs = merger
            if len(i_prgs) > 1:
                if verbose: print(" {} mergers at nout = {}".format(len(i_prgs), nout))
                id_prgs = atree['id'][i_prgs]
                mass_prgs = atree['m'][i_prgs]
                m_r = mass_prgs / max(mass_prgs)
                if verbose:
                    print(" Mass ratios : ", m_r)
                mass_ratios_single[i] = max([mass_prgs[1:] / max(mass_prgs)][0])
            else:
                mass_ratios_single[i] = 0
        # Most recent merger above the 1:10 mass-ratio threshold.
        ind_ok = np.where(mass_ratios_single > 0.1)[0]
        #print("all ind_ok", ind_ok)
        if len(ind_ok) > 0:
            ind_ok = max(ind_ok)
            print(" galaxy {}, Last nout {}, Merger ratio 1:{:.1f}".format(gal,
                  x_nout[ind_ok],
                  1./mass_ratios_single[ind_ok]))
            mr = 1./mass_ratios_single[ind_ok]
            #ax.scatter(x_nout[ind_ok], cat['lambda_r'])
            # marker size encodes the inverse merger mass ratio
            ax.scatter(x_nout[ind_ok], cat['lambda_r'], s=100*mr)
ax.set_title("last merger Vs final lambda")
ax.set_ylabel(r"$\lambda _R$")
ax.set_xlabel("Last merger")
# -
plt.show()
# ## Delta lambda
|
scripts/notebooks/halo/Merger_property_plot.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/artbrgn/DS-Unit-1-Sprint-1-Dealing-With-Data/blob/master/module2-loadingdata/LS_DS_112_Loading_Data.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="-c0vWATuQ_Dn" colab_type="text"
# # Lambda School Data Science - Loading, Cleaning and Visualizing Data
#
# Objectives for today:
# - Load data from multiple sources into a Python notebook
# - From a URL (github or otherwise)
# - CSV upload method
# - !wget method
# - "Clean" a dataset using common Python libraries
# - Removing NaN values "Data Imputation"
# - Create basic plots appropriate for different data types
# - Scatter Plot
# - Histogram
# - Density Plot
# - Pairplot (if we have time)
# + [markdown] id="grUNOP8RwWWt" colab_type="text"
# # Part 1 - Loading Data
#
# Data comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.
#
# Data set sources:
#
# - https://archive.ics.uci.edu/ml/datasets.html
# - https://github.com/awesomedata/awesome-public-datasets
# - https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)
#
# Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags).
# + [markdown] id="wxxBTeHUYs5a" colab_type="text"
# ## Lecture example - flag data
# + id="nc-iamjyRWwe" colab_type="code" outputId="e2176db3-3f82-47fb-d19a-557fcd68a426" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
# !curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# + id="UKfOq1tlUvbZ" colab_type="code" colab={}
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
# First attempt: read_csv assumes a header row by default,
# which this file does not have (fixed further below with header=None).
flag_data = pd.read_csv(flag_data_url)
# + id="exKPtcJyUyCX" colab_type="code" outputId="62efd887-fbf6-4761-b503-36e941031a6f" colab={"base_uri": "https://localhost:8080/", "height": 243}
# Step 3 - verify we've got *something*
flag_data.head()
# + id="rNmkv2g8VfAm" colab_type="code" outputId="e8e89137-ca58-44a6-f908-cff6b5f35988" colab={"base_uri": "https://localhost:8080/", "height": 597}
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
# + id="iqPEwx3aWBDR" colab_type="code" outputId="be530c6b-963a-4af1-c586-52cb7e092555" colab={"base_uri": "https://localhost:8080/", "height": 92}
# !curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# + id="5R1d1Ka2WHAY" colab_type="code" outputId="df41584d-a9a1-4ecc-a702-3e7fea74c33a" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# + id="o-thnccIWTvc" colab_type="code" outputId="d2acacd3-e93a-43ed-d275-b4de9c88c701" colab={"base_uri": "https://localhost:8080/", "height": 206}
# Alright, we can pass header=None to fix this
# header=None stops pandas from consuming the first data row as column
# names; columns are auto-numbered 0..29 instead.
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
# + id="iG9ZOkSMWZ6D" colab_type="code" outputId="bf761fee-8343-4d9c-e297-169dc971fc0b" colab={"base_uri": "https://localhost:8080/", "height": 597}
flag_data.count()
# + id="gMcxnWbkWla1" colab_type="code" outputId="f770a903-f886-4c04-9efe-a66c45c0bb32" colab={"base_uri": "https://localhost:8080/", "height": 597}
flag_data.isna().sum()
# + [markdown] id="AihdUkaDT8We" colab_type="text"
# ### Yes, but what does it *mean*?
#
# This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).
#
# ```
# 1. name: Name of the country concerned
# 2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 5=Asia, 6=Oceania
# 3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW
# 4. area: in thousands of square km
# 5. population: in round millions
# 6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others
# 7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others
# 8. bars: Number of vertical bars in the flag
# 9. stripes: Number of horizontal stripes in the flag
# 10. colours: Number of different colours in the flag
# 11. red: 0 if red absent, 1 if red present in the flag
# 12. green: same for green
# 13. blue: same for blue
# 14. gold: same for gold (also yellow)
# 15. white: same for white
# 16. black: same for black
# 17. orange: same for orange (also brown)
# 18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)
# 19. circles: Number of circles in the flag
# 20. crosses: Number of (upright) crosses
# 21. saltires: Number of diagonal crosses
# 22. quarters: Number of quartered sections
# 23. sunstars: Number of sun or star symbols
# 24. crescent: 1 if a crescent moon symbol present, else 0
# 25. triangle: 1 if any triangles present, 0 otherwise
# 26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 0
# 27. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise
# 28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise
# 29. topleft: colour in the top-left corner (moving right to decide tie-breaks)
# 30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)
# ```
#
# Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
# + id="okEjAUHwEZtE" colab_type="code" colab={}
# + [markdown] id="XUgOnmc_0kCL" colab_type="text"
# ## Steps of Loading and Exploring a Dataset:
#
# - Find a dataset that looks interesting
# - Learn what you can about it
# - What's in it?
# - How many rows and columns?
# - What types of variables?
# - Look at the raw contents of the file
# - Load it into your workspace (notebook)
# - Handle any challenges with headers
# - Handle any problems with missing values
# - Then you can start to explore the data
# - Look at the summary statistics
# - Look at counts of different categories
# - Make some plots to look at the distribution of the data
# + [markdown] id="U8gXBy_NX_GY" colab_type="text"
# ## 3 ways of loading a dataset
# + [markdown] id="EcOkhyxZYlN0" colab_type="text"
# ### From its URL
# + id="LGFEA44KYcBi" colab_type="code" colab={}
# + [markdown] id="_3_po1C3Yo_s" colab_type="text"
# ### From a local file
# + id="kuZtM98oYcKX" colab_type="code" colab={}
# + [markdown] id="Kxq5l_9CYrAI" colab_type="text"
# ### Using the `!wget` command
# + id="SZ_Tyt9NYcMr" colab_type="code" colab={}
# + [markdown] id="tmJSfyXJ1x6f" colab_type="text"
# # Part 2 - Deal with Missing Values
# + [markdown] id="bH46YMHEDzpD" colab_type="text"
# ## Diagnose Missing Values
#
# Lets use the Adult Dataset from UCI. <https://github.com/ryanleeallred/datasets>
# + id="5ScSPPDbY_iX" colab_type="code" colab={}
# + [markdown] id="SYK5vXqt7zp1" colab_type="text"
# ## Fill Missing Values
# + id="qzEunYExZAE3" colab_type="code" colab={}
# + [markdown] id="CFnUMJy6Zbjc" colab_type="text"
# # Part 3 - Explore the Dataset:
# + [markdown] id="rQIiPTZfZsqQ" colab_type="text"
# ## Look at Summary Statistics
# + [markdown] id="SFIqoURnZ5jD" colab_type="text"
# ### Numeric
# + id="dlrSyfb8Z9-n" colab_type="code" colab={}
# + [markdown] id="qkJZBFBTZ7m3" colab_type="text"
# ### Non-Numeric
# + id="6LYDwJ62Z46o" colab_type="code" colab={}
# + [markdown] id="NhrQ0qWlZyzw" colab_type="text"
# ## Look at Categorical Values
# + [markdown] id="aI2oN4kj1uVQ" colab_type="text"
# # Part 4 - Basic Visualizations (using the Pandas Library)
# + [markdown] id="y2NB4XQwbuvB" colab_type="text"
# ## Histogram
# + id="qWIO8zuhArEr" colab_type="code" colab={}
# Pandas Histogram
# + [markdown] id="VBDMJsUQbxE9" colab_type="text"
# ## Density Plot (KDE)
# + id="NyeZPpxRD1BA" colab_type="code" colab={}
# Pandas Density Plot
# + [markdown] id="54unTcvhb0u5" colab_type="text"
# ## Scatter Plot
# + id="zxEajNvjAvfB" colab_type="code" colab={}
# Pandas Scatterplot
|
module2-loadingdata/LS_DS_112_Loading_Data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Top Movies Data Processing
#
# This notebook contains steps to aggregate [top_movies_data.csv](https://github.com/the-pudding/data/tree/master/film-or-digital/top_movies_data.csv) to the ones visualized in The Pudding essay [Film or Digital: Breaking Down Hollywood's Choice of Shooting Medium](https://pudding.cool/2018/08/film-or-digital/).
#import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import json
from IPython.core.display import display
#load data
dataset = pd.read_csv("top_movies_data.csv")
# +
#exclude genre Animation and Documentary
dataset=dataset[~dataset['genres'].str.contains("Animation|Documentary")]
#exclude movies with unknown medium (film_type code "U")
dataset=dataset[~dataset["film_type"].str.contains("U")]
# -
#expand film type to one 0/1 indicator column per medium:
#  D = digital, F = film, D|F = both — TODO confirm code meanings
dummies=pd.get_dummies(dataset,columns=["film_type"])
dataset=pd.concat([dataset,dummies[["film_type_D","film_type_D|F","film_type_F"]]],axis=1)
# ------
# ## Prepare some functions to map values/formatting later
#Function to calculate percentage of film/digital/both per aggregation group.
def get_percentage(series):
    """Express each entry of *series* as a percentage of the series total."""
    return series / series.sum() * 100
# +
#Function to round percentage breakdown using Largest Remainder Method
#useful when making waffle chart, to make sure the percentage breakdown add up to exactly 100 percent.
#https://stackoverflow.com/questions/13483430/how-to-make-rounded-percentages-add-up-to-100
def largest_remainder(series):
    """Round a breakdown to whole percentages that sum to exactly 100.

    Largest Remainder Method: floor each percentage, then hand out the
    leftover points to the entries with the largest fractional parts.
    https://stackoverflow.com/questions/13483430/how-to-make-rounded-percentages-add-up-to-100
    """
    # Percentage breakdown (inlined so the function is self-contained).
    series_pct = series / series.sum() * 100
    floored_series = series_pct.apply(np.floor)
    remainder_series = series_pct - floored_series
    remainder = 100 - floored_series.sum()
    # Series.iteritems() was removed in pandas 2.0; .items() is the replacement.
    for index, values in remainder_series.sort_values(ascending=False).items():
        if remainder > 0:
            floored_series[index] += 1
            remainder -= 1
    return floored_series
# -
#Function to map year to a three-years period.
def calculate_period(series):
    """Map production years to three-year period labels.

    period 1: 2006-2008, period 2: 2009-2011, etc. Years outside all
    ranges fall through to numpy.select's default (0).
    """
    period_bounds = [(2006, 2008), (2009, 2011), (2012, 2014), (2015, 2017)]
    conditions = [series.between(lo, hi) for lo, hi in period_bounds]
    labels = ["{} - {}".format(lo, hi) for lo, hi in period_bounds]
    return np.select(conditions, labels)
# ------
# # Compute Mediums of Top Movies by Year
#select relevant columns for this analysis
movie_list=dataset[["production_year","title","film_type_D","film_type_D|F","film_type_F"]]
#group movies medium per year (sums the 0/1 indicator columns per year)
# NOTE(review): "title" is non-numeric; recent pandas may require numeric_only=True here.
movie_medium_peryear=movie_list.groupby("production_year").sum()
movie_medium_peryear.columns=["Digital","Both","Film"]
movie_medium_peryear["Total"]=movie_medium_peryear["Digital"]+movie_medium_peryear["Both"]+movie_medium_peryear["Film"]
#round result using Largest Remainder Method so each row sums to exactly 100
finalresult=movie_medium_peryear[["Digital","Both","Film"]].apply(lambda x:largest_remainder(x), axis=1)
#display result
with pd.option_context('display.max_rows', None, 'display.max_columns', None, 'display.max_colwidth', -1):
    display(finalresult)
# ------
# # Compute Medium of Top Movies by Genre and Period
#select relevant columns for this analysis
movie_list=dataset[["production_year","title","genres","film_type_D","film_type_D|F","film_type_F"]]
#Map production year to period.
# NOTE(review): movie_list is a slice of `dataset`; assigning into it triggers
# pandas' SettingWithCopy warning — a .copy() at selection would silence it.
movie_list["period"]=movie_list[["production_year"]].apply(calculate_period)
movie_list.drop(["production_year"],axis=1,inplace=True)
#compute stats for all genres
movie_medium_allgenre=movie_list.groupby(["period"]).sum()
movie_medium_allgenre.columns=["Digital","Both","Film"]
# prepend an 'All' index level so these rows align with the per-genre table below
movie_medium_allgenre=pd.concat([movie_medium_allgenre], keys=['All'], names=['Genres'])
#split movie genres in multiple rows, to use in group by method later
genre_split = movie_list['genres'].str.split('|').apply(pd.Series, 1).stack()
genre_split.index=genre_split.index.droplevel(-1)
genre_split.name = 'genres_split'
movie_list=movie_list.join(genre_split)
# +
#compute stats per genre
movie_medium_bygenre=movie_list.groupby(["genres_split","period"]).sum()
movie_medium_bygenre.columns=["Digital","Both","Film"]
# and combine them with stats across all genres (the 'All' block built above)
movie_medium_bygenre=pd.concat([movie_medium_allgenre,movie_medium_bygenre])
# -
#round result using Largest Remainder Method so each row sums to exactly 100
finalresult=movie_medium_bygenre.apply(lambda x:largest_remainder(x), axis=1)
#display result
with pd.option_context('display.max_rows', None, 'display.max_columns', None, 'display.max_colwidth', -1):
    display(finalresult)
# ------
# # Medium of Top Movies, by Budget Range and Period
#select relevant columns for this analysis
movie_list=dataset[["production_year","title","budget","film_type_D","film_type_D|F","film_type_F"]]
# +
#Categorize production year to period
movie_list["period"]=movie_list[["production_year"]].apply(calculate_period)
#and drop production year column
movie_list.drop(["production_year"],axis=1,inplace=True)
# +
#drop movies with unknown budget (encoded as 0 or negative)
movie_list=movie_list[movie_list["budget"]>0]
#Map budget to budget range (buckets in USD; labels prefixed a.-e. to sort correctly)
condition=[
    movie_list['budget']<20000000,
    movie_list['budget'].between(20000000, 39999999),
    movie_list['budget'].between(40000000, 79999999),
    movie_list['budget'].between(80000000, 159999999),
    movie_list['budget']>=160000000
]
category=[
    "a. <20M","b. 20M-40M","c. 40M-80M","d. 80M-160M","e. >=160M"
]
movie_list["budget_range"]=np.select(condition,category)
#and drop budget column
movie_list.drop(["budget"],axis=1,inplace=True)
# -
#group movie medium per period and budget range
movie_medium_bybudget=movie_list.groupby(["period","budget_range"]).sum()
movie_medium_bybudget.columns=["Digital","Both","Film"]
#round result using Largest Remainder Method so each row sums to exactly 100
tempresult=movie_medium_bybudget.apply(lambda x:largest_remainder(x), axis=1)
#transform the table to the ones shown in the essay:
#one row per (medium, period), columns = budget ranges.
rows = []
for medium in ["Digital","Film","Both"]:
    for period in movie_list["period"].unique():
        rows.append(tempresult[[medium]].transpose()[period].set_index([[period]],append=True))
# DataFrame.append was removed in pandas 2.0; build the frame with pd.concat.
finalresult = pd.concat(rows)
finalresult.index.set_names(["medium","period"],inplace=True)
#display result
# (max_colwidth=None replaces the deprecated -1 for "no truncation")
with pd.option_context('display.max_rows', None, 'display.max_columns', None, 'display.max_colwidth', None):
    display(finalresult)
# ------
# # Medium of Top Movies, by Budget and Period
#select relevant columns for this analysis
movie_list=dataset[["id","production_year","title","budget","film_type"]]
#Categorize production year to period
movie_list["period"]=movie_list[["production_year"]].apply(calculate_period)
#drop movies with unknown budget (encoded as 0 or negative)
movie_list=movie_list[movie_list["budget"]>0]
#rearrange data: ordered list of movies within each period
movie_list.sort_values(by=["period","production_year","budget"],inplace=True)
movie_list.set_index("period",inplace=True)
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    display(movie_list)
|
filmordigital/top_movies_data_processing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="rRv0FICB8DZi" executionInfo={"status": "ok", "timestamp": 1629217651220, "user_tz": -540, "elapsed": 3872, "user": {"displayName": "\uae40\ud604\uc6b0", "photoUrl": "", "userId": "06560543018646300359"}}
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import pandas as pd
# + colab={"base_uri": "https://localhost:8080/", "height": 460} id="MhQfy90o8FKu" executionInfo={"status": "error", "timestamp": 1629217653807, "user_tz": -540, "elapsed": 283, "user": {"displayName": "\uae40\ud604\uc6b0", "photoUrl": "", "userId": "06560543018646300359"}} outputId="a3cba785-edd5-4b83-a570-a4ca0fdd8bc4"
# Load the sonar dataset (no header row in the CSV).
df = pd.read_csv('sample_data/sonar.csv',header=None)
# Split into features and labels: columns 0-59 are measurements, column 60 is the class.
dataset = df.values
X = dataset[:,0:60].astype(float)
Y_obj = dataset[:,60]
# Encode the string class labels as integers.
e = LabelEncoder()
e.fit(Y_obj)
Y = e.transform(Y_obj)
# + colab={"base_uri": "https://localhost:8080/", "height": 241} id="K4OD0PzF8ID7" executionInfo={"status": "error", "timestamp": 1629217660943, "user_tz": -540, "elapsed": 261, "user": {"displayName": "\uae40\ud604\uc6b0", "photoUrl": "", "userId": "06560543018646300359"}} outputId="4ca828cb-637a-4f86-b727-0855c7467a4d"
# Split the full dataset into training and test sets.
X_train1, X_test, Y_train1, Y_test = train_test_split(X, Y, test_size=0.2,shuffle=True) ## shuffle=True shuffles the data before splitting
## Split the training set again into training and validation sets.
X_train, X_valid, Y_train, Y_valid = train_test_split(X_train1, Y_train1, test_size=0.2, shuffle=True) ## shuffle=True shuffles the data before splitting
# + colab={"base_uri": "https://localhost:8080/", "height": 336} id="ocWzjJiP8J17" executionInfo={"status": "error", "timestamp": 1629217664993, "user_tz": -540, "elapsed": 287, "user": {"displayName": "\uae40\ud604\uc6b0", "photoUrl": "", "userId": "06560543018646300359"}} outputId="abb94776-ee92-4946-f99a-785c781c5e08"
# Reload the previously-trained sonar model from disk so training can resume.
model=tf.keras.models.load_model('sonar_model.h5')
# + colab={"base_uri": "https://localhost:8080/", "height": 276} id="MkUvJ-zy8Kz1" executionInfo={"status": "error", "timestamp": 1629217687611, "user_tz": -540, "elapsed": 266, "user": {"displayName": "\uae40\ud604\uc6b0", "photoUrl": "", "userId": "06560543018646300359"}} outputId="96ec113b-a946-4a48-dec9-5a19422e4419"
# Compile the model for binary classification.
loss=tf.keras.losses.binary_crossentropy
# Fix: the `lr` argument of tf.keras optimizers is deprecated (and removed in
# Keras 3); the documented name is `learning_rate`.
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
metrics=tf.keras.metrics.binary_accuracy
model.compile(loss=loss,
              optimizer=optimizer,
              metrics=[metrics])
## model.fit returns a History object holding per-epoch training metrics;
## passing validation_data=(X_valid, Y_valid) also records validation metrics.
result=model.fit(X_train, Y_train, epochs=50, batch_size=50, validation_data=(X_valid,Y_valid))
## result.history is a dict; inspect its keys to see which metrics were recorded.
print(result.history.keys())
# + colab={"base_uri": "https://localhost:8080/", "height": 241} id="Up3uwamq8QV7" executionInfo={"status": "error", "timestamp": 1629217704919, "user_tz": -540, "elapsed": 291, "user": {"displayName": "\uae40\ud604\uc6b0", "photoUrl": "", "userId": "06560543018646300359"}} outputId="8c1379ec-c586-47f7-8fe5-b917839e0259"
### Extract the values stored under the 'loss' and 'val_loss' keys of history.
loss = result.history['loss']
val_loss = result.history['val_loss']
### Plot loss and val_loss against epoch number.
epochs = range(1, len(loss) + 1)
plt.subplot(211) ## first of the 2x1 subplots
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
### Extract the values stored under the binary_accuracy and val_binary_accuracy keys.
acc = result.history['binary_accuracy']
val_acc = result.history['val_binary_accuracy']
### Plot binary_accuracy and val_binary_accuracy.
plt.subplot(212) ## second of the 2x1 subplots
plt.plot(epochs, acc, 'ro', label='Training acc')
plt.plot(epochs, val_acc, 'r', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
## Show the figure.
plt.show()
# + id="O_hROX_x8Ukk"
# Check accuracy on the held-out test data with model.evaluate.
## model.evaluate(X_test, Y_test) returns [loss, binary_accuracy] because
## metrics=[binary_accuracy] was passed to model.compile above.
print("\n Test Accuracy: %.4f" % (model.evaluate(X_test, Y_test)[1]))
|
tensorflow/day3/answer/A_03_05_sonar_retrain.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # rank
# ## same
# +
from my_happy_graphviz import pydot
from my_happy_jupyter_utils import (
    image_utils
)
# Build a digraph where {A, X} share one rank and {B, D, Y} share another,
# forcing those nodes onto the same level in the layout.
G = pydot.Dot(graph_type='digraph')
sub_g = pydot.Subgraph('', rank='same')
sub_g.add_node('A')
sub_g.add_node('X')
G.add_node('C')
G.add_subgraph(sub_g)
sub_g = pydot.Subgraph('', rank='same')
sub_g.add_node('B')
sub_g.add_node('D')
sub_g.add_node('Y')
G.add_subgraph(sub_g)
G.add_edge_str('A', 'B')
G.add_edge_str('A', 'C')
G.add_edge_str('C', 'D')
G.add_edge_str('X', 'Y')
# Emit the DOT source for inspection.
print(G.to_string())
# Render to SVG and display it inline with a title.
file_name = '/'.join([
    'data/output/images',
    '0800_0101_rank_same.svg'
])
G.draw(file_name)
image_utils.show_image_with_title_by_url({
    'file_path': file_name,
    'title': 'rank same',
})
# -
|
examples/0800_0101_rank.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from keras.layers import Input, Conv2D, Lambda, merge, Dense, Flatten,MaxPooling2D
from keras.models import Model, Sequential
from keras.regularizers import l2
from keras import backend as K
from keras.optimizers import SGD,Adam
from keras.losses import binary_crossentropy
import numpy.random as rng
import numpy as np
import os
import pickle
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.utils import shuffle
# %matplotlib inline
def W_init(shape,name=None):
    """Weight initializer from the paper: zero-mean Gaussian, std 0.01."""
    sampled = rng.normal(loc=0, scale=1e-2, size=shape)
    weight_var = K.variable(sampled, name=name)
    return weight_var
#//TODO: figure out how to initialize layer biases in keras.
def b_init(shape,name=None):
    """Bias initializer from the paper: Gaussian with mean 0.5, std 0.01."""
    sampled = rng.normal(loc=0.5, scale=1e-2, size=shape)
    bias_var = K.variable(sampled, name=name)
    return bias_var
# Build the siamese network: one shared convnet encodes both 105x105 inputs,
# the encodings are compared with a component-wise L1 distance, and a single
# sigmoid unit predicts same/different.
input_shape = (105, 105, 1)
left_input = Input(input_shape)
right_input = Input(input_shape)
#build convnet to use in each siamese 'leg'
convnet = Sequential()
convnet.add(Conv2D(64,(10,10),activation='relu',input_shape=input_shape,
                   kernel_initializer=W_init,kernel_regularizer=l2(2e-4)))
convnet.add(MaxPooling2D())
convnet.add(Conv2D(128,(7,7),activation='relu',
                   kernel_regularizer=l2(2e-4),kernel_initializer=W_init,bias_initializer=b_init))
convnet.add(MaxPooling2D())
convnet.add(Conv2D(128,(4,4),activation='relu',kernel_initializer=W_init,kernel_regularizer=l2(2e-4),bias_initializer=b_init))
convnet.add(MaxPooling2D())
convnet.add(Conv2D(256,(4,4),activation='relu',kernel_initializer=W_init,kernel_regularizer=l2(2e-4),bias_initializer=b_init))
convnet.add(Flatten())
convnet.add(Dense(4096,activation="sigmoid",kernel_regularizer=l2(1e-3),kernel_initializer=W_init,bias_initializer=b_init))
#call the convnet Sequential model on each of the input tensors so params will be shared
encoded_l = convnet(left_input)
encoded_r = convnet(right_input)
#layer to merge two encoded inputs with the l1 distance between them
L1_layer = Lambda(lambda tensors:K.abs(tensors[0] - tensors[1]))
#call this layer on list of two input tensors.
L1_distance = L1_layer([encoded_l, encoded_r])
prediction = Dense(1,activation='sigmoid',bias_initializer=b_init)(L1_distance)
siamese_net = Model(inputs=[left_input,right_input],outputs=prediction)
optimizer = Adam(0.00006)
#//TODO: get layerwise learning rates and momentum annealing scheme described in paperworking
siamese_net.compile(loss="binary_crossentropy",optimizer=optimizer)
siamese_net.count_params()
# -
# ## Data
# The data is pickled as an N_classes x n_examples x width x height array, and there is an accompanying dictionary to specify which indexes belong to which languages.
# +
PATH = "/home/soren/Desktop/keras-oneshot" #CHANGE THIS - path where the pickled data is stored
# Each pickle holds (X, c): the image array and a dict mapping language
# names to class-index ranges.
with open(os.path.join(PATH, "train.pickle"), "rb") as f:
    (X,c) = pickle.load(f)
with open(os.path.join(PATH, "val.pickle"), "rb") as f:
    (Xval,cval) = pickle.load(f)
print("training alphabets")
print(c.keys())
print("validation alphabets:")
print(cval.keys())
# +
class Siamese_Loader:
    """Loads pickled image data and serves batches and one-shot tasks to a
    siamese net.

    NOTE(review): `data_subsets` is a mutable default argument (shared across
    calls) and `train()` reads the module-level global `batch_size`; both are
    flagged here but left unchanged.
    """
    def __init__(self, path, data_subsets = ["train", "val"]):
        self.data = {}
        self.categories = {}
        self.info = {}
        # Each <subset>.pickle holds (X, c): the image array and a dict
        # mapping language -> (low, high) class-index range.
        for name in data_subsets:
            file_path = os.path.join(path, name + ".pickle")
            print("loading data from {}".format(file_path))
            with open(file_path,"rb") as f:
                (X,c) = pickle.load(f)
                self.data[name] = X
                self.categories[name] = c
    def get_batch(self,batch_size,s="train"):
        """Create batch of n pairs, half same class, half different class"""
        X=self.data[s]
        n_classes, n_examples, w, h = X.shape
        #randomly sample several classes to use in the batch
        # (replace=False requires batch_size <= n_classes)
        categories = rng.choice(n_classes,size=(batch_size,),replace=False)
        #initialize 2 empty arrays for the input image batch
        pairs=[np.zeros((batch_size, h, w,1)) for i in range(2)]
        #initialize vector for the targets, and make one half of it '1's, so 2nd half of batch has same class
        targets=np.zeros((batch_size,))
        targets[batch_size//2:] = 1
        for i in range(batch_size):
            category = categories[i]
            idx_1 = rng.randint(0, n_examples)
            pairs[0][i,:,:,:] = X[category, idx_1].reshape(w, h, 1)
            idx_2 = rng.randint(0, n_examples)
            #pick images of same class for 1st half, different for 2nd
            if i >= batch_size // 2:
                category_2 = category
            else:
                #add a random number to the category modulo n classes to ensure 2nd image has
                # ..different category
                category_2 = (category + rng.randint(1,n_classes)) % n_classes
            pairs[1][i,:,:,:] = X[category_2,idx_2].reshape(w, h,1)
        return pairs, targets
    def generate(self, batch_size, s="train"):
        """a generator for batches, so model.fit_generator can be used. """
        while True:
            pairs, targets = self.get_batch(batch_size,s)
            yield (pairs, targets)
    def make_oneshot_task(self,N,s="val",language=None):
        """Create pairs of test image, support set for testing N way one-shot learning. """
        X=self.data[s]
        n_classes, n_examples, w, h = X.shape
        indices = rng.randint(0,n_examples,size=(N,))
        if language is not None:
            low, high = self.categories[s][language]
            if N > high - low:
                raise ValueError("This language ({}) has less than {} letters".format(language, N))
            categories = rng.choice(range(low,high),size=(N,),replace=False)
        else:#if no language specified just pick a bunch of random letters
            categories = rng.choice(range(n_classes),size=(N,),replace=False)
        true_category = categories[0]
        ex1, ex2 = rng.choice(n_examples,replace=False,size=(2,))
        test_image = np.asarray([X[true_category,ex1,:,:]]*N).reshape(N, w, h,1)
        support_set = X[categories,indices,:,:]
        # Slot 0 of the support set is replaced with another example of the
        # true category, so exactly one support image matches the test image.
        support_set[0,:,:] = X[true_category,ex2]
        support_set = support_set.reshape(N, w, h,1)
        targets = np.zeros((N,))
        targets[0] = 1
        # Shuffle jointly so the true pair is not always in position 0.
        targets, test_image, support_set = shuffle(targets, test_image, support_set)
        pairs = [test_image,support_set]
        return pairs, targets
    def test_oneshot(self,model,N,k,s="val",verbose=0):
        """Test average N way oneshot learning accuracy of a siamese neural net over k one-shot tasks"""
        n_correct = 0
        if verbose:
            print("Evaluating model on {} random {} way one-shot learning tasks ...".format(k,N))
        for i in range(k):
            inputs, targets = self.make_oneshot_task(N,s)
            probs = model.predict(inputs)
            if np.argmax(probs) == np.argmax(targets):
                n_correct+=1
        percent_correct = (100.0*n_correct / k)
        if verbose:
            print("Got an average of {}% {} way one-shot learning accuracy".format(percent_correct,N))
        return percent_correct
    def train(self, model, epochs, verbosity):
        # NOTE(review): `batch_size` here is the module-level global, and the
        # `epochs`/`verbosity` parameters are unused — confirm this is intended.
        model.fit_generator(self.generate(batch_size),
                            )
#Instantiate the class
loader = Siamese_Loader(PATH)
# +
def concat_images(X):
    """Concatenate a batch of images into one big matrix for plotting.

    X -- array of shape (nc, h, w, 1); the nc images are laid out row-major
    on an n x n grid (n = ceil(sqrt(nc))), with unused cells left as zeros.
    Returns an (n*h, n*w) array.
    """
    nc, h, w, _ = X.shape
    X = X.reshape(nc, h, w)
    # Fix: use a plain int for the grid size — .astype("int8") overflows once
    # ceil(sqrt(nc)) exceeds 127.
    n = int(np.ceil(np.sqrt(nc)))
    # Fix: the canvas and the tile slices must use h for rows and w for
    # columns; the original mixed them up, which only worked for square images.
    img = np.zeros((n * h, n * w))
    x = 0
    y = 0
    for example in range(nc):
        img[x * h:(x + 1) * h, y * w:(y + 1) * w] = X[example]
        y += 1
        if y >= n:
            y = 0
            x += 1
    return img
def plot_oneshot_task(pairs):
    """Plot a one-shot task: the test image on top and the support set
    (tiled into one matrix by concat_images) below."""
    fig,(ax1,ax2) = plt.subplots(2)
    ax1.matshow(pairs[0][0].reshape(105,105),cmap='gray')
    img = concat_images(pairs[1])
    ax1.get_yaxis().set_visible(False)
    ax1.get_xaxis().set_visible(False)
    ax2.matshow(img,cmap='gray')
    plt.xticks([])
    plt.yticks([])
    plt.show()
#example of a one-shot learning task
pairs, targets = loader.make_oneshot_task(20,"train","Japanese_(katakana)")
plot_oneshot_task(pairs)
# +
#Training loop
print("!")
evaluate_every = 1 # interval for evaluating on one-shot tasks
loss_every=50 # interval for printing loss (iterations)
batch_size = 32
n_iter = 90000
N_way = 20 # how many classes for testing one-shot tasks
n_val = 250 # how many one-shot tasks to validate on?
# Fix: `best` must start below any reachable accuracy. It was initialised to
# 9999, but val_acc is a percentage (<= 100), so `val_acc >= best` was never
# true and the model was never saved.
best = -1.0
weights_path = os.path.join(PATH, "weights")
print("training")
for i in range(1, n_iter):
    (inputs,targets)=loader.get_batch(batch_size)
    loss=siamese_net.train_on_batch(inputs,targets)
    print(loss)
    if i % evaluate_every == 0:
        print("evaluating")
        val_acc = loader.test_oneshot(siamese_net,N_way,n_val,verbose=True)
        if val_acc >= best:
            print("saving")
            siamese_net.save(weights_path)
            best=val_acc
    if i % loss_every == 0:
        print("iteration {}, training loss: {:.2f},".format(i,loss))
# +
def nearest_neighbour_correct(pairs,targets):
    """Return 1 if a 1-nearest-neighbour classifier solves the one-shot task
    given by (pairs, targets), else 0.

    pairs   -- [test_images, support_set]; pairs[0][i] is the (repeated) test
               image and pairs[1][i] the i-th support image.
    targets -- array whose maximum marks the true support index.
    """
    # Use a float accumulator: zeros_like(targets) would inherit an integer
    # dtype from integer targets and truncate the distances.
    L2_distances = np.zeros(len(targets), dtype=float)
    for i in range(len(targets)):
        # Euclidean distance ||a - b||: square the *difference*, sum, sqrt.
        # Fix: the original computed sum(sqrt(a**2 - b**2)), which is not a
        # distance and yields NaN whenever a**2 < b**2 element-wise.
        L2_distances[i] = np.sqrt(np.sum((pairs[0][i] - pairs[1][i]) ** 2))
    if np.argmin(L2_distances) == np.argmax(targets):
        return 1
    return 0
def test_nn_accuracy(N_ways,n_trials,loader):
    """Return the nearest-neighbour baseline accuracy (a percentage) over
    n_trials random N_ways-way one-shot tasks drawn from the val split."""
    print("Evaluating nearest neighbour on {} unique {} way one-shot learning tasks ...".format(n_trials,N_ways))
    hits = sum(
        nearest_neighbour_correct(*loader.make_oneshot_task(N_ways, "val"))
        for _ in range(n_trials)
    )
    return 100.0 * hits / n_trials
# Sweep the number of classes (N-way) and record siamese-net accuracy on the
# validation and training alphabets plus the nearest-neighbour baseline.
ways = np.arange(1, 60, 2)
resume = False
val_accs, train_accs,nn_accs = [], [], []
trials = 450
for N in ways:
    val_accs.append(loader.test_oneshot(siamese_net, N,trials, "val", verbose=True))
    train_accs.append(loader.test_oneshot(siamese_net, N,trials, "train", verbose=True))
    nn_accs.append(test_nn_accuracy(N,trials, loader))
#plot the accuracy vs num categories for each
plt.plot(ways, val_accs, "m")
plt.plot(ways, train_accs, "y")
plt.plot(ways, nn_accs, "c")
plt.plot(ways,100.0/ways,"r")
plt.show()
# +
# Labelled comparison plot: siamese net (val/train), nearest-neighbour
# baseline, and random guessing (100/N) versus the number of classes.
fig,ax = plt.subplots(1)
ax.plot(ways,val_accs,"m",label="Siamese(val set)")
ax.plot(ways,train_accs,"y",label="Siamese(train set)")
plt.plot(ways,nn_accs,label="Nearest neighbour")
ax.plot(ways,100.0/ways,"g",label="Random guessing")
plt.xlabel("Number of possible classes in one-shot tasks")
plt.ylabel("% Accuracy")
# Fix: corrected the typo "Omiglot" -> "Omniglot" in the user-facing title.
plt.title("Omniglot One-Shot Learning Performance of a Siamese Network")
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# Show one sample validation task and the siamese net's raw predictions on it.
inputs,targets = loader.make_oneshot_task(20,"val")
plt.show()
print(inputs[0].shape)
plot_oneshot_task(inputs)
p=siamese_net.predict(inputs)
print(p)
# +
a=test_nn_accuracy(3,500,loader)
print(a)
# -
|
SiameseNet.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Setup
# +
import sys
import os
import re
import collections
import itertools
import bcolz
import pickle
sys.path.append('../../lib')
sys.path.append('../')
import numpy as np
import pandas as pd
import gc
import random
import smart_open
import h5py
import csv
import json
import functools
import time
import string
import datetime as dt
from tqdm import tqdm_notebook as tqdm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import global_utils
random_state_number = 967898
# +
import tensorflow as tf
from tensorflow.python.client import device_lib
def get_available_gpus():
    """Return the names of all GPU devices visible to TensorFlow."""
    names = []
    for device in device_lib.list_local_devices():
        if device.device_type == 'GPU':
            names.append(device.name)
    return names
# Let GPU memory grow on demand instead of grabbing it all up front.
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
get_available_gpus()
# -
# %pylab
# %matplotlib inline
# %load_ext line_profiler
# %load_ext memory_profiler
# %load_ext autoreload
# Silence chained-assignment warnings and widen the display for wide frames.
pd.options.mode.chained_assignment = None
pd.options.display.max_columns = 999
color = sns.color_palette()
# # Data
# Load the pre-processed train/test frames and the corpus vocabulary.
store = pd.HDFStore('../../data_prep/processed/stage1/data_frames.h5')
train_df = store['train_df']
test_df = store['test_df']
display(train_df.head())
display(test_df.head())
corpus_vocab_list, corpus_vocab_wordidx = None, None
# NOTE(review): the pickle is unpacked into `corpus_wordidx`, not the
# `corpus_vocab_wordidx` initialised above (which stays None) — confirm
# which name is intended.
with open('../../data_prep/processed/stage1/vocab_words_wordidx.pkl', 'rb') as f:
    (corpus_vocab_list, corpus_wordidx) = pickle.load(f)
print(len(corpus_vocab_list), len(corpus_wordidx))
# # Data Prep
# To control the vocabulary pass in updated corpus_wordidx
# +
from sklearn.model_selection import train_test_split
# Stratified 90/10 train/validation split on the class label.
x_train_df, x_val_df = train_test_split(train_df,
                                        test_size=0.10, random_state=random_state_number,
                                        stratify=train_df.Class)
print(x_train_df.shape)
print(x_val_df.shape)
# -
from tensorflow.contrib.keras.python.keras.utils import np_utils
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
vocab_size=len(corpus_vocab_list)
# ## T:sent_words
# ### generate data
# Dataset variant 21: each document is split into sentences of word ids
# (one training example per sentence).
custom_unit_dict = {
    "gene_unit" : "words",
    "variation_unit" : "words",
    # text transformed to sentences attribute
    "doc_unit" : "words",
    "doc_form" : "sentences",
    "divide_document": "multiple_unit"
}
# %autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_train_df, corpus_wordidx)
x_train_21_T, x_train_21_G, x_train_21_V, x_train_21_C = gen_data.generate_data(custom_unit_dict,
                                                                has_class=True,
                                                                add_start_end_tag=True)
del gen_data
print("Train data")
print(np.array(x_train_21_T).shape, x_train_21_T[0])
print(np.array(x_train_21_G).shape, x_train_21_G[0])
print(np.array(x_train_21_V).shape, x_train_21_V[0])
print(np.array(x_train_21_C).shape, x_train_21_C[0])
gen_data = global_utils.GenerateDataset(x_val_df, corpus_wordidx)
x_val_21_T, x_val_21_G, x_val_21_V, x_val_21_C = gen_data.generate_data(custom_unit_dict,
                                                                has_class=True,
                                                                add_start_end_tag=True)
del gen_data
print("Val data")
print("text",np.array(x_val_21_T).shape)
print("gene",np.array(x_val_21_G).shape, x_val_21_G[0])
print("variation",np.array(x_val_21_V).shape, x_val_21_V[0])
print("classes",np.array(x_val_21_C).shape, x_val_21_C[0])
# ### format data
word_unknown_tag_idx = corpus_wordidx["<UNK>"]
char_unknown_tag_idx = global_utils.char_unknown_tag_idx
MAX_SENT_LEN = 60
# Pad/truncate every sentence to MAX_SENT_LEN using the <UNK> id as filler.
x_train_21_T = pad_sequences(x_train_21_T, maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx,
                             padding="post",truncating="post")
x_val_21_T = pad_sequences(x_val_21_T, maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx,
                           padding="post",truncating="post")
print(x_train_21_T.shape, x_val_21_T.shape)
# keras np_utils.to_categorical expects zero index categorical variables
#
# https://github.com/fchollet/keras/issues/570
# Classes are 1-indexed in the data; shift to 0-indexed before one-hot encoding.
x_train_21_C = np.array(x_train_21_C) - 1
x_val_21_C = np.array(x_val_21_C) - 1
x_train_21_C = np_utils.to_categorical(np.array(x_train_21_C), 9)
x_val_21_C = np_utils.to_categorical(np.array(x_val_21_C), 9)
print(x_train_21_C.shape, x_val_21_C.shape)
# + [markdown] heading_collapsed=true
# ## T:text_words
# + [markdown] heading_collapsed=true hidden=true
# ### generate data
# + hidden=true
# Dataset variant 22: the whole document is a single sequence of word ids.
custom_unit_dict = {
    "gene_unit" : "words",
    "variation_unit" : "words",
    # text transformed to sentences attribute
    "doc_unit" : "words",
    "doc_form" : "text",
    "divide_document": "single_unit"
}
# + hidden=true
# %autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_train_df, corpus_wordidx)
x_train_22_T, x_train_22_G, x_train_22_V, x_train_22_C = gen_data.generate_data(custom_unit_dict,
                                                                has_class=True,
                                                                add_start_end_tag=True)
del gen_data
# + hidden=true
print("Train data")
print("text",np.array(x_train_22_T).shape)
print("gene",np.array(x_train_22_G).shape, x_train_22_G[0])
print("variation",np.array(x_train_22_V).shape, x_train_22_V[0])
print("classes",np.array(x_train_22_C).shape, x_train_22_C[0])
# + hidden=true
gen_data = global_utils.GenerateDataset(x_val_df, corpus_wordidx)
x_val_22_T, x_val_22_G, x_val_22_V, x_val_22_C = gen_data.generate_data(custom_unit_dict,
                                                                has_class=True,
                                                                add_start_end_tag=True)
del gen_data
# + hidden=true
print("Val data")
print("text",np.array(x_val_22_T).shape)
print("gene",np.array(x_val_22_G).shape, x_val_22_G[0])
print("variation",np.array(x_val_22_V).shape, x_val_22_V[0])
print("classes",np.array(x_val_22_C).shape, x_val_22_C[0])
# + [markdown] hidden=true
# ### format data
# + hidden=true
word_unknown_tag_idx = corpus_wordidx["<UNK>"]
char_unknown_tag_idx = global_utils.char_unknown_tag_idx
# + hidden=true
MAX_TEXT_LEN = 5000
# + hidden=true
# Pad/truncate each document to MAX_TEXT_LEN word ids (<UNK> as filler).
x_train_22_T = pad_sequences(x_train_22_T, maxlen=MAX_TEXT_LEN, value=word_unknown_tag_idx,
                             padding="post",truncating="post")
x_val_22_T = pad_sequences(x_val_22_T, maxlen=MAX_TEXT_LEN, value=word_unknown_tag_idx,
                           padding="post",truncating="post")
print(x_train_22_T.shape, x_val_22_T.shape)
# + hidden=true
MAX_GENE_LEN = 1
MAX_VAR_LEN = 4
x_train_22_G = pad_sequences(x_train_22_G, maxlen=MAX_GENE_LEN, value=word_unknown_tag_idx)
x_train_22_V = pad_sequences(x_train_22_V, maxlen=MAX_VAR_LEN, value=word_unknown_tag_idx)
x_val_22_G = pad_sequences(x_val_22_G, maxlen=MAX_GENE_LEN, value=word_unknown_tag_idx)
x_val_22_V = pad_sequences(x_val_22_V, maxlen=MAX_VAR_LEN, value=word_unknown_tag_idx)
print(x_train_22_G.shape, x_train_22_V.shape)
print(x_val_22_G.shape, x_val_22_V.shape)
# + [markdown] hidden=true
# keras np_utils.to_categorical expects zero index categorical variables
#
# https://github.com/fchollet/keras/issues/570
# + hidden=true
# Classes are 1-indexed; shift to 0-indexed before one-hot encoding.
x_train_22_C = np.array(x_train_22_C) - 1
x_val_22_C = np.array(x_val_22_C) - 1
# + hidden=true
x_train_22_C = np_utils.to_categorical(np.array(x_train_22_C), 9)
x_val_22_C = np_utils.to_categorical(np.array(x_val_22_C), 9)
print(x_train_22_C.shape, x_val_22_C.shape)
# + [markdown] heading_collapsed=true hidden=true
# ### test Data setup
# + hidden=true
gen_data = global_utils.GenerateDataset(test_df, corpus_wordidx)
x_test_22_T, x_test_22_G, x_test_22_V, _ = gen_data.generate_data(custom_unit_dict,
                                                                has_class=False,
                                                                add_start_end_tag=True)
del gen_data
# + hidden=true
print("Test data")
print("text",np.array(x_test_22_T).shape)
print("gene",np.array(x_test_22_G).shape, x_test_22_G[0])
print("variation",np.array(x_test_22_V).shape, x_test_22_V[0])
# + hidden=true
x_test_22_T = pad_sequences(x_test_22_T, maxlen=MAX_TEXT_LEN, value=word_unknown_tag_idx,
                            padding="post",truncating="post")
print(x_test_22_T.shape)
# + hidden=true
MAX_GENE_LEN = 1
MAX_VAR_LEN = 4
x_test_22_G = pad_sequences(x_test_22_G, maxlen=MAX_GENE_LEN, value=word_unknown_tag_idx)
x_test_22_V = pad_sequences(x_test_22_V, maxlen=MAX_VAR_LEN, value=word_unknown_tag_idx)
print(x_test_22_G.shape, x_test_22_V.shape)
# + [markdown] heading_collapsed=true
# ## T:text_chars
# + [markdown] heading_collapsed=true hidden=true
# ### generate data
# + hidden=true
# Dataset variant 33: documents as sequences of raw character ids.
custom_unit_dict = {
    "gene_unit" : "raw_chars",
    "variation_unit" : "raw_chars",
    # text transformed to sentences attribute
    "doc_unit" : "raw_chars",
    "doc_form" : "text",
    "divide_document" : "multiple_unit"
}
# + hidden=true
# %autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_train_df, corpus_wordidx)
x_train_33_T, x_train_33_G, x_train_33_V, x_train_33_C = gen_data.generate_data(custom_unit_dict,
                                                                has_class=True,
                                                                add_start_end_tag=True)
del gen_data
# + hidden=true
print("Train data")
print("text",np.array(x_train_33_T).shape, x_train_33_T[0])
print("gene",np.array(x_train_33_G).shape, x_train_33_G[0])
print("variation",np.array(x_train_33_V).shape, x_train_33_V[0])
print("classes",np.array(x_train_33_C).shape, x_train_33_C[0])
# + hidden=true
# %autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_val_df, corpus_wordidx)
x_val_33_T, x_val_33_G, x_val_33_V, x_val_33_C = gen_data.generate_data(custom_unit_dict,
                                                                has_class=True,
                                                                add_start_end_tag=True)
del gen_data
# + hidden=true
print("Val data")
print("text",np.array(x_val_33_T).shape, x_val_33_T[98])
print("gene",np.array(x_val_33_G).shape, x_val_33_G[0])
print("variation",np.array(x_val_33_V).shape, x_val_33_V[0])
print("classes",np.array(x_val_33_C).shape, x_val_33_C[0])
# + [markdown] hidden=true
# ### format data
# + hidden=true
word_unknown_tag_idx = corpus_wordidx["<UNK>"]
char_unknown_tag_idx = global_utils.char_unknown_tag_idx
# + hidden=true
MAX_CHAR_IN_SENT_LEN = 150
# + hidden=true
# Pad/truncate character sequences with the unknown-character id as filler.
x_train_33_T = pad_sequences(x_train_33_T, maxlen=MAX_CHAR_IN_SENT_LEN, value=char_unknown_tag_idx,
                             padding="post",truncating="post")
x_val_33_T = pad_sequences(x_val_33_T, maxlen=MAX_CHAR_IN_SENT_LEN, value=char_unknown_tag_idx,
                           padding="post",truncating="post")
print(x_train_33_T.shape, x_val_33_T.shape)
# + hidden=true
x_train_33_G = pad_sequences(x_train_33_G, maxlen=MAX_CHAR_IN_SENT_LEN, value=char_unknown_tag_idx)
x_train_33_V = pad_sequences(x_train_33_V, maxlen=MAX_CHAR_IN_SENT_LEN, value=char_unknown_tag_idx)
x_val_33_G = pad_sequences(x_val_33_G, maxlen=MAX_CHAR_IN_SENT_LEN, value=char_unknown_tag_idx)
x_val_33_V = pad_sequences(x_val_33_V, maxlen=MAX_CHAR_IN_SENT_LEN, value=char_unknown_tag_idx)
print(x_train_33_G.shape, x_train_33_V.shape)
print(x_val_33_G.shape, x_val_33_V.shape)
# + [markdown] hidden=true
# keras np_utils.to_categorical expects zero index categorical variables
#
# https://github.com/fchollet/keras/issues/570
# + hidden=true
# Classes are 1-indexed; shift to 0-indexed before one-hot encoding.
x_train_33_C = np.array(x_train_33_C) - 1
x_val_33_C = np.array(x_val_33_C) - 1
# + hidden=true
x_train_33_C = np_utils.to_categorical(np.array(x_train_33_C), 9)
x_val_33_C = np_utils.to_categorical(np.array(x_val_33_C), 9)
print(x_train_33_C.shape, x_val_33_C.shape)
# + [markdown] heading_collapsed=true
# ## T:text_sent_words
# + [markdown] hidden=true
# ### generate data
# + hidden=true
# Dataset variant 34: each document is a list of sentences, each a list of
# word ids (hierarchical representation for the rationale model).
custom_unit_dict = {
    "gene_unit" : "words",
    "variation_unit" : "words",
    # text transformed to sentences attribute
    "doc_unit" : "word_list",
    "doc_form" : "text",
    "divide_document" : "single_unit"
}
# + hidden=true
# %autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_train_df, corpus_wordidx)
x_train_34_T, x_train_34_G, x_train_34_V, x_train_34_C = gen_data.generate_data(custom_unit_dict,
                                                                has_class=True,
                                                                add_start_end_tag=True)
del gen_data
# + hidden=true
print("Train data")
print("text",np.array(x_train_34_T).shape, x_train_34_T[0][:1])
print("gene",np.array(x_train_34_G).shape, x_train_34_G[0])
print("variation",np.array(x_train_34_V).shape, x_train_34_V[0])
print("classes",np.array(x_train_34_C).shape, x_train_34_C[0])
# + hidden=true
# %autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_val_df, corpus_wordidx)
x_val_34_T, x_val_34_G, x_val_34_V, x_val_34_C = gen_data.generate_data(custom_unit_dict,
                                                                has_class=True,
                                                                add_start_end_tag=True)
del gen_data
# + hidden=true
print("Val data")
print("text",np.array(x_val_34_T).shape, x_val_34_T[98][:1])
print("gene",np.array(x_val_34_G).shape, x_val_34_G[0])
print("variation",np.array(x_val_34_V).shape, x_val_34_V[0])
print("classes",np.array(x_val_34_C).shape, x_val_34_C[0])
# + [markdown] hidden=true
# ### format data
# + hidden=true
word_unknown_tag_idx = corpus_wordidx["<UNK>"]
char_unknown_tag_idx = global_utils.char_unknown_tag_idx
# + hidden=true
MAX_DOC_LEN = 500 # no of sentences in a document
MAX_SENT_LEN = 80 # no of words in a sentence
# + hidden=true
# Clip each document to MAX_DOC_LEN sentences, pad short documents with
# all-<UNK> sentences, then pad/clip every sentence to MAX_SENT_LEN words.
for doc_i, doc in enumerate(x_train_34_T):
    x_train_34_T[doc_i] = x_train_34_T[doc_i][:MAX_DOC_LEN]
    # padding sentences
    if len(x_train_34_T[doc_i]) < MAX_DOC_LEN:
        for not_used_i in range(0,MAX_DOC_LEN - len(x_train_34_T[doc_i])):
            x_train_34_T[doc_i].append([word_unknown_tag_idx]*MAX_SENT_LEN)
    # padding words
    x_train_34_T[doc_i] = pad_sequences(x_train_34_T[doc_i], maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx)
for doc_i, doc in enumerate(x_val_34_T):
    x_val_34_T[doc_i] = x_val_34_T[doc_i][:MAX_DOC_LEN]
    # padding sentences
    if len(x_val_34_T[doc_i]) < MAX_DOC_LEN:
        for not_used_i in range(0,MAX_DOC_LEN - len(x_val_34_T[doc_i])):
            x_val_34_T[doc_i].append([word_unknown_tag_idx]*MAX_SENT_LEN)
    # padding words
    x_val_34_T[doc_i] = pad_sequences(x_val_34_T[doc_i], maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx)
x_train_34_T = np.array(x_train_34_T)
x_val_34_T = np.array(x_val_34_T)
# + hidden=true
print(x_val_34_T.shape, x_train_34_T.shape)
# + hidden=true
x_train_34_G = pad_sequences(x_train_34_G, maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx)
x_train_34_V = pad_sequences(x_train_34_V, maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx)
x_val_34_G = pad_sequences(x_val_34_G, maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx)
x_val_34_V = pad_sequences(x_val_34_V, maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx)
print(x_train_34_G.shape, x_train_34_V.shape)
print(x_val_34_G.shape, x_val_34_V.shape)
# + [markdown] hidden=true
# keras np_utils.to_categorical expects zero index categorical variables
#
# https://github.com/fchollet/keras/issues/570
# + hidden=true
# Classes are 1-indexed; shift to 0-indexed before one-hot encoding.
x_train_34_C = np.array(x_train_34_C) - 1
x_val_34_C = np.array(x_val_34_C) - 1
# + hidden=true
x_train_34_C = np_utils.to_categorical(np.array(x_train_34_C), 9)
x_val_34_C = np_utils.to_categorical(np.array(x_val_34_C), 9)
print(x_train_34_C.shape, x_val_34_C.shape)
# + [markdown] hidden=true
# Need to form 3 dimensional target data for rationale model training
# + hidden=true
# Repeat the document-level one-hot target across MAX_DOC_LEN sentences so
# each sentence position carries the document's label.
temp = (x_train_34_C.shape[0],1,x_train_34_C.shape[1])
x_train_34_C_sent = np.repeat(x_train_34_C.reshape(temp[0],temp[1],temp[2]), MAX_DOC_LEN, axis=1)
#sentence test targets
temp = (x_val_34_C.shape[0],1,x_val_34_C.shape[1])
x_val_34_C_sent = np.repeat(x_val_34_C.reshape(temp[0],temp[1],temp[2]), MAX_DOC_LEN, axis=1)
print(x_train_34_C_sent.shape, x_val_34_C_sent.shape)
# -
# ## Embedding layer
# ### for words
WORD_EMB_SIZE = 200
# %autoreload
import global_utils
# Load 200-d fastText skip-gram vectors (50-epoch run) aligned to the corpus vocabulary.
ft_file_path = "/home/bicepjai/Projects/Deep-Survey-Text-Classification/data_prep/processed/stage1/pretrained_word_vectors/ft_sg_200d_50e.vec"
trained_embeddings1 = global_utils.get_embeddings_from_ft(ft_file_path, WORD_EMB_SIZE, corpus_vocab_list)
trained_embeddings1.shape
# %autoreload
import global_utils
# A second embedding matrix from a 20-epoch fastText run.
ft_file_path = "/home/bicepjai/Projects/Deep-Survey-Text-Classification/data_prep/processed/stage1/pretrained_word_vectors/ft_sg_200d_20e.vec"
trained_embeddings2 = global_utils.get_embeddings_from_ft(ft_file_path, WORD_EMB_SIZE, corpus_vocab_list)
trained_embeddings2.shape
# + [markdown] heading_collapsed=true
# ### for characters
# + hidden=true
CHAR_EMB_SIZE = 64
# + hidden=true
# Character embeddings are randomly initialised (no pretraining).
char_embeddings = np.random.randn(global_utils.CHAR_ALPHABETS_LEN, CHAR_EMB_SIZE)
char_embeddings.shape
# -
# # Models
# ## prep
# +
# %autoreload
import tensorflow.contrib.keras as keras
import tensorflow as tf
from keras import backend as K
from keras.engine import Layer, InputSpec, InputLayer
from keras.models import Model, Sequential
from keras.layers import Dropout, Embedding, concatenate
from keras.layers import Conv1D, MaxPool1D, Conv2D, MaxPool2D, ZeroPadding1D
from keras.layers import Dense, Input, Flatten, BatchNormalization
from keras.layers import Concatenate, Dot, Merge, Multiply, RepeatVector
from keras.layers import Bidirectional, TimeDistributed
from keras.layers import SimpleRNN, LSTM, GRU, Lambda, Permute
from keras.layers.core import Reshape, Activation
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint,EarlyStopping,TensorBoard
from keras.constraints import maxnorm
from keras.regularizers import l2
from paper_2_cnn_modelling_sentences.utils import KMaxPooling, Folding
# -
# ## model_1: paper
# refer https://github.com/bwallace/rationale-CNN
# +
# Build the MVCNN-style graph: two embedding "channels" (50-epoch and
# 20-epoch fastText vectors), each passed through two stacked
# conv + k-max-pooling stages, then merged into a dense classifier.
text_seq_input = Input(shape=(MAX_SENT_LEN,), dtype='int32')
text_embedding1 = Embedding(vocab_size, WORD_EMB_SIZE, input_length=MAX_SENT_LEN,
                            weights=[trained_embeddings1], trainable=True)(text_seq_input)
text_embedding2 = Embedding(vocab_size, WORD_EMB_SIZE, input_length=MAX_SENT_LEN,
                            weights=[trained_embeddings2], trainable=True)(text_seq_input)
k_top = 4            # k for the final k-max pooling stage
filter_sizes = [3,5] # convolution widths used in both stages
# Stage 1: per embedding channel, conv with each filter size then keep the
# top-30 activations per feature map (k-max pooling over the time axis).
layer_1 = []
for text_embedding in [text_embedding1, text_embedding2]:
    conv_pools = []
    for filter_size in filter_sizes:
        # "wide" convolution: pad by filter_size-1 on both sides
        l_zero = ZeroPadding1D((filter_size-1,filter_size-1))(text_embedding)
        l_conv = Conv1D(filters=128, kernel_size=filter_size, padding='same', activation='tanh')(l_zero)
        l_pool = KMaxPooling(k=30, axis=1)(l_conv)
        conv_pools.append((filter_size,l_pool))
    layer_1.append(conv_pools)
# Stage 2: convolve the stage-1 feature maps again (same filter size as the
# branch they came from) and keep only the top k_top activations.
last_layer = []
for layer in layer_1: # no of embeddings used
    for (filter_size, input_feature_maps) in layer:
        l_zero = ZeroPadding1D((filter_size-1,filter_size-1))(input_feature_maps)
        l_conv = Conv1D(filters=128, kernel_size=filter_size, padding='same', activation='tanh')(l_zero)
        l_pool = KMaxPooling(k=k_top, axis=1)(l_conv)
        last_layer.append(l_pool)
# Merge all branches and classify into the 9 classes.
l_merge = Concatenate(axis=1)(last_layer)
l_flat = Flatten()(l_merge)
l_dense = Dense(128, activation='relu')(l_flat)
l_out = Dense(9, activation='softmax')(l_dense)
model_1 = Model(inputs=[text_seq_input], outputs=l_out)
# -
# #### training
# Compile and train model_1, checkpointing the best validation accuracy.
model_1.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['categorical_accuracy'])
model_1.summary()
# Clear old TensorBoard logs before a fresh run.
# %rm -rf ./tb_graphs/*
tb_callback = keras.callbacks.TensorBoard(log_dir='./tb_graphs', histogram_freq=0, write_graph=True, write_images=True)
# Keep only the weights with the best validation categorical accuracy.
checkpointer = ModelCheckpoint(filepath="model_1_weights.hdf5",
                               verbose=1,
                               monitor="val_categorical_accuracy",
                               save_best_only=True,
                               mode="max")
with tf.Session() as sess:
    # model = keras.models.load_model('current_model.h5')
    sess.run(tf.global_variables_initializer())
    # Resume from a previous checkpoint when one exists.
    try:
        model_1.load_weights("model_1_weights.hdf5")
    except IOError as ioe:
        print("no checkpoints available !")
    model_1.fit(x_train_21_T, x_train_21_C,
                validation_data=(x_val_21_T, x_val_21_C),
                epochs=5, batch_size=1024, shuffle=True,
                callbacks=[tb_callback,checkpointer])
    #model.save('current_sent_model.h5')
|
deep_models/paper_06_mvcnn/models.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 離群值處理
# #### Detect
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
data_path = './data/house_train.csv'
df_train = pd.read_csv(data_path)
# Column under study: total transferred land area (square meters).
ori_series = df_train['土地移轉總面積(平方公尺)']
display(ori_series)
print(len(ori_series))
# +
# Tukey's fences: flag values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR].
qt1 = ori_series.quantile(q=0.25)
qt3 = ori_series.quantile(q=0.75)
iqr = qt3-qt1
display(ori_series[ori_series<(qt1 - 1.5*iqr)])  # low outliers
display(ori_series[ori_series>(qt3 + 1.5*iqr)])  # high outliers
plt.boxplot(ori_series)
plt.show()
# -
# #### drop outliers
# Keep only the rows inside the 1.5*IQR fences.
# BUG FIX: the original combined the two conditions with the Python `and`
# operator between two lists; `and` returns the second operand wholesale
# (any non-empty list is truthy), so the lower-bound condition was silently
# ignored.  Use the element-wise `&` on the boolean Series instead.
mask = ((ori_series > (qt1 - 1.5*iqr)) & (ori_series < (qt3 + 1.5*iqr))).tolist()
series_drop_outliers = ori_series[mask]
print(len(series_drop_outliers))
plt.boxplot(series_drop_outliers)
plt.show()
# #### fill outliers
# Winsorize: cap values above the upper fence at the fence itself.
series_fill = ori_series.copy()
# NOTE(review): only the upper fence is clipped here; values below
# qt1 - 1.5*iqr are left untouched — confirm that is intended.
series_fill[series_fill>(qt3 + 1.5*iqr)] = qt3 + 1.5*iqr
print(len(series_fill))
plt.boxplot(series_fill)
plt.show()
# # Normalize
# #### MinMaxScaler
# +
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import numpy as np
# Toy matrix: 3 samples x 3 features.
X = np.array([[ 1., -1., 2.],
              [ 2., 0., 0.],
              [ 0., 1., -1.]])
print(f"min of X: {X.min(axis=0)}")
print(f"max of X: {X.max(axis=0)}\n")
# Rescale each feature to [0, 1] using the per-column min and max.
min_max_scaler = MinMaxScaler().fit(X)
x_minmax_sk = min_max_scaler.transform(X)
print(x_minmax_sk)
# -
# #### StandardScaler
# +
X = np.array([[ 1., -1., 2.],
              [ 2., 0., 0.],
              [ 0., 1., -1.]])
print(f"mean of X: {X.mean(axis=0)}")
print(f"std of X: {X.std(axis=0)}\n")
# Standardize each feature to zero mean / unit variance.
scaler = StandardScaler().fit(X)
# apply mean and std to standardize data
x_sc_sk = scaler.transform(X)
print(f"mean of scaler: {scaler.mean_}")
print(f"std of scaler: {scaler.scale_}\n")
print(x_sc_sk)
# -
# # 資料合併
# #### pd.concat()
# +
df1 = pd.DataFrame([['A1','B1'], ['A2','B2']],
                   columns=['A', 'B'],
                   index=[1,2])
df2 = pd.DataFrame([['A3','B3'], ['A4','B4']],
                   columns=['A', 'B'],
                   index=[1,2])
df3 = pd.DataFrame([['B5','C5'], ['B6','C6']],
                   columns=['B', 'C'],
                   index=[5,6])
df4 = pd.concat([df1, df2])            # stack rows; duplicate index labels kept
df5 = pd.concat([df1, df2],axis = 1)   # align on index, append columns side by side
df6 = pd.concat([df1, df3])            # different columns -> NaN where missing
display(df1)
display(df2)
display(df3)
display(df4)
display(df5)
display(df6)
# -
# #### pd.merge()
df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
                    'B': ['B0', 'B1', 'B2', 'B3'],
                    'C': ['C0', 'C1', 'C2', 'C3'],
                    'D': ['D0', 'D1', 'D2', 'D3']},
                   index=[0, 1, 2, 3])
df2 = pd.DataFrame({'B': ['B2', 'B3', 'B6', 'B7'],
                    'D': ['D2', 'D10000', 'D6', 'D7'],
                    'F': ['F2', 'F3', 'F6', 'F7']},
                   index=[2, 3, 6, 7])
display(df1)
display(df2)
# With no `on=`, merge joins on ALL shared columns (here B and D).
result_inner = pd.merge(df1, df2, how='inner')
result_inner_B = pd.merge(df1, df2, how='inner', on = 'B')
result_outer = pd.merge(df1, df2, how='outer')
display(result_inner)
display(result_inner_B)
display(result_outer)
# #### groupby()
# Per-district ('鄉鎮市區') mean of every numeric column.
df_train.groupby('鄉鎮市區').mean()
# # 補充 (Supplement):
df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
                    'B': ['B0', 'B1', 'B2', 'B3'],
                    'C': ['C0', 'C1', 'C2', 'C3'],
                    'D': ['D0', 'D1', 'D2', 'D3']},
                   index=[0, 1, 2, 3])
# melt() to long format, then pivot() back to wide using a rebuilt index.
df_melt = df1.melt()
display(df_melt)
df_pivot=df_melt.set_index(pd.Index([0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]))
df_pivot=df_pivot.pivot(columns='variable')
display(df_pivot)
|
.ipynb_checkpoints/mod04-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Passing a function as an argument to another function
def l_factorial(n):
    """Return n! (n factorial), computed recursively; n must be a
    non-negative integer.

    PEP 8 (E731) recommends `def` over assigning a lambda to a name:
    behaviour is identical but tracebacks and repr() show the real name.
    """
    return 1 if n == 0 else n * l_factorial(n - 1)
# ## Timing
#
# ### The procedural way, going line by line
#
# Factorial is a recursive and hence time-consuming operation. Let's see how long it takes.
# +
import time
# Manual timing: record wall-clock time before and after the call.
t0 = time.time()
l_factorial(1000)
t1 = time.time()
print('Took: %.15f s' % (t1-t0))
# -
# ### The functional way, with a wrapper function
#
# But a better way is to write a wrapper function that times every function that's passed onto it!
# +
def timer(fnc, *args):
    """Return the wall-clock time in seconds taken by one call fnc(*args).

    Backward compatible generalization: the original single-argument form
    timer(f, x) still works, and any number of positional arguments is now
    forwarded to fnc.  The function's return value is discarded.
    """
    t0 = time.time()
    fnc(*args)
    t1 = time.time()
    return t1-t0
print('Took: %.5f s' % timer(l_factorial, 900))  # time one evaluation of 900!
# -
# ### The fully functional way, with lambda wrapper functions
#
# We can even turn `timer()` into a lambda function, although it takes a pretty functional state of mind to do so!
# +
# l_timestamp captures (start time, result of fnc(arg), stop time);
# l_diff discards the result and returns the elapsed seconds;
# l_timer composes the two via tuple unpacking.
l_timestamp = lambda fnc, arg: (time.time(), fnc(arg), time.time())
l_diff = lambda t0, retval, t1: t1-t0
l_timer = lambda fnc, arg: l_diff(*l_timestamp(fnc, arg))
print('Took: %.5f s' % l_timer(l_factorial, 900))
|
Functional_Thinking/Lab/26A-High_Order_Function.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Readme
#
# ---
#
# **Advanced Lane Finding Project**
#
# The goals / steps of this project are the following:
#
# * Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
# * Apply a distortion correction to raw images.
# * Use color transforms, gradients, etc., to create a thresholded binary image.
# * Apply a perspective transform to rectify binary image ("birds-eye view").
# * Detect lane pixels and fit to find the lane boundary.
# * Determine the curvature of the lane and vehicle position with respect to center.
# * Warp the detected lane boundaries back onto the original image.
# * Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.
#
# [//]: # (Image References)
#
#
# ## [Rubric](https://review.udacity.com/#!/rubrics/571/view) Points
#
# ### Here I will consider the rubric points individually and describe how I addressed each point in my implementation.
#
# ---
#
# ### README
#
# All the codes are in Advanced_Lane_Line.ipynb.
#
# ## 1. Camera calibration
#
# #### 1.1 Get the images for calibation and test
#
# In this step, I read the chess board images in camera_cal folder to prepare for camera calibration.
#
# #### 1.2 Compute calibration
#
# I used the images and opencv function, mainly 'cv2.findChessboardCorners()' and 'cv2.calibrateCamera()' function to get the matrix used for calibration.
#
# ## 2. Undistortion
#
# Here, I test the chess board image and image in test_images folder.
# The following image is the test image after undistorted.
#
# 
#
# 
#
#
# ## 3. Color/gradient threshold
#
# There are some features of lane line that can be used for lane line detection.
# Here, I combined the color infomation and gradient to distiguish it from background.
# To reduce the light etc. effect, I used HLS color space and select the s channel.
#
# 
#
# ## 4. Perspective transform
#
# To get a bird view, I select the following source points and destination points to get the perspective transform matix.
#
# src points are [718,468],[1046,684],[248,684],[568,468]
#
# dst points are [920,0], [920,720], [220,720],[220,0]
#
# ## 5.Find the lines
#
# The function 'fit_line()' is defined to find the lane lines. It uses the histogram information of the bird's-eye-view image, which also serves as the base for the sliding-window search.
#
# 
#
# ## 6. Calculate Curvature
#
# After the lines are found, a second-order polynomial curve is fitted to each lane line, and the radius of curvature of the lane (in both pixel space and real-world space) can be calculated.
#
# ## 7. Process Frame
#
# In this step, a function that contained the methods in previous steps are defined, which used in the video processing.
#
# 
#
# The output video is 'test_videos_output/advanced_lane_line.mp4'
#
|
.ipynb_checkpoints/Writeup-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="-pNWScUokvZ8"
# # Challenge: Analizando títulos de Netflix
# + [markdown] id="04ukMAXTkwVg"
# ¡Felicidades! Después de un largo pero divertido proceso de reclutamiento, has sido contratado como Analytic Engineer Jr. en Netflix dentro del equipo remoto de Marketing Data Science 🤓
#
# Tus primeras tareas consisten en explorar la base de datos, obteniendo información relevante para plantearte preguntas más complejas próximamente.
#
# En este ejercicio, te conectarás a una base de datos donde se alojan 100 títulos publicados en Netflix seleccionados aleatoriamente. ¡Esa muestra debería bastar para demostrar tus habilidades! 😉
# + [markdown] id="VEob4ygnwQrB"
# ## Preparación de los datos
#
# Antes de comenzar, jugarás a ser el ingeniero o ingeniera de datos que formó la base de datos en primer lugar. Sigue estos pasos para crear una nueva base de datos e importar un CSV en una tabla nueva.
# + [markdown] id="0rKp_z6t701h"
# 1. Conéctate a MySQL Server: abre MySQL Workbench y haz clic en la conexión a tu servidor local. Ingresa tu usuario y contraseña.
#
# ¿Aún no tienes hecha tu conexión? Averigua cómo hacerla [aquí](https://dev.mysql.com/doc/workbench/en/wb-mysql-connections-new.html)
# + [markdown] id="aDweLGvx8tnk"
# 2. Una vez conectado, acceda al menú de schemas del lado izquierdo:
#
# 
# + [markdown] id="0aIjj9muAAjq"
# 3. Una vez en la sección de Schemas, has clic en un espacio en blanco y selecciona 'Create Schema...'
#
# 
#
# 4. Del lado derecho verás algo como lo siguiente. Allí, deberás ingresar el nombre 'master' como el nombre de tu Schema.
# 
#
# 5. Sigue los pasos de la ventana que aparecerá hasta que veas la siguiente pantalla:
# 
#
# 6. Verás un nuevo ícono en Schemas con el nombre de tu schema, haz clic derecho sobre él y selecciona "Table Data Import Wizard"
# 
#
# 7. Ha llegado la hora de descargar el siguiente [archivo](https://drive.google.com/file/d/1QiyF1i0AlhbbijNh-7y9ecL07RYYE442/view?usp=sharing) en tu computadora. Una vez que lo hayas hecho, haz clic en "Browse" y selecciónalo.
# 
#
# 8. En esta pantalla solo haz clic en "Next"
# 
#
# 9. En esta pantalla, verifica que la pequeña muestra de la tabla se muestre correctamente y haz clic en "Next"
# 
#
# 10. Finalmente, clic en "Next" a esta pantalla y espera a que se importen los datos. Al terminar haz clic en "Finish".
# 
#
# 11. Ahora, en el editor de scripts del lado derecho, escribe:
#
# ```
# use master;
# show tables;
# ```
#
# Deberías ver el siguiente resultado:
#
# 
# + [markdown] id="nE4naQiA3wSW"
# ## Ahora sí, a explorar esa base de datos 🤠
# + [markdown] id="UyJcYPiikwj5"
# 1. Selecciona las columnas type, title, director y country. Devuélvelas en una misma tabla.
#
# Resultado esperado:
#
# 
# + [markdown] id="IFr6CSee-KZO"
# 2. En la columna 'type' parece haber dos categorías 'Movies' y 'TV Shows'. ¿Cuántos títulos tenemos de cada tipo?
#
# Resultado esperado:
#
# 
# + [markdown] id="6cCfUJU--K28"
# 3. ¿Cuál es el país con la película más reciente en la base de datos (año de lanzamiento más reciente)?
#
# Resultado esperado:
#
# 
# + [markdown] id="V-Z2JuYt-Lbt"
# 4. ¿Cuál es el título y el director de la película más antigua en la base de datos?
#
# Resultado esperado:
#
# 
# + [markdown] id="msDEW7rVzfas"
# 5. ¿Cuáles series tiene solo 1 temporada (1 Season)? Queremos conocer solo el título, cast y rating.
#
# Resultado esperado:
#
# 
# + [markdown] id="NoNeaLK-zf-c"
# 6. Ha sido un día largo y queremos ver algo que nos haga reír. ¿Acaso habrá algún título que solo esté en la categoría 'Comedies' y nada más?
#
# Resultado esperado:
#
# 
# + [markdown] id="3i405lryzgM-"
# 7. Juguemos a ser un sistema de recomendación: A nuestro usuario le encantan las películas, pero solo las más recientes. Así que debes obtener el título, duración, descripción y año de lanzamiento, pero solo de las primeras 30 películas más recientes (debes ordenar de mayor a menor tus resultados).
#
# Tip: Para obtener solo 30 resultados, puedes agregar `LIMIT 30` al final de tu consulta.
#
# Resultado esperado:
#
# 
|
0. Herramientas para la Ciencia de Datos/5. Intro a SQL/Challenge.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Taylor problem 5.50
#
# last revised: 21-Jan-2019 by <NAME> [<EMAIL>]
#
# Here we are exploring the Fourier series for a waveform defined to be odd about the origin, so $f(-t) = -f(t)$, with period $\tau$. That means that the integrand for the $a_m$ coefficients is odd and so all of the corresponding integrals vanish.
#
# The particular wave of interest here is a sawtooth, such that in the interval $-\tau/2 \leq t \leq \tau/2$, the function takes the form:
#
# $\newcommand{\fmax}{f_{\textrm{max}}}$
# $\begin{align}
# f(t) = \left\{ \begin{array}{ll}
# \fmax(t/\tau) & t < 0 \\
# \fmax(t/\tau) & t > 0
# \end{array}
# \right.
# \end{align}$
#
# (we wrote it this way so it looks like the function for problem 5.49).
#
#
# As already note, the $a_m$ coefficients are zero, so we only calculate the $b_m$ coefficients. Here $\omega \equiv 2\pi/\tau$. The result is:
#
#
# $\begin{align}
# b_m = \frac{2}{\tau} \int_{-\tau/2}^{\tau/2} \sin(m\omega t) f(t)\, dt =
# % 2 \fmax \int_0^1 \sin(m\pi t) t\, dt
# % &= - \frac{2\fmax}{(m\pi)^2)}\left[\sin(m\pi t)\right]^1_0 \\
# % =
# \left\{
# \begin{array}{ll}
# -\frac{ \fmax}{m\pi} & [m\ \mbox{even}] \\
# \frac{ \fmax}{m\pi} & [m\ \mbox{odd}]
# \end{array}
# \right.
# \end{align}$
#
# Note that the coefficients are independent of $\tau$. Is this a general result?
# ## Define the functions we'll need
import numpy as np
import sys
import matplotlib.pyplot as plt
from scipy.integrate import quad
# We start by defining a function for the sawtooth wave at any $t$. The definition here is for a scalar function. That is, it won't work to call it with $t$ and array of time points, unlike other functions we have defined. It is possible to make it work, but then the function will be much less clear. When we need to evaluate it for all elements of an array, we will use the construction: `np.array([sawtooth(t) for t in t_pts])` for the array `t_pts`.
def sawtooth(t, tau, f_max=1):
    """Return the sawtooth wave of amplitude f_max, odd about the origin,
    at time t.  The period is tau: on -tau/2 <= t < tau/2 the wave is
    f(t) = f_max * t / tau, repeated periodically outside that interval.

    BUG FIX: the original ignored the f_max argument entirely and its
    branching on np.floor(t) % 2 hard-wired the period to 2 regardless of
    tau.  This version implements the waveform documented above; it also
    works element-wise on numpy arrays, not just scalars.
    """
    # Shift t into the principal interval [-tau/2, tau/2).
    t_adjust = t - tau * np.floor(t / tau + 1/2)
    return f_max * t_adjust / tau
# Now a function that creates an array of Fourier coefficients for the sawtooth wave up to order N_max.
def sawtooth_coeffs_by_hand(N_max, tau=2., f_max=1.):
    """Fourier coefficients of the odd sawtooth wave, calculated by hand
    and loaded into an (N_max, 2) array of (a_n, b_n) pairs for
    n = 0 .. N_max-1.  All a_n vanish because the wave is odd; from the
    analytic result in the text, b_n = -f_max/(n*pi) for even n and
    b_n = +f_max/(n*pi) for odd n.  The coefficients are independent of
    tau, but tau is accepted for consistency with other functions.

    BUG FIX: the original set a_n = 4*f_max*(n*pi)**2 and b_n = 0, which
    contradicts the derivation in the markdown above (its commented-out
    code also multiplied by n*pi instead of dividing).
    """
    coeffs_array = [(0., 0.)]  # a_0 and b_0
    for n in np.arange(1, N_max, 1):
        if (n % 2) == 0:   # for even n
            b_n = -f_max / (n * np.pi)
        else:              # for odd n
            b_n = f_max / (n * np.pi)
        coeffs_array.append((0., b_n))
    return np.array(coeffs_array)  # convert to a numpy array
# We would like a general way to construct the away of Fourier coefficients given any periodic function. Our first pass at that uses a class definition and the scipy integration function quad.
class FourierSeries():
    """
    Fourier series class: finds the (a_n, b_n) coefficients of a periodic
    function with period tau up to a specified order by numerical
    integration.

    Assume these imports:
        from scipy.integrate import quad
        import numpy as np

    BUG FIX: every integration method used the module-level globals `tau`
    and `N_max` instead of the instance attributes, so an instance built
    with a different period silently integrated over the wrong interval.
    All methods now use self.tau / self.N_max.
    """
    def __init__(self,
                 function,
                 tau=2,
                 N_max=10
                ):
        """
        function : callable f(t, tau) defining the waveform.
        tau      : period of the waveform.
        N_max    : highest order to compute (non-negative integer).
        """
        self.function = function
        self.tau = tau
        self.omega = 2. * np.pi / tau
        # Validate N_max up front.  The original `try: N_max / except N_max < 0:`
        # was a no-op (an `except <bool>:` clause never matches an exception).
        if not isinstance(N_max, (int, np.integer)) or N_max < 0:
            raise ValueError(f'N_max must be a non-negative integer, got {N_max!r}')
        self.N_max = N_max

    def a0_calc(self):
        """Calculate the constant Fourier coefficient by integration."""
        answer, error = quad(self.function, -self.tau/2., self.tau/2.,
                             args=(self.tau,))
        return (1./self.tau) * answer

    def an_integrand(self, t, n):
        """Integrand for the nth cosine coefficient."""
        return self.function(t, self.tau) * np.cos(n * self.omega * t)

    def an_calc(self, n):
        """Calculate the nth cosine coefficient (n > 0)."""
        # note comma after n in args
        answer, error = quad(self.an_integrand, -self.tau/2., self.tau/2.,
                             args=(n,))
        return (2./self.tau) * answer

    def bn_integrand(self, t, n):
        """Integrand for the nth sine coefficient."""
        return self.function(t, self.tau) * np.sin(n * self.omega * t)

    def bn_calc(self, n):
        """Calculate the nth sine coefficient (n > 0)."""
        answer, error = quad(self.bn_integrand, -self.tau/2., self.tau/2.,
                             args=(n,))
        return (2./self.tau) * answer

    def coeffs_upto_Nmax(self):
        """Return an (N_max, 2) array of (a_n, b_n) pairs for n = 0..N_max-1."""
        coeffs_array = [(self.a0_calc(), 0)]  # a_0 and b_0
        for n in np.arange(1, self.N_max, 1):
            a_n = self.an_calc(n)
            b_n = self.bn_calc(n)
            coeffs_array.append((a_n, b_n))  # append a tuple of coefficients
        return np.array(coeffs_array)  # convert to a numpy array
# Finally, we need a function that can take as input an array of t values and an array of Fourier coefficients and return the function at those t values with terms up to order N_max.
# +
def Fourier_reconstruct(t_pts, coeffs_array, tau, N_max):
    """Evaluate the Fourier series at t_pts, keeping terms n = 0 .. N_max.

    coeffs_array holds (a_n, b_n) pairs and tau is the period.
    """
    omega = 2. * np.pi / tau
    kept = coeffs_array[:N_max + 1]
    terms = (a_n * np.cos(k * omega * t_pts) + b_n * np.sin(k * omega * t_pts)
             for k, (a_n, b_n) in enumerate(kept))
    return sum(terms)
# -
# ## Problem 5.50
#
# Ok, now we can do problem 5.49. Calculate the coefficients both ways.
# +
# Compute the coefficients twice: analytically and by numerical quadrature.
N_max = 20
tau = 2.
f_max = 1.
coeffs_by_hand = sawtooth_coeffs_by_hand(N_max, tau, f_max)
fs = FourierSeries(sawtooth, tau, N_max)
coeffs_by_quad = fs.coeffs_upto_Nmax()
# -
# Let's check that the exact and numerical calculation of the coefficients agree.
#
# (Note the space in the formats, e.g., `{a1: .6f}`. This means to leave an extra space for a positive number so that it aligns at the decimal point with negative numbers.)
print(' n a_exact a_quad b_exact b_quad')
# Walk both coefficient arrays in lockstep, row by row.
for n, ((a1,b1), (a2,b2)) in enumerate(zip(coeffs_by_hand,
                                           coeffs_by_quad)):
    print(f'{n:2d} {a1: .6f} {a2: .6f} {b1: .6f} {b2: .6f}')
# Make the comparison plot requested: N_max = 2 vs. N_max = 6.
# +
# Reconstruct the wave from the quadrature coefficients at two truncations.
t_pts = np.arange(-2., 6., .01)
f_pts_2 = Fourier_reconstruct(t_pts, coeffs_by_quad, tau, 2)
f_pts_6 = Fourier_reconstruct(t_pts, coeffs_by_quad, tau, 6)
# Python way to evaluate the sawtooth function at an array of points:
#   * np.array creates a numpy array;
#   * note the []s around the inner statement;
#   * sawtooth(t) for t in t_pts
#       means step through each element of t_pts, call it t, and
#       evaluate sawtooth at that t.
#   * This is called a list comprehension. There are more compact ways,
#      but this is clear and easy to debug.
sawtooth_t_pts = np.array([sawtooth(t, tau, f_max) for t in t_pts])
# +
# Side-by-side comparison of the N = 2 and N = 6 truncations vs. the exact wave.
fig_1 = plt.figure(figsize=(10,5))
ax_1 = fig_1.add_subplot(1,2,1)
ax_1.plot(t_pts, f_pts_2, label='N = 2', color='blue')
ax_1.plot(t_pts, sawtooth_t_pts, label='exact', color='red')
ax_1.set_xlim(-1.1,4.1)
ax_1.set_xlabel('t')
ax_1.set_ylabel('f(t)')
ax_1.set_title('N = 2')
ax_1.legend()
ax_2 = fig_1.add_subplot(1,2,2)
ax_2.plot(t_pts, f_pts_6, label='N = 6', color='blue')
ax_2.plot(t_pts, sawtooth_t_pts, label='exact', color='red')
ax_2.set_xlim(-1.1,4.1)
ax_2.set_xlabel('t')
ax_2.set_ylabel('f(t)')
ax_2.set_title('N = 6')
ax_2.legend();
fig_1.tight_layout()
fig_1.savefig('problem_5.50.png')
# -
|
2020_week_2/Taylor_problem_5.50_CDLCopy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Implementing Adaline Model
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
# # Loading Iris Data set
column = ['sepal length', 'sepal width', 'petal length', 'petal width', 'class']
df = pd.read_csv('iris.data', names = column, header = None)
df.info()
# For easiness of using data we will select 2 features, one is sepal length and one is petal length and 2 classes only
# First 100 rows cover the two linearly separable species.
X = df.iloc[:100,[0,2]].values
y = df.iloc[:100,-1].values
np.unique(y)
# Encode the labels as +1 (setosa) / -1 (versicolor).
y = np.where(y == 'Iris-setosa',1,-1)
# # Model
# Adaline is a improvement on perceptron, it uses activation function to update weight rather than the threshold function, Adaline improves over unit function in a way that it uses cost function which is differentiable and optimizable because the cost function use is convex.
# +
class Adaline:
    """ADAptive LInear NEuron classifier trained with batch gradient
    descent on the sum-of-squared-errors cost.

    epoch         : number of passes over the training set.
    learning_rate : gradient-descent step size.
    random_state  : seed for the small random weight initialisation.
    After fit(), w_ holds [bias, weights...] and costs_ the per-epoch cost.
    """
    def __init__(self, epoch = 50, learning_rate = 0.001, random_state = 42):
        self.epoch = epoch
        self.learning_rate = learning_rate
        self.random_state = random_state

    def net_input(self,X):
        """Weighted sum of the inputs plus the bias term."""
        return np.dot(X,self.w_[1:]) + self.w_[0]

    def activation(self,X):
        """Identity activation f(x) = x (Adaline's linear activation)."""
        return X

    def predict(self,X):
        """Threshold the activated net input at zero -> class +1 or -1."""
        return np.where(self.activation(self.net_input(X)) >= 0.0, 1, -1)

    def fit(self,X,y):
        """Run batch gradient descent for self.epoch passes; returns self."""
        rng = np.random.RandomState(self.random_state)
        self.w_ = rng.normal(loc = 0.0, scale = 0.001, size = 1 + X.shape[1])
        self.costs_ = []  # cost recorded after every epoch
        for _ in range(self.epoch):
            # Residuals under the identity activation.
            deviations = y - self.activation(self.net_input(X))
            # One full-batch gradient step for weights and bias.
            self.w_[1:] += self.learning_rate * X.T.dot(deviations)
            self.w_[0] += self.learning_rate * deviations.sum()
            self.costs_.append((deviations ** 2).sum() / 2.0)
        return self
# -
# # Training our model
# we will use two learning rate 0.01, and 0.0001, learning rate finding is quite experimental.
# +
# Compare cost curves for two learning rates; the log10 scale makes the
# divergence of the too-large rate visible.
fig, ax = plt.subplots(nrows = 1, ncols = 2 , figsize = (10,4))
ada1 = Adaline(learning_rate=0.01, epoch = 10).fit(X,y)
ax[0].plot(range(1,len(ada1.costs_)+1), np.log10(ada1.costs_), marker = 'o')
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('log(Sum of square Errors)')
ax[0].set_title('Adaline - Learning rate 0.01')
ada2 = Adaline(learning_rate = 0.0001, epoch = 10).fit(X,y)
ax[1].plot(range(1,len(ada2.costs_)+1), np.log10(ada2.costs_), marker = 'o')
ax[1].set_xlabel('Epoch')
ax[1].set_ylabel('log(sum of square Errors)')
ax[1].set_title('Adaline - Learning rate 0.0001')
# -
# You can observe that with learning_rate 0.01 the errors grow large: the updates overshoot the global minimum. This is why the learning rate should be chosen carefully.
# # Decision Plot
# +
def decision_plot(X, y, classifier, resolution = 0.02):
    """Plot the 2-D decision regions of a fitted classifier.

    X          : (n_samples, 2) feature array.
    y          : class labels, one per row of X.
    classifier : fitted object exposing .predict().
    resolution : grid step used to sample the feature plane.
    """
    # BUG FIX: 'sky' is not a valid matplotlib colour name, so plotting a
    # 4th class raised ValueError; 'skyblue' is the valid named colour.
    color = ('red','green','blue','skyblue','violet','pink')
    markers = ('^','v','s','o','*')
    cmap = ListedColormap(color[:len(np.unique(y))])
    # Grid bounds padded by one unit on each side.
    x1_min, x1_max = X[:,0].min() - 1, X[:,0].max() + 1
    x2_min, x2_max = X[:,1].min() - 1, X[:,1].max() + 1
    xx1, xx2 = np.meshgrid(
        np.arange(x1_min, x1_max, resolution),
        np.arange(x2_min, x2_max, resolution)
    )
    # Predict on every grid point, then reshape back to the grid.
    Z = classifier.predict(np.array([xx1.ravel(),xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, cmap = cmap, alpha = 0.7)
    # Overlay the samples, one marker/colour per class.
    for ix , cl in enumerate(np.unique(y)):
        plt.scatter(x = X[y==cl,0], y = X[y==cl,1], label = cl, edgecolor = 'black', marker = markers[ix], color = color[ix])
    plt.legend(loc = 'upper left')
# +
# Decision boundaries for the two learning rates, side by side.
plt.subplot(1,2,1)
decision_plot(X,y,ada1)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.title('Adaline - Learning rate 0.01')
plt.subplot(1,2,2)
decision_plot(X,y,ada2)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.title('Adaline - Learning rate 0.0001')
# -
# You can observe how badly model 1 works: it was not able to classify the data set into its classes at all.
#
# Retrain with more epochs at the smaller, stable learning rate.
ada3 = Adaline(epoch=50, learning_rate=0.0001).fit(X,y)
decision_plot(X,y, ada3)
# As you can conclude working on learning rate and epoch help in making our model perform better on the data
# # Feature Scaling
# Feature Scaling help Gradient Descent model to converge quickly. we will use Standard Scaler Scaling here
#
# X_new = X - mean / std
# +
X_new = np.copy(X)
# Standard score: subtract the mean FIRST, then divide by the std.
# BUG FIX: the original wrote `X - mean / std`, which by operator
# precedence computed X - (mean/std) — a shift only, with no scaling —
# instead of the intended (X - mean) / std.
X_new[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std()
X_new[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std()
# -
# Refit on the standardized features with two learning-rate/epoch settings.
ada_scale_1 = Adaline(learning_rate=0.001, epoch= 100).fit(X_new,y)
ada_scale_2 = Adaline(learning_rate=0.0001, epoch = 10).fit(X_new,y)
# # Decision Boundary Plot
# +
plt.subplot(1,2,1)
decision_plot(X_new,y,ada_scale_1)
plt.subplot(1,2,2)
decision_plot(X_new, y, ada_scale_2)
# -
# # Stochastic Gradient Descent
# If our data set is too large doing <b>Batch Gradient Descent</b> will be costly. A popular alternative is <b>Stochastic Gradient Descent algorithm</b>, also called as <b><i>iterative or online gradient descent</i></b>. Instead of updating the weights based on the sum of the accumulated errors over all samples :
#
# learning_rate * (y(i) - phi(z(i))) x(i)
#
# Stochastic Gradient Descent reaches convergence much faster because of the more frequent weight update. Since each gradient is calculated based on a single training example, the error surface is noisier than in gradient descent, which can also have the advantage of stochastic gradient descent can escape shallow local minima more readily if we are working with nonlinear cost functions.
#
# ** To obtain satisfying results via stochastic gradient descent, it is important to present the training data in a random order; we also want to shuffle the training set every epoch to prevent cycles.
#
# Another advantage of stochastic gradient descent is that we can use it for <b>online learning.</b> In online learning, our model is trained on the fly as new training data arrives.
# # Building Adaline Model with Stochastic Gradient Descent
class AdalineSGD(object):
    """ADAptive LInear NEuron classifier trained with stochastic gradient
    descent (one weight update per training sample).

    eta          : learning rate.
    n_iter       : passes (epochs) over the training set.
    shuffle      : reshuffle the data every epoch to prevent cycles.
    random_state : seed for weight initialisation and shuffling.
    After fit(), w_ holds [bias, weights...] and cost_ the per-epoch
    average sample cost.
    """
    def __init__(self, eta = 0.01, n_iter = 10, shuffle = True, random_state = None):
        self.eta = eta
        self.n_iter = n_iter
        self.w_initialized = False
        self.shuffle = shuffle
        self.random_state = random_state

    def fit(self, X, y):
        """Fit weights with SGD; records per-epoch average cost in self.cost_."""
        self._initialize_weights(X.shape[1])
        self.cost_ = []
        for i in range(self.n_iter):
            if self.shuffle:
                X, y = self._shuffle(X, y)
            cost = []
            for xi, target in zip(X,y):
                cost.append(self._update_weights(xi, target))
            avg_cost = sum(cost) / len(y)
            self.cost_.append(avg_cost)
        return self

    def partial_fit(self, X, y):
        """Update the model on new data without re-initialising the
        weights (online learning)."""
        if not self.w_initialized:
            # BUG FIX: this called the non-existent self._initialized_weights,
            # which raised AttributeError on the first partial_fit call.
            self._initialize_weights(X.shape[1])
        if y.ravel().shape[0] > 1:
            for xi, target in zip(X,y):
                self._update_weights(xi, target)
        else:
            self._update_weights(X,y)
        return self

    def _shuffle(self, X, y):
        """Return X and y permuted by a common random permutation."""
        r = self.rgen.permutation(len(y))
        return X[r], y[r]

    def _initialize_weights(self,m):
        """Draw small random initial weights for m features plus bias."""
        self.rgen = np.random.RandomState(self.random_state)
        self.w_ = self.rgen.normal(loc = 0.0, scale = 0.01, size = 1 + m)
        self.w_initialized = True

    def _update_weights(self,xi,target):
        """Apply one Adaline SGD step for a single sample; return its cost."""
        output = self.activation(self.net_input(xi))
        error = (target - output)
        self.w_[1:] += self.eta * xi.dot(error)
        self.w_[0] += self.eta * error
        cost = 0.5 * error ** 2
        return cost

    def net_input(self, X):
        """Weighted sum of the inputs plus the bias term."""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        """Identity activation f(x) = x."""
        return X

    def predict(self, X):
        """Threshold the activated net input at zero -> class +1 or -1."""
        return np.where(self.activation(self.net_input(X)) >= 0.0, 1, -1)
# Train the SGD variant on the standardized features and plot results.
ada = AdalineSGD(n_iter=15, eta = 0.01, random_state=1)
ada.fit(X_new,y)
# +
decision_plot(X_new, y , ada)
plt.title('Adaline - Stochastic Gradient Descent')
plt.xlabel('speal length [standarized]')
plt.ylabel('sepal width [ standarized]')
plt.show()
# Average per-sample cost per epoch — should decrease quickly.
plt.plot(range(1,len(ada.cost_)+ 1), ada.cost_, marker = 'o')
plt.xlabel('Epochs')
plt.ylabel('Average cost')
plt.show()
# -
|
Adaline.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import numpy as np
# from tempfile import TemporaryFile
import os
import pickle
import random
import operator
import math
import numpy as np
import matplotlib.pyplot as plt
import librosa
import librosa.display
import IPython.display as ipd
# Gather all features:
# * MFCC:
# * Spectral Centroid:
# * Spectral Bandwidth:
# * Zero Crossing Rate:
# * Rolloff-mean:
# * Harmony:
# * Perceptr:
# * Tempo:
# * Chroma:
# * RMS:https://librosa.org/doc/main/generated/librosa.feature.rms.html
# %%time
# Walk the per-genre folders and build one feature vector per audio file:
# the per-coefficient mean and variance of the MFCCs, flattened together.
directory = "../data/"
data = []
classes = []
for folder in os.listdir(directory):
    if folder=="mf_files":
        continue  # skip the non-genre working directory
    for file in os.listdir(directory+folder):
        sig, rate = librosa.load(directory+folder+"/"+file)
        mfcc_feat = librosa.feature.mfcc(y =sig, sr= rate)
        mean_matrix = mfcc_feat.mean(1)  # mean over time frames
        var_matrix = mfcc_feat.var(1)    # variance over time frames
        data.append(np.array([mean_matrix,var_matrix]).flatten())
        classes.append(folder)           # folder name is the class label
# ### KNN
# +
# %%time
# KNN: stratified split, standardize, grid-search k and weighting.
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
data = np.array(data)
classes = np.array(classes)
trainX, testX, trainY, testY = train_test_split(data,
                                                classes,
                                                test_size = 0.25,
                                                random_state=29,
                                                stratify = classes)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(trainX)  # fit on train only to avoid test-set leakage
trainX = scaler.transform(trainX)
testX = scaler.transform(testX)
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import confusion_matrix
clf = GridSearchCV(estimator=KNeighborsClassifier(n_jobs = -1),
                   param_grid = {"n_neighbors" : np.arange(4,30,1),
                                 "weights" : ["uniform", "distance"]},
                   cv =5, n_jobs = -1)
clf.fit(trainX, trainY)
print(clf.score(testX, testY))
print(clf.best_estimator_)
from sklearn.metrics import ConfusionMatrixDisplay
ConfusionMatrixDisplay.from_predictions(y_true = testY, y_pred = clf.predict(testX),
                                        xticks_rotation= "vertical")
# -
# ### Logistic Regression
# +
# %%time
# Logistic regression with built-in CV over C; grid-search the solver.
# NOTE(review): this repeats the split/scale preprocessing of the KNN cell
# verbatim — a shared helper or Pipeline would remove the duplication.
from sklearn.linear_model import LogisticRegressionCV
from sklearn.model_selection import train_test_split
data = np.array(data)
classes = np.array(classes)
trainX, testX, trainY, testY = train_test_split(data,
                                                classes,
                                                test_size = 0.25,
                                                random_state=29,
                                                stratify = classes)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(trainX)
trainX = scaler.transform(trainX)
testX = scaler.transform(testX)
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import confusion_matrix
clf = GridSearchCV(estimator=LogisticRegressionCV(n_jobs = -1, max_iter = 10000),
                   param_grid = {"solver" : ["saga", "lbfgs"]},
                   cv =5, n_jobs = -1)
clf.fit(trainX, trainY)
print(clf.score(testX, testY))
print(clf.best_estimator_)
from sklearn.metrics import ConfusionMatrixDisplay
ConfusionMatrixDisplay.from_predictions(y_true = testY, y_pred = clf.predict(testX),
                                        xticks_rotation= "vertical")
# -
# ### Random Forest
# +
# %%time
# Random-forest classifier, grid-searched over split criterion, forest
# size, and tree depth.
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
data = np.array(data)
classes = np.array(classes)
# Stratified 75/25 split keeps the class balance in both partitions.
trainX, testX, trainY, testY = train_test_split(data,
                                                classes,
                                                test_size=0.25,
                                                random_state=29,
                                                stratify=classes)
from sklearn.preprocessing import StandardScaler
# Scaling is not required by trees, but is kept for consistency with the
# other model cells (it does not change tree splits' behavior materially).
scaler = StandardScaler()
scaler.fit(trainX)
trainX = scaler.transform(trainX)
testX = scaler.transform(testX)
from sklearn.model_selection import GridSearchCV
# (removed unused `confusion_matrix` import and dead commented-out code)
clf = GridSearchCV(estimator=RandomForestClassifier(n_jobs=-1),
                   param_grid={"criterion": ["gini", "entropy"],
                               "n_estimators": np.arange(100, 501, 100),
                               "max_depth": np.arange(1, 20, 1)},
                   cv=5, n_jobs=-1)
clf.fit(trainX, trainY)
print(clf.score(testX, testY))   # accuracy on the held-out split
print(clf.best_estimator_)
from sklearn.metrics import ConfusionMatrixDisplay
ConfusionMatrixDisplay.from_predictions(y_true=testY, y_pred=clf.predict(testX),
                                        xticks_rotation="vertical")
# -
# ### AdaBoost
# +
# %%time
# AdaBoost over a random-forest base learner. RandomForestClassifier is
# imported here too so this cell no longer depends on an earlier cell's import.
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.model_selection import train_test_split
data = np.array(data)
classes = np.array(classes)
# Stratified 75/25 split keeps the class balance in both partitions.
trainX, testX, trainY, testY = train_test_split(data,
                                                classes,
                                                test_size=0.25,
                                                random_state=29,
                                                stratify=classes)
from sklearn.preprocessing import StandardScaler
# Fit the scaler on the training split only to avoid test-set leakage.
scaler = StandardScaler()
scaler.fit(trainX)
trainX = scaler.transform(trainX)
testX = scaler.transform(testX)
from sklearn.model_selection import GridSearchCV
# (removed unused `confusion_matrix` import)
# NOTE(review): `base_estimator` was renamed to `estimator` in
# scikit-learn 1.2 and removed in 1.4 -- update if the environment is newer.
clf = GridSearchCV(estimator=AdaBoostClassifier(base_estimator=RandomForestClassifier()),
                   param_grid={"learning_rate": np.arange(0.1, 2, .1),
                               "n_estimators": np.arange(50, 300, 10)},
                   cv=5, n_jobs=-1)
clf.fit(trainX, trainY)
print(clf.score(testX, testY))   # accuracy on the held-out split
print(clf.best_estimator_)
from sklearn.metrics import ConfusionMatrixDisplay
ConfusionMatrixDisplay.from_predictions(y_true=testY, y_pred=clf.predict(testX),
                                        xticks_rotation="vertical")
# -
# ### Gradient Boost
# +
# %%time
# Gradient-boosting classifier, grid-searched over learning rate and
# number of boosting stages.
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
data = np.array(data)
classes = np.array(classes)
# Stratified 75/25 split keeps the class balance in both partitions.
trainX, testX, trainY, testY = train_test_split(data,
                                                classes,
                                                test_size=0.25,
                                                random_state=29,
                                                stratify=classes)
from sklearn.preprocessing import StandardScaler
# Fit the scaler on the training split only to avoid test-set leakage.
scaler = StandardScaler()
scaler.fit(trainX)
trainX = scaler.transform(trainX)
testX = scaler.transform(testX)
from sklearn.model_selection import GridSearchCV
# (removed unused `confusion_matrix` import)
clf = GridSearchCV(estimator=GradientBoostingClassifier(),
                   param_grid={"learning_rate": np.arange(0.1, 1, .1),
                               "n_estimators": np.arange(50, 300, 10)},
                   cv=5, n_jobs=-1)
clf.fit(trainX, trainY)
print(clf.score(testX, testY))   # accuracy on the held-out split
print(clf.best_estimator_)
from sklearn.metrics import ConfusionMatrixDisplay
ConfusionMatrixDisplay.from_predictions(y_true=testY, y_pred=clf.predict(testX),
                                        xticks_rotation="vertical")
# -
|
models/TraditionalModels_OnlyMFCCs.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# name: python3
# ---
# # Pretrained GPT2 Model Deployment Example
#
# In this notebook, we will run an example of text generation using a GPT2 model exported from HuggingFace and deployed with Seldon's Triton pre-packaged server. The example also covers converting the model to ONNX format.
# The implemented example below is of the Greedy approach for the next token prediction.
# more info: https://huggingface.co/transformers/model_doc/gpt2.html?highlight=gpt2
#
# After we have the module deployed to Kubernetes, we will run a simple load test to evaluate the module inference performance.
#
#
# ## Steps:
# - [Download pretrained GPT2 model from hugging face](#hf)
# - [Convert the model to ONNX](#onnx)
# - [Store model in Azure Storage Blob](#blob)
# - [Create PersistentVolume and PVC](#pv) mounting Azure Storage Blob
# - [Setup Seldon-Core](#seldon) in your kubernetes cluster
# - [Deploy the ONNX model](#sd) with Seldon’s prepackaged Triton server.
# - [Run model inference](#infer), run a greedy alg example (generate sentence completion)
# - [Monitor model with Azure Monitor](#azuremonitor)
# - [Run load test using vegeta](#vegeta)
# - [Clean-up](#cleanup)
#
# ## Basic requirements
# * Helm v3.0.0+
# * A Kubernetes cluster running v1.13 or above (minkube / docker-for-windows work well if enough RAM)
# * kubectl v1.14+
# * Python 3.6+
# %%writefile requirements.txt
transformers==4.5.1
torch==1.8.1
tokenizers<0.11,>=0.10.1
tensorflow==2.4.1
tf2onnx
# + tags=[]
# !pip install --trusted-host=pypi.python.org --trusted-host=pypi.org --trusted-host=files.pythonhosted.org -r requirements.txt
# -
# ### Export HuggingFace TFGPT2LMHeadModel pre-trained model and save it locally <a id="hf"/>
# +
# Download the pre-trained GPT-2 tokenizer and model weights from HuggingFace.
from transformers import GPT2Tokenizer, TFGPT2LMHeadModel
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
# GPT-2 has no dedicated padding token, so the EOS token is reused for padding.
model = TFGPT2LMHeadModel.from_pretrained(
    "gpt2", from_pt=True, pad_token_id=tokenizer.eos_token_id
)
# `saved_model=True` additionally writes a TF SavedModel, which the
# tf2onnx conversion step below consumes.
model.save_pretrained("./tfgpt2model", saved_model=True)
# -
# ### Convert the TensorFlow saved model to ONNX <a id="onnx"/>
# !python -m tf2onnx.convert --saved-model ./tfgpt2model/saved_model/1 --opset 13 --output model.onnx
# ## Azure Setup
# We have provided an [Azure Setup Notebook](https://docs.seldon.io/projects/seldon-core/en/latest/examples/triton_gpt2_example_azure_setup.html) that deploys an AKS cluster and an Azure storage account, and installs the Azure Blob CSI driver. If the AKS cluster already exists, skip to the Blob Storage creation and CSI driver installation steps. Upon completion of the Azure setup, the following infrastructure will be created:
# 
# +
# Azure resource names consumed by the `az` CLI commands in later cells.
resource_group = "seldon" # feel free to replace or use this default
aks_name = "modeltests"
storage_account_name = "modeltestsgpt" # fill in
storage_container_name = "gpt2onnx"
# -
# ### Copy your model to Azure Blob <a id="blob"/>
#
# %%time
# Copy model file
# !az extension add --name storage-preview
# !az storage azcopy blob upload --container {storage_container_name} \
# --account-name {storage_account_name} \
# --source ./model.onnx \
# --destination gpt2/1/model.onnx
#Verify Uploaded file
# !az storage blob list \
# --account-name {storage_account_name}\
# --container-name {storage_container_name} \
# --output table
# ## Add Azure PersistentVolume and Claim <a id="pv">
# For more details on creating PersistentVolume using CSI driver refer to https://github.com/kubernetes-sigs/blob-csi-driver/blob/master/deploy/example/e2e_usage.md
# - Create secret
# - Create PersistentVolume pointing to secret and Blob Container Name and `mountOptions` specifying user id for non-root containers
# - Create PersistentVolumeClaim to bind to volume
# key = !az storage account keys list --account-name {storage_account_name} -g {resource_group} --query '[0].value' -o tsv
storage_account_key = key[0]
#
# Create secret to access storage account
# !kubectl create secret generic azure-blobsecret --from-literal azurestorageaccountname={storage_account_name} --from-literal azurestorageaccountkey="{storage_account_key}" --type=Opaque
# +
# %%writefile azure-blobfuse-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-gpt2blob
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain # "Delete" is not supported in static provisioning
csi:
driver: blob.csi.azure.com
readOnly: false
volumeHandle: trainingdata # make sure this volumeid is unique in the cluster
volumeAttributes:
containerName: gpt2onnx # Modify if changed in Notebook
nodeStageSecretRef:
name: azure-blobsecret
namespace: default
mountOptions: # Use same user id that is used by POD security context
- -o uid=8888
- -o allow_other
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: pvc-gpt2blob
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi
volumeName: pv-gpt2blob
storageClassName: ""
# -
# !kubectl apply -f azure-blobfuse-pv.yaml
# Verify PVC is bound
# !kubectl get pv,pvc
# ### Run Seldon in your kubernetes cluster <a id="seldon"/>
#
# Follow the [Seldon-Core Setup notebook](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html) to Setup a cluster with Istio Ingress and install Seldon Core
# ### Deploy your model with Seldon pre-packaged Triton server <a id="sd"/>
# %%writefile gpt2-deploy.yaml
apiVersion: machinelearning.seldon.io/v1alpha2
kind: SeldonDeployment
metadata:
name: gpt2gpu
spec:
annotations:
prometheus.io/port: "8002" # we will explain below in Monitoring section
prometheus.io/path: "/metrics"
predictors:
- componentSpecs:
- spec:
containers:
- name: gpt2
resources:
requests:
memory: 2Gi
cpu: 2
nvidia.com/gpu: 1
limits:
memory: 4Gi
cpu: 4
nvidia.com/gpu: 1
tolerations:
- key: "nvidia.com" # to be able to run in GPU Nodepool
operator: "Equal"
value: "gpu"
effect: "NoSchedule"
graph:
implementation: TRITON_SERVER
logger:
mode: all
modelUri: pvc://pvc-gpt2blob/
name: gpt2
type: MODEL
name: default
replicas: 1
protocol: kfserving
# !kubectl apply -f gpt2-deploy.yaml -n default
# !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=gpt2gpu -o jsonpath='{.items[0].metadata.name}')
# #### Interact with the model: get model metadata (a "test" request to make sure our model is available and loaded correctly)
# +
ingress_ip = !(kubectl get svc --namespace istio-system istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
ingress_ip = ingress_ip[0]
# !curl -v http://{ingress_ip}:80/seldon/default/gpt2gpu/v2/models/gpt2
# -
# ### Run prediction test: generate a sentence completion using GPT2 model - Greedy approach <a id="infer"/>
#
# +
# Greedy next-token generation: repeatedly POST the growing sentence to the
# Triton V2 inference endpoint and append the argmax token.
# (removed unused `import http`)
import json

import numpy as np
import requests
from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
input_text = "I love Artificial Intelligence"
count = 0
max_gen_len = 8
gen_sentence = input_text
while count < max_gen_len:
    input_ids = tokenizer.encode(gen_sentence, return_tensors="tf")
    shape = input_ids.shape.as_list()
    payload = {
        "inputs": [
            {
                "name": "input_ids:0",
                "datatype": "INT32",
                "shape": shape,
                "data": input_ids.numpy().tolist(),
            },
            {
                "name": "attention_mask:0",
                "datatype": "INT32",
                "shape": shape,
                # Attend to every position -- this payload carries no padding.
                "data": np.ones(shape, dtype=np.int32).tolist(),
            },
        ]
    }
    tfserving_url = (
        "http://" + str(ingress_ip) + "/seldon/default/gpt2gpu/v2/models/gpt2/infer"
    )
    print(f"sending request to {tfserving_url}")
    with requests.post(tfserving_url, json=payload) as ret:
        try:
            res = ret.json()
        except ValueError:
            # Non-JSON (error) response: retry this step. Was a bare
            # `except:`, which also swallowed KeyboardInterrupt/SystemExit
            # and could hide real bugs. NOTE(review): this still retries
            # indefinitely if the server keeps returning garbage.
            continue
    # extract logits
    logits = np.array(res["outputs"][1]["data"])
    logits = logits.reshape(res["outputs"][1]["shape"])
    # take the best next token probability of the last token of input ( greedy approach)
    next_token = logits.argmax(axis=2)[0]
    next_token_str = tokenizer.decode(
        next_token[-1:], skip_special_tokens=True, clean_up_tokenization_spaces=True
    ).strip()
    gen_sentence += " " + next_token_str
    print(f"Sentence: {gen_sentence}")
    count += 1
print(f"Input: {input_text}\nOutput: {gen_sentence}")
# -
# ## Configure Model Monitoring with Azure Monitor <a id="azuremonitor"/>
# Azure Monitor Containers Insights provides functionality for collecting data from any Prometheus endpoint. It removes the need to install and operate a Prometheus server and manage the monitoring data, as Azure Monitor provides a centralized point for collecting, displaying and alerting on monitoring data. To turn on Azure Monitor Container Insights, follow the steps described [here](https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-onboard), after which you should see that you have an “omsagent” pod running.
# !kubectl get pods -n kube-system | grep omsagent
# ### Configure Prometheus Metrics scraping
# Once `omsagent` is running we need to configure it to collect metrics from Prometheus endpoints. Azure Monitor Containers Insights allows configuration to be applied on a cluster or node-wide scope and configure endpoints for monitoring on one of the following ways:
# - Provide an array of URLs
# - Provide an Array of Kubernetes services
# - Enable monitoring of any pods with Prometheus annotations
# For more details on how to configure the scraping endpoints and query collected data refer to [MS Docs on Configure scraping of Prometheus metrics with Container insights](https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-prometheus-integration)
#
# Our deployed model's metrics are available from a couple of infrastructure layers - [Seldon model orchestrator metrics](https://docs.seldon.io/projects/seldon-core/en/latest/analytics/analytics.html) and [Nvidia Triton Server Metrics](https://github.com/triton-inference-server/server/blob/main/docs/metrics.md). To enable scraping for both endpoints we updated the Microsoft-provided default `ConfigMap` that configures `omsagent` [azure-metrics-cm.yaml](./azure-metrics-cm.yaml):
# - **Triton Server:** update `monitor_kubernetes_pods = true` to enable scrapting for Pods with `prometheus.io` annotations
# In SeldonDeployment shown above `prometheus.io/path` and `prometheus.io/port` point to default Triton metrics endpoint
# - **Seldon Orchestrator:** add our deployed model seldon service endpoint to list of Kubernetes services to be scraped:
# ```yaml
# kubernetes_services = ["http://gpt2gpu-default.default:8000/prometheus"]
# ```
# !kubectl apply -f azure-metrics-cm.yaml
# ## Query and Visualize collected data
# Collected metrics are available in Logs blade of Azure Monitor in a table **InsightsMetrics**, you could see all metrics gathered by running query
#
# ```yaml
# InsightsMetrics
# | where Namespace == "prometheus"
# ```
#
# To get Model Inference Requests per minute from Seldon Metrics run the following query and pin it to Dashboard or add to Azure Monitor Workbook:
#
# ```yaml
# InsightsMetrics
# | where Namespace == "prometheus"
# | where Name == "seldon_api_executor_server_requests_seconds_count"
# | extend Model = parse_json(Tags).deployment_name
# | where parse_json(Tags).service == "predictions"
# | order by TimeGenerated asc
# | extend RequestsPerMin = Val - prev(Val,1)
# | project TimeGenerated, RequestsPerMin
# | render areachart
# ```
#
#
# To get Inference Duration from Triton Metrics:
#
# ```yaml
# InsightsMetrics
# | where Namespace == "prometheus"
# | where Name in ("nv_inference_request_duration_us")
# | order by TimeGenerated asc
# | extend QueueDurationSec = (Val - prev(Val, 1)) / 1000
# | project TimeGenerated, Name, QueueDurationSec
# | render areachart
# ```
#
# Here is example dashboard we created using queries above
#
# 
#
# ### Run Load Test / Performance Test using vegeta <a id="vegeta"/>
# #### Install vegeta, for more details take a look in [vegeta](https://github.com/tsenart/vegeta#install) official documentation
# !wget https://github.com/tsenart/vegeta/releases/download/v12.8.3/vegeta-12.8.3-linux-arm64.tar.gz
# !tar -zxvf vegeta-12.8.3-linux-arm64.tar.gz
# !chmod +x vegeta
# #### Generate a vegeta [target file](https://github.com/tsenart/vegeta#-targets) containing a "post" cmd with the payload in the required structure
# +
# Build a vegeta target file: a single POST target whose body is the
# base64-encoded Triton V2 inference payload for a fixed prompt.
# (removed unused `from subprocess import PIPE, Popen, run` and the unused
# TFGPT2LMHeadModel import)
import base64
import json

import numpy as np
from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
input_text = "I enjoy working in Seldon"
input_ids = tokenizer.encode(input_text, return_tensors="tf")
shape = input_ids.shape.as_list()
payload = {
    "inputs": [
        {
            "name": "input_ids:0",
            "datatype": "INT32",
            "shape": shape,
            "data": input_ids.numpy().tolist(),
        },
        {
            "name": "attention_mask:0",
            "datatype": "INT32",
            "shape": shape,
            # Attend to every position -- no padding in this payload.
            "data": np.ones(shape, dtype=np.int32).tolist(),
        },
    ]
}
tfserving_url = (
    "http://" + str(ingress_ip) + "/seldon/default/gpt2gpu/v2/models/gpt2/infer"
)
print(f"preparing request to {tfserving_url}")
cmd = {
    "method": "POST",
    "header": {"Content-Type": ["application/json"]},
    "url": tfserving_url,
    # vegeta's JSON target format expects the request body base64-encoded.
    "body": base64.b64encode(bytes(json.dumps(payload), "utf-8")).decode("utf-8"),
}
with open("vegeta_target.json", mode="w") as file:
    json.dump(cmd, file)
    file.write("\n\n")
# -
# !./vegeta attack -targets=vegeta_target.json -rate=1 -duration=60s -format=json | ./vegeta report -type=text
# ### Clean-up <a id="cleanup"/>
# !kubectl delete -f gpt2-deploy.yaml -n default
|
examples/triton_gpt2/GPT2-ONNX-Azure.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] run_control={"marked": true}
# # Introduction to Machine Learning and Toolkit Exercises
# + [markdown] run_control={"marked": true}
# ## Introduction
#
# We will be using the iris data set for this tutorial. This is a well-known data set containing iris species and sepal and petal measurements. The data we will use are in a file called `Iris_Data.csv` found in the [data](../../data) directory.
# -
# Python 2/3 print compatibility shim (harmless on Python 3).
from __future__ import print_function
import os
# Directory that contains Iris_Data.csv -- adjust for your machine.
data_path = ['c:/users/dsj/data']
print (data_path)
# + [markdown] run_control={"marked": true}
# ## Question 1
#
# Load the data from the file using the techniques learned today. Examine it.
#
# Determine the following:
#
# * The number of data points (rows). (*Hint:* check out the dataframe `.shape` attribute.)
# * The column names. (*Hint:* check out the dataframe `.columns` attribute.)
# * The data types for each column. (*Hint:* check out the dataframe `.dtypes` attribute.)
# + run_control={"marked": true}
import numpy as np
import pandas as pd
# Join the directory and file name with the OS-specific path separator.
filepath = os.sep.join(data_path + ['Iris_Data.csv'])
print(filepath)
data = pd.read_csv(filepath)
data.head()
# + run_control={"marked": true}
# Number of rows
print(data.shape[0])
# Column names
print(data.columns.tolist())
# Data types
print(data.dtypes)
# + [markdown] run_control={"marked": true}
# ## Question 2
#
# Examine the species names and note that they all begin with 'Iris-'. Remove this portion of the name so the species name is shorter.
#
# *Hint:* there are multiple ways to do this, but you could use either the [string processing methods](http://pandas.pydata.org/pandas-docs/stable/text.html) or the [apply method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.apply.html).
# + run_control={"marked": true}
# Every species label starts with the genus prefix 'Iris-'; strip it so the
# labels read simply 'setosa', 'versicolor', 'virginica'.
data['species'] = data['species'].str.replace('Iris-', '')
# Equivalent alternative:
# data['species'] = data.species.apply(lambda r: r.replace('Iris-', ''))
data.head()
# + [markdown] run_control={"marked": true}
# ## Question 3
#
# Determine the following:
# * The number of each species present. (*Hint:* check out the series `.value_counts` method.)
# * The mean, median, and quantiles and ranges (max-min) for each petal and sepal measurement.
#
# *Hint:* for the last question, the `.describe` method does have median, but it's not called median. It's the *50%* quantile. `.describe` does not have range though, and in order to get the range, you will need to create a new entry in the `.describe` table, which is `max - min`.
# + run_control={"marked": true}
# Count how many rows belong to each species.
group_sizes = data.groupby('species').size()
print(group_sizes)
# + run_control={"marked": true}
# + [markdown] run_control={"marked": true}
# ## Question 4
#
# Calculate the following **for each species** in a separate dataframe:
#
# * The mean of each measurement (sepal_length, sepal_width, petal_length, and petal_width).
# * The median of each of these measurements.
#
# *Hint:* you may want to use Pandas [`groupby` method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.groupby.html) to group by species before calculating the statistic.
#
# If you finish both of these, try calculating both statistics (mean and median) in a single table (i.e. with a single groupby call). See the section of the Pandas documentation on [applying multiple functions at once](http://pandas.pydata.org/pandas-docs/stable/groupby.html#applying-multiple-functions-at-once) for a hint.
# -
# The mean calculation
data.groupby('species').mean()
# The median calculation
data.groupby('species').median()
# +
# applying multiple functions at once - 2 methods
data.groupby('species').agg(['mean', 'median'])  # passing a list of recognized strings
# NOTE(review): passing numpy callables to .agg emits a FutureWarning on
# recent pandas/NumPy -- the string names above are the preferred form.
data.groupby('species').agg([np.mean, np.median])  # passing a list of explicit aggregation functions
# +
# If certain fields need to be aggregated differently, we can do:
from pprint import pprint
# One ['mean', 'median'] entry per measurement column, overridden for petal_length.
agg_dict = {field: ['mean', 'median'] for field in data.columns if field != 'species'}
agg_dict['petal_length'] = 'max'
pprint(agg_dict)
data.groupby('species').agg(agg_dict)
# -
# ## Question 5
#
# Make a scatter plot of `sepal_length` vs `sepal_width` using Matplotlib. Label the axes and give the plot a title.
import matplotlib.pyplot as plt
# %matplotlib inline
# +
# A simple scatter plot with Matplotlib
# Scatter sepal width against sepal length on a fresh set of axes.
ax = plt.axes()
ax.scatter(data['sepal_length'], data['sepal_width'])
# Axis labels and the title can all be supplied through a single set() call.
ax.set(xlabel='Sepal Length (cm)',
       ylabel='Sepal Width (cm)',
       title='Sepal Length vs Width');
# -
# ## Question 6
#
# Make a histogram of any one of the four features. Label axes and title it as appropriate.
# +
#Student writes code here
# -
plt.hist(data.sepal_length, bins=25)
# ## Question 7
#
# Now create a single plot with histograms for each feature (`petal_width`, `petal_length`, `sepal_width`, `sepal_length`) overlayed. If you have time, next try to create four individual histogram plots in a single figure, where each plot contains one feature.
#
# For some hints on how to do this with Pandas plotting methods, check out the [visualization guide](http://pandas.pydata.org/pandas-docs/version/0.18.1/visualization.html) for Pandas.
# +
import seaborn as sns
sns.set_context('notebook')
# Overlay all four feature histograms on a single axes via Pandas plotting.
ax = data.plot.hist(bins=25, alpha=0.5)
ax.set_xlabel('Size (cm)');
# +
# To create four separate plots, use Pandas `.hist` method
axList = data.hist(bins=25)
# Label only the outer edge of the grid: x-labels on the last row,
# y-labels on the first column.
# FIX: Axes.is_last_row()/is_first_col() were deprecated in Matplotlib 3.4
# and removed in 3.6 -- the grid position now lives on the SubplotSpec.
for ax in axList.flatten():
    spec = ax.get_subplotspec()
    if spec.is_last_row():
        ax.set_xlabel('Size (cm)')
    if spec.is_first_col():
        ax.set_ylabel('Frequency')
# -
# ## Question 8
#
# Using Pandas, make a boxplot of each petal and sepal measurement. Here is the documentation for [Pandas boxplot method](http://pandas.pydata.org/pandas-docs/version/0.18.1/visualization.html#visualization-box).
#Student writes code here
data.plot.box()
# ## Question 9
#
# Now make a single boxplot where the features are separated in the x-axis and species are colored with different hues.
#
# *Hint:* you may want to check the documentation for [Seaborn boxplots](http://seaborn.pydata.org/generated/seaborn.boxplot.html).
#
# Also note that Seaborn is very picky about data format--for this plot to work, the input dataframe will need to be manipulated so that each row contains a single data point (a species, a measurement type, and the measurement value). Check out Pandas [stack](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.stack.html) method as a starting place.
#
# Here is an example of a data format that will work:
#
# | | species | measurement | size |
# | - | ------- | ------------ | ---- |
# | 0 | setosa | sepal_length | 5.1 |
# | 1 | setosa | sepal_width | 3.5 |
# +
# First we have to reshape the data so there is
# only a single measurement in each column
plot_data = (data
.set_index('species')
.stack()
.to_frame()
.reset_index()
.rename(columns={0:'size', 'level_1':'measurement'})
)
plot_data.head()
# +
# Now plot the dataframe from above using Seaborn
sns.set_style('white')
sns.set_context('notebook')
sns.set_palette('dark')
f = plt.figure(figsize=(6,4))
sns.boxplot(x='measurement', y='size',
hue='species', data=plot_data);
# -
# ## Question 10
#
# Make a [pairplot](http://seaborn.pydata.org/generated/seaborn.pairplot.html) with Seaborn to examine the correlation between each of the measurements.
#
# *Hint:* this plot may look complicated, but it is actually only a single line of code. This is the power of Seaborn and dataframe-aware plotting! See the lecture notes for reference.
# +
#Student writes code here
# -
# FIX: seaborn renamed `size` to `height` in 0.9 and later removed the alias.
sns.pairplot(data, hue='species', height=3)
|
week1/Week1_Introduction_to_Machine_Learning_and_Toolkit_HW.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# get_ipython().magic('matplotlib notebook')
# FIX: InteractiveShell.magic() has been deprecated for years;
# run_line_magic(name, line) is the supported API.
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
#___________________________________________________________________________________________________________________
import os
import tripyview as tpv
import shapefile as shp
import numpy as np
# + tags=["parameters"]
# Parameters
# (papermill parameters cell: the values below are defaults that papermill
# can override when the notebook is executed as a template)
mesh_path ='/work/ollie/projects/clidyn/FESOM2/meshes/core2/'
save_path = None #'~/figures/test_papermill/'
save_fname= None
#_____________________________________________________________________________________
which_cycl= 3
which_mode= 'zmeantransects_clim'
#_____________________________________________________________________________________
# Model runs to compare -- one panel is drawn per entry, labelled by input_names.
input_paths= list()
input_paths.append('/home/ollie/pscholz/results/trr181_tke_ctrl_ck0.1/')
input_paths.append('/home/ollie/pscholz/results/trr181_tke_ctrl_ck0.3/')
input_paths.append('/home/ollie/pscholz/results/trr181_tke+idemix_orig_ck0.1/')
input_paths.append('/home/ollie/pscholz/results/trr181_tke+idemix_jayne09_ck0.1/')
input_paths.append('/home/ollie/pscholz/results/old_trr181_tke+idemix_nycander05_ck0.1/')
input_paths.append('/home/ollie/pscholz/results/old_trr181_tke+idemix_stormtide2_ck0.1/')
input_paths.append('/home/ollie/pscholz/results/trr181_tke+idemix_jayne09_ck0.3/')
input_paths.append('/home/ollie/pscholz/results/old_trr181_tke+idemix_nycander05_ck0.3/')
input_paths.append('/home/ollie/pscholz/results/old_trr181_tke+idemix_stormtide2_ck0.3/')
input_names= list()
input_names.append('TKE, ck=0.1')
input_names.append('TKE, ck=0.3')
input_names.append('TKE+IDEMIX, ck=0.1, jayne (old param)')
input_names.append('TKE+IDEMIX, ck=0.1, jayne (new param)')
input_names.append('TKE+IDEMIX, ck=0.1, nycander (new param)')
input_names.append('TKE+IDEMIX, ck=0.1, stormtide (new param)')
input_names.append('TKE+IDEMIX, ck=0.3, jayne (new param)')
input_names.append('TKE+IDEMIX, ck=0.3, nycander (new param)')
input_names.append('TKE+IDEMIX, ck=0.3, stormtide (new param)')
# Variable and time range to average over.
vname = 'temp'
year = [1979,2019]
mon, day, record, box, depth = None, None, None, None, None
#_____________________________________________________________________________________
# do anomaly plots in case ref_path is not None
ref_path = None #'/home/ollie/pscholz/results/trr181_tke_ctrl_ck0.1/' # None
ref_name = None #'TKE, ck=0.1' # None
ref_year = None # [2009,2019]
ref_mon, ref_day, ref_record = None, None, None
#_____________________________________________________________________________________
# Regions for the zonal-mean transects; entries are either the string
# 'global' or a shapefile path relative to tripyview's shapefiles folder.
box_region = list()
# box_region.append('global')
# box_region.append('ocean_basins/Arctic_Basin.shp')
#box_region.append('ocean_basins/Eurasian_Basin.shp')
box_region.append('ocean_basins/Atlantic_Basin.shp')
#box_region.append('mpas_region/Canada_Basin.shp')
#box_region.append('mpas_region/North_Atlantic_Ocean.shp')
#box_region.append('mpas_region/Greenland_Sea.shp')
#box_region.append('mpas_region/Irminger_Sea.shp')
#box_region.append('mpas_region/Norwegian_Sea.shp')
#box_region.append('mpas_region/Labrador_Sea.shp')
#box_region.append('mpas_region/North_Pacific_Ocean.shp')
#box_region.append('mpas_region/South_Pacific_Ocean.shp')
#box_region.append('mpas_region/Southern_Ocean.shp')
#box_region.append('mpas_region/Western_Weddell_Sea.shp')
#_____________________________________________________________________________________
which_clim= 'phc3'
clim_path = '/work/ollie/pscholz/INIT_HYDRO/phc3.0/phc3.0_annual.nc'
#_____________________________________________________________________________________
# Colormap settings (forwarded to the cinfo dict in the next cell).
cstr      = 'blue2red'
cnum      = 20
cref      = 0
crange, cmin, cmax, cfac, climit = None, None, None, None, None
chist, ctresh = True, 0.995
#_____________________________________________________________________________________
# Figure layout and output settings.
ncolumn   = 3
do_rescale= None
which_dpi = 300
do_contour= True
do_ylog   = True
# +
#___LOAD FESOM2 MESH___________________________________________________________________________________
mesh = tpv.load_mesh_fesom2(mesh_path, do_rot='None', focus=0, do_info=True, do_pickle=True,
                            do_earea=True, do_narea=True, do_eresol=[True,'mean'], do_nresol=[True,'eresol'])
#______________________________________________________________________________________________________
# Runs with spinup cycles store their data in a numbered subdirectory of each run path.
if which_cycl is not None:
    for ii, ipath in enumerate(input_paths):
        input_paths[ii] = os.path.join(ipath, '{:d}/'.format(which_cycl))
        print(ii, input_paths[ii])
    if ref_path is not None:
        ref_path = os.path.join(ref_path, '{:d}/'.format(which_cycl))
        print('R', ref_path)
#______________________________________________________________________________________________________
# Assemble the colormap-info dict; only explicitly set options are forwarded.
cinfo = dict({'cstr': cstr, 'cnum': cnum})
if crange is not None: cinfo['crange'] = crange
if cmin   is not None: cinfo['cmin'  ] = cmin
if cmax   is not None: cinfo['cmax'  ] = cmax
if cref   is not None: cinfo['cref'  ] = cref
if cfac   is not None: cinfo['cfac'  ] = cfac
if climit is not None: cinfo['climit'] = climit
if chist  is not None: cinfo['chist' ] = chist
if ctresh is not None: cinfo['ctresh'] = ctresh
# Anomaly plots are centred on zero.
if ref_path is not None: cinfo['cref'] = 0.0
#______________________________________________________________________________________________________
# in case of diff plots: fall back to the main-run time selection where unset
if ref_path is not None:
    if ref_year   is None: ref_year   = year
    if ref_mon    is None: ref_mon    = mon
    if ref_record is None: ref_record = record
#________________________________________________________________________________________________________
# define index regions --> reading shape files
box = list()
shp_path = os.path.join(tpv.__path__[0], 'shapefiles/')
for region in box_region:
    # FIX: was `region is 'global'` -- identity comparison against a string
    # literal is implementation dependent (SyntaxWarning on Python >= 3.8);
    # equality is what was meant.
    if region == 'global' or isinstance(region, list): box.append(region)
    else: box.append(shp.Reader(os.path.join(shp_path, region)))
# +
#___LOAD CLIMATOLOGY_____________________________________________________________________________________
# Map the generic variable name onto the climatology file's own naming
# scheme (WOA18 stores temperature/salinity as t00an1/s00an1).
clim_vname= vname
if vname in ['temp', 'salt', 'pdens'] or 'sigma' in vname:
    if vname=='temp' and which_clim.lower()=='woa18': clim_vname = 't00an1'
    elif vname=='salt' and which_clim.lower()=='woa18': clim_vname = 's00an1'
    clim = tpv.load_climatology(mesh, clim_path, clim_vname)
    # NOTE(review): diagpath is hard-coded to one specific run's folder --
    # presumably only mesh diagnostics are read from it; confirm before reuse.
    clim_zmtransect = tpv.load_zmeantransect_fesom2(mesh, clim, box, do_compute=True, diagpath='/home/ollie/pscholz/results/trr181_tke_ctrl_ck0.1/1/')
else: raise ValueError('climatology not supported for choosen vname')
# Raw climatology fields are no longer needed once the transect exists.
del(clim)
#___LOAD FESOM2 DATA____________________________________________________________________________________
# For every run: load the field, compute its zonal-mean transect, and
# subtract the climatology transect (anomaly vs. climatology).
list_zmtransects = list()
for datapath, descript in zip(input_paths, input_names):
    print(datapath, descript)
    data = tpv.load_data_fesom2(mesh, datapath, vname=vname, year=year, mon=mon, descript=descript , do_info=False)
    data_zmtransect = tpv.load_zmeantransect_fesom2(mesh, data, box, do_compute=True)
    data_zmtransect = tpv.do_transectanomaly(data_zmtransect, clim_zmtransect)
    list_zmtransects.append(data_zmtransect)
    del(data, data_zmtransect)
#___LOAD FESOM2 REFERENCE DATA________________________________________________________________________
# Optional reference run, appended as the final panel.
if ref_path is not None:
    print(ref_path)
    data_ref = tpv.load_data_fesom2(mesh, ref_path, vname=vname, year=ref_year, mon=ref_mon, descript=ref_name, do_info=False)
    data_ref_zmtransect = tpv.load_zmeantransect_fesom2(mesh, data_ref, box, do_compute=True)
    data_ref_zmtransect = tpv.do_transectanomaly(data_ref_zmtransect, clim_zmtransect)
    list_zmtransects.append(data_ref_zmtransect)
    del(data_ref, data_ref_zmtransect)
del(clim_zmtransect)
# -
#___PLOT FESOM2 DATA___________________________________________________________________________________
spath = save_path
sname = vname
slabel = list_zmtransects[0][0][sname].attrs['str_lsave']
strans = list_zmtransects[0][0][vname].attrs['transect_name'].replace(' ','_').lower()
if spath is not None: spath = '{}/{}_{}_{}_{}.png'.format(spath, which_mode, sname, strans, slabel)
nrow = np.ceil(len(list_zmtransects)/ncolumn).astype('int')
if save_fname is not None: spath = save_fname
fig, ax, cbar = tpv.plot_zmeantransects(list_zmtransects, cinfo=cinfo, figsize=[ncolumn*7, nrow*4], n_rc=[nrow, ncolumn],
do_rescale=do_rescale, pos_gap=[0.01, 0.01], pos_extend=[0.05, 0.08, 0.97,0.95], cbar_nl=12, do_ylog=do_ylog,
do_save = spath, save_dpi=which_dpi, do_contour=do_contour )
|
templates_notebooks/template_zmeantransect_clim.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
from sigelmsync import sigelmsync
from starkelmsync import starkelmsync
from getsig import getsig
import matplotlib.cm as cm
import matplotlib.pylab as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.colors as colors
import numpy as np
import os
import dd
import inspect
#
# NOTE(review): 'helvet2' is a custom matplotlib style sheet that must be
# installed in the user's mpl style library — not a stock style.
plt.style.use('helvet2')
#
# +
def smooth(data, N=5):
    """Boxcar-average *data* with an N-point moving window (same-length output)."""
    kernel = np.full(N, 1.0 / N)
    return np.convolve(data, kernel, 'same')
def smooth2d(data, N):
    """Apply `smooth` to every column of the 2-D array *data*, in place; returns it."""
    n_cols = data.shape[1]
    for col in range(n_cols):
        data[:, col] = smooth(data[:, col], N)
    return data
# -
# Sanity check: show which module object sigelmsync was imported from
inspect.getmodule(sigelmsync)
# ### ELM function
def three_starksync_paper(shotnr, ti, tf, preft=0.002, suft=0.008,
                          refside='in', divside='in', divitem='jsat',
                          ref_exper='AUGD', elm_exper='AUGD',
                          ref_edition=0, elm_edition=0,
                          mindens=0.5, maxdens=3.0,
                          minydiv=-5, maxydiv=20, divtickfreq=5, vmax=20, vmaxdiv=5):
    """Ref, divertor and Idiv data ELM Conditional Averaging.

    Builds a three-panel publication figure for shot *shotnr* in the time
    window [ti, tf] s:
      (a) ELM-synchronised reflectometer radii, colour-coded by density
          layer, with the separatrix position overplotted;
      (b) ELM-synchronised divertor target profile (*divitem*: 'jsat',
          'te', 'nev' or 'net') as a colour map;
      (c) absolute divertor shunt current |Idiv| in kA.
    preft/suft are the windows (s) before/after each ELM onset used for the
    conditional average. The figure is saved under ./Revised/<shotnr>/.

    Returns (fth, frh, fne): flattened time [ms], radius [m] and density
    layer arrays of the panel-(a) scatter data.

    NOTE(review): reads module-level globals ``mintime``/``maxtime`` (set
    in a later notebook cell) — they must exist before calling this.
    ``elm_edition`` is accepted for interface compatibility but unused.
    """
    graphsets = {'minydiv': minydiv, 'maxydiv': maxydiv}
    ####Read Data
    ####Parameters for Refplot: HFS vs LFS reflectometer group, matching
    ####separatrix signal, and radial axis limits [m]
    if refside == 'in':
        refsgr = "HFSR"
        separatrix_signal = "Rin"
        minyval = 1.04
        maxyval = 1.17
    else:
        refsgr = "LFSR"
        separatrix_signal = "Raus"
        minyval = 2.08
        maxyval = 2.21
    th, rh = sigelmsync(shotnr, "RDL", refsgr, ti=ti, tf=tf, preft=preft, suft=suft, elm_exper=elm_exper)
    #Get the densities stored in the shotfile
    # NOTE(review): `densities` is read but never used below — the linspace
    # shortcut `dens` is used instead. Kept for reference.
    rdl = dd.shotfile('RDL', shotnr, experiment=ref_exper, edition=ref_edition)
    dum1 = rdl.getParameter('Aux','ne')
    densities = np.array(dum1.data)
    rdl.close()
    ##Don't use all data
    nchans = 11
    rh = rh[1:nchans]
    ## Check densities used (shortcut without dd)
    dens = np.linspace(mindens, maxdens, 12)
    ####Separatrix
    trin, rin = sigelmsync(shotnr, "FPG", separatrix_signal, ti=ti, tf=tf,
                           preft=preft, suft=suft,
                           elm_exper=elm_exper)
    ####Divertor current: inner vs outer shunt signal and plot colour
    if divside=='in':
        idivname = 'Ipolsoli'
        idivclr = 'C3'
    else:
        idivname = 'Ipolsola'
        idivclr = 'C0'
    tidiv, idiv = sigelmsync(shotnr, "MAC", idivname, ti=ti, tf=tf,
                             preft=preft, suft=suft,
                             elm_exper=elm_exper)
    ######Jsat (or chosen divertor item), ELM-synchronised target profile
    t, s, m = starkelmsync(shotnr, ti=ti, tf=tf,
                           item=divitem, side=divside,
                           preft=preft, suft=suft,
                           elm_exper=elm_exper)
    ######Figure layout: three stacked panels sharing the time axis
    fig = plt.figure(figsize=(3.0, 3.5), dpi=100)
    gs = mpl.gridspec.GridSpec(3, 2, height_ratios=[1, 1, 1], width_ratios=[4, 1])
    ax1 = fig.add_subplot(gs[0, 0])
    ax2 = fig.add_subplot(gs[1, 0], sharex=ax1)
    ax3 = fig.add_subplot(gs[2, 0], sharex=ax1)
    titlestr = '\#' + str(shotnr) + ' t=[' + str(ti) +','+ str(tf) + '] s'
    ax1.set_title(titlestr, loc='left', fontsize=9)
    ax1.text(0.03, 0.85, '(a)', transform = ax1.transAxes)
    ax2.text(0.03, 0.85, '(b)', transform = ax2.transAxes, color='white')
    ax3.text(0.03, 0.85, '(c)', transform = ax3.transAxes)
    ######################################
    # Panel (a): separatrix scatter plus shaded region above it.
    # (`xrange` replaced by `range`, valid on both Python 2 and 3.)
    indr = sorted(range(len(trin)), key=lambda ix: trin[ix])
    trinsort = trin[indr]
    rinsort = rin[indr]
    ax1.scatter(trin*1e3, rin, color='k', edgecolors='k', s=0.7)
    ax1.fill_between(trinsort*1e3, rinsort, 1.5, color='black', alpha=0.45)
    # Flatten (time, radius, density-layer) triplets for the scatter plot
    fth = []
    frh = []
    fne = []
    for i in range(len(rh)):
        fth.append(th*1e3)
        frh.append(rh[i])
        fne.append(dens[i] * np.ones(len(th)))
    ########
    fth = np.concatenate(fth)
    frh = np.concatenate(frh)
    fne = np.concatenate(fne)
    ########
    # (removed unused `neindex`/`quaak*` temporaries from the original)
    cmap = cm.get_cmap('viridis')
    sc = ax1.scatter(fth, frh, c=fne, s=2.0, lw=0, cmap=cmap)
    ###Colorbar attached to the right of panel (a)
    axins = inset_axes(ax1,
                       width="5%",  # width = 10% of parent_bbox width
                       height="100%",  # height : 50%
                       loc=6,
                       bbox_to_anchor=(1.05, 0., 1, 1),
                       bbox_transform=ax1.transAxes,
                       borderpad=0,
                       )
    cbar = plt.colorbar(sc, cax=axins)# ticks=(np.arange(0.0, max(fne)+1.0, 1.0)))
    cbar.set_label(r'$\mathrm{n_{e}\,[10^{19}m^{-3}]}$')
    ax1.set_ylim([minyval, maxyval])
    #Inner wall
    ax1.hlines(1.045, mintime, maxtime, color='black', lw=3)
    ax1.set_ylabel(r"$\mathrm{R\,[m]}$")
    plt.setp(ax1.get_xticklabels(), visible=False)
    plt.setp(ax2.get_xticklabels(), visible=False)
    ######################################
    #Maximum vertical value for the mapping: per-item colour range,
    #colorbar ticks and label
    if divitem == 'nev':
        vmax = 80
        cbticks = np.linspace(20, vmax, 4)
        clblabelstr = "$\mathrm{n_{e,v}\,[10^{20}m^{-3}]}$"
    elif divitem == 'te':
        vmax = 20 #Limit it to 20eV
        cbticks = np.linspace(vmax/4, vmax, 4)
        clblabelstr = r"$\mathrm{t_{e}\,[eV]}$"
    elif divitem == 'net':
        vmax = np.min([24, np.floor(m.max())])
        cbticks = np.linspace(vmax/4, vmax, 4)
        clblabelstr = r"$\mathrm{n_{e,p}\,[10^{19}m^{-3}]}$"
    else: #Default to jsat
        cbticks = np.linspace(0, vmax, vmaxdiv)
        clblabelstr = r"$\mathrm{\Gamma_{D^{+}}\,[10^{22}m^{-2}s^{-1}]}$"
    # BUGFIX: 'goraud' is not a valid matplotlib shading option — 'gouraud'
    sc2 = ax2.pcolormesh(t*1e3, s, m, shading='gouraud', vmax=vmax, vmin=0.0, cmap=cmap)
    ###Colorbar
    axins = inset_axes(ax2,
                       width="5%",  # width = 10% of parent_bbox width
                       height="100%",  # height : 50%
                       loc=6,
                       bbox_to_anchor=(1.05, 0., 1, 1),
                       bbox_transform=ax2.transAxes,
                       borderpad=0,
                       )
    # BUGFIX: use the per-item ticks/label computed above; previously they
    # were dead code and the colorbar always showed the jsat label with
    # hard-coded ticks (identical to cbticks only for the jsat default).
    cbar = plt.colorbar(sc2, cax=axins, ticks=cbticks)
    cbar.set_label(clblabelstr)
    ax2.set_ylim([graphsets['minydiv'], graphsets['maxydiv']])
    ax2.set_xlim(-preft*1e3, suft*1e3)
    ax2.set_yticks((np.arange(minydiv, maxydiv+1.0, divtickfreq)))
    ax2.set_ylabel(r"$\mathrm{\Delta S\,[cm]}$")
    ax2.hlines(0, mintime, maxtime, color='white')
    ax2.set_xlim([-preft*1e3, suft*1e3])
    ######################################
    # Panel (c): absolute divertor current in kA
    ax3.set_ylabel(r'$\mathrm{|I_{div}|\,[kA]}$')
    ax3.scatter(tidiv*1e3, np.abs(idiv*1e-3), color=idivclr, edgecolors=idivclr, s=0.7)
    ax3.set_ylim(0,25)
    ax3.set_yticks([0, 10, 20])
    ax3.set_xlabel(r"$\mathrm{t-t_{ELM}\,[ms]}$")
    # Coarser time ticks for windows longer than 5 ms
    if (suft+preft)*1e3>5:
        divxticks=2.0
    else:
        divxticks=1.0
    ax3.set_xticks(np.arange(mintime, maxtime+1, divxticks))
    ax3.set_xlim([-preft*1e3, suft*1e3])
    ######################################
    plt.subplots_adjust(left=0.18, right=0.97, bottom=0.13, top=0.93, wspace=0.10, hspace=0.05)
    #Output file: ./Revised/<shot>/ssidiv_<shot>_<ti>_<tf>_<sides>.png
    elmdir = "./Revised/"
    if not os.path.exists(elmdir):
        os.makedirs(elmdir)
    shotdir = elmdir + str(shotnr) + '/'
    if not os.path.exists(shotdir):
        os.makedirs(shotdir)
    filename = shotdir + "ssidiv_" + str(shotnr) + "_" + str(ti) + "_" + str(tf) + "_" + refside + divside + ".png"
    plt.savefig(filename, format="png", dpi=300)
    # print() call form works identically on Python 2 and 3 for one argument
    print("Wrote: " + filename)
    plt.show()
    return fth, frh, fne
# ## #30554 Phase I
# Time window and ELM-sync parameters for the first analysis phase.
shotnr=30554
ti=1.8
tf=2.8
preft=0.002
suft=0.008
divitem = 'jsat'
elm_exper = "guimas"
# mintime/maxtime are module-level globals consumed inside
# three_starksync_paper — they must be set before the function is called.
mintime = -preft*1e3
maxtime = suft*1e3
#three_starksync_paper(shotnr, ti, tf, preft=preft, suft=suft, refside='in', divside='in', elm_exper='guimas')
t,r,n = three_starksync_paper(shotnr, ti, tf, preft=preft, suft=suft, refside='out', divside='out',
                              elm_exper='guimas',vmax=23, mindens=2.0, maxdens=2.6)
sc = plt.scatter(t,r,s=6,c=n)
plt.colorbar()
plt.show()
# ### Shot #30554, Phase II
# NOTE(review): `three_starksync` (without the `_paper` suffix) is NOT
# defined in this notebook — presumably it lives in another cell/module.
# These calls will raise NameError as-is; verify before running.
shotnr = 30554
ti = 3.0
tf = 3.5
preft = 0.002
suft = 0.008
three_starksync(shotnr, ti, tf, preft=preft, suft=suft, refside='in', divside='in', elm_exper='guimas')
three_starksync(shotnr, ti, tf, preft=preft, suft=suft, refside='out', divside='out', elm_exper='guimas')
# ### Shot #30554, Phase III, N seeding
shotnr = 30554
ti = 4.0
tf = 4.5
preft = 0.001
suft = 0.003
three_starksync(shotnr, ti, tf, preft=preft, suft=suft, refside='in', divside='in', elm_exper='guimas')
three_starksync(shotnr, ti, tf, preft=preft, suft=suft, refside='out', divside='out', elm_exper='guimas')
# ## Shot #32234, Ne seeding experiment
# ### Neon seeding 32234, no seeding
shotnr = 32234
preft = 0.002
suft = 0.006
ti = 2.2
tf = 2.5
three_starksync(shotnr, ti, tf, preft=preft, suft=suft, refside='in', divside='in',
                minydiv=-3, maxydiv=17, divtickfreq=3, vmax=30)
three_starksync(shotnr, ti, tf, preft=preft, suft=suft, refside='out', divside='out',
                minydiv=-3, maxydiv=17, divtickfreq=3, vmax=30)
# ### Neon seeding 32234, with seeding
shotnr = 32234
preft = 0.002
suft = 0.006
ti = 3.3
tf = 3.72
three_starksync(shotnr, ti, tf, preft=preft, suft=suft, refside='in', divside='in',
                minydiv=-3, maxydiv=17, divtickfreq=3, vmax=30)
three_starksync(shotnr, ti, tf, preft=preft, suft=suft, refside='out', divside='out',
                minydiv=-3, maxydiv=17, divtickfreq=3, vmax=30)
# Compare ELM frequencies across three shots
a,b = getsig(32238, 'ELM', 'f_ELM')
plt.plot(a,b)
a,b = getsig(30554, 'ELM', 'f_ELM')
plt.plot(a,b)
a,b = getsig(30733, 'ELM', 'f_ELM')
plt.plot(a,b)
plt.show()
# +
shotnr = 34462
ti = 3.4
tf = 3.6
preft = 0.002
suft = 0.004
three_starksync(shotnr, ti, tf, preft=preft, suft=suft, refside='in', divside='in')
#three_starksync(shotnr, ti, tf, preft=preft, suft=suft, refside='out', divside='out', elm_exper='guimas')
# -
three_starksync(shotnr, ti, tf, preft=preft, suft=suft, refside='out', divside='out')
# Read the ELM end-time signal for the same window directly from the shotfile
ELM = dd.shotfile("ELM", 34462, experiment='AUGD')
elmd = ELM("t_endELM", tBegin=ti, tEnd=tf)
ELM.close()
|
ELM_cycles.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Installing necessary libraries with pip
# !pip install plotly --user
# !pip install cufflinks --user
# +
# Necessary imports
import os
import sys
import numpy as np
import pandas
import matplotlib.pyplot as plt
# %matplotlib inline
import plotly.plotly as py
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import cufflinks as cf
import plotly.graph_objs as go
import plotly.figure_factory as ff
# Enable offline plotly rendering inside the notebook
init_notebook_mode(connected=True)
# Make the user's home directory importable. The original wrapped the path
# in `"".join([...])`, which is a no-op for a single-element list.
sys.path.append(os.environ["HOME"])
# -
from sklearn.datasets import load_iris
# Load the classic 150-sample iris dataset (4 features, 3 classes)
iris_data = load_iris()
iris_data.feature_names
# #### First 2 dimensions
# +
# Sepal length vs sepal width scatter
x = [v[0] for v in iris_data.data]
y = [v[1] for v in iris_data.data]
# Create a trace
trace = go.Scatter(
    x = x,
    y = y,
    mode = 'markers'
)
layout= go.Layout(
    title= 'Iris Dataset',
    hovermode= 'closest',
    xaxis= dict(
        title= 'sepal length (cm)',
        ticklen= 5,
        zeroline= False,
        gridwidth= 2,
    ),
    yaxis=dict(
        title= 'sepal width (cm)',
        ticklen= 5,
        gridwidth= 2,
    ),
    showlegend= False
)
data = [trace]
fig= go.Figure(data=data, layout=layout)
plot(fig)
# -
# #### Scatter plot matrix
# +
import pandas as pd
# Wrap the feature matrix in a DataFrame and attach the class labels
df = pd.DataFrame(iris_data.data,
                  columns=['sepal length (cm)',
                           'sepal width (cm)',
                           'petal length (cm)',
                           'petal width (cm)'])
df['class'] = [iris_data.target_names[i] for i in iris_data.target]
# -
df.head()
fig = ff.create_scatterplotmatrix(df, index='class', diag='histogram', size=10, height=800, width=800)
plot(fig)
# #### 3D Chart
# +
# Creating data for the plotly
# One Scatter3d trace per iris class (target 0/1/2), plotting features
# 0 (sepal length), 2 (petal length) and 3 (petal width).
trace1 = go.Scatter3d(
    # Extracting data based on label
    x=[x[0][0] for x in zip(iris_data.data, iris_data.target) if x[1] == 0],
    y=[x[0][2] for x in zip(iris_data.data, iris_data.target) if x[1] == 0],
    z=[x[0][3] for x in zip(iris_data.data, iris_data.target) if x[1] == 0],
    mode='markers',
    marker=dict(
        size=12,
        line=dict(
            color='rgba(217, 217, 217, 0.14)',
            width=0.5
        ),
        opacity=0.8
    )
)
# NOTE(review): 'rgb(#3742fa)' mixes rgb() syntax with a hex code and is not
# a valid CSS colour — plotly may reject or ignore it; use '#3742fa'.
trace2 = go.Scatter3d(
    # Extracting data based on label
    x=[x[0][0] for x in zip(iris_data.data, iris_data.target) if x[1] == 1],
    y=[x[0][2] for x in zip(iris_data.data, iris_data.target) if x[1] == 1],
    z=[x[0][3] for x in zip(iris_data.data, iris_data.target) if x[1] == 1],
    mode='markers',
    marker=dict(
        color='rgb(#3742fa)',
        size=12,
        symbol='circle',
        line=dict(
            color='rgb(204, 204, 204)',
            width=1
        ),
        opacity=0.9
    )
)
trace3 = go.Scatter3d(
    # Extracting data based on label
    x=[x[0][0] for x in zip(iris_data.data, iris_data.target) if x[1] == 2],
    y=[x[0][2] for x in zip(iris_data.data, iris_data.target) if x[1] == 2],
    z=[x[0][3] for x in zip(iris_data.data, iris_data.target) if x[1] == 2],
    mode='markers',
    marker=dict(
        color='rgb(#ff4757)',
        size=12,
        symbol='circle',
        line=dict(
            color='rgb(104, 74, 114)',
            width=1
        ),
        opacity=0.9
    )
)
data = [trace1, trace2, trace3]
# Layout settings
layout = go.Layout(
    scene = dict(
        xaxis = dict(
            title= 'sepal length (cm)'),
        yaxis = dict(
            title= 'petal length (cm)'),
        zaxis = dict(
            title= 'petal width (cm)'),),
)
fig = go.Figure(data=data, layout=layout)
plot(fig)
# #### Hyperparameters
from sklearn.cluster import KMeans
# +
# KMeans?
# -
from sklearn.datasets.samples_generator import make_blobs
X, y = make_blobs(n_samples=500, centers=3, n_features=3, cluster_std=[2.1, 1.5, 3.7], random_state=442)
k_means = KMeans(n_clusters=3)
y_pred = k_means.fit_predict(X)
print(y_pred)
k_means = KMeans(n_clusters=3, init='random', n_init=1, max_iter=1)
y_pred = k_means.fit_predict(X)
print(y_pred)
# #### Implement algorithm for one variable
X = np.array([1,2,3,2,1,3,9,8,11,12,10,11,14,25,26,24,30,22,24,27])
# +
# Plot the 1-D points on a horizontal line (y = 0)
trace1 = go.Scatter(
    x=X,
    y=[0 for num in X],
    mode='markers',
    name='Data',
    marker=dict(
        size=12
    )
)
layout = go.Layout(
    title='1D vector',
)
traces = [trace1]
fig = go.Figure(data=traces, layout=layout)
plot(fig)
# -
# #### KMeans for one variable
import numpy as np
X = np.array([1,2,3,2,1,3,9,8,11,12,10,11,14,25,26,24,30,22,24,27])
n_clusters = 3
# Manual walk-through of one k-means step: pick 3 random values as centers
# (unseeded np.random.choice — results vary run to run, and duplicates are
# possible since sampling is with replacement)
c_centers = np.random.choice(X, n_clusters)
print(c_centers)
# |point - center| distance matrix, shape (20, 3)
deltas = np.array([np.abs(point - c_centers) for point in X])
deltas
deltas.argmin(1)
# Update step: new center = mean of the points assigned to it
c_centers = np.array([X[np.where(deltas.argmin(1) == i)[0]].mean() for i in range(3)])
print(c_centers)
def Kmeans_1D(X, n_clusters, random_seed=442):
    """1-D k-means (Lloyd's algorithm) with seeded initialisation.

    Centers start from a seeded random permutation of X; assignment and
    update steps alternate until the centers are exactly unchanged.
    Returns (centers, labels).
    """
    rng = np.random.RandomState(random_seed)
    # Initial centers: n_clusters points drawn without replacement
    start_idx = rng.permutation(X.shape[0])[:n_clusters]
    centers = X[start_idx]
    # Absolute distance of every point to every center, shape (len(X), k)
    dist = np.array([np.abs(point - centers) for point in X])
    labels = dist.argmin(1)
    while True:
        # Update step: each center becomes the mean of its assigned points
        updated = np.array([X[np.where(dist.argmin(1) == k)[0]].mean()
                            for k in range(n_clusters)])
        dist = np.array([np.abs(point - updated) for point in X])
        labels = dist.argmin(1)
        # Converged once the update changed nothing
        if np.all(centers == updated):
            break
        centers = updated
    return centers, labels
c_centers, labels = Kmeans_1D(X, 3)
print(c_centers, labels)
# +
# Plot the data and the converged cluster centers on the same line
trace1 = go.Scatter(
    x=X,
    y=[0 for num in X],
    mode='markers',
    name='Data',
    marker=dict(
        size=12
    )
)
# NOTE(review): rgb components must be 0-255; 296 is out of range and may
# be rejected by plotly.
trace2 = go.Scatter(
    x = c_centers,
    y = [0 for num in X],
    mode='markers',
    name = 'Cluster centers',
    marker = dict(
        size=12,
        color = ('rgb(122, 296, 167)'))
)
layout = go.Layout(
    title='1D vector',
)
traces = [trace1, trace2]
fig = go.Figure(data=traces, layout=layout)
plot(fig)
# -
# -
# ### KMeans with multiple variables
# ### Reading dataset as numpy array
from numpy import genfromtxt
wholesales_data = genfromtxt('Wholesale customers data.csv', delimiter=',', skip_header=1)
print(wholesales_data[:5])
wholesales_data.shape
wholesales_data_norm = wholesales_data / np.linalg.norm(wholesales_data)
print(wholesales_data_norm[:5])
import pandas as pd
df = pd.DataFrame(wholesales_data_norm,
columns=['Channel',
'Region',
'Fresh',
'Milk',
'Grocery',
'Frozen',
'Detergents_Paper',
'Delicassen'])
df.head(10)
fig = ff.create_scatterplotmatrix(df, diag='histogram', size=7, height=1200, width=1200)
plot(fig)
df.corr()
import seaborn as sns; sns.set()
ax = sns.heatmap(df.corr(), annot=True)
# +
# Creating data for the plotly
trace1 = go.Scatter3d(
# Extracting data based on label
x=df['Grocery'],
y=df['Detergents_Paper'],
z=df['Milk'],
mode='markers',
marker=dict(
size=12,
line=dict(
color='rgba(217, 217, 217, 0.14)',
width=0.5
),
opacity=0.8
)
)
# Layout settings
layout = go.Layout(
scene = dict(
xaxis = dict(
title= 'Grocery'),
yaxis = dict(
title= 'Detergents_Paper'),
zaxis = dict(
title= 'Milk'),),
)
data = [trace1]
fig = dict(data=data, layout=layout)
plot(fig)
# -
df = df[[col for col in df.columns if col not in ['Channel', 'Region']]]
df.head(10)
df.values
def Kmeans_nD(X, n_clusters, random_seed=442):
    """n-dimensional k-means (Lloyd's algorithm) with seeded initialisation.

    Centers start from a seeded random permutation of the rows of X;
    assignment and update steps alternate until the centers are exactly
    unchanged.  Returns (centers, labels).
    """
    rng = np.random.RandomState(random_seed)
    # Initial centers: n_clusters rows drawn without replacement
    start_rows = rng.permutation(X.shape[0])[:n_clusters]
    centers = X[start_rows]
    # Euclidean distance from every point to every center, shape (len(X), k)
    dist = np.array([[np.linalg.norm(row - c) for c in centers] for row in X])
    labels = dist.argmin(1)
    while True:
        # Update step: each center becomes the mean of its assigned points
        updated = np.array([X[np.where(dist.argmin(1) == k)[0]].mean(axis=0)
                            for k in range(n_clusters)])
        dist = np.array([[np.linalg.norm(row - c) for c in updated] for row in X])
        labels = dist.argmin(1)
        # Converged once the update step is a no-op
        if np.array_equal(centers, updated):
            break
        centers = updated
    return centers, labels
X = df.values
# Manual walk-through of one n-D k-means iteration (mirrors Kmeans_nD)
random_seed = 442
n_clusters = 3
rng = np.random.RandomState(random_seed)
i = rng.permutation(X.shape[0])[:n_clusters]
c_centers = X[i]
c_centers
# Euclidean point-to-center distance matrix
deltas = np.array([[np.linalg.norm(i - c) for c in c_centers] for i in X])
deltas
labels = deltas.argmin(1)
labels
new_c_centers = np.array([X[np.where(deltas.argmin(1) == i)[0]].mean(axis=0) for i in range(n_clusters)])
new_c_centers
# Cluster on the two most correlated features only
centers, labels = Kmeans_nD(df[['Grocery', 'Detergents_Paper']].values, 3)
labels
df['labels'] = labels
# +
# Creating data for the plotly
# One 2-D trace per cluster label.
# NOTE(review): 'rgb(#3742fa)' / 'rgb(#ff4757)' mix rgb() syntax with hex
# codes and are not valid CSS colours.
trace1 = go.Scatter(
    # Extracting data based on label
    x=df[df['labels'] == 0]['Grocery'],
    y=df[df['labels'] == 0]['Detergents_Paper'],
    mode='markers',
    name='clust_1',
    marker=dict(
        size=12,
        line=dict(
            color='rgba(217, 217, 217, 0.14)',
            width=0.5
        ),
        opacity=0.8
    )
)
trace2 = go.Scatter(
    # Extracting data based on label
    x=df[df['labels'] == 1]['Grocery'],
    y=df[df['labels'] == 1]['Detergents_Paper'],
    mode='markers',
    name='clust_2',
    marker=dict(
        color='rgb(#3742fa)',
        size=12,
        symbol='circle',
        line=dict(
            color='rgb(204, 204, 204)',
            width=1
        ),
        opacity=0.9
    )
)
trace3 = go.Scatter(
    # Extracting data based on label
    x=df[df['labels'] == 2]['Grocery'],
    y=df[df['labels'] == 2]['Detergents_Paper'],
    mode='markers',
    name='clust_3',
    marker=dict(
        color='rgb(#ff4757)',
        size=12,
        symbol='circle',
        line=dict(
            color='rgb(104, 74, 114)',
            width=1
        ),
        opacity=0.9
    )
)
data = [trace1, trace2, trace3]
# Layout settings
layout = go.Layout(
    scene = dict(
        xaxis = dict(
            title= 'Grocery'),
        yaxis = dict(
            title= 'Detergents_Paper'),
    )
)
fig = go.Figure(data=data, layout=layout)
plot(fig)
# -
# Per-cluster feature means
df.groupby('labels').mean()
# Euclidean distance sanity check with scipy
from scipy.spatial import distance
a = (1,2,3)
b = (4,5,6)
dst = distance.euclidean(a,b)
dst
# +
# distance.euclidean?
# -
|
Ch05/Chapter05.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # `gollum` Quickstart
# %config InlineBackend.figure_format='retina'
from gollum.phoenix import PHOENIXSpectrum
# Simply provide the $T_{\text{eff}}$ and $\log{g}$ values you desire:
# Fetch/load a PHOENIX model spectrum at Teff = 5000 K, log g = 4
spec = PHOENIXSpectrum(teff=5000, logg=4)
# Normalize the spectrum by the median:
normalized_spectrum = spec.normalize()
# The spectrum has wavelength, with units:
normalized_spectrum.wavelength
# The flux vector is the same length as wavelength:
normalized_spectrum.flux.dtype, normalized_spectrum.shape
ax = normalized_spectrum.plot()
ax.set_ylim(0, 2)
# Right now we truncate the spectrum to the near-IR by default, you can change that with keyword arguments from the beginning:
# Restrict the wavelength window (Angstroms) at load time
spec = PHOENIXSpectrum(teff=5000, logg=4, wl_lo=10_800, wl_hi=10_860)
ax = spec.normalize().plot()
ax.set_ylim(0)
# Neat! `gollum` is still under active development. Help us develop the tool by engaging with us on our [GitHub Issues page](https://github.com/BrownDwarf/gollum/issues). You can suggest a feature, or help us brainstorm how to build this project more. Thanks!
|
docs/quickstart.ipynb
|
# ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Welcome to fastai
# > fastai simplifies training fast and accurate neural nets using modern best practices
#
# - image: /images/layered.png
# **Important**: This documentation covers fastai v2, which is a from-scratch rewrite of fastai. The v1 documentation has moved to [fastai1.fast.ai](https://fastai1.fast.ai). To stop fastai from updating to v2, run in your terminal `echo 'fastai 1.*' >> $CONDA_PREFIX/conda-meta/pinned` (if you use conda).
# [ [](https://pypi.org/project/fastai/#description) [](https://anaconda.org/fastai/fastai) [](https://github.com/fastai/docker-containers) 
# ## Installing
# You can use fastai without any installation by using [Google Colab](https://colab.research.google.com/). In fact, every page of this documentation is also available as an interactive notebook - click "Open in colab" at the top of any page to open it (be sure to change the Colab runtime to "GPU" to have it run fast!) See the fast.ai course [Introduction to Colab](https://colab.research.google.com/) for more information.
#
# You can install fastai on your own machines with conda (highly recommended). If you're using [Anaconda](https://www.anaconda.com/products/individual) then run:
# ```bash
# conda install -c fastai -c pytorch -c anaconda fastai gh anaconda
# ```
#
# ...or if you're using [miniconda](https://docs.conda.io/en/latest/miniconda.html) then run:
# ```bash
# conda install -c fastai -c pytorch fastai
# ```
#
# To install with pip, use: `pip install fastai`. If you install with pip, you should install PyTorch first by following the PyTorch [installation instructions](https://pytorch.org/get-started/locally/).
#
# If you plan to develop fastai yourself, or want to be on the cutting edge, you can use an editable install (if you do this, you should also use an editable install of [fastcore](https://github.com/fastai/fastcore) to go with it.):
#
# ```
# git clone https://github.com/fastai/fastai
# pip install -e "fastai[dev]"
# ```
# ## Learning fastai
# The best way to get start with fastai (and deep learning) is to read [the book](https://www.amazon.com/Deep-Learning-Coders-fastai-PyTorch/dp/1492045527), and complete [the free course](https://course.fast.ai).
#
# To see what's possible with fastai, take a look at the [Quick Start](https://docs.fast.ai/quick_start.html), which shows how to use around 5 lines of code to build an image classifier, an image segmentation model, a text sentiment model, a recommendation system, and a tabular model. For each of the applications, the code is much the same.
#
# Read through the [Tutorials](https://docs.fast.ai/tutorial) to learn how to train your own models on your own datasets. Use the navigation sidebar to look through the fastai documentation. Every class, function, and method is documented here.
#
# To learn about the design and motivation of the library, read the [peer reviewed paper](https://www.mdpi.com/2078-2489/11/2/108/htm).
# ## About fastai
# fastai is a deep learning library which provides practitioners with high-level components that can quickly and easily provide state-of-the-art results in standard deep learning domains, and provides researchers with low-level components that can be mixed and matched to build new approaches. It aims to do both things without substantial compromises in ease of use, flexibility, or performance. This is possible thanks to a carefully layered architecture, which expresses common underlying patterns of many deep learning and data processing techniques in terms of decoupled abstractions. These abstractions can be expressed concisely and clearly by leveraging the dynamism of the underlying Python language and the flexibility of the PyTorch library. fastai includes:
#
# - A new type dispatch system for Python along with a semantic type hierarchy for tensors
# - A GPU-optimized computer vision library which can be extended in pure Python
# - An optimizer which refactors out the common functionality of modern optimizers into two basic pieces, allowing optimization algorithms to be implemented in 4–5 lines of code
# - A novel 2-way callback system that can access any part of the data, model, or optimizer and change it at any point during training
# - A new data block API
# - And much more...
#
# fastai is organized around two main design goals: to be approachable and rapidly productive, while also being deeply hackable and configurable. It is built on top of a hierarchy of lower-level APIs which provide composable building blocks. This way, a user wanting to rewrite part of the high-level API or add particular behavior to suit their needs does not have to learn how to use the lowest level.
# <img alt="Layered API" src="images/layered.png" width="345">
# ## Migrating from other libraries
# It's very easy to migrate from plain PyTorch, Ignite, or any other PyTorch-based library, or even to use fastai in conjunction with other libraries. Generally, you'll be able to use all your existing data processing code, but will be able to reduce the amount of code you require for training, and more easily take advantage of modern best practices. Here are migration guides from some popular libraries to help you on your way:
#
# - [Plain PyTorch](https://docs.fast.ai/migrating_pytorch)
# - [Ignite](https://docs.fast.ai/migrating_ignite)
# - [Lightning](https://docs.fast.ai/migrating_lightning)
# - [Catalyst](https://docs.fast.ai/migrating_catalyst)
# ## Tests
# To run the tests in parallel, launch:
#
# `nbdev_test_nbs` or `make test`
#
# For all the tests to pass, you'll need to install the following optional dependencies:
#
# ```
# pip install "sentencepiece<0.1.90" wandb tensorboard albumentations pydicom opencv-python scikit-image pyarrow kornia \
# catalyst captum neptune-cli
# ```
#
# Tests are written using `nbdev`, for example see the documentation for `test_eq`.
# ## Contributing
# After you clone this repository, please run `nbdev_install_git_hooks` in your terminal. This sets up git hooks, which clean up the notebooks to remove the extraneous stuff stored in the notebooks (e.g. which cells you ran) which causes unnecessary merge conflicts.
#
# Before submitting a PR, check that the local library and notebooks match. The script `nbdev_diff_nbs` can let you know if there is a difference between the local library and the notebooks.
#
# - If you made a change to the notebooks in one of the exported cells, you can export it to the library with `nbdev_build_lib` or `make fastai`.
# - If you made a change to the library, you can export it back to the notebooks with `nbdev_update_lib`.
# ## Docker Containers
# For those interested in official docker containers for this project, they can be found [here](https://github.com/fastai/docker-containers#fastai).
|
nbs/index.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sea surface temperature
#
# This is an example of how to convert publicly available raster data to dfs2.
#
# Data are provided in gzipped netcdf.
#
import gzip
import shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import xarray
import urllib.request
# ## Download file
# +
# Daily DMI L4 SST analysis for the North Sea / Baltic, gzipped netCDF
url = "https://data.nodc.noaa.gov/ghrsst/L4/NSEABALTIC/DMI/DMI_OI/2015/001/20150101-DMI-L4UHfnd-NSEABALTIC-v01-fv01-DMI_OI.nc.gz"
filename = "20150101-DMI-L4UHfnd-NSEABALTIC-v01-fv01-DMI_OI.nc"
gzfilename = filename + ".gz"
urllib.request.urlretrieve (url, gzfilename)
# -
# ## Unzip file
with gzip.open(gzfilename, 'rb') as f_in:
    with open(filename, 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)
ds = xarray.open_dataset(filename)
ds
ds.analysed_sst[0].plot()
# Extract the full analysed SST array (time, y, x) as a numpy array
sst = ds.analysed_sst[:,:,:].values
sst.shape
# The input dataset dimensions are ordered (time, lon, lat).
#
# This is the expected order
#
# But the image has to be flipped upside down.
# +
import numpy as np
# NOTE(review): only the first time step is flipped — presumably this file
# contains a single time step; verify before processing multi-step files.
sst[0] = np.flipud(sst[0])
# -
# ## Coordinates
# +
lat = ds.lat.values
lon = ds.lon.values
nx = len(lon)
ny = len(lat)
x0 = lon[0]
y0 = lat[0]
# NOTE(review): grid spacing from the coordinate span divided by the number
# of points — for cell-centre coordinates the spacing is usually
# span / (n - 1); confirm the intended convention for dfs2.
dx = (lon[-1] - lon[0]) / nx
dy = (lat[-1] - lat[0]) / ny
x0, y0, nx, ny, dx, dy
# -
# ## Time
print(ds.time.values[0])
from datetime import datetime
start_time = datetime(2015,1,1)
# ## Variable types
from mikeio.eum import EUMType, EUMUnit
EUMType.Temperature
EUMType.Temperature.units
# ## Create dfs2
# +
from mikeio import Dfs2
from mikeio.eum import ItemInfo
# One item, temperature in Kelvin, on a LONG/LAT grid
d = []
d.append(sst)
dfsfilename = filename.replace(".nc", ".dfs2")
coordinate = ['LONG/LAT', x0, y0, 0]
dfs = Dfs2()
dfs.create(filename=dfsfilename,
           data=d,
           start_time = start_time,
           coordinate=coordinate, length_x=dx, length_y=dy,
           items=[ItemInfo("Sea surface temperature", EUMType.Temperature, EUMUnit.degree_Kelvin)]
          )
# -
# # Dfs2
#
# ![Dfs2 in MIKE Zero](../images/sst.png)
# # Clean up
import os
ds.close()
os.remove(gzfilename)
os.remove(filename)
os.remove(dfsfilename)
|
notebooks/Dfs2 - Sea surface temperature.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Naive Bayes classifier
# +
import re
from os.path import join
from glob import glob
from random import shuffle, seed
from statistics import mode
# import regex
from tqdm import tqdm_notebook
import nltk
from nltk.probability import FreqDist, ConditionalFreqDist
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer, PorterStemmer
from nltk.classify import ClassifierI
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.naive_bayes import BernoulliNB, GaussianNB, MultinomialNB
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
# +
# Stemmer shared by the whole notebook (Snowball; Porter kept as alternative)
STEMMER = SnowballStemmer('english')
# STEMMER = PorterStemmer()
# NOTE(review): SEED is defined (and `seed` imported) but shuffle() is never
# seeded below, so train/test splits are not reproducible — verify intent.
SEED = 9745
TRAIN_FRACTION = 0.6
# -
# Extract features from the document.
def tokenize(file_name):
    """Read a text file and return its cleaned, stemmed tokens.

    Steps: lowercase the raw text, word-tokenize it, drop English stop
    words, stem the survivors, and keep only tokens of 3+ characters
    that start with a letter (no digits, underscores or punctuation).
    """
    # Close the file promptly instead of leaking the handle.
    with open(file_name) as handle:
        text = handle.read().lower()
    words = nltk.word_tokenize(text)
    # Build the stop-word set once: O(1) membership tests instead of
    # scanning the stop-word list for every single token.
    stop_words = set(stopwords.words('english'))
    words = [STEMMER.stem(w) for w in words if w not in stop_words]
    # A word must have 3 or more characters and start with a letter
    words = [w for w in words if len(w) >= 3 and re.match(r'[^\W\d\_]', w)]
    return words
def build_corpus(locations):
    """Tokenize every ``*.txt`` file under each location.

    ``locations`` is an iterable of (directory, category) pairs; returns a
    list of (token_list, category) tuples, one per file.
    """
    documents = []
    for directory, label in locations:
        matched = glob(join(directory, '*.txt'))
        documents.extend(
            (tokenize(path), label)
            for path in tqdm_notebook(matched, desc=label)
        )
    return documents
def build_frequency_dist(corpus):
    """Count token frequencies across all documents in *corpus*.

    *corpus* is an iterable of (token_list, category) pairs; the category
    of each pair is ignored here.
    """
    every_token = (token for tokens, _ in corpus for token in tokens)
    return FreqDist(every_token)
def document_features(features, document):
    """Map each candidate feature word to whether it occurs in *document*.

    ``document`` is a (token_list, category) pair; only the token list is
    inspected.  Returns ``{feature: bool}`` for every word in *features*.
    """
    present = set(document[0])
    result = {}
    for feature in features:
        result[feature] = feature in present
    return result
# +
# Build the labelled corpus from the two document folders and shuffle it
# so the train/test split below is not ordered by class.
corpus = build_corpus([('data/Rel-Yes', 'Rel-Yes'),
                       ('data/Rel-No', 'Rel-No')])
shuffle(corpus)
all_words = build_frequency_dist(corpus)
# -
# NOTE(review): FreqDist.keys() is not ordered by frequency in NLTK 3, so
# this takes the first 3000 words in insertion order, not the 3000 most
# common — most_common(3000) may be what was intended; confirm.
word_features = list(all_words.keys())[:3000]
# +
# One boolean feature dict per document, paired with its label.
feature_sets = [(document_features(word_features, d), d[1]) for d in corpus]
train_test_split = int(len(feature_sets) * TRAIN_FRACTION)
train_set = feature_sets[:train_test_split]
test_set = feature_sets[train_test_split:]
# -
# Baseline: NLTK's own Naive Bayes implementation.
nltk_classifier = nltk.NaiveBayesClassifier.train(train_set)
accuracy = nltk.classify.accuracy(nltk_classifier, test_set)
print(f'NLTK Accuracy: {accuracy:0.2f}')
nltk_classifier.show_most_informative_features(10)
# Compare several scikit-learn classifiers on the same feature sets,
# wrapped in NLTK's SklearnClassifier so accuracy is measured identically.
# +
mnb_classifier = SklearnClassifier(MultinomialNB())
mnb_classifier.train(train_set)
accuracy = nltk.classify.accuracy(mnb_classifier, test_set)
print(f'Sklearn MultinomialNB Accuracy: {accuracy:0.2f}')
# +
# GaussianNB is left disabled: it requires dense input, which this
# feature representation does not provide efficiently.
# gnb_classifier = SklearnClassifier(GaussianNB())
# gnb_classifier.train(train_set)
# accuracy = nltk.classify.accuracy(gnb_classifier, test_set)
# print(f'Sklearn GaussianNB Accuracy: {accuracy:0.2f}')
# +
bnb_classifier = SklearnClassifier(BernoulliNB())
bnb_classifier.train(train_set)
accuracy = nltk.classify.accuracy(bnb_classifier, test_set)
print(f'Sklearn BernoulliNB Accuracy: {accuracy:0.2f}')
# +
lr_classifier = SklearnClassifier(LogisticRegression())
lr_classifier.train(train_set)
accuracy = nltk.classify.accuracy(lr_classifier, test_set)
print(f'Sklearn LogisticRegression Accuracy: {accuracy:0.2f}')
# +
sgd_classifier = SklearnClassifier(SGDClassifier())
sgd_classifier.train(train_set)
accuracy = nltk.classify.accuracy(sgd_classifier, test_set)
print(f'Sklearn SGDClassifier Accuracy: {accuracy:0.2f}')
# +
svc_classifier = SklearnClassifier(SVC())
svc_classifier.train(train_set)
accuracy = nltk.classify.accuracy(svc_classifier, test_set)
print(f'Sklearn SVC Accuracy: {accuracy:0.2f}')
# +
lsvc_classifier = SklearnClassifier(LinearSVC())
lsvc_classifier.train(train_set)
accuracy = nltk.classify.accuracy(lsvc_classifier, test_set)
print(f'Sklearn LinearSVC Accuracy: {accuracy:0.2f}')
# +
nusvc_classifier = SklearnClassifier(NuSVC())
nusvc_classifier.train(train_set)
accuracy = nltk.classify.accuracy(nusvc_classifier, test_set)
print(f'Sklearn NuSVC Accuracy: {accuracy:0.2f}')
# -
|
notebooks/classifiers_02_simple_classifiers.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Use Keras and hyperparameter optimization (HPO) to recognize hand-written digits with `ibm-watson-machine-learning`
# This notebook contains steps and code to demonstrate support of Deep Learning experiments in the Watson Machine Learning service. It introduces commands for data retrieval, training definition persistence, experiment training, model persistence, model deployment and scoring.
#
# Some familiarity with Python is helpful. This notebook uses Python 3.8.
#
#
# ## Learning goals
#
# The learning goals of this notebook are:
#
# - Working with the Watson Machine Learning service.
# - Training Deep Learning models (TensorFlow).
# - Saving trained models in Watson Machine Learning repository.
# - Online deployment and scoring of the trained model.
#
# ## Contents
#
# This notebook contains the following parts:
#
# 1. [Setup](#setup)
# 2. [Create model definition](#model_def)
# 3. [Train model](#training)
# 4. [Persist trained model](#persist)
# 5. [Deploy and Score](#deploy)
# 6. [Clean up](#clean)
# 7. [Summary and next steps](#summary)
# <a id="setup"></a>
# ## 1. Set up the environment
#
# Before you use the sample code in this notebook, you must perform the following setup tasks:
#
# - Create a <a href="https://console.ng.bluemix.net/catalog/services/ibm-watson-machine-learning/" target="_blank" rel="noopener no referrer">Watson Machine Learning (WML) Service</a> instance (a free plan is offered and information about how to create the instance can be found <a href="https://dataplatform.cloud.ibm.com/docs/content/wsj/analyze-data/ml-service-instance.html?context=analytics" target="_blank" rel="noopener no referrer">here</a>).
# ### Connection to WML
#
# Authenticate the Watson Machine Learning service on IBM Cloud. You need to provide platform `api_key` and instance `location`.
#
# You can use [IBM Cloud CLI](https://cloud.ibm.com/docs/cli/index.html) to retrieve platform API Key and instance location.
#
# API Key can be generated in the following way:
# ```
# ibmcloud login
# ibmcloud iam api-key-create API_KEY_NAME
# ```
#
# In result, get the value of `api_key` from the output.
#
#
# Location of your WML instance can be retrieved in the following way:
# ```
# ibmcloud login --apikey API_KEY -a https://cloud.ibm.com
# ibmcloud resource service-instance WML_INSTANCE_NAME
# ```
#
# In result, get the value of `location` from the output.
# **Tip**: Your `Cloud API key` can be generated by going to the [**Users** section of the Cloud console](https://cloud.ibm.com/iam#/users). From that page, click your name, scroll down to the **API Keys** section, and click **Create an IBM Cloud API key**. Give your key a name and click **Create**, then copy the created key and paste it below. You can also get a service specific url by going to the [**Endpoint URLs** section of the Watson Machine Learning docs](https://cloud.ibm.com/apidocs/machine-learning). You can check your instance location in your <a href="https://console.ng.bluemix.net/catalog/services/ibm-watson-machine-learning/" target="_blank" rel="noopener no referrer">Watson Machine Learning (WML) Service</a> instance details.
#
# You can also get service specific apikey by going to the [**Service IDs** section of the Cloud Console](https://cloud.ibm.com/iam/serviceids). From that page, click **Create**, then copy the created key and paste it below.
#
# **Action**: Enter your `api_key` and `location` in the following cell.
# Placeholder credentials: replace both values before running the notebook.
api_key = 'PASTE YOUR PLATFORM API KEY HERE'
location = 'PASTE YOUR INSTANCE LOCATION HERE'
# WML endpoint is region-scoped, hence the location in the URL.
wml_credentials = {
    "apikey": api_key,
    "url": 'https://' + location + '.ml.cloud.ibm.com'
}
# ### Install and import the `ibm-watson-machine-learning` package
# **Note:** `ibm-watson-machine-learning` documentation can be found <a href="http://ibm-wml-api-pyclient.mybluemix.net/" target="_blank" rel="noopener no referrer">here</a>.
# !pip install ibm-watson-machine-learning
# +
from ibm_watson_machine_learning import APIClient
# Authenticated entry point used for every WML call below.
client = APIClient(wml_credentials)
# -
# ### Working with spaces
#
# First of all, you need to create a space that will be used for your work. If you do not have space already created, you can use [Deployment Spaces Dashboard](https://dataplatform.cloud.ibm.com/ml-runtime/spaces?context=cpdaas) to create one.
#
# - Click New Deployment Space
# - Create an empty space
# - Select Cloud Object Storage
# - Select Watson Machine Learning instance and press Create
# - Copy `space_id` and paste it below
#
# **Tip**: You can also use SDK to prepare the space for your work. More information can be found [here](https://github.com/IBM/watson-machine-learning-samples/blob/master/cloud/notebooks/python_sdk/instance-management/Space%20management.ipynb).
#
# **Action**: Assign space ID below
# Deployment space to work in: replace with the id copied from the dashboard.
space_id = 'PASTE YOUR SPACE ID HERE'
# You can use `list` method to print all existing spaces.
client.spaces.list(limit=10)
# To be able to interact with all resources available in Watson Machine Learning, you need to set **space** which you will be using.
client.set.default_space(space_id)
# ### 1.1 Working with Cloud Object Storage
# - Create a [Cloud Object Storage (COS)](https://console.bluemix.net/catalog/infrastructure/cloud-object-storage) instance (a lite plan is offered and information about how to order storage is [here](https://console.bluemix.net/docs/services/cloud-object-storage/basics/order-storage.html#order-storage)).
# - After you create COS instance, go to your COS dashboard.
# - In **Service credentials** tab, click **New Credential**.
# - Add the inline configuration parameter: {["HMAC"](https://console.bluemix.net/docs/services/cloud-object-storage/hmac/credentials.html#using-hmac-credentials):true}, click **Add**.
#
# This configuration parameter adds the following section to the instance credentials, (for use later in this notebook):
# ```
# "cos_hmac_keys": {
# "access_key_id": "***",
# "secret_access_key": "***"
# }
# ```
# The `ibm_boto3` library allows Python developers to manage Cloud Object Storage.
# **Note:** If `ibm_boto3` is not preinstalled in your environment, please install it by running the following command: `!pip install ibm-cos-sdk`
import ibm_boto3
from ibm_botocore.client import Config
import os
import json
import warnings
warnings.filterwarnings('ignore')
# **Action**: Enter your COS credentials in the following cell.
# You can find these credentials in your COS instance dashboard under the **Service credentials** tab.
# **Note** the HMAC key, described in [set up the environment](#setup) is included in these credentials.
# +
# Placeholder COS service credentials (copy from the COS dashboard);
# the cos_hmac_keys section must be present — see the setup notes above.
cos_credentials = {
    "apikey": "***",
    "cos_hmac_keys": {
        "access_key_id": "***",
        "secret_access_key": "***"
    },
    "endpoints": "***",
    "iam_apikey_description": "***",
    "iam_apikey_name": "***",
    "iam_role_crn": "***",
    "iam_serviceid_crn": "***",
    "resource_instance_id": "***"
}
# NOTE(review): this rebinds api_key, shadowing the platform API key
# defined earlier in the notebook — confirm that is intended.
api_key = cos_credentials['apikey']
service_instance_id = cos_credentials['resource_instance_id']
# IAM token endpoint and the regional S3 endpoint for COS.
auth_endpoint = 'https://iam.cloud.ibm.com/identity/token'
service_endpoint = 'https://s3.us-south.cloud-object-storage.appdomain.cloud'
# -
# Create the Boto resource by providing type, endpoint_url and credentials.
# Boto-style resource handle for COS, authenticated via IAM OAuth.
cos = ibm_boto3.resource('s3',
                         ibm_api_key_id=api_key,
                         ibm_service_instance_id=service_instance_id,
                         ibm_auth_endpoint=auth_endpoint,
                         config=Config(signature_version='oauth'),
                         endpoint_url=service_endpoint)
# Create the buckets that you will use to store training data and training results.
# **Note:** Bucket name has to be unique - please update following ones to any unique name.
buckets = ['tf-keras-data-example', 'tf-keras-results-example']
for bucket in buckets:
    # Only create buckets that do not already exist in this instance.
    if not cos.Bucket(bucket) in cos.buckets.all():
        print('Creating bucket "{}"...'.format(bucket))
        try:
            cos.create_bucket(Bucket=bucket)
        except ibm_boto3.exceptions.ibm_botocore.client.ClientError as e:
            # Typically a name-collision: bucket names are globally unique.
            print('Error: {}.'.format(e.response['Error']['Message']))
# The buckets are created.
print(list(cos.buckets.limit(50)))
# ### 1.2 Download the MNIST data and upload it to the COS bucket
# In this notebook we work with the Keras **MNIST** sample dataset. Download the training data and upload them to 'mnist-keras-data' bucket.
# Following cell creates the 'MNIST_KERAS_DATA' folder and downloads the file from link.
#
# **Note:** First install `wget` library by the following command
# `!pip install wget`
# Source URL of the Keras MNIST archive.
link = 'https://s3.amazonaws.com/img-datasets/mnist.npz'
# +
import wget
data_dir = 'MNIST_KERAS_DATA'
if not os.path.isdir(data_dir):
    os.mkdir(data_dir)
# Skip the download if the file is already present locally.
if not os.path.isfile(os.path.join(data_dir, os.path.join(link.split('/')[-1]))):
    wget.download(link, out=data_dir)
# !ls MNIST_KERAS_DATA
# -
# Upload the data files to created bucket.
# +
bucket_name = buckets[0]
bucket_obj = cos.Bucket(bucket_name)
for filename in os.listdir(data_dir):
    # NOTE(review): upload_file takes a path, so the open handle `data`
    # is never actually read — the with-block only pins the file open.
    with open(os.path.join(data_dir, filename), 'rb') as data:
        bucket_obj.upload_file(os.path.join(data_dir, filename), filename)
        print('{} is uploaded.'.format(filename))
# -
# You can see the list of all buckets and their contents.
for obj in bucket_obj.objects.all():
    print('Object key: {}'.format(obj.key))
    print('Object size (kb): {}'.format(obj.size/1024))
# ### 1.3 Create connections to a COS bucket.
# +
# WML connection assets pointing at the two COS buckets: one for training
# input, one for training results. Both use the HMAC keys created earlier.
datasource_type = client.connections.get_datasource_type_uid_by_name('bluemixcloudobjectstorage')
input_conn_meta_props= {
    client.connections.ConfigurationMetaNames.NAME: "Input COS connection",
    client.connections.ConfigurationMetaNames.DATASOURCE_TYPE: datasource_type,
    client.connections.ConfigurationMetaNames.PROPERTIES: {
        'bucket': buckets[0],
        'access_key': cos_credentials['cos_hmac_keys']['access_key_id'],
        'secret_key': cos_credentials['cos_hmac_keys']['secret_access_key'],
        'iam_url': auth_endpoint,
        'url': service_endpoint
    }
}
output_conn_meta_props= {
    client.connections.ConfigurationMetaNames.NAME: f"Output COS connection",
    client.connections.ConfigurationMetaNames.DATASOURCE_TYPE: datasource_type,
    client.connections.ConfigurationMetaNames.PROPERTIES: {
        'bucket': buckets[1],
        'access_key': cos_credentials['cos_hmac_keys']['access_key_id'],
        'secret_key': cos_credentials['cos_hmac_keys']['secret_access_key'],
        'iam_url': auth_endpoint,
        'url': service_endpoint
    }
}
input_conn_details = client.connections.create(meta_props=input_conn_meta_props)
output_conn_details = client.connections.create(meta_props=output_conn_meta_props)
# -
# Connection ids are referenced later by the training data/results sections.
input_connection_id = client.connections.get_uid(input_conn_details)
output_connection_id = client.connections.get_uid(output_conn_details)
# The model is ready to be trained.
# <a id="model_def"></a>
# # 2. Create model definition
# For the purpose of this example two Keras model definitions have been prepared:
#
# - Multilayer Perceptron (MLP)
# - Convolution Neural Network (CNN)
# ### 2.1 Prepare model definition metadata
# Metadata for the MLP model definition: the COMMAND is the entry point
# executed inside the training runtime (Python 3.8).
metaprops = {
    client.model_definitions.ConfigurationMetaNames.NAME: "MNIST mlp model definition",
    client.model_definitions.ConfigurationMetaNames.DESCRIPTION: "MNIST mlp model definition",
    client.model_definitions.ConfigurationMetaNames.COMMAND: "python3 mnist_mlp.py",
    client.model_definitions.ConfigurationMetaNames.PLATFORM: {"name": "python", "versions": ["3.8"]},
    client.model_definitions.ConfigurationMetaNames.VERSION: "2.0",
    client.model_definitions.ConfigurationMetaNames.SPACE_UID: space_id
}
# ### 2.2 Get sample model definition content files from git (Python scripts with CNN and MLP)
# +
# Fetch the sample model-definition archive once; skip if already present.
filename_mnist = 'MNIST.zip'
if not os.path.isfile(filename_mnist):
    filename_mnist = wget.download('https://github.com/IBM/watson-machine-learning-samples/raw/master/cloud/definitions/keras/mnist/MNIST.zip')
# -
# **Tip**: Convert below cell to code and run it to see model definition's code.
# + active=""
# !unzip -oqd . MNIST.zip && cat mnist_mlp.py
# -
# ### 2.3 Publish model definition
# Store the zip in the repository under the metadata defined above.
model_definition_details = client.model_definitions.store(filename_mnist, meta_props=metaprops)
model_definition_id = client.model_definitions.get_id(model_definition_details)
print(model_definition_id)
# #### List model definitions
client.model_definitions.list(limit=5)
# <a id="training"></a>
# # 3. Train model
# ### 3.1 Prepare training metadata
# Training-run configuration: which model definition to execute, on what
# hardware/software, and where the input data and results live in COS.
training_metadata = {
    client.training.ConfigurationMetaNames.NAME: "Keras-MNIST",
    client.training.ConfigurationMetaNames.SPACE_UID: space_id,
    client.training.ConfigurationMetaNames.DESCRIPTION: "Keras-MNIST predict written digits",
    client.training.ConfigurationMetaNames.TAGS: [{
        "value": "MNIST",
        # Fixed typo: "difits" -> "digits".
        "description": "predict written digits"
    }],
    # Results are written back to the output bucket via its connection asset.
    client.training.ConfigurationMetaNames.TRAINING_RESULTS_REFERENCE: {
        "name": "MNIST results",
        "connection": {
            "id": output_connection_id,
        },
        "location": {
            "bucket": buckets[1],
            "file_name": "."
        },
        "type": "connection_asset"
    },
    # Run the stored MLP definition on one K80 node with TF 2.4 / Py 3.8.
    client.training.ConfigurationMetaNames.MODEL_DEFINITION: {
        "id": model_definition_id,
        "hardware_spec": {
            "name": "K80",
            "nodes": 1
        },
        "software_spec": {
            "name": "tensorflow_2.4-py3.8"
        },
        "parameters": {
            "name": "MNIST mlp",
            "description": "Simple MNIST mlp model"
        }
    },
    # Training input is read from the data bucket via its connection asset.
    client.training.ConfigurationMetaNames.TRAINING_DATA_REFERENCES: [
        {
            "name": "training_input_data",
            "type": "connection_asset",
            "connection": {
                "id": input_connection_id,
            },
            "location": {
                "bucket": buckets[0],
                "file_name": "."
            },
            "schema": {
                "id": "idmlp_schema",
                "fields": [
                    {
                        "name": "text",
                        "type": "string"
                    }
                ]
            }
        }
    ]
}
# ### 3.2 Train model in background
# Launch the asynchronous training run described by the metadata above.
training = client.training.run(training_metadata)
# ### 3.3 Get training id and status
training_id = client.training.get_id(training)
client.training.get_status(training_id)["state"]
# ### 3.4 Get training details
training_details = client.training.get_details(training_id)
print(json.dumps(training_details, indent=2))
# #### List trainings
client.training.list(limit=5)
# #### Cancel training
# You can cancel the training run by calling the method below.
# **Tip**: If you want to delete train runs and results add `hard_delete=True` as a parameter.
# + active=""
# client.training.cancel(training_id)
# -
# <a id="persist"></a>
# # 4. Persist trained model
# ### 4.1 Download trained model from COS
# Results folder of this run inside the output bucket (the 'logs' path).
uid = client.training.get_details(training_id)['entity']['results_reference']['location']['logs']
# #### Download model from COS
# +
bucket_name = buckets[1]
bucket_obj = cos.Bucket(bucket_name)
# Locate the first .h5 artifact belonging to this training run.
# NOTE(review): if no .h5 is found, model_path stays "" and the download
# below fails — confirm training completed before running this cell.
model_path = ""
for obj in bucket_obj.objects.iterator():
    if uid in obj.key and obj.key.endswith(".h5"):
        model_path = obj.key
        break
model_name = model_path.split("/")[-1]
bucket_obj.download_file(model_path, model_name)
# -
# #### Load downloaded_model.
# **Hint**: To install tensorflow execute `!pip install tensorflow`
# +
from tensorflow import keras
model = keras.models.load_model(model_name)
# -
# -
# ### 4.2 Publish model
# To specify format how keras model is saved use parameter:
# ```
# model_meta_props = {
# ...
# client.repository.ModelMetaNames.TF_MODEL_PARAMS: {"save_format": "tf"}
# ...
# }
# ```
# Where `tf` stands for standard tensorflow format and `h5` will save a model in HDF format.
# For keras models `h5` is default and for native tensorflow models `tf` is used if `TF_MODEL_PARAMS` not passed.
# ### 4.2 Publish model
# Resolve the software spec matching the runtime the model was trained in.
software_spec_uid = client.software_specifications.get_id_by_name('tensorflow_2.4-py3.8')
# +
# Store the Keras model in the repository in HDF5 (h5) format.
model_meta_props = {
    client.repository.ModelMetaNames.NAME: "Keras MNIST",
    client.repository.ModelMetaNames.TYPE: "tensorflow_2.4",
    client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: software_spec_uid,
    client.repository.ModelMetaNames.TF_MODEL_PARAMS: {"save_format": "h5"}
}
published_model = client.repository.store_model(model=model, meta_props=model_meta_props)
model_uid = client.repository.get_model_uid(published_model)
# -
# ### 4.3 Get model details
model_details = client.repository.get_details(model_uid)
print(json.dumps(model_details, indent=2))
# #### List stored models
client.repository.list_models(limit=5)
# <a id="deploy"></a>
# # 5. Deploy and score
# ### 5.1 Create online deployment for published model
# +
# Expose the stored model as an online (synchronous scoring) deployment.
deployment = client.deployments.create(model_uid, meta_props={
                                            client.deployments.ConfigurationMetaNames.NAME: "Keras MNIST",
                                            client.deployments.ConfigurationMetaNames.ONLINE: {}})
deployment_uid = client.deployments.get_id(deployment)
# -
# ### 5.2 Get deployments details
deployments_details = client.deployments.get_details(deployment_uid)
print(json.dumps(deployments_details, indent=2))
# #### List deployments
client.deployments.list(limit=5)
# ### 5.3 Score deployed model
# Let's plot two digits. **Action:** Please install `matplotlib`, `numpy`
# + pycharm={"name": "#%%\n"}
import numpy as np
# Load the test split of the MNIST archive downloaded earlier.
dataset_filename='mnist.npz'
mnist_dataset = np.load(os.path.join(data_dir, dataset_filename))
x_test = mnist_dataset['x_test']
# -
# %matplotlib inline
import matplotlib.pyplot as plt
# Preview the two digits that will be sent for scoring.
for i, image in enumerate([x_test[0], x_test[1]]):
    plt.subplot(2, 2, i + 1)
    plt.axis('off')
    plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
# Our input node expects to get data with shape (784,) so we need to reshape our two digits.
# Flatten 28x28 -> 784 and scale pixel values to [0, 1].
image_1 = x_test[0].ravel() / 255
image_2 = x_test[1].ravel() / 255
# #### Prepare scoring payload and score.
scoring_payload = {
    client.deployments.ScoringMetaNames.INPUT_DATA : [
        {'values': [image_1.tolist(), image_2.tolist()]}
    ]
}
scores = client.deployments.score(deployment_uid, meta_props=scoring_payload)
print("Scoring result:\n" + json.dumps(scores, indent=2))
# <a id="clean"></a>
# # 6. Clean up
# If you want to clean up all created assets:
# - experiments
# - trainings
# - pipelines
# - model definitions
# - models
# - functions
# - deployments
#
# please follow up this sample [notebook](https://github.com/IBM/watson-machine-learning-samples/blob/master/cloud/notebooks/python_sdk/instance-management/Machine%20Learning%20artifacts%20management.ipynb).
# <a id="summary"></a>
# # 7. Summary and next steps
# You successfully completed this notebook! You learned how to use `ibm-watson-machine-learning-client` to run experiments. Check out our _[Online Documentation](https://dataplatform.cloud.ibm.com/docs/content/wsj/getting-started/welcome-main.html?context=analytics)_ for more samples, tutorials, documentation, how-tos, and blog posts.
# ### Author
#
# **<NAME>**, Intern in Watson Machine Learning.
# Copyright © 2020, 2021 IBM. This notebook and its source code are released under the terms of the MIT License.
|
cloud/notebooks/python_sdk/experiments/deep_learning/Use Keras and HPO to recognize hand-written digits.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Load packages
import numpy as np
import pandas as pd
# # Prep other sources
# Game results restricted to the columns used for prediction.
all_games_df = pd.read_csv('Data/~Created data/all_games_df.csv')
all_games_df = all_games_df[['is_tourney','Season','HTeamID','RTeamID','Hwin']]
all_games_df.head()
# KenPom efficiency metrics per team/season.
kp_df = pd.read_csv('Data/~Created data/kp_all.csv')
kp_df.head()
# Regular-season aggregates per team/season.
regseason_df = pd.read_csv('Data/~Created data/regseason_df.csv')
regseason_df = regseason_df[['TeamID','Season','wins_top25','PointMargin','FG','FG3']]
regseason_df.head()
# Massey ordinal rankings; keep only Pomeroy (POM) at the final ranking day.
massey_df = pd.read_csv('Data/Kaggle NCAA/MasseyOrdinals_thru_2019_day_128.csv')
POM_df = massey_df[massey_df['SystemName'].str.contains("POM")]
POM_end_df = POM_df.loc[POM_df['RankingDayNum'] == 128]
# NOTE(review): inplace rename on a filtered slice can trigger
# SettingWithCopyWarning — consider .copy() before renaming; confirm.
POM_end_df.rename(columns={'OrdinalRank': 'RankPOM'}, inplace=True)
POM_end_df = POM_end_df[['Season','TeamID','RankPOM']]
POM_end_df.head()
# # Create test set
# +
#Test set (this sets the data up in the format Kaggle needs for scoring)
df_seeds = pd.read_csv('Data/Kaggle NCAA/NCAATourneySeeds.csv')
df_seeds = df_seeds[df_seeds['Season']==2019]
# Self-merge on Season produces every team pairing; keep each pair once
# (lower TeamID first), matching Kaggle's submission convention.
df_19_tourney = df_seeds.merge(df_seeds, how='inner', on='Season')
df_19_tourney = df_19_tourney[df_19_tourney['TeamID_x'] < df_19_tourney['TeamID_y']]
# Kaggle matchup id: "<Season>_<lowerTeamID>_<higherTeamID>".
df_19_tourney['ID'] = df_19_tourney['Season'].astype(str) + '_' \
                    + df_19_tourney['TeamID_x'].astype(str) + '_' \
                    + df_19_tourney['TeamID_y'].astype(str)
# Numeric seed: characters 2-3 of the seed string (region letter stripped).
df_19_tourney['SeedInt_x'] = [int(x[1:3]) for x in df_19_tourney['Seed_x']]
df_19_tourney['SeedInt_y'] = [int(x[1:3]) for x in df_19_tourney['Seed_y']]
#Make home team lower seed (consistent with training data)
df_19_tourney.loc[df_19_tourney['SeedInt_x']<df_19_tourney['SeedInt_y'], 'HTeamID'] = df_19_tourney['TeamID_x']
df_19_tourney.loc[df_19_tourney['SeedInt_x']>df_19_tourney['SeedInt_y'], 'HTeamID'] = df_19_tourney['TeamID_y']
df_19_tourney.loc[df_19_tourney['SeedInt_x']<df_19_tourney['SeedInt_y'], 'RTeamID'] = df_19_tourney['TeamID_y']
df_19_tourney.loc[df_19_tourney['SeedInt_x']>df_19_tourney['SeedInt_y'], 'RTeamID'] = df_19_tourney['TeamID_x']
# Tie on seed: fall back to TeamID order (x is home).
df_19_tourney.loc[df_19_tourney['SeedInt_x']==df_19_tourney['SeedInt_y'], 'HTeamID'] = df_19_tourney['TeamID_x']
df_19_tourney.loc[df_19_tourney['SeedInt_x']==df_19_tourney['SeedInt_y'], 'RTeamID'] = df_19_tourney['TeamID_y']
df_19_tourney.loc[df_19_tourney['SeedInt_x']<df_19_tourney['SeedInt_y'], 'HSeed'] = df_19_tourney['SeedInt_x']
df_19_tourney.loc[df_19_tourney['SeedInt_x']>df_19_tourney['SeedInt_y'], 'HSeed'] = df_19_tourney['SeedInt_y']
df_19_tourney.loc[df_19_tourney['SeedInt_x']<df_19_tourney['SeedInt_y'], 'RSeed'] = df_19_tourney['SeedInt_y']
df_19_tourney.loc[df_19_tourney['SeedInt_x']>df_19_tourney['SeedInt_y'], 'RSeed'] = df_19_tourney['SeedInt_x']
df_19_tourney.loc[df_19_tourney['SeedInt_x']==df_19_tourney['SeedInt_y'], 'HSeed'] = df_19_tourney['SeedInt_x']
df_19_tourney.loc[df_19_tourney['SeedInt_x']==df_19_tourney['SeedInt_y'], 'RSeed'] = df_19_tourney['SeedInt_y']
df_19_tourney['is_tourney'] = 1
# Drop the intermediate merge columns now that H/R assignments are made.
df_19_tourney = df_19_tourney.drop(['Seed_x','Seed_y','TeamID_x','TeamID_y','SeedInt_x','SeedInt_y'], axis=1)
df_19_tourney.sort_index()
# -
# Join per-team features twice: once for the home ('H') side, once for
# the road ('R') side, prefixing each merged column accordingly.
home_road = ['H','R']
for hr in home_road:
    df_19_tourney = pd.merge(df_19_tourney, regseason_df, left_on=['Season',hr+'TeamID'], right_on = ['Season','TeamID'], how='left')
    df_19_tourney.rename(columns={'wins_top25': hr+'wins_top25'}, inplace=True)
    df_19_tourney.rename(columns={'PointMargin': hr+'PointMargin'}, inplace=True)
    df_19_tourney.rename(columns={'FG': hr+'FG'}, inplace=True)
    df_19_tourney.rename(columns={'FG3': hr+'FG3'}, inplace=True)
    df_19_tourney = df_19_tourney.drop(['TeamID'], axis=1)
for hr in home_road:
    df_19_tourney = pd.merge(df_19_tourney, POM_end_df, left_on=['Season',hr+'TeamID'], right_on = ['Season','TeamID'], how='left')
    df_19_tourney.rename(columns={'RankPOM': hr+'RankPOM'}, inplace=True)
    df_19_tourney = df_19_tourney.drop(['TeamID'], axis=1)
# KenPom efficiency columns to prefix after each merge.
# NOTE(review): 'TeamID' is dropped before the rename loop runs, so the
# {'TeamID': hr+'TeamID'} rename is a no-op — confirm that is intended.
efficiency_list = ['conf','adjem','adjo','adjd','luck','TeamID']
for hr in home_road:
    df_19_tourney = pd.merge(df_19_tourney, kp_df, left_on=[hr+'TeamID','Season'], right_on = ['TeamID','Season'], how='inner')
    df_19_tourney = df_19_tourney.drop(['TeamID'], axis=1)
    for metric in efficiency_list:
        df_19_tourney.rename(columns={metric: hr+metric}, inplace=True)
    # KenPom's 'team' name column becomes 'home' or 'road'.
    if hr == 'H':
        df_19_tourney.rename(columns={'team': 'home'}, inplace=True)
    if hr == 'R':
        df_19_tourney.rename(columns={'team': 'road'}, inplace=True)
# +
# Indicator: team has 20+ historical tournament appearances (hand-curated
# list of experienced programs, matched on KenPom lowercase team names).
df_19_tourney['Htourny20plus'] = 0
df_19_tourney['Rtourny20plus'] = 0
experienced_teams = ['kansas','north carolina','kentucky','duke','michigan st.','wisconsin','florida','villanova','gonzaga','louisville','arizona','xavier','connecticut','syracuse','butler','ohio st.','ucla','west virginia','texas','michigan','pittsburgh','memphis','oregon']
for team in experienced_teams:
    df_19_tourney.loc[df_19_tourney['home']==team, 'Htourny20plus'] = 1
    df_19_tourney.loc[df_19_tourney['road']==team, 'Rtourny20plus'] = 1
# -
# Indicator: team belongs to one of the four power conferences.
df_19_tourney['HBig4Conf'] = 0
df_19_tourney['RBig4Conf'] = 0
conferences = ['ACC','B10','B12','SEC']
for conf in conferences:
    df_19_tourney.loc[df_19_tourney['Hconf']==conf, 'HBig4Conf'] = 1
    df_19_tourney.loc[df_19_tourney['Rconf']==conf, 'RBig4Conf'] = 1
list(df_19_tourney)
df_19_tourney.tail()
# Impute any remaining missing numeric values with the column mean.
df_19_tourney = df_19_tourney.fillna(df_19_tourney.mean())
# # Output to csv
df_19_tourney.to_csv('Data/~Created data/test_combos_df_19.csv', index=False)
|
04 Create data for 2019 prediction.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# data analysis and wrangling
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
from datetime import timedelta
# visualization
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# configure visualizations
sns.set_style('whitegrid')
figsize=(8,6)
# +
def load_df(path, columns=None, use_threads=True):
    """Read a parquet file into a DataFrame, optionally selecting columns.

    On any failure the exception is printed and the function implicitly
    returns None — NOTE(review): callers (e.g. prepare) assume a frame
    and will raise AttributeError on None; confirm this best-effort
    behavior is intended.
    """
    try:
        return pq.read_table(path, columns=columns, use_threads=use_threads).to_pandas()
    except Exception as e:
        print(e)
def downcast_ints(df):
    """Shrink every integer column of *df* to the smallest unsigned dtype.

    Conversion happens on *df* itself (which is also returned), reducing
    memory use without changing any stored values.
    """
    int_columns = df.select_dtypes(include=['int'])
    shrunk = int_columns.apply(pd.to_numeric, downcast='unsigned')
    for column in shrunk.columns:
        df[column] = shrunk[column]
    return df
def prepare(path, cols):
    """Load *cols* from the parquet file at *path*, reset the index and
    downcast integer columns to compact unsigned dtypes."""
    frame = load_df(path, cols)
    frame = frame.reset_index(drop=True)
    return downcast_ints(frame)
# +
# Columns of interest from the raw network-details parquet dump.
cols = ['sample_id', 'network_type', 'mobile_network_type', 'mobile_data_status', 'mobile_data_activity', 'roaming_enabled', 'wifi_status', 'wifi_signal_strength', 'wifi_link_speed', 'wifi_ap_status', 'network_operator', 'sim_operator', 'mcc', 'mnc']
df = prepare('1-parquet-files/network_details.parquet',cols)
#fix unsigned int
# Undo the unsigned downcast for id/flag columns that downstream code
# expects as signed 32-bit integers.
df_level = df.sample_id
converted_level = df_level.astype(np.int32)
df['sample_id'] = converted_level
df_level = df.roaming_enabled
converted_level = df_level.astype(np.int32)
df['roaming_enabled'] = converted_level
# Normalise categorical string columns to upper case.
df['network_type'] = df['network_type'].apply(lambda x: x.upper())
df['mobile_network_type'] = df['mobile_network_type'].apply(lambda x: x.upper())
#df['mobile_network_type'] = df['mobile_network_type'].apply(lambda x: 'UNKNOWN' if '0' else 'IWLEN' if '18' else 'GSM' if '16' else 'NR5G' if '20' else 'TD_SCDMA' if '17' else x)
df['mobile_data_status'] = df['mobile_data_status'].apply(lambda x: x.upper())
df['mobile_data_activity'] = df['mobile_data_activity'].apply(lambda x: x.upper())
df['wifi_status'] = df['wifi_status'].apply(lambda x: x.upper())
df['wifi_ap_status'] = df['wifi_ap_status'].apply(lambda x: x.upper())
# network_operator may be null, so guard the upper() call.
df['network_operator'] = df['network_operator'].apply(lambda x: x.upper() if pd.notnull(x) else x)
df['sim_operator'] = df['sim_operator'].apply(lambda x: x.upper())
# Canonicalise one legacy label.
df['network_type'] = df['network_type'].apply(lambda x: 'BLUETOOTH TETHERING' if (x == 'BLUETOOTH_TETHER' ) else x)
df.info()
# -
# Persist the cleaned frame and echo the top-20 network types by count.
df.to_parquet('2-datasets/network_details.parquet', compression='none')
dfNetworkTypeGB = df.groupby(['network_type'])['network_type'].count().reset_index(name='count').sort_values(['count'], ascending=False).head(20)
print(dfNetworkTypeGB)
|
notebooks/ParquetCleanNetworkDetails.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# #!/usr/bin/python
# -*- coding: latin-1 -*-
import os, sys, inspect
from datetime import datetime, timedelta
# Resolve the directory containing this script and its parent (the
# pywikibot checkout), and put both on sys.path so the local pywikibot
# copy is importable. This notebook targets Python 2.
current_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
folder_parts = current_folder.split(os.sep)
pywikibot_folder = os.sep.join(folder_parts[0:-1])
if current_folder not in sys.path:
    sys.path.insert(0, current_folder)
if pywikibot_folder not in sys.path:
    sys.path.insert(0, pywikibot_folder)
import pywikibot as pb
from pywikibot import pagegenerators, textlib
from StringIO import StringIO
import mwparserfromhell as mwh
import pandas as pd
# +
BASE_WLE_NAME = u"Commons:Wiki Loves Earth 2017 in Spain"
log_page = BASE_WLE_NAME + u"/Log"
commons_site = pb.Site("commons", "commons")
# The log page body is a semicolon-separated list; strip the first and
# last lines (wrapper markup) before parsing it as CSV.
wle_list_page = pb.Page(commons_site, log_page)
wle_list_text = StringIO(wle_list_page.text[wle_list_page.text.find('\n') +
                                            1:wle_list_page.text.rfind('\n')])
wle_df = pd.read_csv(wle_list_text, sep=";",
                     index_col=False,
                     names=["title", "code", "author", "date"])
# Prefix titles with the "File:" namespace to get page names on Commons.
images_wle = ["File:"+image for image in wle_df["title"]]
# -
images_wle
# Walk every contest image; any page still tagged {{Uncategorized}} gets
# the contest maintenance category appended and is saved.
image_counter = 0
for image in images_wle:
    # Python 2: decode the raw title bytes to unicode for pywikibot.
    image = unicode(image, "utf-8", "replace")
    page = pb.Page(commons_site, image)
    # Progress message every 50 pages.
    if (image_counter != 0) and (image_counter % 50 == 0) :
        pb.output ('Retrieving --> %d image descriptions downloaded' %(image_counter))
    image_counter += 1
    text = page.text
    if "{{Uncategorized|year=2017|month=May|" in text:
        page.text = text + '\n[[Category:Uncategorized images from Wiki Loves Earth 2017 in Spain]]'
        page.save(u"WLE Spain 2017 maintenance")
|
WLE 2017 uncategorized classifier.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pathlib
import numpy as np
import matplotlib.pyplot as plt
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
import IPython
from IPython.core.display import display
import librosa
import librosa.display
# our own stuff
from ct_utils import analog
# -
# this is required for the `Output` widgets to work properly
# %matplotlib widget
# Follow through the *notebook* and answer the 5 questions raised (highlighted in <font color='red'>red</font>).
# A couple of off-topic remarks:
# * Seemingly, the performance of the app is better in [Chrome](https://www.google.com/chrome/), though [Firefox](https://www.mozilla.org/en-US/firefox/) is also fine.
# * Before playing any recording, especially if you are using earphones, please make sure the volume is not too loud.
# * For answering the questions, you don't need to hear the full song every time. It is usually enough to play the first 5-10 seconds.
# * When adjusting the sliders below, you usually need to wait for a few seconds for the app to update and some *flickering* might occur. Please be patient. Bear in mind that this is running *in the cloud* and, sometimes, modulation/demodulation of a high-quality audio recording is being performed for you.
# # Introduction
# Please check the [slides of the course](https://manuvazquez.github.io/assets/communications_theory/slides/analog_modulations.pdf) for this module. Extra background information can be found in the slides for the [course introduction](https://manuvazquez.github.io/assets/communications_theory/slides/introduction.pdf), when talking about *Analog vs Digital communications systems*
# # Amplitude Modulation (conventional AM)
# An amplitude modulation is a kind of *linear* or *amplitude* (**analog**) modulation, i.e., the information signal is embedded in the amplitude of the signal (meaning the frequency and phase of the *carrier signal* stay constant). If we denote the information signal (also referred to as _modulat**ing**_ signal) by $x(t)$, then the _modulat**ed**_ signal is given by
#
# $$
# \large
# y(t)
# =
# \left(
# A_c +
# A_m
# x(t)
# \right)
# \cos (w_ct)
# $$
# where
# * both $A_c$ and $A_m$ are (adjustable) modulation parameters
# * $w_c$ is the carrier frequency
# The above signal can be expressed in a different way
#
# $$
# \large
# y(t)
# =
# \left(
# A_c +
# \frac{A_c}{A_c}
# A_m
# x(t)
# \right)
# \cos (w_ct)
# =
# A_c
# \left(
# 1 + mx(t)
# \right)
# \cos (w_ct)
# $$
# by defining the **modulation index**
#
# $$
# \large
# m
# =
# \frac{A_m}{A_c}
# $$
# .
# # Demodulation
# If the signal is *normalized* (i.e., $|x(t)| \le 1$), looking at the above equation, demodulation is very easy if
#
# $$
# \large
# A_c
# \left(
# 1 + mx(t)
# \right)
# \ge
# 0
# $$
#
# i.e., if the term multiplying the cosine is (at every time instant) non-negative. The reason is that whatever *positive* signal multiplies a rapidly varying cosine constitutes its so-called upper [envelope](https://en.wikipedia.org/wiki/Envelope_(waves)) (a smooth signal that outlines the extremes of a sinusoid), and simple/cheap/efficient hardware is available to extract the latter. Now, if the signal is at some point negative, then it cannot be recovered as the envelope of the signal. In our particular case, what do we need for the condition
# $
# A_c
# \left(
# 1 + mx(t)
# \right)
# \ge
# 0
# $
# to hold? Above we have guaranteed that $|x(t)| \le 1$. Let us also assume that $A_c \ge 0$ (no need to go into details, but this is not a problem). Then, we just need to choose $m$ so that $mx(t) \ge -1$, i.e., $ 0 < m \le 1$.
#
# So, in summary, if the modulation index, $m$ is between $0$ and $1$, then the envelope of the modulated signal (easy to extract) is exactly
# $
# A_c
# \left(
# 1 + mx(t)
# \right)
# \ge
# 0
# $, and from the latter one can solve for $x(t)$ to recover the information signal. If $m>1$, then the envelope of the signal doesn't match anymore
# $
# A_c
# \left(
# 1 + mx(t)
# \right)
# \ge
# 0
# $
# and the signal recovered with this envelope-based method is not correct. This is called **overmodulation**.
# # An audio signal
# We load a piece of the song *Reverie* by [\_ghost](http://ccmixter.org/files/_ghost/25389) (downloaded from [ccMixter](http://ccmixter.org/) under [Creative Commons licence](https://creativecommons.org/licenses/by/3.0/)).
# filename = pathlib.Path('_ghost_-_Reverie_(small_theme).mp3')
filename = pathlib.Path('_ghost_-_Reverie_(small_theme).wav')
assert filename.exists()
signal, sampling_rate = librosa.load(filename)
# signal.shape
# a normalized version of the signal
normalized_signal, normalization_const = analog.normalize(signal, return_normalization_constant=True)
# a scaled up one
amplified_signal = signal * 11
# time axis
t = np.arange(len(signal)) / sampling_rate
IPython.display.display(IPython.display.Audio(signal, rate=sampling_rate))
# Parameters
w_c = 2 * np.pi * 1_000
A_m = 1.
A_c = 2.
def make_plot_and_player(signal: np.ndarray) -> list:
    """Modulate/demodulate *signal* and return [plot widget, audio widget].

    The first widget shows the raw input signal over time; the second is an
    audio player for the AM-demodulated version of that same signal. Uses the
    notebook-level modulation parameters ``A_m``, ``A_c`` and ``w_c``.
    """
    # AM modulation followed by (envelope-based) demodulation
    modem = analog.AmplitudeModulation(Am=A_m, Ac=A_c, carrier_freq=w_c)
    tx_signal, *_ = modem.modulate(t, signal)
    rx_signal = modem.demodulate(tx_signal)

    widget_list = []

    # time-domain plot of the input signal, embedded in an `Output` widget
    plot_output = widgets.Output()
    with plot_output:
        fig, ax = plt.subplots(1, 1, figsize=(6, 8))
        ax.plot(t, signal);
        # hide the interactive-canvas chrome so only the plot shows
        fig.canvas.toolbar_visible = False
        fig.canvas.header_visible = False
        fig.canvas.resizable = False
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
    widget_list.append(plot_output)

    # audio player for the demodulated signal
    audio_output = widgets.Output()
    with audio_output:
        IPython.display.display(IPython.display.Audio(rx_signal, rate=sampling_rate))
    widget_list.append(audio_output)

    return widget_list
# **<font color='red'>Q1.</font>** Two scaled versions of the signal are modulated and, afterwards, *properly* demodulated using the above algebra. Look at the pictures noting the scale of the vertical axis. Can you guess which one is going to sound OK? If not sure, just play both of them for a few seconds. Which one sounds fine? Why not so much the other? Assume each one of the signals below constitutes the exact $x(t)$ to be transformed using the above equation.
widgets.HBox([
widgets.VBox(make_plot_and_player(amplified_signal)),
widgets.VBox(make_plot_and_player(normalized_signal))])
# # Overmodulation
# an interval specifying a small piece of the signal
interval_of_interest = range(100_000,100_200)
# When you adjust one of the slides, please wait a few seconds (more or less depending on the *cloud* servers load) for the browser to refresh (most likely, it will flicker **twice**).
# +
common_properties = {'min': 0.1, 'max': 50.}
Am_slider_widget = widgets.FloatSlider(**common_properties, value=1., description='$A_m$')
Ac_slider_widget = widgets.FloatSlider(**common_properties, value=2, description='$A_c$')
ui = widgets.VBox([Am_slider_widget, Ac_slider_widget])
def f(Am: float, Ac: float):
    """Interactive callback: modulate the normalized song with the given AM
    parameters, demodulate it, and display a comparison plot plus a player.

    Parameters
    ----------
    Am, Ac : float
        Modulation amplitudes; the modulation index is m = Am / Ac.
    """
    # Close the figure from the *previous* invocation to avoid matplotlib's
    # "More than 20 figures have been opened" warning. The original code did
    # `plt.close(fig)` inside try/except UnboundLocalError, but `fig` is a
    # fresh local on every call, so the close never actually ran; keep the
    # previous figure on the function object instead.
    prev_fig = getattr(f, '_last_fig', None)
    if prev_fig is not None:
        plt.close(prev_fig)
    am = analog.AmplitudeModulation(Am=Am, Ac=Ac, carrier_freq=w_c)
    modulated_signal, envelope, cosine_factor = am.modulate(t, normalized_signal)
    demodulated_signal = am.demodulate(modulated_signal)
    # figure: information signal vs. its demodulated reconstruction
    fig, ax = plt.subplots(1, 1, figsize=(15,8))
    f._last_fig = fig
    ax.plot(
        t[interval_of_interest], normalized_signal[interval_of_interest], label='information (modulating) signal')
    ax.plot(
        t[interval_of_interest], demodulated_signal[interval_of_interest], label='demodulated signal',
        marker='P', markevery=5)
    ax.legend(loc='upper left',fontsize='x-large')
    # hide the interactive-canvas chrome so only the plot shows
    fig.canvas.toolbar_visible = False
    fig.canvas.footer_visible = False
    fig.canvas.header_visible = False
    fig.canvas.resizable = False
    display(IPython.display.Audio(demodulated_signal, rate=sampling_rate))
out = widgets.interactive_output(f, {'Am': Am_slider_widget, 'Ac': Ac_slider_widget})
IPython.display.display(ui, out)
# -
# Try different values of the parameters $A_m$ and $A_c$ and compare the information and demodulated signals. You can also play the demodulated signal by clicking the corresponding button below.
# **<font color='red'>Q2.</font>** Provide values for $A_m$ and $A_c$ that cause overmodulation. What do you notice in the plot? Why doesn't it affect the whole signal?
# **<font color='red'>Q3.</font>** Test the effects of overmodulation on our ears. Can you pick a pair of values for $A_m$ and $A_c$ such that overmodulation happens but doesn't cause a noticeable degradation on the *perceived* sound quality? Above which (approximate) value of the modulation index, $m$, do you start noticing glitches in the audio?
# # Noise resilience
# power of the signal
P_s = (np.abs(normalized_signal)**2).mean()
# the modulated signal is always the same
am = analog.AmplitudeModulation(Am=A_m, Ac=A_c, carrier_freq=w_c)
modulated_signal, envelope, cosine_factor = am.modulate(t, normalized_signal)
# Below you can see the effect on the demodulated signal when noise is added to the modulated one. In particular, you can adjust the signal-to-noise ratio (SNR) **in dBs** and look and hear the result.
# +
SNR_slider_widget = widgets.FloatSlider(min=5., max=60., value=15., description='SNR')
print_output = widgets.Output()
def f(SNR: float):
    """Interactive callback: add white Gaussian noise at the chosen SNR (dB)
    to the AM-modulated signal, demodulate it, and display plot + player.

    Parameters
    ----------
    SNR : float
        Signal-to-noise ratio in decibels, relative to the power of the
        normalized information signal, P_s.
    """
    # Close the figure from the *previous* invocation to avoid matplotlib's
    # "More than 20 figures have been opened" warning. The original
    # `plt.close(fig)` in a try/except UnboundLocalError never worked: `fig`
    # is a fresh local on each call, so the except always fired. Keep the
    # last figure on the function object instead.
    prev_fig = getattr(f, '_last_fig', None)
    if prev_fig is not None:
        plt.close(prev_fig)
    global print_output
    # SNR (dB) -> linear scale: noise_variance = P_s / 10**(SNR/10)
    noise_variance = P_s / 10**(SNR / 10.)
    noise = np.random.randn(*normalized_signal.shape) * np.sqrt(noise_variance)
    demodulated_signal = am.demodulate(modulated_signal + noise)
    # figure: information signal vs. noisy demodulated reconstruction
    fig, ax = plt.subplots(1, 1, figsize=(15,8))
    f._last_fig = fig
    ax.plot(
        t[interval_of_interest], normalized_signal[interval_of_interest], label='information (modulating) signal')
    ax.plot(
        t[interval_of_interest], demodulated_signal[interval_of_interest], label='demodulated signal',
        marker='P', markevery=5)
    ax.legend(loc='upper left',fontsize='x-large')
    # hide the interactive-canvas chrome so only the plot shows
    fig.canvas.toolbar_visible = False
    fig.canvas.footer_visible = False
    fig.canvas.header_visible = False
    fig.canvas.resizable = False
    print_output.clear_output()
    with print_output:
        print(f'noise variance = {noise_variance}')
    display(IPython.display.Audio(demodulated_signal, rate=sampling_rate))
out = widgets.interactive_output(f, {'SNR': SNR_slider_widget})
IPython.display.display(widgets.VBox([SNR_slider_widget, print_output]), out)
# -
# **<font color='red'>Q4.</font>** Below which value of the SNR is the sound not *perceived* as sharp (clear) anymore? $10$ dBs is usually considered a *pretty good* SNR in high quality radio (see, e.g., the last paragraphs in [these notes](https://www.electronics-notes.com/articles/radio/radio-receiver-sensitivity/signal-to-noise-ratio-s-n-snr-formula.php)). According to that, is AM noise-resilient or not?
# <!-- Notice we are not performing any kind of de-noising, which we should-->
# **<font color='red'>Q5.</font>** What is the power of the signal for an SNR of about $20$ dBs?
|
labs/notebooks/analog_modulations_lab.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import math
from statsmodels.stats.outliers_influence import variance_inflation_factor
import statsmodels.api as sm
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
# %matplotlib inline
sns.set()
# -
df = pd.read_csv("../data/data_csv.csv")
# +
df["Year"] = pd.DatetimeIndex(df['Date']).year
df["Month"] = pd.DatetimeIndex(df['Date']).month
start_y = 2000
end_y = 2018
df_new = df[df.Year>=start_y].reset_index(drop=True)
df_new = df_new.drop("Date", axis=1)
# +
vif = pd.DataFrame()
df_vif = df_new
vif["features"] = df_vif.columns
vif["vif_Factor"] = [variance_inflation_factor(df_vif.values, i) for i in range(df_vif.shape[1])]
print(vif)
print("\nIf VIF value is higher than 10, it is usually considered having high correlation with other independent variables.")
# -
# ### To calculate real returns we use the formula:
# return[t] = return[t-1] * (((Real Price[t] - Real Price[t-1]) / Real Price[t-1]) + 1) + (Real Dividend[t-1]/Real Price[t-1]) * (return[t-1]/12)
def calculate_real_return(idx, old_idx, previous_return, df_new_s):
    """Cumulative real return after one step, with monthly dividend reinvestment.

    Implements
        return[t] = return[t-1] * (1 + (P[t] - P[t-1]) / P[t-1])
                    + (D[t-1] / P[t-1]) * (return[t-1] / 12)
    where P is the "Real Price" column and D the "Real Dividend" column of
    *df_new_s*, indexed positionally by *idx* (current) and *old_idx* (previous).
    """
    prices = df_new_s["Real Price"].values
    dividends = df_new_s["Real Dividend"].values
    old_price = prices[old_idx]
    # price growth factor over the step
    growth_factor = 1 + (prices[idx] - old_price) / old_price
    # dividend yield at the previous step, reinvested monthly (hence /12)
    dividend_yield = dividends[old_idx] / old_price
    return previous_return * growth_factor + dividend_yield * (previous_return / 12)
# +
# Accumulate the cumulative real return month by month, starting from one
# dollar invested at the beginning of the period. Entry t holds the value of
# that dollar after t steps (price growth plus reinvested dividends).
# The original kept a counter `i` that always equaled the loop index; the
# last appended element is simply previous_real_returns[-1].
previous_real_returns = [1]  # start with investing one dollar at the beginning
for y in range(len(df_new) - 1):
    previous_real_returns.append(
        calculate_real_return(y + 1, y, previous_real_returns[-1], df_new))
# +
# Align each row of df_new with its cumulative return and with the number of
# months invested so far.
# NOTE(review): `month` keeps incrementing past 12 and `i` only advances when
# the row's month differs from the counter — this assumes df_new is in
# chronological order with at most one row per month; confirm upstream.
return_val = []
month = 1 # january
i = 0
month_count = []
for idx, row in df_new.iterrows():
    if row.Month == month:
        # same month as the counter: reuse the current cumulative return
        return_val.append(previous_real_returns[i])
        month_count.append(i)
    else:
        # month changed: advance to the next cumulative-return entry
        month += 1
        i += 1
        return_val.append(previous_real_returns[i])
        month_count.append(i)
# -
df_new["Return"] = return_val
df_new["InvestedMonths"] = month_count
plt.hist(df_new["Real Price"])
plt.hist(df_new["InvestedMonths"])
plt.hist(df_new["SP500"])
plt.hist(df_new["Return"])
# +
plt.figure(figsize=(8, 8))
sns.heatmap(df_new.drop(["Return", "Year", "Month"], axis=1).corr(), annot=True, cmap=plt.cm.Blues)
plt.show()
print("select those independent variables with high correlation with dependent variable")
# -
plt.scatter(df_new["Real Price"], df_new["Return"])
plt.scatter(df_new["SP500"], df_new["Return"])
returns = [math.log(r) for r in df_new["Return"]]
plt.scatter(df_new["InvestedMonths"], df_new["Return"])
plt.xlabel("Invested Months")
plt.ylabel("Return")
plt.scatter(df_new["InvestedMonths"], returns)
plt.xlabel("Invested Months")
plt.ylabel("Log Return")
plt.title("Return of investment")
sns.regplot(x="InvestedMonths", y="Return", data=df_new[["InvestedMonths", "Return"]])
plt.scatter(df_new["SP500"], returns)
plt.xlabel("SP500")
plt.ylabel("Log Return")
# +
vif = pd.DataFrame()
df_vif = df_new[["SP500", "Real Price"]]
vif["features"] = df_vif.columns
vif["vif_Factor"] = [variance_inflation_factor(df_vif.values, i) for i in range(df_vif.shape[1])]
print(vif)
print("\nIf VIF value is higher than 10, it is usually considered having high correlation with other independent variables.")
# -
def get_cv_scores(model, x, y):
    """Print the mean and standard deviation of 10-fold cross-validated R^2."""
    fold_scores = cross_val_score(model, x, y, cv=10, scoring='r2')
    mean_score, score_std = np.mean(fold_scores), np.std(fold_scores)
    print(f'CV Mean: {mean_score:.4f}')
    print(f'STD: {score_std:.4f}')
def results(y_t, y_p):
    """Print MSE and R^2 for predictions *y_p* against true values *y_t*."""
    mse = mean_squared_error(y_t, y_p)
    r2 = r2_score(y_t, y_p)
    print(f'Mean squared error: {mse:.4f}')
    # The coefficient of determination: 1 is perfect prediction
    print(f'Coefficient of determination: {r2:.4f}')
# ### Model 1: = Return ~ SP500
# +
X = df_new[["SP500"]]
y = df_new[["Return"]]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1)
lr = LinearRegression().fit(X_train, y_train)
print("**** Results ****")
get_cv_scores(lr, X_train, y_train)
y_pred = lr.predict(X_test)
results(y_test, y_pred)
# -
X_train_sm = sm.add_constant(X_train)
model = sm.OLS(y_train, X_train_sm).fit()
model.bic
# ### Model 2: Return ~ InvestedMonths
# +
X = df_new[["InvestedMonths"]]
y = returns
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1)
lr = LinearRegression().fit(X_train, y_train)
print("**** Results ****")
get_cv_scores(lr, X_train, y_train)
y_pred = lr.predict(X_test)
results(y_test, y_pred)
# -
X_train_sm = sm.add_constant(X_train)
model = sm.OLS(y_train, X_train_sm).fit()
model.bic
# ### Model 3: Return ~ SP500+RealPrice
# +
X = df_new[["SP500", "Real Price"]]
y = df_new[["Return"]]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1)
lr = LinearRegression().fit(X_train, y_train)
print("**** Results ****")
get_cv_scores(lr, X_train, y_train)
y_pred = lr.predict(X_test)
results(y_test, y_pred)
# -
# ### Model 4: Return ~ All features
# +
y = df_new[["Return"]]
X = df_new.drop("Return", axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1)
lr = LinearRegression().fit(X_train, y_train)
print("**** Results ****")
get_cv_scores(lr, X_train, y_train)
y_pred = lr.predict(X_test)
results(y_test, y_pred)
# -
X_train_sm = sm.add_constant(X_train)
model = sm.OLS(y_train, X_train_sm).fit()
model.bic
# #### From results above we could see that feature "Invested Years" doesn't improve our model so we could exclude it in future analysis.
# ## Using PCA to transform features
def transform_features(num_components, X_tr, X_te):
    """Min-max scale the features, then project them onto PCA components.

    Both the scaler and the PCA are fitted on the training split only; the
    test split is transformed with the already-fitted objects (no leakage).

    Returns (fitted_pca, transformed_train, transformed_test).
    """
    scaler = MinMaxScaler()
    train_scaled = scaler.fit_transform(X_tr)
    test_scaled = scaler.transform(X_te)
    pca = PCA(n_components=num_components)
    train_pca = pca.fit_transform(train_scaled)
    test_pca = pca.transform(test_scaled)
    return pca, train_pca, test_pca
# +
y = df_new[["Return"]]
X = df_new.drop("Return", axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1)
pca, X_train_pca, X_test_pca = transform_features(0.99, X_train, X_test)
lr = LinearRegression().fit(X_train_pca, y_train)
print("**** Results ****")
get_cv_scores(lr, X_train_pca, y_train)
y_pred = lr.predict(X_test_pca)
results(y_test, y_pred)
# -
X_train_sm = sm.add_constant(X_train_pca)
model = sm.OLS(y_train, X_train_sm).fit()
model.bic
# +
plt.rcParams["figure.figsize"] = (12,6)
fig, ax = plt.subplots()
end_d = 6
xi = np.arange(1, end_d, step=1)
y = np.cumsum(pca.explained_variance_ratio_)
plt.ylim(0.0,1.1)
plt.plot(xi, y, marker='o', linestyle='--', color='b')
plt.xlabel('Number of Components')
plt.xticks(np.arange(0, end_d, step=1))
plt.ylabel('Cumulative variance (%)')
plt.title('The number of components needed to explain variance')
plt.axhline(y=0.95, color='r', linestyle='-')
plt.text(0.5, 0.85, '95% cut-off threshold', color = 'red', fontsize=16)
ax.grid(axis='x')
plt.show()
# -
# ### From the plot above we could see that number of components is between 3 and 4.
# ### Model 5: Return ~ PCA with 3 components
# +
y = df_new[["Return"]]
X = df_new.drop(["Return", "Year", "Month", "PE10", "Long Interest Rate"], axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1)
# X_train_months = X_train["InvestedMonths"]
# X_test_months = X_test["InvestedMonths"]
# X_train = X_train.drop("InvestedMonths", axis=1)
# X_test = X_test.drop("InvestedMonths", axis=1)
pca, X_train_pca, X_test_pca = transform_features(3, X_train, X_test) # try with 3 components
lr = LinearRegression().fit(X_train_pca, y_train)
print("**** Results ****")
get_cv_scores(lr, X_train_pca, y_train)
y_pred = lr.predict(X_test_pca)
results(y_test, y_pred)
# -
X_train_sm = sm.add_constant(X_train_pca)
model = sm.OLS(y_train, X_train_sm).fit()
model.bic
df1 = pd.concat([X_train[["InvestedMonths"]], y_train], join = 'outer', axis = 1).reset_index(drop=True)
plt.title("Results on train set")
sns.regplot(x="InvestedMonths", y="Return", data=df1)
# +
import itertools
test_return = pd.DataFrame({'Return': list(itertools.chain.from_iterable(y_pred.tolist()))})
# -
df2 = pd.concat([X_test[["InvestedMonths"]].reset_index(drop=True), test_return], join = 'outer', axis = 1).reset_index(drop=True)
plt.title("Results on test set")
sns.regplot(x="InvestedMonths", y="Return", data=df2)
|
notebooks/predicting_financial_market_returns-2000-2017-(monthly).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.1 32-bit
# language: python
# name: python37132bit55124e49dad2434bbd4e31f7bbeee9ee
# ---
# # Agenda
#
# 1. Numpy
# 2. Array
# 3. Working with NumPy
# 4. Difference between NumPy and Array
# 5. NumPy Array
# 6. How to Create NumPy Array
# 7. Operation on NumPy: Slicing
# 8. Join in an Array
# # NumPy
#
#
# + An acronym for **"Numerical Python"**
# + Python library for scientific computing application
# + fast mathematical computation on array and matrices
# + ndarray (n-dimensional array) objects
# + [Numpy Reference](https://s3.amazonaws.com/assets.datacamp.com/blog_assets/Numpy_Python_Cheat_Sheet.pdf)
# ## Array
#
# + Container that hold fixed number of items
# + Items have same data type
# + Contiguous (one after the other) location holding the elements
# ### An array consists of:
#
# + **Element**: Each item stored in an array is called an element.
#
# + **Index**: Each location of an element in an array has a numerical index, which is used to identify the element.
#
# #### Array Representation
#
# 
# ### Anatomy of an Array
#
# 
#
#
# ### Terms
#
# 1. **Axes**: ***axis = 0*** ( row-wise) and ***axis = 1*** (column-wise)
# 2. **Rank**: Number of axes it possesses.
#
# > **Rank = Number of axes**
#
# 3. **Shape**: Number of elements it contains along each of its axis.
# ###### Array Representation in a language (C++)
#
# Arrays can be declared in various ways in different languages. Given below is an illustration:
#
# 
#
# 
#
# <hr>
# ## Working with NumPy
#
# 1. Install the numpy on your PC
#
# ```python
# pip install numpy
# ```
# 2. Import Numpy
#
# ```python
# import numpy as np
# ```
# 3. Now, you are ready to use NumPy
# ### Numpy Arrays
#
# 1. **One-dimesional (1D) Array**: An array of dimension one is called one-dimesional array. It is also known as **Vector**.
#
# 
#
# 2. **Multidimensional arrays or ndarrays**: An array of n-dimension is called ndarray. It is also known as **Matrices**
#
# 
# ### Difference between NumPy Array and List
#
# | NumPy Array | List |
# | --------------------------------------------------------------- | --------------------------------------------------------------- |
# | NumPy array works on homogeneous (same) types. | Python lists are made up of heterogeneous (different) types |
# | NumPy array does not support addition and removal of elements | Python lists supports adding and removal of elements |
# | Can't contain elements of different types | Can contain elements of different types |
# | Less memory consumption | More memory consumption |
# | Faster runtime execution | Runtime execution is comparatively slower than arrays |
# <hr>
# ## How to CREATE A NumPy ARRAY
#
#
# There are several ways of creating a NumPy array.
#
# 1. **np.array()**: To create a 1D/2D NumPy array
# 2. **np.fromstring()**: To create a 1D array from a string
# 3. **np.empty()**: An empty array or uninitialized array
# 4. **np.zeros()**: New array with all elements as zeroes
# 5. **np.ones()**: New array with all ones as its elements
# 6. **np.arange()**: Used to create an array from a range
# 7. **np.linspace()**: To create an array of evenly spaced values over a range
# 8. **np.copy()**: Used to create a copy of an existing array
# 9. **np.reshape()**: We can create a 2D array from a 1D array
# 10. **np.eye() or np.identity()**: All diagonal values will be one
# ### array()
# +
#To create a one-dimensional array
import numpy as np
l1 = [2,3,4,5,6]
arr1 = np.array(l1)
print(arr1)
# +
#To create a two-dimesional array
import numpy as np
A = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
print(A)
# -
# ### fromstring()
# +
import numpy as np
data = np.fromstring('1 2 3 4 10 12 13', dtype = int, sep = " ")
print(data)
# -
# ### empty()
# +
import numpy as np
arr = np.empty( [3,2], dtype = int, order = 'F') # default order: C
print(arr)
# -
# ### zeros()
# +
# array of five zeros. Default type is float
import numpy as np
arr1 = np.zeros(5)
print(arr1)
# -
# ### ones()
# +
# array of five ones. Default dtype is float
import numpy as np
arr1 = np.ones(5)
print(arr1)
# -
# ### arange()
#
# 
# +
import numpy as np
a1 = np.arange(1,13)
print(a1)
# -
# ### linspace()
# +
import numpy as np
line = np.linspace(1,10,10)
print(line)
# -
# ### copy()
# +
import numpy as np
x = np.array([1,2,3])
y = x
z = np.copy(x)
x[0] = 10
print(x)
print(y)
print(z)
# -
# ### reshape()
#
# Reshaping means changing the arrangement of items so that the shape of the array changes while maintaining the same number of dimensions.
#
# 1D
# 
#
# Reshaped into (2D)
# 
# +
import numpy as np
a1 = np.arange(1,13)
reshape = a1.reshape(3,4)
print("1D: array\n", a1)
print("\nAfter Reshaped (2D): \n", reshape)
# -
reshape = a1.reshape(3,4, order = 'F')
reshape
# +
# Creating of 2D array from 1D array using reshape() function
import numpy as np
A = np.array([1,2,3,4,5,6])
print(A)
print("-------------")
B = np.reshape(A, (2,3)) #reshape shall convert 1D array into 2D array with 2 rows and 3 columns
print(B)
# -
# ### eye() / identity()
# +
# create a 4x4 identity matrix using the identity method
import numpy as np
# 4x4 matrix will have 1's as all diagonal elements
mat1 = np.identity(4)
print("\nMatrix 1: \n", mat1)
# +
#Alternatively
mat2 = np.eye(4, dtype = float)
print("\nMatrix 2: \n", mat2)
# -
# <hr>
# ## Operations ON NumPy
#
# #### Array Slicing
#
# + Slicing means sub-sequence of the structure can be indexed and retrieved.
# + Slicing is specified by using the colon operator `:` with a `from` and `to`.
# + **data[from:to]**
#
# ### Slicing in 1D Array
# +
# Slicing in 1D array
import numpy as np
data = np.array([5,2,7,3,9])
print(data[:]) #Shall extract slice from start to end
# -
print(data[1:3]) #Shall extract the slice from index 1 up to (but excluding) index 3
print(data[:2])  # first two elements (indices 0 and 1)
print(data[-2:])  # last two elements
# ### Slicing in 2D array
#
# 
# +
# Slicing in 2D array
import numpy as np
arr = np.array([[1,2,3], [4,5,6], [7,8,9]])
print(arr)
# -
print(arr[:2, 1:])
print(arr[2])
print(arr[2, :])
print(arr[2:, :])
print(arr[:, :2])
print(arr[1, :2])
print(arr[1:2, :2])
# +
# Another example of Slicing
import numpy as np
a = np.array([[1,2,3,4],[5,6,7,8], [9,10,11,12]])
print(a)
# -
b = a[1:2, 0:4]
print(b)
b = a[1:2,1:2]
print(b)
b = a[:2, 0:2]
print(b)
b = a[:2, 1:3]
print(b)
b = a[1:3, 2:4]
print(b)
# <hr>
# ## Joins in Arrays
#
# Joining of two arrays in NumPy is done using **concatenate()** function.
#
# This function is used to join two or more arrays of the same shape.
#
# **Note**
#
# In case of a 2D array, this function concatenates two arrays either by rows or by columns.
# +
#Concatenating two 1D arrays
import numpy as np
a = np.array([1,2,3])
b = np.array([5,6])
c = np.concatenate([a,b,a])
print(c)
# +
#Concatenating two 2D arrays using single array
import numpy as np
A = np.array([[7,5], [1,6]])
## concatenate along the first axis
print("\nAlong First Axis")
print(np.concatenate([A,A]))
## concatenate along the second axis (zero-indexed)
print("\nAlong Second Axis")
print(np.concatenate([A,A], axis = 1))
# +
x = np.array([1,2])
# vertically stack the arrays
print("Vertical Stack")
print(np.vstack([x, A]))
# horizontally stack the arrays
print("\nHorizontal Stack")
y = np.array([[99], [99]])
print(np.hstack([A, y]))
# -
# ## Vertical Stack
#
# 
# +
a = np.array([[1,2],[2,4]])
b = np.array([[7,8]])
c = np.concatenate((a,b), axis = 0)
print(c)
# -
np.vstack([a,b])
# ## Horizontal Stack
#
# 
# +
a = np.array([[1,2],[2,4]])
b = np.array([[7,8]])
c = np.concatenate((a,b.T), axis = 1)
print(c)
# -
np.hstack([a,b.T])
# <hr>
# 
# Reference:
# 1. https://towardsdatascience.com/reshaping-numpy-arrays-in-python-a-step-by-step-pictorial-tutorial-aed5f471cf0b
|
NumPy/01.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (unityml)
# language: python
# name: python3
# ---
# # Section 4 - Computer vision-based machine learning #
# ## Mini-project 4: Introduction to `SciNet` architecture ##
#
# ## Dr. <NAME> (<EMAIL>)
# ## High Energy Physics Group
# ## 523 Blackett Lab
# #### The markdown comments were added by <NAME> (<EMAIL>) ####
# * [Original repository](https://github.com/fd17/SciNet_PyTorch)
# * [Reference](https://arxiv.org/abs/1807.10300)
import torch
import numpy as np
from models import SciNet
from utils import pendulum
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Load trained model
scinet = SciNet(50,1,3,64)
scinet.load_state_dict(torch.load("trained_models/scinet1.dat"))
# +
# Set resolution for plots
size = 20
# Initialize one (size x size) grid of activations per latent neuron
neuron_activation = [np.zeros((size, size)) for _ in range(3)]
# Set pendulum parameters
tmax = 10
A0 = 1
delta0 = 0
m = 1
N_SAMPLE = 50
# Sweep the two physical parameters (spring constant k, damping b) and record
# the latent-neuron activations SciNet produces for each sampled trajectory.
for ik, k in enumerate(np.linspace(5, 10, size)):
    for ib, b in enumerate(np.linspace(0.5, 1, size)):
        # random "question" time for this (k, b) grid point
        tprime = np.random.uniform(0, tmax)
        question = tprime
        answer = pendulum(tprime, A0, delta0, k, b, m)
        # `is None` rather than `== None`: if `answer` were an array, the
        # equality would broadcast element-wise instead of testing for None
        if answer is None:
            continue
        # sampled trajectory fed to the network as the observation
        # (the original also assigned x = np.linspace(...) here, which was
        # immediately overwritten — dead store removed)
        t_arr = np.linspace(0, tmax, N_SAMPLE)
        x = pendulum(t_arr, A0, delta0, k, b, m)
        combined_inputs = np.append(x, question)
        results = scinet.forward(torch.Tensor([combined_inputs]))
        # scinet.mu holds the latent means after the forward pass
        latent_layer = scinet.mu.detach().numpy()[0]
        # store each latent neuron's activation at this grid point
        for neuron in range(3):
            neuron_activation[neuron][ik, ib] = latent_layer[neuron]
# -
# Let's visualize the activations from the latent layer (`Z-axis`) as a function of `k` and `b`; the parameters of the oscillation of the pendulum.
# +
# Surface plots: activation of each latent neuron (Z axis) as a function of
# the pendulum parameters k and b.
fig = plt.figure(figsize=(30, 8))  # was `fig = fig = ...` (duplicated assignment)
# the (k, b) grid is the same for all three panels — build it once
k_array = np.linspace(5, 10, size)
b_array = np.linspace(0.5, 1, size)
X, Y = np.meshgrid(k_array, b_array)
for i in range(3):
    ax = fig.add_subplot(1, 3, 1 + i, projection='3d')
    Z = neuron_activation[i]
    surf = ax.plot_surface(X, Y, Z, cmap=None,
                           linewidth=0, antialiased=True)
    ax.set_xlabel("k")
    ax.set_ylabel("b")
    ax.set_zlim3d(-.2, .2)
    ax.set_title("Latent Neuron %i" % (i + 1))
    ax.view_init(25, -120)
plt.show()
# +
from utils import pendulum as solution
# Pendulum settings
tmax = 10
A0 = 1
delta0 = 0
m = 1
N_SAMPLE = 50
xarr = np.linspace(0,tmax,N_SAMPLE)
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
yarr = solution(xarr,A0,delta0,10,1,m)
sampled_points = yarr
y_test = []
xspace = np.linspace(0,tmax,50)
for x in xspace:
x_in = np.append(sampled_points, x)
x_in = torch.Tensor(x_in).reshape(1,N_SAMPLE+1)
y_test.append(scinet.forward(x_in)[0,0])
ax1.plot(xspace,yarr, ".", label="Input trajectory")
ax1.plot(xspace,y_test, label="Scinet reconstruction")
ax1.legend(loc="upper right")
ax1.set_title("k=%.1f, b=%.1f" %(10,1))
yarr = solution(xarr,A0,delta0,5,0.5,m)
sampled_points = yarr
y_test = []
xspace = np.linspace(0,tmax,50)
for x in xspace:
x_in = np.append(sampled_points, x)
x_in = torch.Tensor(x_in).reshape(1,N_SAMPLE+1)
y_test.append(scinet.forward(x_in)[0,0])
ax2.plot(xspace,yarr, ".", label="Input trajectory")
ax2.plot(xspace,y_test, label="Scinet reconstruction")
ax2.set_title("k=%.1f, b=%.1f" %(5,0.5))
plt.show()
# -
# The accuracy of the reconstruction will be improved by training with longer epochs (e.g. 20 -> 100).
|
Analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# # Batch-Size Comparison Plots
#
# Each final-scores CSV holds per-batch accuracy ('<model> Score'), F1,
# precision and recall for three incremental classifiers: SGD, NB and PA.
# For every batch size we draw one classifier-comparison plot per metric,
# then one metric-comparison plot per classifier. (This replaces four
# copy-pasted plotting suites; the xlabel is now consistently 'Batch' —
# a few plots previously said 'Batch No'.)

MODELS = ('SGD', 'NB', 'PA')


def _plot_percent_lines(df, columns, labels, ylabel, title):
    """Plot each listed column (scaled to %) against 'Batch', then open a new figure."""
    for column, label in zip(columns, labels):
        plt.plot(df['Batch'], df[column] * 100, label=label, linewidth=0.75)
    plt.xlabel('Batch')
    plt.ylabel(ylabel)
    plt.title(title)
    plt.legend()
    plt.figure()


def plot_batch_size_scores(df, batch_size):
    """Draw the full suite of comparison plots for one batch-size dataframe."""
    # One classifier-comparison plot per metric (the 'Score' columns hold accuracy).
    _plot_percent_lines(df, ['%s Score' % m for m in MODELS], MODELS,
                        'Accuracy in %',
                        'Testing Accuracy %% for Batch Size %d' % batch_size)
    for metric in ('F1', 'Precision', 'Recall'):
        _plot_percent_lines(df, ['%s %s' % (m, metric) for m in MODELS], MODELS,
                            '%s in %%' % metric,
                            'Testing %s %% for Batch Size %d' % (metric, batch_size))
    # One metric-comparison plot per classifier.
    for model in MODELS:
        _plot_percent_lines(df,
                            ['%s %s' % (model, metric) for metric in ('F1', 'Precision', 'Recall')],
                            ('F1', 'Precision', 'Recall'), 'in %',
                            'Testing %s Evaluation for Batch Size %d' % (model, batch_size))


# # 2500 Batch Size
df2500 = pd.read_csv('../src/performance/testing/final-scores-2500.csv')
df2500
plot_batch_size_scores(df2500, 2500)
# # 5000 Batch Size
df5000 = pd.read_csv('../src/performance/testing/final-scores-5000.csv')
df5000
plot_batch_size_scores(df5000, 5000)
# # 10000 Batch Size
df10000 = pd.read_csv('../src/performance/testing/final-scores-10000.csv')
df10000
plot_batch_size_scores(df10000, 10000)
# # 20000 Batch Size
df20000 = pd.read_csv('../src/performance/testing/final-scores-20000.csv')
df20000
plot_batch_size_scores(df20000, 20000)
|
Plotting/Test Plots.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.0
# language: julia
# name: julia-1.4
# ---
] activate .
# # Futamura Projections in Julia
#
# We're going to implement the first [Futamura Projection](https://en.wikipedia.org/wiki/Partial_evaluation#Futamura_projections) in Julia. The goal is to compile code without writing a compiler – we'll only write an interpreter, which is much easier to get working, and then *specialise* it to create a compiled program.
#
# The language we're interpreting is [Brainfuck](https://en.wikipedia.org/wiki/Brainfuck), which is helpful because it's particularly simple to implement. The interpreter itself isn't too important (though it's fun to write one yourself), so I'm going to [pull in one I made earlier](https://github.com/MikeInnes/Mjolnir.jl/blob/78f5b5614dbab7a90463ccf409b50ad2816e9662/examples/futamura/brainfuck.jl).
# Provides `bfparse` (program string -> AST) and `interpret` (program + tape -> tape).
include("brainfuck.jl")
# We separate parsing and execution, so we can parse BF into an AST like this:
bfparse("+[><]-")
# And run it like this:
interpret("[->>+<<]>[->+<]", [5, 8, 0])
# The program `[->>+<<]>[->+<]` implements addition of the first two cells in the tape ($5$ and $8$), putting the results in cell 3 ($13$). Here's a similar implementation of multiplication, using a nested loop.
interpret("[->[->>+<<]>>[-<+<+>>]<<<]", [5, 8, 0, 0])
# Now we'll bring in Mjolnir, a partial evaluation system, to specialise the `interpret` function on the input program. This is very simple. We provide the input string as a constant, and the tape as a vector with unknown values (we could pass an array of zeros, but this would cause the whole program to be evaluated at compile time, which would be less interesting).
# +
using Mjolnir
# The program string is a compile-time constant; only the tape stays abstract.
@trace interpret("++", Vector{Int64})
# -
# Mjolnir works with IR, not high-level Julia code, so this is a bit hard to read. But you can hopefully see that there's no parsing happening (there's no need, since it can all be done at compile time). We can show that it's equivalent to the following hand-written code representing the brainfuck program – the IR is the same (down to some formatting differences).
# +
function test(_, tape)
    # Hand-written equivalent of interpreting "++": one increment per `+`.
    # Kept as two separate statements on purpose so the lowered IR matches
    # the specialised interpreter output shown above.
    tape[1] += 1
    tape[1] += 1
    return tape
end
# Show the lowered-but-unoptimised typed code for comparison with the traced IR.
@code_typed optimize=false test(nothing, Int[])
# -
# How did this happen? At core our interpreter is [just a loop](https://github.com/MikeInnes/Mjolnir.jl/blob/78f5b5614dbab7a90463ccf409b50ad2816e9662/examples/futamura/brainfuck.jl#L57-L71) which checks each brainfuck instruction and runs the corresponding Julia code. But we know how many steps the interpreter loop needs to run for (twice, one for each `+` in our program) so we can unroll it. We also know what each instruction is (`+`), so we can get rid of the `if`/`else` and just insert the right instruction directly.
#
# This works for loops too:
@trace interpret("[->>+<<]>[->+<]", Vector{Int})
# Again, this is a bit verbose and hard to read, but it's equivalent to the Julia program
#
# ```julia
# while tape[1] != 0
# tape[1] -= 1
# tape[3] += 1
# end
# while tape[2] != 0
# tape[2] -= 1
# tape[3] += 1
# end
# ```
#
# which is a direct translation of the original program `[->>+<<]>[->+<]`. (There's also `ptr` variable in the interpreter, tracking the current location of the brainfuck pointer, but in this case we can elide that too.) Both `while` loops here are 'really' the same `while` loop [inside our interpreter](https://github.com/MikeInnes/Mjolnir.jl/blob/78f5b5614dbab7a90463ccf409b50ad2816e9662/examples/futamura/brainfuck.jl#L50-L52), but the loop gets specialised twice for both the loops in our brainfuck code. Similarly, our multiplication program `[->[->>+<<]>>[-<+<+>>]<<<]` gets compiled to the lowered version of
#
# ```julia
# while tape[1] != 0
# tape[1] -= 1
# while tape[2] != 0
# tape[2] -= 1
# tape[4] += 1
# end
# while tape[4] != 0
# tape[4] -= 1
# tape[2] += 1
# tape[3] += 1
# end
# end
# ```
# Aside from just admiring the skinny code that gets generated, we can of course compile the output via Julia and run it to make sure it's doing the right thing.
# +
using IRTools
# Specialise the interpreter on the multiplication program, then turn the
# resulting IR back into a callable Julia function.
ir = @trace interpret("[->[->>+<<]>>[-<+<+>>]<<<]", Vector{Int})
f = IRTools.func(ir)
# The two leading `nothing`s fill the traced function's first two argument
# slots (the calls below and in `compile` use the same convention).
f(nothing, nothing, [5, 8, 0, 0])
# -
# Of course, being compiled code, it's significantly faster than the uncompiled version – by ~40x on my system.
# +
using BenchmarkTools
@btime interpret("[->[->>+<<]>>[-<+<+>>]<<<]", [5, 8, 0, 0]);
@btime $f(nothing, nothing, [5, 8, 0, 0]);
# -
# Let's make a wrapper that compiles brainfuck code to a Julia function (by specialising our interpreter and then compiling the result through Julia).
# +
using Mjolnir: Defaults, Const, trace
function compile(bf)
    # Specialise the interpreter on the (constant) program `bf`, leaving the
    # tape abstract, then compile the resulting IR into a Julia function.
    ir = trace(Defaults(), Const(interpret), Const(bf), Vector{Int})
    specialised = IRTools.func(ir)
    # Callers pass just the two input cells; the result lives in tape cell 3.
    (a, b) -> specialised(nothing, nothing, NVector((a, b, 0, 0)))[3]
end
# -
# This utility uses `NVector`, which is easy for Julia and LLVM to optimise – that makes this code about 20x faster still.
# +
f = compile("[->[->>+<<]>>[-<+<+>>]<<<]")
@btime $f(5, 8);
# -
# This gets even better for our addition code, which is about four million times faster on this benchmark.
# +
f = compile("[->>+<<]>[->+<]")
@btime interpret("[->>+<<]>[->+<]", NVector((2^20, 2^20, 0, 0)));
@btime $f(2^20, 2^20);
# -
# Single-digit nanoseconds might seem implausibly fast, but it's really just the result of LLVM recognising that our compiled loop is equivalent to an addition. We can check out the optimised LLVM output to see the single, native machine add instruction that this code boils down to.
@code_llvm f(1, 1)
# Remember, this is not the result of compiling a brainfuck program, but the result of optimising an interpreter that parses and then executes brainfuck programs. By partially evaluating that interpreter, and feeding the result through Julia/LLVM, we get a relatively capable, optimising brainfuck compiler without having to actually write one.
|
examples/futamura/futamura.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # jupyter 사용방법
#
# 실행은 block 별로 된다!! 그래서 하나하나 끊어서 할 수 있다!!!
#
# 실행은 위에서 세모랑 작대기 붙어있는 것 눌러서 해도되지만
#
# shift + enter치면 바로됨!! 기억하기!!
#
# 다른 단축키는 help -> keyboard shortcuts에 있음!!
from pandas import DataFrame
import pandas as pd
import json
import csv
import numpy as np
import re
# Read each CSV file with read_csv
data_aisles = pd.read_csv("./capstone_data/aisles.csv")
data_departments = pd.read_csv("./capstone_data/departments.csv")
data_orders = pd.read_csv("./capstone_data/orders.csv")
data_product = pd.read_csv("./capstone_data/products.csv")
data_order_products = pd.read_csv("./capstone_data/order_products__prior.csv")
# Count the unique values present in the `aisle` column
# (Python 2 kernel: the trailing comma keeps label and count on one line)
print("unique한 aisle 갯수 : "),
print(len(data_aisles.aisle.unique()))
# +
# Use .head() to see the first 5 rows and .tail() for the last 5;
# put a range inside the [] brackets to slice a specific span.
#data_aisles.head()
# -
data_order_products.head()
# Check the total number of records in each dataframe
print("total number of records : "),
print(data_aisles.shape[0])
print("total number of records : "),
print(data_departments.shape[0])
print("total number of records : "),
print(data_order_products.shape[0])
# Number of unique values
len(data_orders.order_id.unique())
len(data_orders.order_hour_of_day.unique())
# ## How to view records at a specific position
data_order_products[0:10]
# View the record with index label 97
data_aisles.loc[97]
data_aisles.loc[25]
# ## # How to view only the records with a specific value in a specific column
# +
data_product.loc[data_product['department_id'] == 7].head()
# -
data_aisles.loc[data_aisles['aisle']=='instant foods']
|
src/recommendation/data_analysis/how_to_read_data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="QB7BF7_SZeiW" colab_type="text"
# [Open with Colab](https://colab.research.google.com/github/dsbook/dsbook/blob/master/learn_generative_model.ipynb)
# + [markdown] id="O4PuF_TJTlwz" colab_type="text"
# OpenMNTと必要なライブラリをインストールします.
# + id="Or56lFl-j5t9" colab_type="code" colab={}
# !git clone https://github.com/OpenNMT/OpenNMT-py.git -b 0.9.2
# !cd OpenNMT-py;pip install -r requirements.txt
# + [markdown] id="BcpbRejPT1bJ" colab_type="text"
# データの読み込みと保存のためGoogle Driveに接続します.
#
# 「Go to this URL in a browser: https:// ...」と表示されるのでURLをクリックし,使用するアカウントを選択します.
#
# 次のページで「許可」ボタンをクリックするとコードが表示されますので,
# そのコードを「Enter your authorization code:」の下の入力欄に入れ,エンターキーを押してください.
#
#
# + id="Tl_QvOmCh-a_" colab_type="code" colab={}
# Mount Google Drive into the Colab runtime so the cells below can read the
# training data and save model checkpoints under "drive/My Drive/...".
from google.colab import drive
drive.mount('./drive')
# + [markdown] id="XjusKPSDWDpj" colab_type="text"
# 学習を実行します.学習は2時間程度で終了します.
# + id="SqH0muc2kLUX" colab_type="code" colab={}
# !python OpenNMT-py/preprocess.py -train_src "drive/My Drive/dsbook/OpenNMT/train.src" -train_tgt "drive/My Drive/dsbook/OpenNMT/train.tgt" \
# -valid_src "drive/My Drive/dsbook/OpenNMT/dev.src" -valid_tgt "drive/My Drive/dsbook/OpenNMT/dev.tgt" -save_data dlg
# !python OpenNMT-py/train.py -gpu_ranks 0 --save_checkpoint_steps 50000 --train_steps 100000 -save_model "drive/My Drive/dsbook/OpenNMT/dlg_model" -data dlg
# + [markdown] id="1N9x3IjkWLtC" colab_type="text"
# 学習結果を表示します.
# + id="7hppJObpq35y" colab_type="code" colab={}
# !python OpenNMT-py/translate.py -model "drive/My Drive/dsbook/OpenNMT/dlg_model_step_100000.pt" -src "drive/My Drive/dsbook/OpenNMT/test.src" -output pred.txt -replace_unk -verbose
|
learn_generative_model.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: bayesian-modelling-tutorial
# language: python
# name: bayesian-modelling-tutorial
# ---
# # What is probability? A simulated introduction
#Import packages
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
sns.set()  # apply seaborn's default figure styling globally
# ## Learning Objectives of Part 1
# - To have an understanding of what "probability" means, in both Bayesian and Frequentist terms;
# - To be able to simulate probability distributions that model real-world phenomena;
# - To understand how probability distributions relate to data-generating **stories**.
# ## Probability
# > To the pioneers such as Bernoulli, Bayes and Laplace, a probability represented a _degree-of-belief_ or plausibility; how much they thought that something was true, based on the evidence at hand. To the 19th century scholars, however, this seemed too vague and subjective an idea to be the basis of a rigorous mathematical theory. So they redefined probability as the _long-run relative frequency_ with which an event occurred, given (infinitely) many repeated (experimental) trials. Since frequencies can be measured, probability was now seen as an objective tool for dealing with _random_ phenomena.
#
# -- _Data Analysis, A Bayesian Tutorial_, Sivia & Skilling (p. 9)
# What type of random phenomena are we talking about here? One example is:
#
# - Knowing that a website has a click-through rate (CTR) of 10%, we can calculate the probability of having 10 people, 9 people, 8 people ... and so on click through, upon drawing 10 people randomly from the population;
# - But given the data of how many people click through, how can we calculate the CTR? And how certain can we be of this CTR? Or how likely is a particular CTR?
#
# Science mostly asks questions of the second form above & Bayesian thinking provides a wonderful framework for answering such questions. Essentially Bayes' Theorem gives us a way of moving from the probability of the data given the model (written as $P(data|model)$) to the probability of the model given the data ($P(model|data)$).
#
# We'll first explore questions of the 1st type using simulation: knowing the model, what is the probability of seeing certain data?
# + [markdown] toc-hr-collapsed=false
# ## Simulating probabilities
# -
# * Let's say that a website has a CTR of 50%, i.e. that 50% of people click through. If we picked 1000 people at random from the population, how likely would it be to find that a certain number of people click?
#
# We can simulate this using `numpy`'s random number generator.
#
# To do so, first note we can use `np.random.rand()` to randomly select floats between 0 and 1 (known as the _uniform distribution_). Below, we do so and plot a histogram:
# Draw 1,000 samples from the uniform distribution on [0, 1) & plot results
x = np.random.rand(1000)
plt.hist(x);
# To then simulate the sampling from the population, we check whether each float was greater or less than 0.5. If less than or equal to 0.5, we say the person clicked.
# Compute how many people click (summing a boolean array counts the True entries)
clicks = x <= 0.5
n_clicks = sum(clicks)
f"Number of clicks = {n_clicks}"
# The proportion of people who clicked can be calculated as the total number of clicks over the number of people:
# Compute the proportion of people who clicked
f"Proportion who clicked = {n_clicks/len(clicks)}"
# **Discussion**: Did you get the same answer as your neighbour? If you did, why? If not, why not?
# **Up for discussion:** Let's say that all you had was this data and you wanted to figure out the CTR (probability of clicking).
#
# * What would your estimate be?
# * Bonus points: how confident would you be of your estimate?
# **Note:** Although, in the above, we have described _probability_ in two ways, we have not described it mathematically. We're not going to do so rigorously here, but we will say that _probabilty_ defines a function from the space of possibilities (in the above, the interval $[0,1]$) that describes how likely it is to get a particular point or region in that space. Mike Betancourt has an elegant [Introduction to Probability Theory (For Scientists and Engineers)](https://betanalpha.github.io/assets/case_studies/probability_theory.html) that I can recommend.
# ### Hands-on: clicking
# Use random sampling to simulate how many people click when the CTR is 0.7. How many click? What proportion?
# Solution
# NOTE(review): this reuses the uniform draws `x` from the cell above rather
# than drawing a fresh sample — fine for the exercise, but the outcome is
# perfectly correlated with the CTR=0.5 simulation.
clicks = x <= 0.7
n_clicks = sum(clicks)
print(f"Number of clicks = {n_clicks}")
print(f"Proportion who clicked = {n_clicks/len(clicks)}")
# _Discussion point_: This model is known as the biased coin flip.
# - Can you see why?
# - Can it be used to model other phenomena?
# ### Galapagos finch beaks
# You can also calculate such proportions with real-world data. Here we import a dataset of Finch beak measurements from the Galápagos islands. You can find the data [here](https://datadryad.org/resource/doi:10.5061/dryad.9gh90).
# Import and view head of data
df_12 = pd.read_csv('../data/finch_beaks_2012.csv')
df_12.head()
# Store the beak lengths (the 'blength' column) in a pandas series
lengths = df_12['blength']
# * What proportion of birds have a beak length > 10 ?
# Summing the boolean comparison counts the birds with beak length > 10
p = (sum(lengths > 10))/len(lengths)
p
# **Note:** This is the proportion of birds that have beak length $>10$ in your empirical data, not the probability that any bird drawn from the population will have beak length $>10$.
# ### Proportion: A proxy for probability
#
# As stated above, we have calculated a proportion, not a probability. As a proxy for the probability, we can simulate drawing random samples (with replacement) from the data seeing how many lengths are > 10 and calculating the proportion (commonly referred to as [hacker statistics](https://speakerdeck.com/jakevdp/statistics-for-hackers)):
# Bootstrap: resample with replacement, then compute the proportion > 10
n_samples = 10000
sum(np.random.choice(lengths, n_samples, replace=True) > 10)/n_samples
# ### Another way to simulate coin-flips
# In the above, you have used the uniform distribution to sample from a series of biased coin flips. I want to introduce you to another distribution that you can also use to do so: the **binomial distribution**.
#
# The **binomial distribution** with parameters $n$ and $p$ is defined as the probability distribution of
#
# > the number of heads seen when flipping a coin $n$ times when with $p(heads)=p$.
# **Note** that this distribution essentially tells the **story** of a general model in the following sense: if we believe that the underlying process generating the observed data has a binary outcome (affected by disease or not, head or not, 0 or 1, clicked through or not), and that one of the two outcomes occurs with probability $p$, then the probability of seeing a particular outcome is given by the **binomial distribution** with parameters $n$ and $p$.
# Any process that matches the coin flip story is a Binomial process (note that you'll see such coin flips also referred to as Bernoulli trials in the literature). So we can also formulate the story of the Binomial distribution as
#
# > the number $r$ of successes in $n$ Bernoulli trials with probability $p$ of success, is Binomially distributed.
# We'll now use the binomial distribution to answer the same question as above:
# * If P(heads) = 0.7 and you flip the coin ten times, how many heads will come up?
#
# We'll also set the seed to ensure reproducible results.
# +
# Fix the RNG seed so the simulations below are reproducible
np.random.seed(seed=16071982)
# Simulate one run of flipping the biased coin (P(heads)=0.7) 10 times
np.random.binomial(10, 0.7)
# -
# ### Simulating many times to get the distribution
#
# In the above, we have simulated the scenario once. But this only tells us one potential outcome. To see how likely it is to get $n$ heads, for example, we need to simulate it a lot of times and check what proportion ended up with $n$ heads.
# +
# Simulate 10,000 runs of flipping a biased coin (P(heads)=0.3) 10 times
x = np.random.binomial(10, 0.3, 10000)
# Plot normalized histogram of results
plt.hist(x, density=True, bins=10);
# -
# * Group chat: what do you see in the above?
# ### Hands-on: Probabilities
# - If I flip a biased coin ($P(H)=0.3$) 20 times, what is the probability of 5 or more heads?
# Solution: fraction of 10,000 simulated 20-flip runs with >= 5 heads
sum(np.random.binomial(20, 0.3, 10000) >= 5)/10000
# - If I flip a fair coin 20 times, what is the probability of 5 or more heads?
sum(np.random.binomial(20,0.5,10000) >= 5)/10000
# - Plot the normalized histogram of number of heads of the following experiment: flipping a fair coin 10 times.
# Plot histogram of 10,000 simulated 10-flip runs of a fair coin
x = np.random.binomial(10, 0.5, 10000)
plt.hist(x, density=True, bins=10);
# **Note:** you may have noticed that the _binomial distribution_ can take on only a finite number of values, whereas the _uniform distribution_ above can take on any number between $0$ and $1$. These are different enough cases to warrant special mention of this & two different names: the former is called a _probability mass function_ (PMF) and the latter a _probability distribution function_ (PDF). Time permitting, we may discuss some of the subtleties here. If not, all good texts will cover this. I like (Sivia & Skilling, 2006), among many others.
#
# **Question:**
# * Looking at the histogram, can you tell me the probability of seeing 4 or more heads?
# Enter the ECDF.
# ## Empirical cumulative distribution functions (ECDFs)
# An ECDF is, as an alternative to a histogram, a way to visualize univariate data that is rich in information. It allows you to visualize all of your data and, by doing so, avoids the very real problem of binning.
# - can plot control plus experiment
# - data plus model!
# - many populations
# - can see multimodality (though less pronounced) -- a mode becomes a point of inflexion!
# - can read off so much: e.g. percentiles.
#
# See Eric Ma's great post on ECDFs [here](https://ericmjl.github.io/blog/2018/7/14/ecdfs/) AND [this twitter thread](https://twitter.com/allendowney/status/1019171696572583936) (thanks, Allen Downey!).
#
# So what is this ECDF?
#
# **Definition:** In an ECDF, the x-axis is the range of possible values for the data & for any given x-value, the corresponding y-value is the proportion of data points less than or equal to that x-value.
# Let's define a handy ECDF function that takes in data and outputs $x$ and $y$ data for the ECDF.
def ecdf(data):
    """Return (x, y) coordinates of the empirical CDF of *data*.

    x is the data sorted ascending; y[i] is the fraction of observations
    that are less than or equal to x[i] (so y runs from 1/n up to 1).
    """
    sorted_vals = np.sort(data)
    n_obs = len(sorted_vals)
    cumulative_fractions = np.arange(1, n_obs + 1) / n_obs
    return sorted_vals, cumulative_fractions
# ### Hands-on: Plotting ECDFs
# Plot the ECDF for the previous hands-on exercise. Read the answer to the following question off the ECDF: what is the probability of seeing 4 or more heads?
# +
# Generate x- and y-data for the ECDF
x_flips, y_flips = ecdf(x)
# Plot the ECDF
plt.plot(x_flips, y_flips, marker='.', linestyle='none');
# -
# ## Probability distributions and their stories
# **Credit:** Thank you to [<NAME>](http://bois.caltech.edu/) for countless hours of discussion, work and collaboration on thinking about probability distributions and their stories. All of the following is inspired by Justin & his work, if not explicitly drawn from.
# In the above, we saw that we could match data-generating processes with binary outcomes to the story of the binomial distribution.
#
# > The Binomial distribution's story is as follows: the number $r$ of successes in $n$ Bernoulli trials with probability $p$ of success, is Binomially distributed.
#
# There are many other distributions with stories also!
# ### Poisson processes and the Poisson distribution
# In the book [Information Theory, Inference and Learning Algorithms](https://www.amazon.com/Information-Theory-Inference-Learning-Algorithms/dp/0521642981) <NAME> tells the tale of a town called Poissonville, in which the buses have an odd schedule. Standing at a bus stop in Poissonville, the amount of time you have to wait for a bus is totally independent of when the previous bus arrived. This means you could watch a bus drive off and another arrive almost instantaneously, or you could be waiting for hours.
#
# Arrival of buses in Poissonville is what we call a Poisson process. The timing of the next event is completely independent of when the previous event happened. Many real-life processes behave in this way.
#
# * natural births in a given hospital (there is a well-defined average number of natural births per year, and the timing of one birth is independent of the timing of the previous one);
# * Landings on a website;
# * Meteor strikes;
# * Molecular collisions in a gas;
# * Aviation incidents.
#
# Any process that matches the buses in Poissonville **story** is a Poisson process.
#
#
# The number of arrivals of a Poisson process in a given amount of time is Poisson distributed. The Poisson distribution has one parameter, the average number of arrivals in a given length of time. So, to match the story, we could consider the number of hits on a website in an hour with an average of six hits per hour. This is Poisson distributed.
# +
# Generate Poisson-distributed data
samples = np.random.poisson(6, size=10**6)
# Plot histogram
plt.hist(samples, bins=21);
# -
# **Question:** Does this look like anything to you?
# In fact, the Poisson distribution is the limit of the Binomial distribution for low probability of success and large number of trials, that is, for rare events.
# To see this, think about the stories. Picture this: you're doing a Bernoulli trial once a minute for an hour, each with a success probability of 0.05. We would do 60 trials, and the number of successes is Binomially distributed, and we would expect to get about 3 successes. This is just like the Poisson story of seeing 3 buses on average arrive in a given interval of time. Thus the Poisson distribution with arrival rate equal to np approximates a Binomial distribution for n Bernoulli trials with probability p of success (with n large and p small). This is useful because the Poisson distribution can be simpler to work with as it has only one parameter instead of two for the Binomial distribution.
# #### Hands-on: Poisson
# Plot the ECDF of the Poisson-distributed data that you generated above.
# +
# Generate x- and y-data for the ECDF
x_p, y_p = ecdf(samples)
# Plot the ECDF
plt.plot(x_p, y_p, marker='.', linestyle='none');
# -
# #### Example Poisson distribution: field goals attempted per game
# This section is explicitly taken from the great work of <NAME>. You can find more [here](https://github.com/justinbois/dataframed-plot-examples/blob/master/lebron_field_goals.ipynb).
# Let's first remind ourselves of the story behind the Poisson distribution.
# > The number of arrivals of a Poisson process in a given set time interval is Poisson distributed.
#
# To quote <NAME>:
#
# > We could model field goal attempts in a basketball game using a Poisson distribution. When a player takes a shot is a largely stochastic process, being influenced by the myriad ebbs and flows of a basketball game. Some players shoot more than others, though, so there is a well-defined rate of shooting. Let's consider LeBron James's field goal attempts for the 2017-2018 NBA season.
# First thing's first, the data ([from here](https://www.basketball-reference.com/players/j/jamesle01/gamelog/2018)):
fga = [19, 16, 15, 20, 20, 11, 15, 22, 34, 17, 20, 24, 14, 14,
24, 26, 14, 17, 20, 23, 16, 11, 22, 15, 18, 22, 23, 13,
18, 15, 23, 22, 23, 18, 17, 22, 17, 15, 23, 8, 16, 25,
18, 16, 17, 23, 17, 15, 20, 21, 10, 17, 22, 20, 20, 23,
17, 18, 16, 25, 25, 24, 19, 17, 25, 20, 20, 14, 25, 26,
29, 19, 16, 19, 18, 26, 24, 21, 14, 20, 29, 16, 9]
# To show that LeBron's attempts are ~ Poisson distributed, you're now going to plot the ECDF and compare it with the ECDF of the Poisson distribution that has the mean of the data (technically, this is the maximum likelihood estimate).
# #### Hands-on: Simulating Data Generating Stories
# Generate the x and y values for the ECDF of LeBron's field attempt goals.
# Generate x & y data for ECDF
x_ecdf, y_ecdf = ecdf(fga)
# Now we'll draw samples out of a Poisson distribution to get the theoretical ECDF, plot it with the ECDF of the data and see how they look.
# +
# Number of times we simulate the model
n_reps = 1000
# Plot ECDF of data
plt.plot(x_ecdf, y_ecdf, '.', color='black');
# Plot ECDF of model
for _ in range(n_reps):
samples = np.random.poisson(np.mean(fga), size=len(fga))
x_theor, y_theor = ecdf(samples)
plt.plot(x_theor, y_theor, '.', alpha=0.01, color='lightgray');
# Label your axes
plt.xlabel('field goal attempts')
plt.ylabel('ECDF')
# -
# You can see from the ECDF that LeBron's field goal attempts per game are Poisson distributed.
# ### Exponential distribution
# We've encountered a variety of named _discrete distributions_. There are also named _continuous distributions_, such as the Exponential distribution and the Normal (or Gaussian) distribution. To see what the story of the Exponential distribution is, let's return to Poissonville, in which the number of buses that will arrive per hour are Poisson distributed.
# However, the waiting time between arrivals of a Poisson process is exponentially distributed.
#
# So: the exponential distribution has the following story: the waiting time between arrivals of a Poisson process is exponentially distributed. It has a single parameter, the mean waiting time. This distribution is not peaked, as we can see from its PDF.
#
# For an illustrative example, lets check out the time between all incidents involving nuclear power since 1974. It's a reasonable first approximation to expect incidents to be well-modeled by a Poisson process, which means the timing of one incident is independent of all others. If this is the case, the time between incidents should be Exponentially distributed.
#
#
# To see if this story is credible, we can plot the ECDF of the data with the CDF that we'd get from an exponential distribution with the sole parameter, the mean, given by the mean inter-incident time of the data.
#
# Load nuclear power accidents data & create array of inter-incident times
df = pd.read_csv('../data/nuclear_power_accidents.csv')
df.Date = pd.to_datetime(df.Date)
df = df[df.Date >= pd.to_datetime('1974-01-01')]
# np.diff on datetime64 values yields nanoseconds; /1e9/3600/24 converts
# nanoseconds -> seconds -> hours -> days between consecutive incidents.
inter_times = np.diff(np.sort(df.Date)).astype(float) / 1e9 / 3600 / 24
# +
# Compute mean and sample from exponential
# The exponential model has a single parameter: the mean waiting time.
mean = np.mean(inter_times)
samples = np.random.exponential(mean, size=10**6)
# Compute ECDFs for sample & model
x, y = ecdf(inter_times)
x_theor, y_theor = ecdf(samples)
# -
# Plot sample & model ECDFs
plt.plot(x_theor, y_theor);
plt.plot(x, y, marker='.', linestyle='none');
# We see that the data is close to being Exponentially distributed, which means that we can model the nuclear incidents as a Poisson process.
# ### Normal distribution
# The Normal distribution, also known as the Gaussian or Bell Curve, appears everywhere. There are many reasons for this. One is the following:
#
# > When doing repeated measurements, we expect them to be Normally distributed, owing to the many subprocesses that contribute to a measurement. This is because (a formulation of the Central Limit Theorem) **any quantity that emerges as the sum of a large number of subprocesses tends to be Normally distributed** provided none of the subprocesses is very broadly distributed.
#
# Now it's time to see if this holds for the measurements of the speed of light in the famous Michelson–Morley experiment:
# Below, I'll plot the histogram with a Gaussian curve fitted to it. Even if that looks good, though, that could be due to binning bias. SO then you'll plot the ECDF of the data and the CDF of the model!
# Load data, plot histogram
import scipy.stats as st
df = pd.read_csv('../data/michelson_speed_of_light.csv')
df = df.rename(columns={'velocity of light in air (km/s)': 'c'})
c = df.c.values
x_s = np.linspace(299.6, 300.1, 400) * 1000
plt.plot(x_s, st.norm.pdf(x_s, c.mean(), c.std(ddof=1)))
plt.hist(c, bins=9, density=True)
plt.xlabel('speed of light (km/s)')
plt.ylabel('PDF')
# #### Hands-on: Simulating Normal
# +
# Get speed of light measurement + mean & standard deviation
michelson_speed_of_light = df.c.values
mean = np.mean(michelson_speed_of_light)
# ddof=1 gives the sample (unbiased) standard deviation.
std = np.std(michelson_speed_of_light, ddof=1)
# Generate normal samples w/ mean, std of data
samples = np.random.normal(mean, std, size=10000)
# Generate data ECDF & model CDF
x, y = ecdf(michelson_speed_of_light)
x_theor, y_theor = ecdf(samples)
# Plot data & model (E)CDFs
_ = plt.plot(x_theor, y_theor)
_ = plt.plot(x, y, marker='.', linestyle='none')
_ = plt.xlabel('speed of light (km/s)')
_ = plt.ylabel('CDF')
# -
# -
# Some of you may ask but is the data really normal? I urge you to check out Allen Downey's post [_Are your data normal? Hint: no._ ](http://allendowney.blogspot.com/2013/08/are-my-data-normal.html)
|
notebooks/01a-instructor-probability-simulation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
line = '– <NAME>: I’m The Song That My Enemies Sing<br /> – Andy & Joey: You’re Wondering Now<br /> – Tiefenrausch: Döner <img class="alignnone size-full wp-image-37" src="http://ska.one/new.gif"/><br /> – King Horror: Loch Ness Monster<br /> – The Upsetters: Man From MI5<br /> – Ill At Ease: Red Eyed<br />'
line
rep = line.replace('<br />', '<br/>')
def replace_all(text):
    """Normalise raw playlist HTML: unify line breaks, strip paragraph tags,
    decode typographic characters, and collapse the "new track" image and
    "SKA 21" link markup into short placeholder tags."""
    # NOTE(review): the ('&', '&') pair is a no-op as written — possibly it
    # was meant to decode '&amp;' to '&'; confirm against the source pages.
    substitutions = (
        ('<br />', '<br/>'),
        ('<p>', ''),
        ('</p>', ''),
        ('&', '&'),
        ('–', '-'),
        ('‘', "'"),
        ('’', "'"),
        (' <img class="alignnone size-full wp-image-37" src="http://ska.one/new.gif"/>', '<new/>'),
        (' <a href="http://file-under-ska.de/old/ska21/" target="_blank" rel="noopener"><img src="http://ska.one/ska21_16.gif" alt="Track von SKA 21" border="0"></a>', '<ska21/>'),
    )
    for old, new in substitutions:
        text = text.replace(old, new)
    return text
repl = replace_all(rep)
rep
repl
repl.split('<br/>')
from urllib.request import urlopen
import re
def read_url(url):
    """Fetch *url* and return the response body decoded as text.

    Uses a context manager so the HTTP response is always closed (the
    original left the connection dangling).  decode() with no argument
    assumes UTF-8.
    """
    with urlopen(url) as response:
        return response.read().decode()
repl.split('<br/>')[0]
def parse_song_line(line):
    """Split a song line of the form '- Artist: Title' into (artist, title).

    The line is assumed to start with '- '.  Only the FIRST ': ' separates
    artist from title, so titles that themselves contain ': ' survive intact
    (the original split() dropped everything after a second ': ').  A line
    without ': ' yields an empty title instead of raising IndexError.
    """
    # drop the leading '- '
    line = line[2:]
    artist, _, title = line.partition(": ")
    return artist, title
ret = parse_song_line(repl.split('<br/>')[0])
ret
def parse_all(page):
    """Parse one playlist page into a list of (artist, title, date, is_new) tuples.

    The raw playlist markup is extracted and normalised via replace_all()
    (which collapses every song separator to '<br/>'), then processed line
    by line: 'Playlist <date>' headers set the date attached to the songs
    that follow; lines starting with '- ' are songs.

    Bug fixes vs. the original draft: '<tag>'.length is JavaScript (strings
    have no .length in Python -> AttributeError); the suffix markers were
    sliced from the wrong end; results were assigned into a fixed 4-slot
    list (IndexError past item 4); and the text was iterated character by
    character instead of line by line.
    """
    inner = extract_raw_playlists(page)
    block = replace_all(inner)
    all_songs = []
    date = None  # date of the playlist header the current songs belong to
    for raw_line in block.split('<br/>'):
        line = raw_line.strip()
        if line == '':
            # skip blank separators (the original aborted on the first one)
            continue
        # Strip the placeholder replace_all() inserted for "SKA 21" links.
        if line.endswith('<ska21/>'):
            line = line[:-len('<ska21/>')].rstrip()
        # '<new/>' marks a song flagged as new on the site.
        is_new = line.endswith('<new/>')
        if is_new:
            line = line[:-len('<new/>')].rstrip()
        if line.startswith('Playlist '):
            date = line[len('Playlist '):]
        elif line.startswith('- '):
            artist, title = parse_song_line(line)
            all_songs.append((artist, title, date, is_new))
        else:
            # cannot handle line
            print('Cannot handle ' + line)
    return all_songs
def extract_raw_playlists(text):
    """Return the text between the '<!-- Start -->' and '<!-- End -->' markers.

    The start offset is computed from len(start_marker) instead of the
    magic number 14, so the two can't drift apart.  Raises ValueError
    (from str.index) if either marker is missing.
    """
    start_marker = '<!-- Start -->'
    start = text.index(start_marker)
    end = text.index('<!-- End -->')
    return text[start + len(start_marker):end]
test = "bububu<!-- Start -->Playlists<!-- End -->ububub"
extract_raw_playlists(test)
def parse_url(url):
    """Download *url* and return its parsed playlist entries."""
    return parse_all(read_url(url))
def go():
    """Download and parse every playlist page, then print all songs.

    Bug fixes vs. the original: Python lists have no .add() method
    (AttributeError after the first page) — use extend(), since parse_url
    returns a list; the accumulator no longer shadows the builtin all();
    the fourteen copy-pasted yearly URLs are generated in a loop.
    """
    urls = ['http://ska.one/playlists/playlists-%d/' % year
            for year in range(2003, 2018)]
    # the current (undated) playlist page
    urls.append('http://ska.one/playlists/playlists/')
    songs = []
    for url in urls:
        songs.extend(parse_url(url))
    print(songs)
read_url('http://ska.one/playlists/playlists-2003/')
text = read_url('http://ska.one/playlists/playlists-2003/')
repl = replace_all(text)
inner = extract_raw_playlists(text)
read_url('http://ska.one/playlists/playlists-2003/')
|
Playfield.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Omega and Xi
#
# To implement Graph SLAM, a matrix and a vector (omega and xi, respectively) are introduced. The matrix is square and labelled with all the robot poses (xi) and all the landmarks (Li). Every time you make an observation, for example, as you move between two poses by some distance `dx` and can relate those two positions, you can represent this as a numerical relationship in these matrices.
#
# It's easiest to see how these work in an example. Below you can see a matrix representation of omega and a vector representation of xi.
#
# <img src='images/omega_xi.png' width=20% height=20% />
#
# Next, let's look at a simple example that relates 3 poses to one another.
# * When you start out in the world most of these values are zeros or contain only values from the initial robot position
# * In this example, you have been given constraints, which relate these poses to one another
# * Constraints translate into matrix values
#
# <img src='images/omega_xi_constraints.png' width=70% height=70% />
#
# If you have ever solved linear systems of equations before, this may look familiar, and if not, let's keep going!
#
# ### Solving for x
#
# To "solve" for all these x values, we can use linear algebra; all the values of x are in the vector `mu` which can be calculated as a product of the inverse of omega times xi.
#
# <img src='images/solution.png' width=30% height=30% />
#
# ---
# **You can confirm this result for yourself by executing the math in the cell below.**
#
# +
import numpy as np

# define omega and xi as in the example
omega = np.array([[1, 0, 0],
                  [-1, 1, 0],
                  [0, -1, 1]])

xi = np.array([[-3],
               [5],
               [3]])

# calculate the inverse of omega
# (np.matrix is deprecated; plain ndarrays with the @ matrix-multiplication
# operator do the same job)
omega_inv = np.linalg.inv(omega)

# calculate the solution, mu = omega^-1 * xi
mu = omega_inv @ xi

# print out the values of mu (x0, x1, x2)
print(mu)
# -
# ## Motion Constraints and Landmarks
#
# In the last example, the constraint equations, relating one pose to another were given to you. In this next example, let's look at how motion (and similarly, sensor measurements) can be used to create constraints and fill up the constraint matrices, omega and xi. Let's start with empty/zero matrices.
#
# <img src='images/initial_constraints.png' width=35% height=35% />
#
# This example also includes relationships between poses and landmarks. Say we move from x0 to x1 with a displacement `dx` of 5. Then we have created a motion constraint that relates x0 to x1, and we can start to fill up these matrices.
#
# <img src='images/motion_constraint.png' width=50% height=50% />
#
# In fact, the one constraint equation can be written in two ways. So, the motion constraint that relates x0 and x1 by the motion of 5 has affected the matrix, adding values for *all* elements that correspond to x0 and x1.
#
# ### 2D case
#
# In these examples, we've been showing you change in only one dimension, the x-dimension. In the project, it will be up to you to represent x and y positional values in omega and xi. One solution could be to create an omega and xi that are 2x larger, so that they can hold both x and y values for poses. I might suggest drawing out a rough solution to graph slam as you read the instructions in the next notebook; that always helps me organize my thoughts. Good luck!
|
2. Omega and Xi, Constraints.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.apis import set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger
# -
def parse_args():
    """Parse command-line arguments for detector training.

    Uses parse_known_args() so unrecognised arguments (e.g. those injected
    by a Jupyter kernel) are ignored rather than causing an error.  Also
    exports LOCAL_RANK into the environment for distributed launchers, and
    maps the deprecated --options flag onto --cfg-options.
    """
    parser = argparse.ArgumentParser(description='Train a detector')
    # NOTE(review): argparse ignores `default` for a required positional
    # argument, so 'config' must still be supplied on the command line (the
    # notebook overrides args.config manually afterwards) — confirm intended.
    parser.add_argument('config', default = "configs/svhn/mask_rcnn_swin_tiny_patch4_window7_mstrain_480-800_adamw_3x_svhn.py", help='train config file path')
    parser.add_argument('--work-dir',default = "output/", help='the dir to save logs and models')
    parser.add_argument(
        '--resume-from', help='the checkpoint file to resume from')
    parser.add_argument(
        '--no-validate',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')
    # --gpus and --gpu-ids are mutually exclusive ways to select devices.
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument(
        '--gpus',
        type=int,
        default = 1,
        help='number of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='ids of gpus to use '
        '(only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    # DictAction (from mmcv) parses key=value pairs into a dict.
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecate), '
        'change to --cfg-options instead.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args, unknown = parser.parse_known_args()
    # Distributed launchers read LOCAL_RANK from the environment.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    if args.options and args.cfg_options:
        raise ValueError(
            '--options and --cfg-options cannot be both '
            'specified, --options is deprecated in favor of --cfg-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --cfg-options')
        args.cfg_options = args.options
    return args
args = parse_args()
print(args.config)
# Hard-code the config path for notebook use (overrides any CLI value).
args.config = "configs/svhn/mask_rcnn_swin_tiny_patch4_window7_mstrain_480-800_adamw_3x_svhn.py"
# Uncomment to resume training from a saved checkpoint:
#args.resume_from = "output/epoch_2.pth"
# +
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# re-set gpu_ids with distributed training mode
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# -
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
meta['config'] = cfg.pretty_text
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
if args.seed is not None:
logger.info(f'Set random seed to {args.seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['exp_name'] = osp.basename(args.config)
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
model.init_weights()
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=__version__ + get_git_hash()[:7],
CLASSES=datasets[0].CLASSES)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
print(model)
# Freeze the entire model, then selectively unfreeze the parts to fine-tune:
# the box head, the mask head, and the neck.  Everything else (notably the
# backbone) stays frozen.
for p in model.parameters():
    p.requires_grad = False
for name, p in model.named_parameters():
    if "roi_head.bbox_head" in name:
        p.requires_grad = True
    if "roi_head.mask_head" in name:
        p.requires_grad = True
    if "neck" in name:
        # print the unfrozen neck parameter names for a visual sanity check
        print(name)
        p.requires_grad = True
train_detector(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta)
|
train.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sherpa
import sherpa.algorithms.bayesian_optimization as bayesian_optimization
import GPyOpt
import scipy
# %matplotlib inline
# # Objective Function
def f(x, sd=0):
    """Quadratic objective (x - 3)**2 + 10, minimised at x = 3.

    With sd == 0 the function is deterministic; otherwise i.i.d. Gaussian
    noise with standard deviation *sd* is added elementwise (x may be a
    scalar or a numpy array).
    """
    clean = (x - 3) ** 2 + 10.
    if sd == 0:
        return clean
    noise = np.random.normal(loc=0., scale=sd, size=np.array(x).shape)
    return clean + noise
x = np.linspace(0, 7, 50, endpoint=True)
fig, ax = plt.subplots()
ax.plot(x, f(x))
ax.fill_between(x, f(x)-1.96*1, f(x)+1.96*1, alpha=0.2)
fig.savefig('function.png')
# # GPyOpt with n=1 (no noise)
bounds = [{'name': 'x', 'type': 'continuous', 'domain': (1,6)}]
myBopt = GPyOpt.methods.BayesianOptimization(f=lambda x: f(x, sd=0), # function to optimize
domain=bounds, # box-constraints of the problem
acquisition_type='EI',
exact_feval = False)
# +
# Run the optimization
max_iter = 100 # evaluation budget
max_time = 60 # time budget
eps = 10e-6 # Minimum allows distance between the las two observations
myBopt.run_optimization(max_iter, max_time, eps)
# -
myBopt.plot_acquisition()
myBopt.plot_convergence()
X, Y = myBopt.get_evaluations()
print(X[np.argmin(Y)])
Ymean, Yvar = myBopt.model.model.predict(X)
idx = np.argmin(Ymean)
print("X=", X[idx], " Y=", Ymean[idx])
# # GPyOpt with noise (sd=1) and one evaluation
bounds = [{'name': 'x', 'type': 'continuous', 'domain': (1,6)}]
myBopt = GPyOpt.methods.BayesianOptimization(f=lambda x: f(x, sd=1), # function to optimize
domain=bounds, # box-constraints of the problem
acquisition_type='EI',
exact_feval = False)
# +
# Run the optimization
max_iter = 50 # evaluation budget
max_time = 60 # time budget
eps = 10e-6 # Minimum allows distance between the las two observations
myBopt.run_optimization(max_iter, max_time, eps)
# -
myBopt.plot_acquisition(filename='acq_n1.png')
myBopt.plot_convergence()
X, Y = myBopt.get_evaluations()
print(X[np.argmin(Y)])
Ymean, Yvar = myBopt.model.model.predict(X)
idx = np.argmin(Ymean)
print("X=", X[idx][0], " Y=", Ymean[idx])
# ### Run multiple times max_iter=50
# +
# max_iter = 50 # evaluation budget
# max_time = 60 # time budget
# eps = 10e-6 # Minimum allows distance between the las two observations
# num_runs = 100
# mineval = []
# minpredicted = []
# for _ in range(num_runs):
# myBopt = GPyOpt.methods.BayesianOptimization(f=lambda x: f(x, sd=1), # function to optimize
# domain=bounds, # box-constraints of the problem
# acquisition_type='EI',
# exact_feval = False)
# myBopt.run_optimization(max_iter, max_time, eps)
# X, Y = myBopt.get_evaluations()
# mineval.append(X[np.argmin(Y)][0])
# Ymean, Yvar = myBopt.model.model.predict(X)
# idx = np.argmin(Ymean)
# minpredicted.append(X[idx][0])
# np.savez('gpyopt_max_iter_50_sd_1.npz', mineval=np.array(mineval), minpredicted=np.array(minpredicted))
# -
data = np.load('gpyopt_max_iter_50_sd_1.npz')
mineval = data['mineval']
minpredicted = data['minpredicted']
fig, ax = plt.subplots(figsize=(10,10))
ax.hist(mineval, histtype='step', color='b', label='BO Min Evaluation MSE={:.3}'.format(np.mean((np.array(mineval)-3)**2)))
ax.hist(minpredicted, histtype='step', color='r', label='BO Min Predicted MSE={:.3}'.format(np.mean((np.array(minpredicted)-3)**2)))
ax.legend()
# # GPyOpt with noise (sd=1) and 10 evaluations
bounds = [{'name': 'x', 'type': 'continuous', 'domain': (1,6)}]
myBopt = GPyOpt.methods.BayesianOptimization(f=lambda x: np.mean([f(x, sd=1) for _ in range(10)]), # function to optimize
domain=bounds, # box-constraints of the problem
acquisition_type='EI',
exact_feval = False)
# +
# Run the optimization
max_iter = 20 # evaluation budget
max_time = 60 # time budget
eps = 10e-6 # Minimum allows distance between the las two observations
myBopt.run_optimization(max_iter, max_time, eps)
# -
myBopt.plot_acquisition(filename='acq_n10.png')
X, Y = myBopt.get_evaluations()
print(X[np.argmin(Y)])
Ymean, Yvar = myBopt.model.model.predict(X)
idx = np.argmin(Ymean)
print("X=", X[idx], " Y=", Ymean[idx])
myBopt.plot_convergence()
# ### Run multiple times with max_iter=20
# +
# max_iter = 20 # evaluation budget
# max_time = 60 # time budget
# eps = 10e-6 # Minimum allows distance between the las two observations
# num_runs = 100
# mineval = []
# minpredicted = []
# for _ in range(num_runs):
# myBopt = GPyOpt.methods.BayesianOptimization(f=lambda x: np.mean([f(x, sd=1) for _ in range(10)]), # function to optimize
# domain=bounds, # box-constraints of the problem
# acquisition_type='EI',
# exact_feval = False)
# myBopt.run_optimization(max_iter, max_time, eps)
# X, Y = myBopt.get_evaluations()
# mineval.append(X[np.argmin(Y)][0])
# Ymean, Yvar = myBopt.model.model.predict(X)
# idx = np.argmin(Ymean)
# minpredicted.append(X[idx][0])
# np.savez('gpyopt_max_iter_20_n_10_sd_1.npz', mineval=np.array(mineval), minpredicted=np.array(minpredicted))
# -
data = np.load('gpyopt_max_iter_20_n_10_sd_1.npz')
mineval_n10 = data['mineval']
minpredicted_n10 = data['minpredicted']
fig, ax = plt.subplots(figsize=(10,10))
ax.hist(mineval_n10, histtype='step', color='b', label='BO Min Evaluation MSE={:.3}'.format(np.mean((mineval_n10-3)**2)))
ax.hist(minpredicted_n10, histtype='step', color='r', label='BO Min Predicted MSE={:.3}'.format(np.mean((minpredicted_n10-3)**2)))
ax.legend()
|
bayesian-optimization.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Classification of text documents using sparse features
#
#
# This is an example showing how scikit-learn can be used to classify documents
# by topics using a bag-of-words approach. This example uses a scipy.sparse
# matrix to store the features and demonstrates various classifiers that can
# efficiently handle sparse matrices.
#
# The dataset used in this example is the 20 newsgroups dataset. It will be
# automatically downloaded, then cached.
#
# The bar plot indicates the accuracy, training time (normalized) and test time
# (normalized) of each classifier.
#
#
#
# +
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
def is_interactive():
    """Return True when no script file is associated with __main__,
    i.e. we are running inside Jupyter or an IPython console."""
    main_module = sys.modules['__main__']
    return not hasattr(main_module, '__file__')
# work-around for Jupyter notebook and IPython console
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
    op.error("this script takes no arguments.")
    sys.exit(1)  # NOTE(review): unreachable -- op.error() already exits (status 2)

print(__doc__)
op.print_help()
print()
# #############################################################################
# Load some categories from the training set
if opts.all_categories:
    categories = None  # None makes fetch_20newsgroups load all 20 groups
else:
    categories = [
        'alt.atheism',
        'talk.religion.misc',
        'comp.graphics',
        'sci.space',
    ]

if opts.filtered:
    # strip metadata that makes the classification task artificially easy
    remove = ('headers', 'footers', 'quotes')
else:
    remove = ()

print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")

data_train = fetch_20newsgroups(subset='train', categories=categories,
                                shuffle=True, random_state=42,
                                remove=remove)

data_test = fetch_20newsgroups(subset='test', categories=categories,
                               shuffle=True, random_state=42,
                               remove=remove)
print('data loaded')

# order of labels in `target_names` can be different from `categories`
target_names = data_train.target_names
def size_mb(docs):
    """Return the total UTF-8 encoded size of the documents in megabytes."""
    total_bytes = 0
    for doc in docs:
        total_bytes += len(doc.encode('utf-8'))
    return total_bytes / 1e6
# Report dataset sizes, computed on the raw text before vectorization.
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)

print("%d documents - %0.3fMB (training set)" % (
    len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
    len(data_test.data), data_test_size_mb))
# Bug fix: with --all_categories, `categories` is None and len(categories)
# raised TypeError. `target_names` always lists the loaded category labels,
# so count those instead.
print("%d categories" % len(target_names))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target

print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
    # stateless vectorizer: no fitting needed, fixed feature space of n_features
    vectorizer = HashingVectorizer(stop_words='english', alternate_sign=False,
                                   n_features=opts.n_features)
    X_train = vectorizer.transform(data_train.data)
else:
    vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
                                 stop_words='english')
    X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()

print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()

# mapping from integer feature name to original token string
if opts.use_hashing:
    feature_names = None  # hashing is one-way: no token strings available
else:
    # NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in
    # favour of get_feature_names_out() -- confirm the sklearn version pinned.
    feature_names = vectorizer.get_feature_names()

if opts.select_chi2:
    print("Extracting %d best features by a chi-squared test" %
          opts.select_chi2)
    t0 = time()
    ch2 = SelectKBest(chi2, k=opts.select_chi2)
    X_train = ch2.fit_transform(X_train, y_train)
    X_test = ch2.transform(X_test)
    if feature_names:
        # keep selected feature names
        feature_names = [feature_names[i] for i
                         in ch2.get_support(indices=True)]
    print("done in %fs" % (time() - t0))
    print()

if feature_names:
    feature_names = np.asarray(feature_names)
def trim(s):
    """Shorten *s* with an ellipsis so it fits an 80-column terminal."""
    if len(s) <= 80:
        return s
    return s[:77] + "..."
# #############################################################################
# Benchmark classifiers
def benchmark(clf):
    """Train and evaluate *clf* on the module-level train/test split.

    Prints timing, accuracy and (depending on command-line flags) extra
    diagnostics; returns (classifier_name, accuracy, train_time, test_time).
    """
    print('_' * 80)
    print("Training: ")
    print(clf)

    start = time()
    clf.fit(X_train, y_train)
    train_time = time() - start
    print("train time: %0.3fs" % train_time)

    start = time()
    pred = clf.predict(X_test)
    test_time = time() - start
    print("test time: %0.3fs" % test_time)

    score = metrics.accuracy_score(y_test, pred)
    print("accuracy: %0.3f" % score)

    # linear models expose coef_; report sparsity and top discriminative terms
    if hasattr(clf, 'coef_'):
        print("dimensionality: %d" % clf.coef_.shape[1])
        print("density: %f" % density(clf.coef_))
        if opts.print_top10 and feature_names is not None:
            print("top 10 keywords per class:")
            for class_idx, label in enumerate(target_names):
                top_terms = np.argsort(clf.coef_[class_idx])[-10:]
                print(trim("%s: %s" % (label, " ".join(feature_names[top_terms]))))
        print()

    if opts.print_report:
        print("classification report:")
        print(metrics.classification_report(y_test, pred,
                                            target_names=target_names))

    if opts.print_cm:
        print("confusion matrix:")
        print(metrics.confusion_matrix(y_test, pred))

    print()
    clf_name = str(clf).split('(')[0]
    return clf_name, score, train_time, test_time
# Run every classifier through benchmark() and collect its summary tuple.
results = []
for clf, name in (
        (RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
        # Fix: the `n_iter` constructor argument was renamed `max_iter`
        # (deprecated in scikit-learn 0.19, removed in 0.21); passing
        # n_iter=50 raises TypeError on any modern scikit-learn.
        (Perceptron(max_iter=50), "Perceptron"),
        (PassiveAggressiveClassifier(max_iter=50), "Passive-Aggressive"),
        (KNeighborsClassifier(n_neighbors=10), "kNN"),
        (RandomForestClassifier(n_estimators=100), "Random forest")):
    print('=' * 80)
    print(name)
    results.append(benchmark(clf))

for penalty in ["l2", "l1"]:
    print('=' * 80)
    print("%s penalty" % penalty.upper())
    # Train Liblinear model
    results.append(benchmark(LinearSVC(penalty=penalty, dual=False,
                                       tol=1e-3)))
    # Train SGD model (same n_iter -> max_iter fix as above)
    results.append(benchmark(SGDClassifier(alpha=.0001, max_iter=50,
                                           penalty=penalty)))

# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, max_iter=50,
                                       penalty="elasticnet")))

# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))

# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))

print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
    ('feature_selection', SelectFromModel(LinearSVC(penalty="l1", dual=False,
                                                    tol=1e-3))),
    ('classification', LinearSVC(penalty="l2"))])))
# make some plots
indices = np.arange(len(results))

# transpose: list of (name, score, train_t, test_t) tuples -> 4 parallel lists
results = [[x[i] for x in results] for i in range(4)]

clf_names, score, training_time, test_time = results
# normalise times to [0, 1] so they share an axis with the accuracy bars
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)

plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='navy')
plt.barh(indices + .3, training_time, .2, label="training time",
         color='c')
plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)

# classifier names to the left of the bars
for i, c in zip(indices, clf_names):
    plt.text(-.3, i, c)

plt.show()
|
Part 7 - Natural Language Processing/document_classification_20newsgroups.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# > **How to run this notebook (command-line)?**
# 1. Install the `ReinventCommunity` environment:
# `conda env create -f environment.yml`
# 2. Activate the environment:
# `conda activate ReinventCommunity`
# 3. Execute `jupyter`:
# `jupyter notebook`
# 4. Copy the link to a browser
# # `Lib-INVENT`: Reinforcement Learning - QSAR model
#
# This tutorial covers the assembly of the `JSON` configuration for a reinforcement learning (RL) run where the scoring function involves only a predictive QSAR model for the DRD2 receptor.
# +
# load dependencies
import os
import re
import json
import tempfile

# --------- change these path variables as required
reinvent_dir = os.path.expanduser("~/Desktop/Reinvent")
reinvent_env = os.path.expanduser("~/miniconda3/envs/reinvent.v3.0")
output_dir = os.path.expanduser("~/Desktop/REINVENT_RL_QSAR_demo")

# --------- do not change
# notebook root path: reuse ipynb_path when already defined in the kernel,
# otherwise fall back to the current working directory
try:
    ipynb_path
except NameError:
    ipynb_path = os.getcwd()

# create the results folder on first run; a pre-existing folder is fine
try:
    os.mkdir(output_dir)
except FileExistsError:
    pass
# -
# ## Setting up the configuration
# `REINVENT` has an entry point that loads a specified `JSON` file on startup. `JSON` is a low-level data format that allows to specify a fairly large number of parameters in a cascading fashion very quickly. The parameters are structured into *blocks* which can in turn contain blocks or simple values, such as *True* or *False*, strings and numbers. In this tutorial, we will go through the different blocks step-by-step, explaining their purpose and potential values for given parameters. Note, that while we will write out the configuration as a `JSON` file in the end, in Python we handle the same information as a simple `dict`.
# ### 1. Run Type Block
# initialize the dictionary with the run-type header expected by REINVENT
configuration = dict(
    version=3,
    model_type="lib_invent",
    run_type="reinforcement_learning",
)
# ### 2. Logging
# In order to analyse the results of any run afterwards, it is paramount to *log* intermediate results, e.g., to judge whether the agent has been focused enough (or too much), whether the learning is going well and so on. On top of this, we also need to make sure the final result (compounds) is deposited appropriately. Thus, we will log these data to two folders and inspect it afterwards with `tensorboard`, which is already installed in the environment.
# add block to specify whether to run locally or not and
# where to store the results and logging
log_settings = {
    "sender": "",  # only relevant if "recipient" is set to "remote"
    "recipient": "local",  # either to local logging or use a remote REST-interface
    "logging_path": os.path.join(output_dir, "progress.log"),  # load this folder in tensorboard
    "result_folder": os.path.join(output_dir, "results"),  # output directory for results
    "job_name": "Reinforcement learning QSAR demo",  # set an arbitrary job name for identification
    "job_id": "n/a",  # only relevant if "recipient" is set to "remote"
}
configuration["logging"] = log_settings
# ### 3. Parameters Block
# All of the remaining specifications and arguments are passed through a final block named `parameters`.
#
# For RL, this requires standard run parameters such as the number of steps and learning rate, paths to the pretrained model as well as the configuration of the scoring function.
#
# The `actor` and `critic` parameters should point to pretrained priors. In production setups, this is typically the same setup. The critic serves as an anchor ensuring that the SMILES syntax is not forgotten while the agent learns.
#
# `scaffolds` gives a list of SMILES strings of the scaffolds to be decorated in the run. Note that the attachment points should be labeled by an integer.
#
# `randomize_scaffolds` is a boolean parameter specifying whether a random SMILES representation of the scaffolds should be used at each step. Crucially, this is not yet possible if a selective reaction filter is imposed.
#
# The DAP learning strategy is recommended in production runs.
# +
# add the "parameters" block
# (cleanup: the former `configuration["parameters"] = {}` was dead code --
# the empty dict was immediately replaced by the literal below)
configuration["parameters"] = {
    "actor": os.path.join(ipynb_path, "models/library_design.prior"),
    "critic": os.path.join(ipynb_path, "models/library_design.prior"),
    "scaffolds": ["[*:0]N1CCN(CC1)CCCCN[*:1]"],  # attachment points labelled [*:0], [*:1]
    "n_steps": 100,
    "learning_rate": 0.0001,
    "batch_size": 128,
    "randomize_scaffolds": True,
    "learning_strategy": {
        "name": "dap",  # recommended strategy for production runs
        "parameters": {
            "sigma": 120
        }
    }
}
# -
# #### Configure the scoring strategy
# This includes setting up `Diversity Filters`, `Reaction Filters` and a `Scoring Function`.
# Only standard scoring strategy is offered to the end user.
scoring_strategy = {"name": "lib_invent"}  # Do not change
configuration["parameters"]["scoring_strategy"] = scoring_strategy

# ##### Configure Diversity Filter
# Diversity Filters penalize repetition to keep the agent out of mode collapse.
# For Lib-INVENT only whole-compound repetition is tracked: `NoFilter` applies
# no penalty, while `NoFilterWithPenalty` subtracts 0.5 from the score of any
# previously seen compound.
scoring_strategy["diversity_filter"] = {
    "name": "NoFilterWithPenalty",
}

# ##### Configure Reaction Filter
# Reaction Filters enforce that decorations are obtainable through the listed
# chemical transformations. A `selective` filter ties specific reactions to
# each attachment point; `non_selective` does not. Multiple reactions can be
# given in both cases (see the `Lib-INVENT_RL2_QSAR_RF` tutorial for details).
scoring_strategy["reaction_filter"] = {
    "type": "selective",
    "reactions": {}  # no reactions are imposed.
}
# ##### Define the scoring function
# The final step of the assembly is the most essential one: to define a scoring function that allows the agent to identify promising suggestions and discard molecules that are of no interest to the project. It is not necessarily better to build a very complex scoring function (on the contrary it can make it hard for the agent to find appropriate solutions). Always bear in mind that there is a post-processing step at the end, in which you will be able to discard molecules either by eye-inspection or by applying further (probably more expensive) methods you have not used in the reinforcement learning loop.
#
# In this example, the scoring function includes a QSAR predictive model and custom alerts penalising undesirable patterns. More use cases can be found in the other tutorials. The individual scoring components are sub-dictionaries passed as a list to the `parameters` block within the `scoring_function` block.
#
#
# **Score transformation**
# An important topic requiring some explanation are the *score transformations*. Each scoring component returns a value between 0 and 1 (where higher values meaning "better") and all scores together are combined into a *total score* for a given compound (also between 0 and 1). This is done either by a weighed average (`custom_sum`) or by a product (`custom_product`).
#
# Since some components might not naturally return values on this scale, score transformations are offered to map the output of the component to the range $[0, 1]$. These transformations involve a sigmoid, reverse sigmoid and double sigmoid functions with flexible parameters to give the user a control over the strictness of the penalty.
# For clarity, the individual components are defined first here. Note that while the `name` parameter can be arbitrary and chosen by the user, the `component_type` has to be recognised by the scoring function.
#
# In the second step, the entire scoring function block is assembled.
# +
# Scoring function: weighted sum of a DRD2 QSAR classifier and a structural
# alert penalty. Each component is a dict consumed by REINVENT's scoring code.
scoring_function = {
    "name": "custom_sum",  # weighted-average aggregation of component scores
    "parallel": False,  # Do not change
    "parameters": [
        # QSAR classifier predicting DRD2 activity from ECFP fingerprints
        {
            "component_type": "predictive_property",
            "name": "DRD2",
            "weight": 1,
            "specific_parameters": {
                "model_path": os.path.join(ipynb_path, "models/drd2.pkl"),
                "scikit": "classification",
                "descriptor_type": "ecfp",
                "size": 2048,
                "radius": 3,
                "transformation": {
                    "transformation_type": "no_transformation"
                }
            }
        },
        # SMARTS patterns of undesirable substructures; matches are penalised
        {
            "component_type": "custom_alerts",
            "name": "Custom alerts",
            "weight": 1,
            "specific_parameters": {
                "smiles": [
                    "[*;r8]",
                    "[*;r9]",
                    "[*;r10]",
                    "[*;r11]",
                    "[*;r12]",
                    "[*;r13]",
                    "[*;r14]",
                    "[*;r15]",
                    "[*;r16]",
                    "[*;r17]",
                    "[#8][#8]",
                    "[#6;+]",
                    "[#16][#16]",
                    "[#7;!n][S;!$(S(=O)=O)]",
                    "[#7;!n][#7;!n]",
                    "C#C",
                    "C(=[O,S])[O,S]",
                    "[#7;!n][C;!$(C(=[O,N])[N,O])][#16;!s]",
                    "[#7;!n][C;!$(C(=[O,N])[N,O])][#7;!n]",
                    "[#7;!n][C;!$(C(=[O,N])[N,O])][#8;!o]",
                    "[#8;!o][C;!$(C(=[O,N])[N,O])][#16;!s]",
                    "[#8;!o][C;!$(C(=[O,N])[N,O])][#8;!o]",
                    "[#16;!s][C;!$(C(=[O,N])[N,O])][#16;!s]"
                ]
            }
        }]
}
configuration["parameters"]["scoring_strategy"]["scoring_function"] = scoring_function
# -
# We now have successfully filled the dictionary and will write it out as a `JSON` file in the output directory. Please have a look at the file before proceeding in order to see how the paths have been inserted where required and the `dict` -> `JSON` translations (e.g. `True` to `true`) have taken place.
# write out the configuration to disc
configuration_JSON_path = os.path.join(output_dir, "RL_QSAR_input.json")
with open(configuration_JSON_path, 'w') as config_file:
    json.dump(configuration, config_file, indent=4, sort_keys=True)
# The resulting configuration file should be placed in the `output_dir` path you specified in the beginning of this notebook.
# # Run
# Execute in jupyter notebook
# +
# %%capture captured_err_stream --no-stderr
# execute REINVENT from the command-line
# !{reinvent_env}/bin/python {reinvent_dir}/input.py {configuration_JSON_path}
# -
# print the output to a file, just to have it for documentation
# (captured_err_stream comes from the %%capture magic in the cell above)
with open(os.path.join(output_dir, "run.err"), 'w') as err_file:
    err_file.write(captured_err_stream.stdout)
# Execute in command line
# ```
# # activate environment
# $ conda activate reinvent.v3.0
#
# # execute in command line
# $ python <reinvent_dir>/input.py <configuration_JSON_path>
# ```
# ## Analyse the results
#
# In order to analyze the run in a more intuitive way, we can use `tensorboard`:
#
# ```
# # go to the root folder of the output
# $ cd <output_dir>
#
# # activate the environment
# $ conda activate reinvent.v3.0
#
# # start tensorboard
# $ tensorboard --logdir "progress.log"
# ```
#
# Then copy the link provided to a browser window. The following figures are example plots - there is always some randomness involved. The `tensorboard` plots display both the individual scoring function components and the average overall score. Valid SMILES are SMILES that follow correct chemical syntax. Number of SMILES found shows SMILES with scores over 0.4 (or another `min_score` specified in the DF setup if required). The raw values correspond to the output of the components before the score transformation.
#
# The valid SMILES plot is learning very fast at the beginning and gradually getting closer to 99.5, which means the model can learn to generate valid compounds quickly and most of the compounds generated are valid. Number of SMILES is growing in a linear way, which means nearly the same number of new compounds are generated at each step. This is desirable since it means that new valid compounds are discovered at each step. Optimally, the number of compounds discovered at each step corresponds to the batch size.
#
# In this case, reaction filters have not been implemented. This means that the RF score follows exactly the validity of the molecules, since any valid molecule has necessarily passed the RFs.
# 
# The average score is getting up quickly which means the model is learning fast. The average score for each step is above 0.60 which indicates a rather good result. It is worth noting that an increasing trend is not expected throughout the run for Lib-INVENT since the model starts in a "good place" and only needs to focus during the production of the chemical library.
# 
#
# It might also be informative to look at the results from the prior (dark blue), the agent (blue) and the augmented likelihood (purple) over time.
#
# 
#
# And last but not least, there is an "Images" tab available that lets you browse through the compounds generated in an easy way. There is also a scroll bar which can be scrolled to see the molecules generated at each step. The total scores are given per molecule. The example below is from step 100.
#
# 
# ## Show top compounds
# All valid compounds with scores over 0.4 can be found in the scaffold_memory.csv file. The SMILES are ranked by their total scores from high to low.
#
# note: scaffold_memory.csv contains SMILES, Scaffolds, Decorations, Scores from different components and Total score.
# +
# import needed packages
import pandas as pd
from rdkit import Chem
from rdkit.Chem import Draw

# load the scored compounds (ranked by total score, high to low)
scaffold_memory_path = os.path.join(output_dir, 'results/scaffold_memory.csv')
data = pd.read_csv(scaffold_memory_path)

# change the number here to show more/less top compounds
smile_list = data['SMILES'].iloc[:6].to_list()
mols = [Chem.MolFromSmiles(entry) for entry in smile_list]
Draw.MolsToGridImage(mols)
|
notebooks/Lib-INVENT_RL1_QSAR.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="tnjU8MV2zl_y"
# ---
# # Predicting Credit Card Default
# ### Model Development: Exploring methods and hyperparameters
#
# ---
#
# -
# Sections:
# - [Loading Preprocessed Data](#Loading-Data)
# - [Validation Set Partitioning](#Set-Partitioning)
# - [Model Development](#Model-Development)
# - [Decision Tree](#Model-Tree)
# - [Random Forest](#Model-Forest)
# - [AdaBoosting](#Model-AdaBoosting)
# - [Neural Network](#Model-NN)
# - [SVM with RBF kernel](#Model-SVM)
# - [Model Evaluation](#Model-Evaluation)
# - [Decision Tree](#Model-Tree-eval)
# - [Random Forest](#Model-Forest-eval)
# - [AdaBoosting](#Model-AdaBoosting-eval)
# - [Neural Network](#Model-NN-eval)
# - [SVM with RBF kernel](#Model-SVM-eval)
#
#
# + [markdown] id="Rl1_quz5C1OE"
# <a id="Loading-Data"></a>
# # Loading preprocessed data
#
# Note:
# Open <a href="./data_preparation.ipynb">data_preparation.ipynb</a> to see how the data was preprocessed.
# +
import pandas as pd
import numpy as np
import imblearn  # library for imbalanced-learning utilities, e.g. K-means SMOTE
from sklearn import preprocessing

#from google.colab import drive
#drive.mount('/content/drive')
# filename = "drive/Shareddrives/DS-project/default_processed.csv"
filename = "default_processed.csv"

# preprocessed dataset (see data_preparation.ipynb); the last column is the label
data = pd.read_csv(filename)
data.head(10)
# + [markdown] id="SnzwxPITA_kk"
# <a id="Set-Partitioning"></a>
# # Validation Set Partitioning
# + [markdown] id="cqJ4cIau_C-d"
# Let's split our data into training and test sets, using an 80-20 split. We will stratify the test set so that it contains approximately the same percentage of samples of each target class as the complete set.
# + id="DIVwtt7MrUtt"
from sklearn.model_selection import train_test_split

# all columns except the last are features; the last column is the label
features = data.iloc[:, :-1]
label = data.iloc[:, [-1]]

# 80/20 split, stratified so both sets keep the class proportions
X_train, X_test, y_train, y_test = train_test_split(
    features, label, test_size=0.2, stratify=label, random_state=10)
# + [markdown] id="PDIlEY__BKrg"
# ### Oversampling using SMOTE
#
# One way to fight the imbalance in the *training set* is to generate new samples in the classes which are under-represented.
#
# The most naive strategy is to generate new samples by randomly sampling with replacement the current available samples.
#
# **over-sample**
# > Object to over-sample the minority class(es) by picking samples at random with replacement.
# + colab={"base_uri": "https://localhost:8080/", "height": 268} executionInfo={"elapsed": 615, "status": "ok", "timestamp": 1634508533445, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gghtsg1uY3yyY2vFF_440TF-lm5x9TZwqtgVKXS=s64", "userId": "07828637835314585763"}, "user_tz": 240} id="zwtlCg35ArNt" outputId="b68442b8-51aa-449b-8c62-e48e52892c76"
from sklearn import preprocessing
from imblearn.over_sampling import SMOTE
import seaborn as sns
import matplotlib.pyplot as plt

le = preprocessing.LabelEncoder()  # encodes target labels as integers
# encode the training target variable
y_train_encoded = le.fit_transform(y_train['DEFAULT'])

# oversampling using SMOTE
oversample = SMOTE(random_state=42)
X_smote, y_smote = oversample.fit_resample(X_train, y_train_encoded)

# class counts after resampling -- both classes should now be balanced
index, counts = np.unique(y_smote, return_counts=True)
sns.set(rc={'figure.figsize': (4.0, 4.0)})
# Fix: seaborn removed positional x/y arguments to barplot() (deprecated in
# 0.11, removed in 0.12); they must be passed as keywords.
sns.barplot(x=index, y=counts)
plt.show()
# + [markdown] id="xjXhOU7BDyWO"
# <a id="Model-Development"></a>
# # Model Development
#
# ---
#
# -
# Auxiliary Functions
# + id="hElBp2-IoJh2"
def model_predict(classifier, X_train, y_train, X_test):
    """Fit *classifier* on (X_train, y_train) and return the predicted labels
    together with the positive-class probabilities for X_test."""
    classifier.fit(X_train, y_train)
    labels = classifier.predict(X_test)
    probabilities = classifier.predict_proba(X_test)
    return labels, probabilities[:, 1]
def plot_feature_importance(classifier, columns=None):
    """Bar-plot the impurity-based feature importances of a fitted classifier.

    Parameters
    ----------
    classifier : fitted estimator exposing ``feature_importances_``.
    columns : optional sequence of feature names. Defaults to the columns of
        the module-level ``X_train`` for backward compatibility -- callers
        can now pass their own names instead of relying on that global.
    """
    if columns is None:
        columns = X_train.columns  # NOTE: implicit dependency on the global split
    importance = pd.Series(classifier.feature_importances_)
    df = pd.DataFrame({'features': columns, 'importance': importance})
    sns.set(rc={'figure.figsize': (14, 8.27)})
    ax = sns.barplot(x="importance", y="features", data=df)
# + [markdown] id="BQwPJVJLdn-s"
# <a id="Model-Tree"></a>
# ## Decision Tree
# + id="3h5e7H2osENm"
from sklearn.tree import DecisionTreeClassifier

# Predictions/probabilities are collected per training-data variant,
# keyed 'Original' (raw split) or 'SMOTE' (resampled split).
tree = DecisionTreeClassifier(random_state=0)
y_pred_tree = {}
y_prob_tree = {}

# + [markdown] id="3PtvO_RBp0RG"
# ### Decision Tree: original data
# + id="aYiqbAjLdn-y"
y_pred_tree['Original'], y_prob_tree['Original'] = model_predict(tree, X_train, y_train, X_test)

# + [markdown] id="AQGIzNKGdn-y"
# Let's visualize the feature importance. This value is the average decrease in impurity
# + colab={"base_uri": "https://localhost:8080/", "height": 517} executionInfo={"elapsed": 766, "status": "ok", "timestamp": 1634508833229, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gghtsg1uY3yyY2vFF_440TF-lm5x9TZwqtgVKXS=s64", "userId": "07828637835314585763"}, "user_tz": 240} id="8qvRU48Xdn-y" outputId="c08907d4-1882-4acb-e8c8-e8bc454435e1"
plot_feature_importance(tree)

# + [markdown] id="JmPCYaWbsVva"
# ### Decision Tree: K-means SMOTE data
# + id="g29R0eV_sVvb"
y_pred_tree['SMOTE'], y_prob_tree['SMOTE'] = model_predict(tree, X_smote, y_smote, X_test)

# + [markdown] id="nuiXGdYVsVvb"
# Let's visualize the feature importance. This value is the average decrease in impurity
# + colab={"base_uri": "https://localhost:8080/", "height": 517} executionInfo={"elapsed": 727, "status": "ok", "timestamp": 1634508893111, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/<KEY>", "userId": "07828637835314585763"}, "user_tz": 240} id="XT3Vaz4_sVvc" outputId="ebd7f445-7308-4856-85d1-cf24a8617411"
# NOTE: `tree` was refit on the SMOTE data by the model_predict call above
plot_feature_importance(tree)

# + [markdown] id="NLYarIYGcFtY"
# <a id="Model-Forest"></a>
# ## Random Forest
# + id="-GMUoi81D12U"
from sklearn.ensemble import RandomForestClassifier

forest = RandomForestClassifier(n_estimators=50, random_state=0)
y_pred_forest = {}
y_prob_forest = {}

# + [markdown] id="erjFwrfCs6c4"
# ### Random Forest: original data
# + id="0yHKQPGgs6c-"
# np.ravel flattens the single-column label frame into the 1-D array sklearn expects
y_pred_forest['Original'], y_prob_forest['Original'] = model_predict(forest, X_train, np.ravel(y_train.values), X_test)

# + [markdown] id="ZLmh_yNns6c-"
# Let's visualize the feature importance. This value is the average decrease in impurity
# + colab={"base_uri": "https://localhost:8080/", "height": 517} executionInfo={"elapsed": 445, "status": "ok", "timestamp": 1634508974477, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gghtsg1uY3yyY2vFF_440TF-lm5x9TZwqtgVKXS=s64", "userId": "07828637835314585763"}, "user_tz": 240} id="gSMfSanss6c_" outputId="fc1fe84b-979e-463d-836a-feea212ff486"
plot_feature_importance(forest)

# + [markdown] id="rnFmNSs-uqDL"
# ### Random Forest: SMOTE data
# + id="07HZT7EyuqDZ"
y_pred_forest['SMOTE'], y_prob_forest['SMOTE'] = model_predict(forest, X_smote, y_smote, X_test)

# + [markdown] id="yXvQdOk2uqDZ"
# Let's visualize the feature importance. This value is the average decrease in impurity
# + colab={"base_uri": "https://localhost:8080/", "height": 517} executionInfo={"elapsed": 615, "status": "ok", "timestamp": 1634509001455, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gghtsg1uY3yyY2vFF_440TF-lm5x9TZwqtgVKXS=s64", "userId": "07828637835314585763"}, "user_tz": 240} id="auNLo87quqDZ" outputId="f4b4add9-3b7a-41e2-f9b0-376e5a3246aa"
plot_feature_importance(forest)

# + [markdown] id="E_Mq7mAhznKU"
# <a id="Model-AdaBoosting"></a>
# ## AdaBoosting
# + id="WXaWiPkrznKa"
from sklearn.ensemble import AdaBoostClassifier

adaboost = AdaBoostClassifier(n_estimators=100, random_state=0)
y_pred_adaboost = {}
y_prob_adaboost = {}

# + [markdown] id="pI1MXm51vH0E"
# ### AdaBoosting: original data
# + id="VKjzxhyXvH0T"
y_pred_adaboost['Original'], y_prob_adaboost['Original'] = model_predict(adaboost, X_train, np.ravel(y_train.values), X_test)

# + [markdown] id="WraYKpGzvH0T"
# Let's visualize the feature importance. This value is the average decrease in impurity
# + colab={"base_uri": "https://localhost:8080/", "height": 517} executionInfo={"elapsed": 573, "status": "ok", "timestamp": 1634509043218, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gghtsg1uY3yyY2vFF_440TF-lm5x9TZwqtgVKXS=s64", "userId": "07828637835314585763"}, "user_tz": 240} id="8K1VWnUMvH0T" outputId="24dcc9c1-1220-42fc-d9ec-f348cc151c60"
plot_feature_importance(adaboost)

# + [markdown] id="nq4A7FD6vfId"
# ### AdaBoosting: SMOTE data
# + id="jX6ueYE5vfIs"
y_pred_adaboost['SMOTE'], y_prob_adaboost['SMOTE'] = model_predict(adaboost, X_smote, y_smote, X_test)

# + [markdown] id="zSLNRKnEvfIs"
# Let's visualize the feature importance. This value is the average decrease in impurity
# + colab={"base_uri": "https://localhost:8080/", "height": 517} executionInfo={"elapsed": 615, "status": "ok", "timestamp": 1634509066566, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gghtsg1uY3yyY2vFF_440TF-lm5x9TZwqtgVKXS=s64", "userId": "07828637835314585763"}, "user_tz": 240} id="3YdmIEePvfIs" outputId="31de6162-47bd-46c1-a1cb-59fc23e2ea9c"
plot_feature_importance(adaboost)
# + [markdown] id="q9UHtqXJmOIo"
# <a id="Model-NN"></a>
# ## Neural Network
# + id="hs5jpE3C304U"
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler

y_pred_nn = {}
y_prob_nn = {}


def nn_predict(X_train, y_train, X_test):
    """Standardize the features, fit an MLP classifier, and return the
    predicted labels plus positive-class probabilities for X_test."""
    scaler = StandardScaler().fit(X_train)
    scaled_train = scaler.transform(X_train)
    scaled_test = scaler.transform(X_test)
    mlp = MLPClassifier(hidden_layer_sizes=(25, 15), activation='logistic',
                        random_state=1, max_iter=700)
    mlp.fit(scaled_train, y_train)
    predicted = mlp.predict(scaled_test)
    positive_prob = mlp.predict_proba(scaled_test)[:, 1]
    return predicted, positive_prob


# + [markdown] id="avyLL0Jcv-Z3"
# ### Neural Network: original data
# + id="rz9lB6Kzv-aF"
y_pred_nn['Original'], y_prob_nn['Original'] = nn_predict(X_train, np.ravel(y_train.values), X_test)

# + [markdown] id="g5gTSViCwU9h"
# ### Neural Network: SMOTE data
# + id="udgoLSBswU9h"
y_pred_nn['SMOTE'], y_prob_nn['SMOTE'] = nn_predict(X_smote, y_smote, X_test)
# + [markdown] id="_nnUOcQdhlcX"
# <a id="Model-SVM"></a>
# ## SVM with RBF kernel
# + id="waWCiE4Khl6y"
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
# RBF-kernel SVM; probability=True enables predict_proba (needed for the ROC
# curves later) at the cost of an extra internal cross-validation during fit.
svm_rbf_kernel = SVC(probability=True)
y_pred_svm_rbf = {}
y_prob_svm_rbf = {}
# + [markdown] id="QRJN1WxlhxQo"
# ### SVM with RBF kernel: original data
# + id="rCO8cYochxeu"
# Standardize features by removing the mean and scaling to unit variance
# (SVMs are scale-sensitive; the scaler is fit on the training split only).
scaler = StandardScaler()
scaler.fit(X_train)
X_train_svm = scaler.transform(X_train)
X_test_svm = scaler.transform(X_test)
y_pred_svm_rbf['Original'], y_prob_svm_rbf['Original'] = model_predict(
    svm_rbf_kernel,
    X_train_svm,
    y_train,
    X_test_svm)
# + [markdown] id="cePSn1zohxxc"
# ### SVM with RBF kernel: SMOTE data
# + id="qp0c9X95hx9K"
# Standardize features by removing the mean and scaling to unit variance
# (the scaler is refit on the SMOTE sample so its statistics match the
# resampled training data).
scaler = StandardScaler()
scaler.fit(X_smote)
X_train_smote_svm = scaler.transform(X_smote)
X_test_smote_svm = scaler.transform(X_test)
y_pred_svm_rbf['SMOTE'], y_prob_svm_rbf['SMOTE'] = model_predict(
    svm_rbf_kernel,
    X_train_smote_svm,
    y_smote,
    X_test_smote_svm)
# + [markdown] id="AikCakBrfjZG"
# <a id="Model-Evaluation"></a>
# # Model Evaluation
#
# ---
# -
# Auxiliar Functions
# + id="gFFZlyC21jpg"
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, roc_curve, auc
import seaborn as sns
from cf_matrix import make_confusion_matrix
import sys
sys.path.insert(0, './')
# NOTE(review): make_confusion_matrix was already imported explicitly above;
# this wildcard import of the same module is redundant.
from cf_matrix import *
# parameters for confusion matrix
labels = ["TN","FP","FN","TP"]
categories = ["0-No", "1-Yes"]
# small default figure size for the seaborn plots below
sns.set(rc={'figure.figsize':(4,2)})
title = "Prediction for 'default payment next month'"
def plot_ROC(y_test_encoded, y_pred):
    """Plot a single ROC curve together with its AUC.

    NOTE(review): despite the name, ``y_pred`` is expected to hold
    probabilities/scores rather than hard labels — confirm at call sites.
    """
    false_pos, true_pos, _ = roc_curve(y_test_encoded, y_pred)
    area = auc(false_pos, true_pos)
    plt.figure()
    # ROC curve plus the y = x chance diagonal for reference.
    plt.plot(false_pos, true_pos, color='cadetblue', label='ROC curve (area = %0.2f)' % area)
    plt.plot([0, 1], [0, 1], color='sandybrown', linestyle='--')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC Curves')
    plt.legend(loc="lower right")
    plt.show()
def cf_matrix_heatmap(y_test_encoded, y_pred):
    """Draw the confusion matrix for the given predictions as an annotated heatmap."""
    sns.heatmap(confusion_matrix(y_test_encoded, y_pred), annot=True, cmap='icefire')
    plt.xlabel("Predicted Label")
    plt.ylabel("True Label")
def evaluate(y_pred, y_prob, do_plot_ROC=True):
    """Print the classification report, accuracy and confusion matrix for the
    held-out test set, optionally followed by the ROC curve.

    Relies on the notebook-level globals ``le``, ``y_test``, ``labels``,
    ``categories`` and ``title``.

    NOTE(review): ``le.fit_transform`` re-fits the encoder on the test labels
    each call — verify the encoding matches the one used at training time.
    """
    class_names = ['No Default', 'Default']
    encoded_truth = le.fit_transform(y_test['DEFAULT'])
    print(classification_report(encoded_truth, y_pred, target_names=class_names))
    print("Accuracy: %0.2f%%" % (accuracy_score(encoded_truth, y_pred) * 100))
    print("Confusion Matrix:")
    make_confusion_matrix(
        confusion_matrix(encoded_truth, y_pred),
        group_names=labels,
        categories=categories,
        cmap='icefire',
        sum_stats=False,
        count=True,
        title=title,
        cbar=True,
    )
    if do_plot_ROC:
        # NOTE(review): the raw (un-encoded) labels are passed here, unlike above.
        plot_ROC(y_test['DEFAULT'], y_prob)
# + [markdown] id="5RN2Zwiy1opg"
# <a id="Model-Tree-eval"></a>
# ## Decision Tree Evaluation
# + [markdown] id="6UEoTHwS2ZgU"
# ### Decision Tree: original data
# + colab={"base_uri": "https://localhost:8080/", "height": 558} executionInfo={"elapsed": 712, "status": "ok", "timestamp": 1634528095290, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gghtsg1uY3yyY2vFF_440TF-lm5x9TZwqtgVKXS=s64", "userId": "07828637835314585763"}, "user_tz": 240} id="-g_OAlFO1t_w" outputId="c6078fe5-0a89-4246-b6fa-6117398c6ea9"
# Decision tree trained on the original (imbalanced) data.
evaluate(y_pred_tree["Original"], y_prob_tree["Original"])
# + [markdown] id="mhqOGlN522tK"
# ### Decision Tree: SMOTE data
# + colab={"base_uri": "https://localhost:8080/", "height": 558} executionInfo={"elapsed": 680, "status": "ok", "timestamp": 1634528097698, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gghtsg1uY3yyY2vFF_440TF-lm5x9TZwqtgVKXS=s64", "userId": "07828637835314585763"}, "user_tz": 240} id="yjGxPqga22ta" outputId="ddde6dbb-401c-4487-f7a7-81e4a6f6faa7"
# Decision tree trained on the SMOTE-resampled data.
evaluate(y_pred_tree["SMOTE"], y_prob_tree["SMOTE"])
# + [markdown] id="-OaNVAR_22ta"
# <a id="Model-Forest-eval"></a>
# ## Random Forest Evaluation
# + [markdown] id="sQkQa3T-4Wow"
# ### Random Forest: original data
# + colab={"base_uri": "https://localhost:8080/", "height": 558} executionInfo={"elapsed": 1206, "status": "ok", "timestamp": 1634528099990, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gghtsg1uY3yyY2vFF_440TF-lm5x9TZwqtgVKXS=s64", "userId": "07828637835314585763"}, "user_tz": 240} id="CSNa1Fvj4Wo1" outputId="ad22bd1f-f4e4-4578-85cd-9b3411b1ca83"
# Random forest trained on the original data.
evaluate(y_pred_forest["Original"], y_prob_forest["Original"])
# + [markdown] id="C8JDFOgY4wFl"
# ### Random Forest: SMOTE data
# + colab={"base_uri": "https://localhost:8080/", "height": 558} executionInfo={"elapsed": 666, "status": "ok", "timestamp": 1634528103489, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gghtsg1uY3yyY2vFF_440TF-lm5x9TZwqtgVKXS=s64", "userId": "07828637835314585763"}, "user_tz": 240} id="mUpOb80k22ta" outputId="7d3810d7-4159-427c-9e76-762dab5841a4"
# Random forest trained on the SMOTE-resampled data.
evaluate(y_pred_forest["SMOTE"], y_prob_forest["SMOTE"])
# + [markdown] id="mcE6eJnI5Hem"
# <a id="Model-AdaBoosting-eval"></a>
# ## Adaboost Evaluation
# + [markdown] id="_wMaYR1S5Hen"
# ### Adaboost Evaluation: original data
# + colab={"base_uri": "https://localhost:8080/", "height": 558} executionInfo={"elapsed": 696, "status": "ok", "timestamp": 1634528105159, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gghtsg1uY3yyY2vFF_440TF-lm5x9TZwqtgVKXS=s64", "userId": "07828637835314585763"}, "user_tz": 240} id="TsJX0r-k5Hen" outputId="cfb003e8-b14d-4fb9-de13-d5fb27d4bd7e"
# AdaBoost trained on the original data.
evaluate(y_pred_adaboost["Original"], y_prob_adaboost["Original"])
# + [markdown] id="6xWRo_tL5Hen"
# ### Adaboost Evaluation: SMOTE data
# + colab={"base_uri": "https://localhost:8080/", "height": 558} executionInfo={"elapsed": 693, "status": "ok", "timestamp": 1634528106984, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gghtsg1uY3yyY2vFF_440TF-lm5x9TZwqtgVKXS=s64", "userId": "07828637835314585763"}, "user_tz": 240} id="gHmYIN5t5Heo" outputId="59498a45-e5de-47d9-a652-5f0981db3370"
# AdaBoost trained on the SMOTE-resampled data.
evaluate(y_pred_adaboost["SMOTE"], y_prob_adaboost["SMOTE"])
# + [markdown] id="2-zFFTYX5nSm"
# <a id="Model-NN-eval"></a>
# ## Neural Network Evaluation
# + [markdown] id="xEErcklV5nSv"
# ### Neural Network: original data
# + colab={"base_uri": "https://localhost:8080/", "height": 558} executionInfo={"elapsed": 340, "status": "ok", "timestamp": 1634528108378, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gghtsg1uY3yyY2vFF_440TF-lm5x9TZwqtgVKXS=s64", "userId": "07828637835314585763"}, "user_tz": 240} id="VfY3S-HU5nSv" outputId="39a3fa7a-a5b9-4086-8e76-7234392095a3"
# MLP trained on the original data.
evaluate(y_pred_nn["Original"], y_prob_nn["Original"])
# + [markdown] id="Ala-FCsz5nSv"
# ### Neural Network: SMOTE data
# + colab={"base_uri": "https://localhost:8080/", "height": 558} executionInfo={"elapsed": 739, "status": "ok", "timestamp": 1634528110245, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gghtsg1uY3yyY2vFF_440TF-lm5x9TZwqtgVKXS=s64", "userId": "07828637835314585763"}, "user_tz": 240} id="VrfjAp-85nSv" outputId="daab033e-4514-48af-d22d-8cc465e38b43"
# MLP trained on the SMOTE-resampled data.
evaluate(y_pred_nn["SMOTE"], y_prob_nn["SMOTE"])
# + [markdown] id="A-ku8_coiImL"
# <a id="Model-SVM-eval"></a>
# ## SVM with RBF kernel Evaluation
# + [markdown] id="rro0QlRliUFA"
# ### SVM with RBF kernel: original data
# + colab={"base_uri": "https://localhost:8080/", "height": 558} executionInfo={"elapsed": 781, "status": "ok", "timestamp": 1634528112648, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gghtsg1uY3yyY2vFF_440TF-lm5x9TZwqtgVKXS=s64", "userId": "07828637835314585763"}, "user_tz": 240} id="oPt_7NfDYoFI" outputId="2b44dcb2-3383-4f37-ef2e-7a5d09b6ace3"
# RBF-kernel SVM trained on the original data.
evaluate(y_pred_svm_rbf["Original"], y_prob_svm_rbf["Original"])
# + [markdown] id="9ySq07h0iUsn"
# ### SVM with RBF kernel: SMOTE data
# + colab={"base_uri": "https://localhost:8080/", "height": 558} executionInfo={"elapsed": 782, "status": "ok", "timestamp": 1634528114394, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gghtsg1uY3yyY2vFF_440TF-lm5x9TZwqtgVKXS=s64", "userId": "07828637835314585763"}, "user_tz": 240} id="wFedTpo_ii7a" outputId="fef4b12c-64f8-4a3b-8d17-a61c947523d3"
# RBF-kernel SVM trained on the SMOTE-resampled data.
evaluate(y_pred_svm_rbf["SMOTE"], y_prob_svm_rbf["SMOTE"])
|
model_training.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook demonstrates the pipeline for using the linear method to perform site response analysis.
from PySeismoSoil.class_Vs_profile import Vs_Profile
from PySeismoSoil.class_ground_motion import Ground_Motion
from PySeismoSoil.class_simulation import Linear_Simulation
# The linear method assumes the properties of soil layers do not change over time, no matter how strong the input motion is. Therefore, we only need the soil profile and the input ground motion:
# Load the shear-wave velocity profile and the input acceleration record
# shipped with PySeismoSoil's test suite.
soil_profile = Vs_Profile('../PySeismoSoil/tests/files/profile_FKSH14.txt')
input_motion = Ground_Motion('../PySeismoSoil/tests/files/sample_accel.txt', unit='cm/s/s')
# (`Vs_Profile` class: [documentation](https://pyseismosoil.readthedocs.io/en/stable/api_docs/class_Vs_profile.html) and [examples](./Demo_02_Vs_Profile.ipynb). `Ground_Motion` class: [documentations](https://pyseismosoil.readthedocs.io/en/stable/api_docs/class_ground_motion.html) and [examples](./Demo_01_Ground_Motion.ipynb).)
# Quick sanity checks: tabular summary of the profile and a plot of the motion.
soil_profile.summary()
input_motion.plot();
# Initialize a `Linear_Simulation` object. We use an "elastic" boundary condition in this example, which means that the input ground motion is the ground motion on a nearby reference rock site (or "rock outcrop site"). (If the input ground motion is the motion recorded in a borehole at the bottom of the $V_S$ profile, then use "rigid" as the boundary condition.)
# 'elastic' = the input motion is a rock-outcrop motion (see note above).
lin_sim = Linear_Simulation(soil_profile, input_motion, boundary='elastic')
# Then run the simulation with the `run()` method:
# every_layer=True computes responses for all layers (slower, more detail).
sim_results_1 = lin_sim.run(show_fig=True, every_layer=True)
# The figure above is the profile of maximum acceleration, velocity, displacement, shear strain, and shear stress during the whole duration of shaking.
# We can set the parameter `every_layer` to `False`, which only calculates the ground motion on the soil surface, so is faster.
# Surface-only run; the trailing ';' suppresses the notebook's repr output.
sim_results_2 = lin_sim.run(show_fig=True, every_layer=False);
# We can see that the result (on the soil surface) is the same as above.
# `sim_results_1` and `sim_results_2` are objects of `Simulation_Results` class. You can save these simulation results to the hard drive like this:
#
# ```python
# >>> sim_results_1.to_txt()
# ```
#
# To also save the figures (input/output ground motion, maximum accel/veloc/displ/strain/stress profiles), use the `plot()` method:
#
# ```python
# >>> sim_results_2.plot(dpi=200, save_fig=True)
# ```
#
# For more information about the `Simulation_Results` class, see its [documentation](https://pyseismosoil.readthedocs.io/en/stable/api_docs/class_simulation_results.html).
|
examples/Pipeline_03_Linear_Method.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <small><small><i>
# All the IPython Notebooks in this lecture series by Dr. <NAME> are available @ **[GitHub](https://github.com/milaan9/04_Python_Functions/tree/main/002_Python_Functions_Built_in)**
# </i></small></small>
# # Python `hasattr()`
#
# The **`hasattr()`** method returns true if an object has the given named attribute and false if it does not.
#
# **Syntax**:
#
# ```python
# hasattr(object, name)
# ```
#
# **`hasattr()`** is called by **[getattr()](https://github.com/milaan9/04_Python_Functions/blob/main/002_Python_Functions_Built_in/025_Python_getattr%28%29.ipynb)** to check to see if **`AttributeError`** is to be raised or not.
# ## `hasattr()` Parameters
#
# **`hasattr()`** method takes two parameters:
#
# * **object** - object whose named attribute is to be checked
# * **name** - name of the attribute to be searched
# ## Return Value from `hasattr()`
#
# **`hasattr()`** method returns:
#
# * **True**, if object has the given named attribute
# * **False**, if object has no given named attribute
# +
# Example: How hasattr() works in Python?
class Person:
    # Class-level attributes: every Person shares these defaults.
    name = 'Adam'
    age = 23


adam = Person()
# 'age' exists on the class, so hasattr is True; 'salary' was never defined.
print('Person has age?:', hasattr(adam, 'age'))
print('Person has salary?:', hasattr(adam, 'salary'))
# -
|
002_Python_Functions_Built_in/028_Python_hasattr().ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as np
from scipy import misc
import matplotlib.pyplot as plt
# Load scipy's built-in 512x512 grayscale test image.
# NOTE(review): scipy.misc.ascent() was removed in recent SciPy releases
# (use scipy.datasets.ascent() there) — confirm the pinned SciPy version.
i = misc.ascent()
print(i.shape)
print(i)
plt.imshow(i)
plt.gray()
plt.show()
# Work on a copy so the original image stays available for the convolution reads.
i_transformed = np.copy(i)
size_x = i_transformed.shape[0]
size_y = i_transformed.shape[1]
# +
# This filter detects edges nicely
# It creates a convolution that only passes through sharp edges and straight
# lines.
# Experiment with different values for fun effects.
# filter = [ [0, 1, 0], [1, -4, 1], [0, 1, 0]]
# A couple more filters to try for fun!
# The active kernel is a Sobel operator (responds to vertical intensity gradients).
filters = [[-1, -2, -1], [0, 0, 0], [1, 2, 1]]
# filter = [ [-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]
# If all the digits in the filter don't add up to 0 or 1, you
# should probably do a weight to get it to do so
# so, for example, if your weights are 1,1,1 1,2,1 1,1,1
# They add up to 10, so you would set a weight of .1 if you want to
# normalize them
weight = 1
# -
# Apply the 3x3 convolution to every interior pixel (the 1-pixel border is
# skipped so the kernel never reads outside the image).
for x in range(1, size_x - 1):
    for y in range(1, size_y - 1):
        # filters[row][col] is laid out so that `row` follows the y offset
        # and `col` follows the x offset of the neighbour being read.
        acc = 0.0
        for dy in (-1, 0, 1):
            for dx in (-1, 0, 1):
                acc += i[x + dx, y + dy] * filters[dy + 1][dx + 1]
        acc *= weight
        # Clamp to the valid 8-bit pixel range before writing back.
        i_transformed[x, y] = min(max(acc, 0), 255)
# Show the edge-filtered image.
plt.gray()
plt.grid(False)
plt.imshow(i_transformed)
# plt.axis('off')
plt.show()
# +
# 2x2 max pooling: halve the image in each dimension, keeping the brightest
# pixel of every 2x2 patch.
# NOTE(review): assumes size_x and size_y are even (true for the 512x512
# ascent image); an odd size would index one past the last row/column.
new_x = size_x // 2
new_y = size_y // 2
newImage = np.zeros((new_x, new_y))
for x in range(0, size_x, 2):
    for y in range(0, size_y, 2):
        # max() replaces the original sort-then-take-first, which computed
        # the same value in O(n log n) instead of O(n).
        newImage[x // 2, y // 2] = max(
            i_transformed[x, y],
            i_transformed[x + 1, y],
            i_transformed[x, y + 1],
            i_transformed[x + 1, y + 1],
        )
# Plot the image. Note the size of the axes --
# now 256 pixels instead of 512
plt.gray()
plt.grid(False)
plt.imshow(newImage)
# plt.axis('off')
plt.show()
|
week-03/convolutional_sidebar.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Import some python libraries
import rasterio
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
import json
import os
# Setup a dask cluster
import dask
from dask.distributed import Client
from dask_kubernetes import KubeCluster
# Settings to display all outputs within notebook
# %matplotlib inline
# %load_ext wurlitzer
# +
# Debug logging
#import logging
#logger = logging.getLogger()
#logger.setLevel(logging.DEBUG)
# +
# Print package versions
# (useful when debugging cloud reads — behaviour changes across releases).
print('Dask version: ', dask.__version__)
print('Xarray version: ', xr.__version__)
print('Rasterio version: ', rasterio.__version__)
# -
# Launch Dask Cluster
# Five workers on Kubernetes; the bare `cluster` / `client` expressions render
# the dashboard widgets in the notebook.
cluster = KubeCluster(n_workers=5)
cluster
client = Client(cluster)
client
# ## Read single Cloud-Optimized Geotiff
# +
# # !aws s3api get-bucket-location --bucket cog-benchmark-landsat
#us-west-2
# # !aws s3 ls s3://cog-benchmark-landsat
#2018-08-27 12:55:19 56368448 aws-LC08_L1TP_047027_20180113_20180119_01_T1_B4.TIF
#2018-08-27 12:55:19 88498262 aws-cog.TIF
#2018-08-27 12:55:19 65195094 google-LC08_L1TP_047027_20180113_20180119_01_T1_B4.TIF
#2018-08-27 12:55:19 118157830 google-cog.TIF
#2018-08-27 12:55:19 125384178 usgs-LC08_L1TP_047027_20180113_20180119_01_T1_B4.TIF
# WAPOR COG file
# https://io.apps.fao.org/gcs-bucket/L2_GBWP_17s1.cog.tif
#2018-10-30 12:00:00 L2_GBWP_17s1.cog.tif
# -
# NOTE temporary public bucket
# bucket = 'cog-benchmark-landsat'
bucket = 'gcs-bucket'
# cog = 'aws-cog.TIF'
cog_gbwp = 'L2_GBWP_17s1.cog.tif'
# region = 'us-west-2'
# s3URL = f's3://{bucket}/{cog}'
# httpURL = f'https://s3-{region}.amazonaws.com/{bucket}/{cog}'
# Build the HTTP URL of the FAO WAPOR COG used throughout this notebook.
httpURL = f'https://io.apps.fao.org/{bucket}/{cog_gbwp}'
print(httpURL)
# Validate that the file is actually a COG
query = f'http://cog-validate.radiant.earth/api/validate?url={httpURL}'
# response = !curl -s {query}
# NOTE(review): `response` is only defined if the `!curl` shell magic above is
# actually executed (here it is commented out) — otherwise the next line
# raises NameError.
json.loads(response[0])
# +
# NOTE(review): a bare `% time` line is only valid in IPython; as a plain
# Python script this is a SyntaxError (jupytext normally writes it as `# %time`).
% time
# NOTE: Red text output is coming from gdal behind the scenes (info on HTTP requests)
# this is coming from the wurlitzer package (%load_ext wurlitzer)
# Warning, timing calls are misleading if code has recently been run
# b/c results are cached...
# Read individual COG metadata with rasterio
with rasterio.Env(GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR',
                  GDAL_HTTP_UNSAFESSL='YES',
                  CURL_CA_BUNDLE='/etc/ssl/certs/ca-certificates.crt', #For Ubunutu
                  CPL_VSIL_CURL_ALLOWED_EXTENSIONS='TIF',
                  CPL_DEBUG=True,
                  CPL_CURL_VERBOSE=True,
                  VSI_CACHE=False):
    with rasterio.open(httpURL, 'r') as src:
        print(src.profile)
# +
% time
# Read first 10 tiles:
ntiles = 10
with rasterio.Env(GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR',
                  GDAL_HTTP_UNSAFESSL='YES',
                  CURL_CA_BUNDLE='/etc/ssl/certs/ca-certificates.crt', #For Ubunutu
                  CPL_VSIL_CURL_ALLOWED_EXTENSIONS='TIF',
                  CPL_CURL_VERBOSE=True,
                  VSI_CACHE=False):
    with rasterio.open(httpURL, 'r') as src:
        # Window(col_off=0.0, row_off=0.0, width=100.0, height=100.0)
        # Read a one-tile-high strip of `ntiles` tiles starting at pixel
        # offset (30000, 30000) from band 1.
        width = src.profile['blockxsize'] * ntiles
        height = src.profile['blockysize']
        window = rasterio.windows.Window(30000, 30000, width, height)
        w = src.read(1, window=window)
# -
# Compare to loading with xarray
# `w` echoes the array just read; then render the strip.
w
plt.imshow(w)
# +
# Note that the blocksize of the image is 256 by 256, so we want xarray to use some multiple of that
xchunk = 512
# xchunk = 2048
ychunk = 512
# ychunk = 2048
# How to get open_rasterio() to use rasterio.Env manager?
# No verbose output from the following:
# https://rasterio.readthedocs.io/en/latest/topics/configuration.html
with rasterio.Env(GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR',
                  GDAL_HTTP_UNSAFESSL='YES',
                  CURL_CA_BUNDLE='/etc/ssl/certs/ca-certificates.crt', #For Ubunutu
                  CPL_VSIL_CURL_ALLOWED_EXTENSIONS='TIF',
                  CPL_CURL_VERBOSE=True,
                  VSI_CACHE=False):
    # Lazily open the COG as a chunked (dask-backed) DataArray and show its repr.
    with xr.open_rasterio(httpURL, chunks={'band': 1, 'x': xchunk, 'y': ychunk}) as da:
        print(da)
# +
# Instead set os environment variables
# (GDAL reads these at open time — presumably set globally so they also apply
# inside dask worker processes, unlike the rasterio.Env context; confirm.)
os.environ['GDAL_DISABLE_READDIR_ON_OPEN']='YES'
os.environ['CURL_CA_BUNDLE']='/etc/ssl/certs/ca-certificates.crt'
os.environ['CPL_VSIL_CURL_ALLOWED_EXTENSIONS']='TIF'
os.environ['CPL_CURL_VERBOSE']='YES'
os.environ['VSI_CACHE']='FALSE'
os.environ['AWS_REGION']='us-west-2'
# added
os.environ['GDAL_HTTP_UNSAFESSL']='YES'
os.environ['GDAL_HTTP_MAX_RETRY']='3'
os.environ['GDAL_HTTP_MERGE_CONSECUTIVE_RANGES']='YES'
# +
# %time
# Read same subset via xarray + dask
da = xr.open_rasterio(httpURL,
                      chunks={'band': 1,
                              'x': xchunk,
                              'y': ychunk})
# NOTE(review): this looks wrong — `width`/`height` are tile-strip *sizes*
# (e.g. 2560 and 256), so slice(30000, width) is empty. The rasterio Window
# above used (offset=30000, size=width); this probably should be
# slice(30000, 30000 + width) / slice(30000, 30000 + height). Confirm.
data = da.isel(x=slice(30000,width), y=slice(30000,height))
# +
# %time
plt.imshow(data)
# see error on certificate https://github.com/mapbox/rasterio/issues/1289
# -
# !gdalinfo '/vsicurl/https://io.apps.fao.org/gcs-bucket/L2_GBWP_17s1.cog.tif'
# +
# Look at open_rasterio() function
# #xr.open_rasterio??
# +
from ipyleaflet import Map, basemaps, basemap_to_tiles, SplitMapControl
m = Map(center=(0, 0), zoom=3)
import ipyleaflet
# right_layer = basemap_to_tiles(basemaps.NASAGIBS.ModisTerraTrueColorCR, "2017-11-11")
# Right pane: the COG rendered through the rdnt.io dynamic tiling service.
right_layer = ipyleaflet.TileLayer(
    opacity=1.0,
    url='http://tiles.rdnt.io/tiles/{{z}}/{{x}}/{{y}}?url={0}'.format(httpURL),
    zoom=0,
    max_zoom=15
)
# Left pane: a NASA GIBS MODIS basemap for visual comparison.
left_layer = basemap_to_tiles(basemaps.NASAGIBS.ModisAquaBands721CR, "2017-11-11")
control = SplitMapControl(left_layer=left_layer, right_layer=right_layer)
m.add_control(control)
m
# + outputHidden=false inputHidden=false
# Error ipyleaflet debug
# jupyter labextension list
# jupyter nbextension list
# workaround
# jupyter labextension install jupyter-leaflet
|
notebooks/cog-tests.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
# Quick sanity check that a CUDA device is visible before loading the models.
torch.cuda.is_available()
# +
from tqdm import tqdm
import logging
import math
import os
import random
import sys
import time
import pickle
import copy
import numpy as np
import torch
from fairseq import (
checkpoint_utils,
distributed_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq.data import iterators
from fairseq.logging import meters, metrics, progress_bar
from fairseq.trainer import Trainer
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from fairseq.models.pruned_transformer import PrunedTransformerModel
# -
# Restore the fairseq training args pickled at training time, then point them
# at the local copy of the WMT'16 En-De data.
# NOTE(review): the pickle file handle is never closed; harmless in a
# notebook, but `with open(...)` would be cleaner.
args = pickle.load(open("argsfile.p", "rb"))
args.data = '/raj-learn/data/wmt16_en_de_bpe32k'
# Build the translation task and load only the validation split.
task = tasks.setup_task(args)
task.load_dataset(args.valid_subset, combine=False, epoch=1)
dataset = task.dataset(args.valid_subset)
# checkpoint_dir = "/home/raj/data/raj-learn/checkpoints/lr-rewind_0.75sparsity_0.2frac_30epochs/"
checkpoint_dir = "/raj-learn/checkpoints/lr-rewind_0.75sparsity_0.2frac_30epochs/"
# One checkpoint per lottery-ticket pruning round (LTH0 = dense baseline);
# the number in each filename is presumably the cumulative sparsity — confirm.
model_paths = ["checkpoint_LTH0_epoch60.pt",
               "checkpoint_LTH1_epoch60_sparsity0.168.pt",
               "checkpoint_LTH2_epoch60_sparsity0.302.pt",
               "checkpoint_LTH3_epoch60_sparsity0.410.pt",
               "checkpoint_LTH4_epoch60_sparsity0.496.pt",
               "checkpoint_LTH5_epoch60_sparsity0.565.pt",
               "checkpoint_LTH6_epoch60_sparsity0.620.pt",
               "checkpoint_LTH7_epoch60_sparsity0.664.pt",
               "checkpoint_LTH8_epoch60_sparsity0.699.pt",
              ]
# +
import h5py
def make_hdf5_file(vectors, output_file_path):
    """Write a ``{key: np.array}`` mapping to ``output_file_path`` as HDF5.

    Each entry becomes one float32 dataset named ``str(key)``; any existing
    file at the path is overwritten ('w' mode).
    """
    with h5py.File(output_file_path, 'w') as out_file:
        for name, embedding in vectors.items():
            out_file.create_dataset(str(name),
                                    embedding.shape,
                                    dtype='float32',
                                    data=embedding)
# +
# %%time
import time
# For every pruning-round checkpoint: load the model, run the validation set
# through it, and dump each sentence's attention maps (encoder self-attn,
# encoder-decoder cross-attn, decoder self-attn) into per-model HDF5 files.
for path in model_paths:
    t0 = time.time()
    # e.g. "checkpoint_LTH3_epoch60_..." -> "LTH3"; used in the output paths.
    model_name = path.split('_')[1]
    args.path = checkpoint_dir + path
    models, _model_args = checkpoint_utils.load_model_ensemble(
        args.path.split(os.pathsep),
        task=task,
    )
    # Single-model "ensemble": take the first model and put it in eval mode on GPU.
    model = models[0]
    model.cuda()
    model.eval()
    itr = task.get_batch_iterator(
        dataset=dataset,
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences,
        max_positions=utils.resolve_max_positions(
            task.max_positions(),
            *[m.max_positions() for m in models],
        ),
        ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
        required_batch_size_multiple=args.required_batch_size_multiple,
        seed=args.seed,
        num_shards=args.distributed_world_size,
        shard_id=args.distributed_rank,
        num_workers=args.num_workers,
    ).next_epoch_itr(shuffle=False)
    # Per-sentence attention maps keyed by dataset sentence id.
    all_attns_encenc = {}
    all_attns_encdec = {}
    all_attns_decdec = {}
    for batch in tqdm(itr):
        ids = batch["id"].cpu().numpy().tolist()
        src_lens = batch["net_input"]["src_lengths"].cpu().numpy()
        # Encoder-only forward pass to collect encoder self-attentions.
        enc_outputs = model.encoder(batch["net_input"]["src_tokens"].cuda(), batch["net_input"]["src_lengths"].cuda(),
                                    return_all_hiddens=False, return_all_attns=True)
        encenc_attns = np.array([x.detach().cpu().numpy() for x in enc_outputs.encoder_self_attns])
        # Full forward pass (teacher-forced) for cross- and decoder self-attentions.
        out, props = model(batch["net_input"]["src_tokens"].cuda(), batch["net_input"]["src_lengths"].cuda(), \
                           batch["net_input"]["prev_output_tokens"].cuda())
        encdec_attns = [x.detach().cpu().numpy() for x in props["encdec_attns"]]
        decdec_attns = [x.detach().cpu().numpy() for x in props["decdec_attns"]]
        # Recover true target lengths by counting padding in the target.
        # NOTE(review): assumes the pad token id is 1 — confirm against the dictionary.
        pad_lens = torch.sum(batch['target'] == 1, axis=1)
        tgt_lens = batch['target'].shape[1] - pad_lens
        # Trim each map to the sentence's real lengths; the negative source
        # slices assume source sequences are left-padded — confirm.
        for i, id_ in enumerate(ids):
            all_attns_encenc[id_] = np.array([attn[i, :, -src_lens[i]:, -src_lens[i]:] for attn in encenc_attns])
            all_attns_encdec[id_] = np.array([attn[:, i, :tgt_lens[i], -src_lens[i]:] for attn in encdec_attns])
            all_attns_decdec[id_] = np.array([attn[i, :, :tgt_lens[i], :tgt_lens[i]] for attn in decdec_attns])
    # One HDF5 file per attention type, in a per-model directory.
    for (j, attntype) in enumerate([all_attns_encenc, all_attns_encdec, all_attns_decdec]):
        if j == 0:
            outfile = f'/raj-learn/data/precomputed_attns/{model_name}/encenc_attns_wmt_en_de_val.hdf5'
        elif j == 1:
            outfile = f'/raj-learn/data/precomputed_attns/{model_name}/encdec_attns_wmt_en_de_val.hdf5'
        else:
            outfile = f'/raj-learn/data/precomputed_attns/{model_name}/decdec_attns_wmt_en_de_val.hdf5'
        make_hdf5_file(attntype, outfile)
    print("Model %s took %.2fsec" % (model_name, time.time() - t0))
# -
# NOTE(review): `all_masks` is not defined anywhere in this notebook — this
# cell raises NameError as written. The asserts spot-check that selected
# attention rows sum to 1, i.e. form valid softmax distributions.
for id_, mask in all_masks.items():
    assert np.allclose(1, np.sum(mask["encenc"][5][0,1,:]))
    assert np.allclose(1, np.sum(mask["encdec"][-1][0,1,:]))
    assert np.allclose(1, np.sum(mask["decdec"][-1][0,1,:]))
|
get_attn_dists.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ### Task 8: Largest product in a series
# As always, let's start with basic brute force with a 'window' of 13 numbers:
# +
# Project Euler #8: find the largest product of 13 adjacent digits in the
# 1000-digit number below.
raw_list_of_nums = list("""73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450""")

# Drop the newlines list() kept from the triple-quoted literal and convert
# each remaining character to its digit value (comprehension replaces the
# original filter/map lambda chain).
list_of_nums = [int(ch) for ch in raw_list_of_nums if ch != '\n']

# Slide a 13-digit window over the sequence and keep the largest product.
# The original brute force spelled the same 13-term product out twice per
# position (once in the comparison, once in the assignment); here each
# window's product is computed exactly once.
WINDOW = 13
max_prod = 0
for start in range(len(list_of_nums) - WINDOW + 1):
    window_prod = 1
    for digit in list_of_nums[start:start + WINDOW]:
        window_prod *= digit
    if window_prod > max_prod:
        max_prod = window_prod
print(max_prod)
# -
# This algorithm has O(N) speed, and the only improvement I can see is to skip all
# windows that contain a zero (i.e. the ranges [i-12, i] and [i, i+12] around each
# zero at position i), which would reduce the number of products that need to be
# evaluated, since any window containing a zero has a product of zero.
#
|
python/Problem8.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=false
# ## To make the libraries you have uploaded in the Library Manager available in this Notebook, run the command below to get started
#
# ```run -i platform-libs/initialize.py```
# + deletable=false
# IPython line magic: runs the platform bootstrap script in this namespace
# (only valid inside a notebook/IPython session, not as a plain .py file).
run -i platform-libs/initialize.py
# +
from pipelineblocksdk.construct.base.StreamBlock import StreamBlock
from pipelineblocksdk.api.Singleton import Singleton
from pipelineblocksdk.data.spark.SparkConfCustom import SparkConfCustom
# NOTE(review): `async` has been a reserved keyword since Python 3.7, so this
# import is a SyntaxError on modern interpreters — confirm the runtime version.
from pipelineblocksdk.util.async_util import async
from pipelineblocksdk.util.kerbUtil import generate_ticket_granting_ticket
from pipelineblocksdk.util.ThirdPartyIntegration import get_oracle_creds
from hdfs import InsecureClient
from hdfs.ext.kerberos import KerberosClient
from shutil import rmtree
from ast import literal_eval
import time
import uuid
import os
import requests
import json
import pathlib
import traceback
from threading import Thread
# Following code is the definition for a batch block
# Following code is the definition for a batch block
class MyBlock(StreamBlock, Singleton):
    """Join block: reads two data sources (HDFS CSV, Oracle query, or a Kafka
    stream spooled to local disk), joins them with a user-supplied Spark SQL
    query, and either uploads the result to HDFS or streams it to a new Kafka
    topic.  Configuration arrives via ``self.input_dict`` /
    ``self.block_params`` (set by the surrounding pipeline framework).
    """
    # Local scratch directory for spooled stream data and combined CSVs.
    base_temp_path = '/tmp/'
    # NOTE(review): class-level mutable list is shared across instances;
    # presumably safe because of the Singleton mixin — confirm.
    temp_file_paths = []
    hdfs_client = None
    spark = None
    kafka_api_instance = None
    left_df = None
    right_df = None
    join_df = None
    output_topic = None
    part_files = None
    # This is the entry point for the block. This is a mandatory function.
    def run(self):
        """Execute the join.

        Returns a dict with ``queueTopicName`` (streaming output) or
        ``file_path`` (HDFS output).  Raises on any failure after logging
        the traceback.
        """
        t1 = time.time()
        output_dict = dict()
        self.logger.info('Run function says: SparkJoin')
        self.logger.info(f"Inputs available: {self.input_dict}")
        os.makedirs(self.base_temp_path, exist_ok=True)
        self.kafka_api_instance = self.data_handler.api_instance
        self.spark = SparkConfCustom(self.input_dict["SparkConf"]).get_spark_session()
        self.spark.sparkContext.setLogLevel('INFO')
        try:
            left_path, left_thread, right_path, right_thread = None, None, None, None
            # A topic name longer than 3 chars means the side is Kafka-fed:
            # spool the stream to a local CSV on a background thread.
            if self.input_dict['LeftDataSource']['queueTopicName'] \
                    and len(self.input_dict['LeftDataSource']['queueTopicName']) > 3:
                left_path, left_thread = self.get_streaming_path(self.input_dict['LeftDataSource'])
            if self.input_dict['RightDataSource']['queueTopicName'] \
                    and len(self.input_dict['RightDataSource']['queueTopicName']) > 3:
                right_path, right_thread = self.get_streaming_path(self.input_dict['RightDataSource'])
            # Waiting for streams to download the data to local disk
            if left_thread:
                left_thread.join()
            if right_thread:
                right_thread.join()
            self.logger.info(self.input_dict['DataTarget']['stream'])
            # An HDFS client is needed when either source is HDFS, or when
            # the target is a file (non-streaming output).
            if ('type' in self.input_dict['LeftDataSource'] and self.input_dict['LeftDataSource']['type'] == 'hdfs') \
                    or ('type' in self.input_dict['RightDataSource'] and self.input_dict['RightDataSource'][
                'type'] == 'hdfs'):
                self.hdfs_client = self.get_client(self.block_params, self.input_dict['ConnectionParams'])
            if (self.input_dict['DataTarget']['stream'] is not True) :
                self.hdfs_client = self.get_client(self.block_params, self.input_dict['ConnectionParams'])
            self.left_df = self.get_df(self.spark, self.input_dict['LeftDataSource'], self.block_params, left_path)
            self.logger.info(str(self.left_df.schema.json()))
            self.right_df = self.get_df(self.spark, self.input_dict['RightDataSource'], self.block_params, right_path)
            self.logger.info(str(self.right_df.schema.json()))
            self.spark.sparkContext.setLogLevel("INFO")
            self.right_df.show()
            # Register both sides as temp views so the user query can refer
            # to them as `left` and `right`.
            self.left_df.createOrReplaceTempView('left')
            self.right_df.createOrReplaceTempView('right')
            sql_query = self.input_dict['DataTarget']['join_query']
            self.logger.info(f"Query is {sql_query}")
            self.join_df = self.spark.sql(sql_query)
            # Rename duplicate columns (join keys appear twice): second
            # occurrence gets a '_1' suffix.
            col_list = self.join_df.columns
            for col in col_list:
                count = col_list.count(col)
                if count > 1:
                    idx = col_list.index(col)
                    col_list[idx] = col + '_1'
            self.logger.info(self.join_df.columns)
            self.join_df = self.join_df.toDF(*col_list)
            self.logger.info(self.join_df.columns)
            # Write df to HDFS or write to file
            join_time_st = time.time()
            # NOTE(review): 'kerberos' is compared to the *string* 'false'
            # here, but the sample config passes boolean False — confirm
            # which representation callers actually send.
            if self.input_dict['ConnectionParams']['kerberos'] == 'false' \
                    and str(self.input_dict['DataTarget']['filePath']).endswith('.parquet'):
                # Write to HDFS directly via the webhdfs URL.
                exists = self.file_exits(self.hdfs_client, self.input_dict['DataTarget']['filePath'])
                if exists:
                    if self.input_dict['DataTarget']['overwrite'] is True:
                        # remove file
                        self.delete_file(self.hdfs_client, self.input_dict['DataTarget']['filePath'])
                    else:
                        raise FileExistsError("File Already Exists: " + str(self.input_dict['DataTarget']['filePath']))
                host = self.input_dict['ConnectionParams']['hostName']
                port = self.input_dict['ConnectionParams']['port']
                target_path = self.input_dict['DataTarget']['filePath']
                protocol = 'https://' if self.input_dict['ConnectionParams']['https'] == 'true' else 'http://'
                hdfs_path = protocol + host + port + '/' + target_path
                self.join_df.write.mode("overwrite").option("header", "true").csv(hdfs_path)
                join_time_end = time.time()
            else:
                # Write to local part files first, then stream or combine+upload.
                temp_fp = self.base_temp_path + str(t1) + '.csv'
                self.join_df.write.mode("overwrite").option("header", "false").csv(temp_fp)
                join_time_end = time.time()
                # combine to single file
                combine_start_time = time.time()
                files = list(pathlib.Path(temp_fp).glob('*.csv'))
                files.sort()
                if self.input_dict['DataTarget']['stream'] == 'true' or self.input_dict['DataTarget']['stream'] is True:
                    # Streaming output: publish part files to a fresh topic
                    # asynchronously (see stream()) and return immediately.
                    self.output_topic, self.producer = self.data_handler.create_producer(str(uuid.uuid4()))
                    self.part_files = files
                    self.stream()
                    output_dict["queueTopicName"] = self.output_topic
                    return output_dict
                exists = self.file_exits(self.hdfs_client, self.input_dict['DataTarget']['filePath'])
                if exists:
                    if self.input_dict['DataTarget']['overwrite'] is True:
                        # remove file
                        self.delete_file(self.hdfs_client, self.input_dict['DataTarget']['filePath'])
                    else:
                        raise FileExistsError("File Already Exists: " + str(self.input_dict['DataTarget']['filePath']))
                # Concatenate the Spark part files into one CSV with a header.
                single_out_file = self.base_temp_path + str(combine_start_time) + '.csv'
                self.temp_file_paths.append(single_out_file)
                with open(single_out_file, 'a') as out_file:
                    out_file.write(",".join(self.join_df.columns))
                    for f in files:
                        self.logger.info(f'{f} appended to single csv')
                        with open(str(f), 'r') as in_file:
                            for line in in_file:
                                if len(line)>1:
                                    out_file.write('\n' + line)
                        self.logger.info(f'Last line is: {line}')
                print(
                    f'Done with combining part files to a single file.\n Time taken: {time.time() - combine_start_time}')
                write_start_time = time.time()
                self.logger.info("Writing to HDFS:")
                self.block_folder_write(self.hdfs_client, single_out_file, self.input_dict['DataTarget']['filePath']
                                        , self.input_dict['DataTarget']['overwrite'])
                self.logger.info("Time taken to write to HDFS: " + str(time.time() - write_start_time))
            self.logger.info('Time for Join: ' + str(join_time_end - join_time_st))
        except Exception as e:
            some_track = traceback.format_exc()
            self.logger.error(f"Error: {str(e)} \n trace: {some_track}")
            self.logger.error(e)
            raise e
        self.clean_temp_files()
        # Set the output parameter
        output_dict['file_path'] = self.input_dict['DataTarget']['filePath']
        # Set the status of the block as completed
        self.block_status = "COMPLETED"
        return output_dict
    def clean_temp_files(self):
        """Best-effort removal of all temp files/dirs recorded during the run."""
        self.logger.info("Cleaning temp files:")
        for file in self.temp_file_paths:
            rmtree(path=file, ignore_errors=True)
    # HDFS Client
    def get_client(self, block_params=None, connection_params=None):
        """Create (and smoke-test with a root listing) an HDFS client,
        Kerberos-authenticated or insecure depending on connection params.
        """
        try:
            kerb_auth = False
            method = "https"
            if "https" in connection_params:
                if connection_params["https"]:
                    method = "https"
                else:
                    method = "http"
            host_name = connection_params["hostName"]
            port = connection_params["port"]
            if 'kerberos' in connection_params:
                # NOTE(review): bool('false') is True — if 'kerberos' arrives
                # as a string (as run() assumes), this enables Kerberos even
                # for "false". Confirm the config type.
                kerb_auth = bool(connection_params['kerberos'])
            if kerb_auth:
                principal = generate_ticket_granting_ticket(block_params, connection_params["authName"])
                session = requests.Session()
                session.verify = False
                full_host = "%s://%s:%s" % (method, host_name, port)
                client = KerberosClient(url=full_host, session=session, mutual_auth='OPTIONAL', principal=principal)
                client.list('/')
                return client
            else:
                hadoop_host = host_name + ":" + port
                client = InsecureClient("http://" + hadoop_host)
                client.list('/')
                return client
        except Exception as e:
            self.logger.error("Error Occurred While Connecting to HDFS With Given Connection Details")
            raise e
    def get_df(self, spark_session, data_source, block_params, path):
        """Dispatch to the right loader: Kafka-spooled CSV, HDFS CSV or Oracle."""
        if data_source['queueTopicName'] and len(data_source['queueTopicName']) > 3:
            return self.get_streaming_df(spark_session, path)
        elif data_source['type'] == 'hdfs':
            return self.get_hdfs_df(spark_session, data_source, self.hdfs_client)
        elif data_source['type'] == 'oracle':
            return self.get_oracle_df(spark_session, data_source, block_params)
        else:
            raise Exception('please provide a valid type')
    def get_hdfs_df(self, spark_session, data_source, hdfs_connection):
        """Download an HDFS CSV to local disk and load it as a dataframe."""
        self.logger.info("Reading HDFS file to local")
        local_file_path = self.base_temp_path + str(time.time()) + '.csv'
        hdfs_connection.download(data_source['fileWithFullPath'], local_file_path, n_threads=-1,
                                 chunk_size=5000000, overwrite=True)
        self.logger.info("Creating dataframe from HDFS")
        self.temp_file_paths.append(local_file_path)
        return spark_session.read.format("csv").option("header", data_source['header']) \
            .option("inferSchema", "true").option("delimiter", data_source['delimiter']) \
            .load(local_file_path)
    def get_oracle_df(self, spark_session, data_source, block_params):
        """Load a dataframe from Oracle via JDBC; credentials come from the
        third-party integration store keyed by ``connection_name``."""
        self.logger.info("Creating Oracle dataframe")
        credentials = get_oracle_creds(user_id=block_params["userAuthToken"],
                                       authentication_name=data_source['connection_name'])
        url = "jdbc:oracle:thin:@%s:%s:%s" % (credentials["host"], credentials["port"], credentials["sid"])
        return spark_session.read.format("jdbc") \
            .options(url=url
                     , driver="oracle.jdbc.driver.OracleDriver"
                     , dbtable=data_source['query']
                     , fetchSize=1000000
                     , user=credentials["username"]
                     , password=credentials["password"]).load()
    def get_streaming_path(self, data_source):
        """Subscribe a new consumer group to the source topic and spool its
        messages to a local CSV on a background thread.

        Returns ``(local_path, thread)``; the caller joins the thread before
        reading the file.
        """
        topic_name = data_source['queueTopicName']
        consumer_pool = {
            "count": 1,
            "groupId": str(uuid.uuid4()),
            "registerId": "",
            "topicsListToSubscribe": [
                topic_name
            ]
        }
        try:
            consumer_pool_res = self.kafka_api_instance.create_consumer_list_using_post(consumer_pool)
            channel = consumer_pool_res.result
        except Exception as e:
            self.logger.error("Error Trying To Create a Consumer Of Topic:" + str(topic_name))
            self.block_status = "FAILED"
            raise e
        req = {"topicName": topic_name}
        schema = self.kafka_api_instance.get_topic_meta_using_post(req)
        # The topic meta carries the schema as JSON-within-JSON.
        schema = json.loads(json.loads(schema.result)["schema"])
        self.logger.info(f"Schema: {schema}")
        f_path = self.base_temp_path + str(time.time())
        if os.path.exists(f_path):
            rmtree(f_path)
        read_stream_thread = Thread(target=self.read_records,
                                    args=(topic_name, schema, self.kafka_api_instance, f_path, channel, self.logger))
        read_stream_thread.start()
        self.temp_file_paths.append(f_path)
        return f_path, read_stream_thread
    def get_streaming_df(self, spark, path):
        """Load the spooled stream CSV (always headered, comma-delimited)."""
        return spark.read.format("csv").option("header", "true") \
            .option("inferSchema", "true").option("delimiter", ",") \
            .load(path)
    def read_records(self, topic, schema, kafka_api_instance, file_path=None, channel=None, logger=None):
        """Drain ``topic`` into ``file_path`` as CSV (runs on a worker thread).

        Stops when a zero-message poll coincides with the upstream producer
        reporting itself finished.  Exceptions are logged, not re-raised,
        because this runs off the main thread.
        """
        try:
            logger.info(f'Started reading topic {topic}')
            read_msgs_channel = {
                "channelId": channel,
                "registerId": ""
            }
            t_records = 0
            with open(file_path, 'a', buffering=50 * (1024 ** 2)) as writer:
                # Header row from the topic schema's field names.
                writer.write(",".join(schema.keys()))
                while True:
                    read_msgs_res = kafka_api_instance.read_messages_from_topic_using_post(read_msgs_channel)
                    msgs = read_msgs_res.result
                    logger.info(f'Read: {str(len(msgs))}, Total: {t_records}')
                    if len(msgs) == 0:
                        logger.info('Zero Messages')
                        # NOTE(review): `topic` is rebound from a string to a
                        # dict here, so later log lines print the dict —
                        # confirm this is intentional.
                        topic = {"topicName": topic}
                        res = kafka_api_instance.get_producer_status_using_post(topic)
                        logger.info('Zero messages: ' + json.dumps(res.result))
                        if not res.result['value']:
                            break
                    # logger.info(f'message(0): {msgs[0]}')
                    for msg in msgs:
                        t_records = t_records + 1
                        writer.write('\n')
                        # Each message is a Python-literal list; render as CSV.
                        my_string = ','.join(map(str, literal_eval(msg)))
                        writer.write(my_string)
            logger.info(f'Done writing topic: {topic} to file: {file_path} Records: {t_records}')
        except Exception as e:
            logger.error(e)
    def file_exits(self, hdfs_connection, file_path):
        """Return the HDFS status dict if the path exists, else False.
        (Name typo 'exits' kept for compatibility with existing callers.)"""
        self.logger.debug("Inside the file exists check method")
        try:
            return hdfs_connection.status(hdfs_path=file_path, strict=True)
        except Exception as e:
            return False
    def delete_file(self, hdfs_connection, file_path):
        """Best-effort HDFS delete; returns False on any error."""
        self.logger.debug("Inside delete HDFS file method")
        try:
            return hdfs_connection.delete(file_path)
        except Exception as e:
            return False
    def block_folder_write(self, hdfs_connection, local_file_path, upload_path, overwrite: bool):
        """Upload the combined local CSV to HDFS (chunked, multi-threaded)."""
        self.logger.info("Inside the write method")
        hdfs_connection.upload(upload_path, local_file_path, n_threads=4,
                               chunk_size=5000000, cleanup=True, overwrite=overwrite)
        self.logger.info("Done writing")
    @async
    def stream(self):
        """Asynchronously publish the join result to ``self.output_topic``:
        register the schema, send every line of every part file, then push
        record-count metadata.  Always closes the producer and cleans temp
        files, even on failure.
        """
        print('Stream function says: Hello, world!')
        try:
            schema = self.get_df_schema(self.join_df)
            print(f"Join Schema: {schema}")
            self.set_schema(self.output_topic, schema)
            acc_counter = self.spark.sparkContext.accumulator(0)
            # kafka method
            #
            # def do_count(x):
            #     acc_counter.add(1)
            #     return x
            # adf_count = self.join_df.rdd.map(do_count).toDF()
            # adf_count.selectExpr("to_json(struct(*)) AS value") \
            #     .write \
            #     .format("kafka") \
            #     .option("kafka.bootstrap.servers", os.environ['KAFKA_LIST']) \
            #     .option("topic", self.output_topic) \
            #     .save()
            ### Normal forEach part
            # def customFunction(rows):
            #     for row in rows:
            #         acc_counter.add(1)
            #         my_list = list(row.asDict().values())
            #         # csv_str = ",".join(str(item) for item in my_list)
            #         self.producer.send(my_list)
            #
            # self.join_df.rdd.foreachPartition(customFunction)
            ### using files
            print(f"Partfiles = {self.part_files}")
            for f in self.part_files:
                self.logger.info(f'{f} appending to stream\n')
                with open(str(f), 'r') as in_file:
                    for line in in_file:
                        acc_counter.add(1)
                        self.producer.send(line.split(","))
            total_records = acc_counter.value
            self.logger.info(f"Total records: {total_records}")
            # Update meta to KML
            temp_error = {
                "noOfIgnoredRecords": 0,
                "errorRecords": 0
            }
            ui_info = {"info": "", "error": ""}
            meta_data = {
                "schema": json.dumps(schema),
                "readerInfo": json.dumps({"noOfRecordsRead": total_records}),
                "readerInfoError": json.dumps(temp_error),
                "ui_info": json.dumps(ui_info)
            }
            self.set_meta_data(self.output_topic, meta_data=meta_data)
            self.logger.info(f"Total records read: {str(total_records)}")
        except Exception as e:
            self.block_status = "FAILED"
            some_track = traceback.format_exc()
            self.logger.error(f"Error: {str(e)} \n {str(some_track)}")
            print(f"Error: {str(e)} \n {str(some_track)}")
            raise e
        finally:
            self.producer.close()
            self.clean_temp_files()
    def get_df_schema(self, df):
        """Convert a Spark dataframe schema into the block-metadata dict:
        upper-cased field name -> {'order', 'active', 'type'}.  Only integer,
        string and float/double fields are included."""
        try:
            json_schema = df.schema.json()
            json_schema = json.loads(json_schema)
            b2s_dict = {}
            for i, val in enumerate(json_schema['fields']):
                if val['type'] == 'integer':
                    b2s_dict[val['name'].upper()] = {'order': i + 1, 'active': True, 'type': 'IntegerType()'}
                if val['type'] == 'string':
                    b2s_dict[val['name'].upper()] = {'order': i + 1, 'active': True, 'type': 'StringType()'}
                if val['type'] == 'float' or val['type'] == 'double':
                    b2s_dict[val['name'].upper()] = {'order': i + 1, 'active': True, 'type': 'FloatType()'}
            return b2s_dict
        except Exception as e:
            raise e
# +
# Driver cell: instantiate the block, configure an Oracle-left / HDFS-right
# join written to a single HDFS CSV, and run it.
join_blk = MyBlock()
# Block params; '<PASSWORD>' is a redacted placeholder for the real token.
bp = {
    'userAuthToken': '<PASSWORD>'
}
ip = {
    "LeftDataSource": {
        "connection_name": "ora_con",
        "query": "(SELECT * from tables)",
        "type": "oracle",
        "fileWithFullPath": "/data/hdfs-nfs/Data/6d72d2cf-45b6-464c-9811-d6cf21574ece/100000SalesRecords1.csv",
        "header": True,
        "delimiter": ",",
        "queueTopicName": ""
    },
    "RightDataSource": {
        "connection_name": "",
        "query": "",
        "type": "hdfs",
        "fileWithFullPath": "/data/hdfs-nfs/Data/6d72d2cf-45b6-464c-9811-d6cf21574ece/100000SalesRecords1.csv",
        "header": True,
        "delimiter": ",",
        "queueTopicName": ""
    },
    "DataTarget":{
        "join_query": "SELECT * from left l JOIN right r ON l.`Order ID`=r.`Order ID`",
        "stream": False,
        "filePath": "/data/test_phani/join_out_note.csv",
        "delimiter": ",",
        "overwrite": True
    },
    # NOTE(review): 'kerberos' is a boolean here, but MyBlock.run() compares
    # it against the string 'false' — confirm the expected type.
    "ConnectionParams":{
        "hostName": "172.16.109.117",
        "port": "9870",
        "kerberos": False,
        "authName": "",
        "https": False
    },
    "SparkConf":{
        "spark.memory.fraction": 0.9,
        "spark.storage.memoryFraction": 0.3,
        "spark.jars":"/home/jovyan/work/data/spark-libs/ojdbc8-12.2.0.1.jar"
    }
}
join_blk.set_params(ip,bp)
# -
join_blk.run()
# Inspect the (deduplicated) column names of the join result.
join_blk.join_df.columns
# ls /home/jovyan/work/data/spark-libs/ojdbc8-12.2.0.1.jar
import os
os.environ
# +
def stream(self):
    """Scratch/draft copy of MyBlock.stream kept at notebook top level.

    NOTE(review): not wired to the class and not runnable as-is — `temp_fp`
    is undefined in this scope, the locally computed `part_files` is never
    used (`self.part_files` is iterated instead), and it reads `self.df`
    where the class version uses `self.join_df`.  Confirm before promoting
    any of this back into MyBlock.
    """
    print('Stream function says: Hello, world!')
    try:
        schema = self.get_df_schema(self.df)
        print(f"Schema: {schema}")
        self.set_schema(self.output_topic, schema)
        acc_counter = self.spark.sparkContext.accumulator(0)
        # Write dataframe to files
        self.join_df.write.mode("overwrite").option("header", "false").csv(temp_fp)
        part_files = list(pathlib.Path(temp_fp).glob('*.csv'))
        part_files.sort()
        # if possible do above steps in run method
        # Using files
        print(f"Partfiles = {self.part_files}")
        for f in self.part_files:
            self.logger.info(f'{f} appending to stream\n')
            with open(str(f), 'r') as in_file:
                for line in in_file:
                    acc_counter.add(1)
                    self.producer.send(line.split(","))
        total_records = acc_counter.value
        self.logger.info(f"Total records: {total_records}")
        # Update meta to KML
        temp_error = {
            "noOfIgnoredRecords": 0,
            "errorRecords": 0
        }
        ui_info = {"info": "", "error": ""}
        meta_data = {
            "schema": json.dumps(schema),
            "readerInfo": json.dumps({"noOfRecordsRead": total_records}),
            "readerInfoError": json.dumps(temp_error),
            "ui_info": json.dumps(ui_info)
        }
        self.set_meta_data(self.output_topic, meta_data=meta_data)
        self.logger.info(f"Total records read: {str(total_records)}")
    except Exception as e:
        self.block_status = "FAILED"
        some_track = traceback.format_exc()
        self.logger.error(f"Error: {str(e)} \n {str(some_track)}")
        print(f"Error: {str(e)} \n {str(some_track)}")
        raise e
    finally:
        self.producer.close()
        self.clean_temp_files()
def get_df_schema(self, df):
    """Translate a Spark dataframe schema into the block-metadata dict format.

    Parameters
    ----------
    df : pyspark.sql.DataFrame
        Any object whose ``schema.json()`` returns a Spark-style JSON schema
        with a top-level ``fields`` list of ``{"name": ..., "type": ...}``
        entries.

    Returns
    -------
    dict
        Upper-cased field name -> ``{'order', 'active', 'type'}``.  ``order``
        is the 1-based position in the original field list.  Only integer,
        string and float/double fields are included; other Spark types are
        silently skipped (same behaviour as before).
    """
    # Spark type name -> metadata type tag.  Replaces the previous chain of
    # non-elif `if` checks; also drops the pointless `except ... raise e`.
    type_tags = {
        'integer': 'IntegerType()',
        'string': 'StringType()',
        'float': 'FloatType()',
        'double': 'FloatType()',
    }
    fields = json.loads(df.schema.json())['fields']
    b2s_dict = {}
    for i, field in enumerate(fields):
        tag = type_tags.get(field['type'])
        if tag is not None:
            b2s_dict[field['name'].upper()] = {'order': i + 1, 'active': True, 'type': tag}
    return b2s_dict
|
nexon/rzt.ai.notebook-40.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # 2D Optimal transport for different metrics
#
# 2D OT on empirical distributions with different ground metrics.
#
# Stole the figure idea from Fig. 1 and 2 in
# https://arxiv.org/pdf/1706.07650.pdf
#
# +
# Author: <NAME> <<EMAIL>>
#
# License: MIT License
# sphinx_gallery_thumbnail_number = 3
import numpy as np
import matplotlib.pylab as pl
import ot
import ot.plot
# -
# ## Dataset 1 : uniform sampling
#
#
# +
n = 20 # nb samples
# Source points lie along the x-axis (with a tiny negative y-slope, see
# below); target points lie along the y-axis.
xs = np.zeros((n, 2))
xs[:, 0] = np.arange(n) + 1
xs[:, 1] = (np.arange(n) + 1) * -0.001  # to make it strictly convex...
xt = np.zeros((n, 2))
xt[:, 1] = np.arange(n) + 1
a, b = ot.unif(n), ot.unif(n)  # uniform distribution on samples
# loss matrix: Euclidean ground metric, normalised to [0, 1]
M1 = ot.dist(xs, xt, metric='euclidean')
M1 /= M1.max()
# loss matrix: squared Euclidean ground metric
M2 = ot.dist(xs, xt, metric='sqeuclidean')
M2 /= M2.max()
# loss matrix: square root of the Euclidean distance
Mp = np.sqrt(ot.dist(xs, xt, metric='euclidean'))
Mp /= Mp.max()
# Plot the data
pl.figure(1, figsize=(7, 3))
pl.clf()
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
pl.title('Source and target distributions')
# Plot the three cost matrices side by side
pl.figure(2, figsize=(7, 3))
pl.subplot(1, 3, 1)
pl.imshow(M1, interpolation='nearest')
pl.title('Euclidean cost')
pl.subplot(1, 3, 2)
pl.imshow(M2, interpolation='nearest')
pl.title('Squared Euclidean cost')
pl.subplot(1, 3, 3)
pl.imshow(Mp, interpolation='nearest')
pl.title('Sqrt Euclidean cost')
pl.tight_layout()
# -
# ## Dataset 1 : Plot OT Matrices
#
#
# +
# Solve exact OT (EMD) under each ground metric and visualise the couplings.
G1 = ot.emd(a, b, M1)
G2 = ot.emd(a, b, M2)
Gp = ot.emd(a, b, Mp)
# OT matrices
pl.figure(3, figsize=(7, 3))
pl.subplot(1, 3, 1)
ot.plot.plot2D_samples_mat(xs, xt, G1, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT Euclidean')
pl.subplot(1, 3, 2)
ot.plot.plot2D_samples_mat(xs, xt, G2, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT squared Euclidean')
pl.subplot(1, 3, 3)
ot.plot.plot2D_samples_mat(xs, xt, Gp, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT sqrt Euclidean')
pl.tight_layout()
pl.show()
# -
# ## Dataset 2 : Partial circle
#
#
# +
n = 50  # nb samples
# Source and target are two overlapping arcs of (most of) a unit circle,
# offset by one sample, so every source point has a nearby target.
xtot = np.zeros((n + 1, 2))
xtot[:, 0] = np.cos(
    (np.arange(n + 1) + 1.0) * 0.9 / (n + 2) * 2 * np.pi)
xtot[:, 1] = np.sin(
    (np.arange(n + 1) + 1.0) * 0.9 / (n + 2) * 2 * np.pi)
xs = xtot[:n, :]
xt = xtot[1:, :]
a, b = ot.unif(n), ot.unif(n)  # uniform distribution on samples
# loss matrix: Euclidean ground metric, normalised to [0, 1]
M1 = ot.dist(xs, xt, metric='euclidean')
M1 /= M1.max()
# loss matrix: squared Euclidean ground metric
M2 = ot.dist(xs, xt, metric='sqeuclidean')
M2 /= M2.max()
# loss matrix: square root of the Euclidean distance
Mp = np.sqrt(ot.dist(xs, xt, metric='euclidean'))
Mp /= Mp.max()
# Plot the data
pl.figure(4, figsize=(7, 3))
pl.clf()
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
pl.title('Source and target distributions')  # fix: title was misspelled 'traget'
# Plot the three cost matrices side by side
pl.figure(5, figsize=(7, 3))
pl.subplot(1, 3, 1)
pl.imshow(M1, interpolation='nearest')
pl.title('Euclidean cost')
pl.subplot(1, 3, 2)
pl.imshow(M2, interpolation='nearest')
pl.title('Squared Euclidean cost')
pl.subplot(1, 3, 3)
pl.imshow(Mp, interpolation='nearest')
pl.title('Sqrt Euclidean cost')
pl.tight_layout()
# -
# ## Dataset 2 : Plot OT Matrices
#
#
# +
# Solve exact OT (EMD) for dataset 2 under each ground metric and plot.
G1 = ot.emd(a, b, M1)
G2 = ot.emd(a, b, M2)
Gp = ot.emd(a, b, Mp)
# OT matrices
pl.figure(6, figsize=(7, 3))
pl.subplot(1, 3, 1)
ot.plot.plot2D_samples_mat(xs, xt, G1, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT Euclidean')
pl.subplot(1, 3, 2)
ot.plot.plot2D_samples_mat(xs, xt, G2, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT squared Euclidean')
pl.subplot(1, 3, 3)
ot.plot.plot2D_samples_mat(xs, xt, Gp, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT sqrt Euclidean')
pl.tight_layout()
pl.show()
|
_downloads/76a784f1b4907a99f6b908d9a32104cc/plot_OT_L1_vs_L2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Run this notebook before anything else!
import os
import requests
import time
from p4tools import io # from https://github.com/michaelaye/p4tools :
# +
# Before starting: update base_folder to an absolute path on your system,
# or do not change any paths.
base_folder = '../../'
# Set up the expected Data/ subfolder tree.
DataSubFolders = ['ClusteringResults', 'Figures', 'Images/HiRISE',
                  'Images/HiRISE_8bit_and_P4_mask', 'Models', 'SummaryResults']
for f in DataSubFolders:
    # exist_ok avoids the check-then-create race of the previous
    # `if not os.path.exists(...)` pattern and makes this cell idempotent.
    os.makedirs(base_folder + 'Data/' + f, exist_ok=True)
# HiRISE data URL and save path
URL_base = 'https://hirise-pds.lpl.arizona.edu/PDS/EXTRAS/RDR/ESP/ORB_0'
data_folder = base_folder + 'Data/Images/HiRISE/'  # recommended path
# +
def DownloadHiRISE(URL_base,data_folder):
    """Download every HiRISE RGB NOMAP JP2 listed in the P4 metadata into
    ``data_folder``, skipping files that are already present locally.
    A HEAD request checks existence on the server before each download."""
    metadata_df = io.get_meta_data()
    if not os.path.exists(data_folder):
        os.makedirs(data_folder)
    for index, row in metadata_df.iterrows():
        t0 = time.time()
        # The PDS archive buckets observations into orbit-range folders,
        # e.g. orbit 11931 -> '11900_011999'; derived from the OBSERVATION_ID.
        folder= str(int(int(row['OBSERVATION_ID'][4:10])/100)*100)+'_0'+str(int(int(row['OBSERVATION_ID'][4:10])/100)*100+99)
        FileName = row['OBSERVATION_ID']+'_RGB.NOMAP.JP2'
        if not os.path.isfile(os.path.join(data_folder, FileName)):
            # have not already downloaded file, so now download
            Full_URL = URL_base+folder+'/'+row['OBSERVATION_ID']+'/'+FileName
            r = requests.head(Full_URL)
            if r.status_code == requests.codes.ok:
                # file exists at URL
                print('Downloading and saving HiRISE image ',str(index),' of ',str(metadata_df.shape[0]),'from: ',Full_URL)
                r = requests.get(Full_URL)
                with open(os.path.join(data_folder, FileName), 'wb') as f:
                    f.write(r.content)
        else:
            print('HiRISE file already exists at: ',os.path.join(data_folder, FileName))
        t1 = time.time()
        print("time for this image = ",t1-t0)
# -
# Run the full download and report total wall time.
t0 = time.time()
DownloadHiRISE(URL_base,data_folder)#total size of 221 JP2 files is 39.3 GB on disk
t1 = time.time()
print("time to download all images = ",t1-t0) #takes around an hour
|
Notebooks/DownloadHiRISE/DownloadHiRISE.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="kkRdSCnY3Vul"
import pandas as pd
import numpy as np
# + id="AmAh_Ou93tDQ"
# Load the Filmweb reviews with sentiment labels (Colab-local path).
data = pd.read_excel("/content/recenzje_z_sentymentem.xlsx")
# + id="JT6HdmZp4SJ-" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="d7afcc02-5847-4491-f874-7615d6c811cc"
data.head()
# + id="bSvKaah44T6-" colab={"base_uri": "https://localhost:8080/"} outputId="f2e9772e-4f21-4cbf-a7ca-e57abf8f75aa"
# Inspect raw label distribution (-1 / 0 / 1 at this point).
data['sentyment'].value_counts()
# + id="oTGTt0go4fqH"
def rescale_sentiment(x):
    """Remap raw sentiment labels to model class ids.

    1 (positive) -> 1, 0 (neutral) -> 2, -1 (negative) -> 0.
    Any other value maps to None, mirroring the original fall-through.
    """
    remap = {1: 1, 0: 2, -1: 0}
    return remap.get(x)
# + id="rBdFlc7v45oS"
# Apply the label remapping (-1/0/1 -> 0/2/1) across the dataframe.
data['sentyment'] = data['sentyment'].map(rescale_sentiment)
# + id="LrtWdK4T5Dai" colab={"base_uri": "https://localhost:8080/"} outputId="47d03d06-9aad-454e-c029-2c2acb496f0f"
data['sentyment'].value_counts()
# + id="I59qDNzp5Fzu"
# Load Polish stop words, one per line.
with open('/content/stopwords.txt', "r") as f:
    STOP_WORDS = set([line.rstrip("\n") for line in f])
# + id="C_1mS7ky5b2G"
import re
def preprocess(intext: str):
    """Lowercase *intext*, collapse non-word characters to spaces, and drop
    tokens found in the notebook-global ``STOP_WORDS`` set."""
    tokens = re.sub(r'\W+', ' ', intext.lower()).split()
    kept = [tok for tok in tokens if tok not in STOP_WORDS]
    return " ".join(kept)
# + id="hCuMuqTS5hcl"
# Clean the review text column in place.
data['recenzja'] = data['recenzja'].map(preprocess)
# + id="gq-SBvuS5ndZ" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="5d309751-8bd9-4336-f9e0-04d001e98f3d"
data.head()
# + id="VdlyA-Q06S3z"
# NOTE(review): this is an alias, not a copy — film_data and data are the
# same dataframe object.
film_data = data
# + id="syWNOFK935x2"
film_data.to_csv('/content/drive/My Drive/filmweb_preprocessed.csv')
# + id="Uok5i7Qs5tj7" colab={"base_uri": "https://localhost:8080/"} outputId="9a38d654-5564-4a46-cfab-e23984ee9c48"
# !pip install transformers==2.8.0 -q
# + id="wp29kcct5ySF"
import numpy as np
import pandas as pd
from transformers import *
import torch
# + id="Jzd74kjZ50dJ" colab={"base_uri": "https://localhost:8080/"} outputId="4e5e7a20-f807-4e36-c113-dcc18656a904"
# Select the compute device: CUDA when available, otherwise CPU.
# Bug fix: the original unconditionally re-assigned
# `device = torch.device("cuda")` after this if/else, which clobbered the
# CPU fallback and would crash later on machines without a GPU.
if torch.cuda.is_available():
    # Tell PyTorch to use the GPU.
    device = torch.device("cuda")
    print('There are %d GPU(s) available.' % torch.cuda.device_count())
    print('We will use the GPU:', torch.cuda.get_device_name(0))
# If not...
else:
    print('No GPU available, using the CPU instead.')
    device = torch.device("cpu")
# + id="j2aAi5Lj4B7n"
# Reload the preprocessed reviews (so later cells don't need the cells above).
film_data = pd.read_csv('/content/drive/My Drive/filmweb_preprocessed.csv')
# + id="Igo1T1VT52Vk"
from sklearn.model_selection import StratifiedShuffleSplit
X = film_data.recenzja.to_numpy()
y = film_data.sentyment.to_numpy()
# First stratified split: 70% train, 30% held out...
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.3, random_state=42)
for train_index, test_index in sss.split(X, y):
    train_sentences, test_sentences = X[train_index], X[test_index]
    train_labels, test_labels = y[train_index], y[test_index]
X = test_sentences
y = test_labels
# ...then split the 30% hold-out into dev (~20% overall) and test (~10%).
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.33, random_state=42)
for train_index, test_index in sss.split(X, y):
    dev_sentences, test_sentences = X[train_index], X[test_index]
    dev_labels, test_labels = y[train_index], y[test_index]
# + id="v1z2LR_N6B8r" colab={"base_uri": "https://localhost:8080/"} outputId="8f7bda15-a06a-40d1-b7dd-acb7a586055d"
# Sanity check: print the resulting split proportions.
print('Train data')
print(len(train_sentences) / ( len(train_sentences) + len(dev_sentences) + len(test_sentences) ))
print('Dev data')
print(len(dev_sentences) / ( len(train_sentences) + len(dev_sentences) + len(test_sentences) ))
print('Test data')
print(len(test_sentences) / ( len(train_sentences) + len(dev_sentences) + len(test_sentences) ))
# + id="sBYOr09g-5mq"
# Persist the dev/test partitions for reuse in other notebooks.
dev_filmweb_data = pd.DataFrame([dev_sentences,dev_labels]).T
test_filmweb_data = pd.DataFrame([test_sentences,test_labels]).T
# + id="ZNpjQmE8_AfT"
dev_filmweb_data.to_csv('/content/drive/My Drive/dev_filmweb_data_preprocessed.csv')
test_filmweb_data.to_csv('/content/drive/My Drive/test_filmweb_data_preprocessed.csv')
# + id="O4Yb7Jua6eHG"
from sklearn.model_selection import StratifiedShuffleSplit
# Second round: re-split the TRAIN portion only (overwrites the earlier
# dev/test variables) into 80/10/10 of the training subset.
X = train_sentences
y = train_labels
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in sss.split(X, y):
    train_sentences, test_sentences = X[train_index], X[test_index]
    train_labels, test_labels = y[train_index], y[test_index]
X = test_sentences
y = test_labels
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=42)
for train_index, test_index in sss.split(X, y):
    dev_sentences, test_sentences = X[train_index], X[test_index]
    dev_labels, test_labels = y[train_index], y[test_index]
# + id="AboWQT2s6hxQ" colab={"base_uri": "https://localhost:8080/"} outputId="3ac6d1d8-26aa-4ecb-bd41-5c28a62e7243"
print('Train data')
print(len(train_sentences) / ( len(train_sentences) + len(dev_sentences) + len(test_sentences) ))
print('Dev data')
print(len(dev_sentences) / ( len(train_sentences) + len(dev_sentences) + len(test_sentences) ))
print('Test data')
print(len(test_sentences) / ( len(train_sentences) + len(dev_sentences) + len(test_sentences) ))
# + id="hzediaYd6mQK"
# Remove long sentences.
# TO-DO Possible cut?
def remove_big(sentences, labels):
    """Drop samples whose tokenized length exceeds the notebook-global
    ``MAX_LEN`` (uses the global ``tokenizer``).  Returns the filtered
    (sentences, labels) arrays and prints how many samples were removed.
    """
    oversized = [
        pos for pos, text in enumerate(sentences)
        if len(tokenizer.encode(text, add_special_tokens=True)) > MAX_LEN
    ]
    sentences = np.delete(sentences, oversized)
    labels = np.delete(labels, oversized)
    print('{} samples removed.'.format(len(oversized)))
    return sentences, labels
# + id="eejQt-gQ6p-R"
# Downloading tokenizer
# From Polbert - Polish BERT by <NAME>: https://github.com/kldarek/polbert
tokenizer = BertTokenizer.from_pretrained("dkleczek/bert-base-polish-uncased-v1")
# + id="4aah8bA36ri_" colab={"base_uri": "https://localhost:8080/"} outputId="60ea4e97-0f46-4f05-bf07-5a3b141f1b7f"
# Maximum token length accepted by the model input; longer samples are dropped.
MAX_LEN = 128
train_sentences, train_labels = remove_big(train_sentences, train_labels)
test_sentences, test_labels = remove_big(test_sentences, test_labels)
dev_sentences, dev_labels = remove_big(dev_sentences, dev_labels)
# + id="-T37Rdx_6tVy"
from torch.utils.data import TensorDataset
# Create TensorDatasets for train/dev/test sets
def tensor_dataset(sentences, labels):
    """Encode sentences with the notebook-global ``tokenizer`` (padded /
    truncated to MAX_LEN, with attention masks) and bundle them with the
    labels into a TensorDataset."""
    input_ids = []
    attention_masks = []
    for sent in sentences:
        encoded_dict = tokenizer.encode_plus(
            sent,
            add_special_tokens = True,
            max_length = MAX_LEN,
            pad_to_max_length = True,
            return_attention_mask = True,
            return_tensors = 'pt',  # Return pytorch tensors.
        )
        input_ids.append(encoded_dict['input_ids'])
        attention_masks.append(encoded_dict['attention_mask'])
    # Convert the lists into tensors.
    input_ids = torch.cat(input_ids, dim=0)
    attention_masks = torch.cat(attention_masks, dim=0)
    labels = torch.tensor(labels)
    dataset = TensorDataset(input_ids, attention_masks, labels)
    return dataset
# + id="cz3ohY896vQ5"
BATCH_SIZE = 8
# Build the three datasets from the filtered splits.
train_dataset = tensor_dataset(train_sentences, train_labels)
test_dataset = tensor_dataset(test_sentences, test_labels)
dev_dataset = tensor_dataset(dev_sentences, dev_labels)
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
# Create the DataLoaders for train/dev/test sets.
# Training data is shuffled; dev/test are read sequentially for reproducibility.
train_dataloader = DataLoader(train_dataset, sampler = RandomSampler(train_dataset), batch_size = BATCH_SIZE)
validation_dataloader = DataLoader(dev_dataset, sampler = SequentialSampler(dev_dataset), batch_size = BATCH_SIZE)
test_dataloader = DataLoader(test_dataset, sampler = SequentialSampler(test_dataset), batch_size = BATCH_SIZE)
# + id="RSMGe7aU6yux" colab={"base_uri": "https://localhost:8080/"} outputId="45354f1b-140a-42a8-ae16-63be36e603fe"
# Load model with a sequence classification head
# (3 labels: negative=0, positive=1, neutral=2 per rescale_sentiment).
model = BertForSequenceClassification.from_pretrained(
    "dkleczek/bert-base-polish-uncased-v1",  # Polbert - Polish BERT by <NAME>: https://github.com/kldarek/polbert
    num_labels = 3,
    output_attentions = False,
    output_hidden_states = False,
)
# Move the model to the GPU; requires CUDA to be available.
model.cuda()
# + id="lFkg2pWZ7KSN" colab={"base_uri": "https://localhost:8080/"} outputId="7b76b7f1-cdf4-4c8f-daab-d56a12b056f6"
import time, datetime
import numpy as np
from tqdm import tqdm
from transformers.optimization import AdamW
from transformers import AutoModelForSequenceClassification
from transformers import get_linear_schedule_with_warmup
from sklearn.metrics import f1_score

# Function to calculate the accuracy of our predictions vs labels
def flat_accuracy(preds, labels):
    """Return the fraction of argmax(preds) matching `labels` (both flattened)."""
    pred_flat = np.argmax(preds, axis=1).flatten()
    labels_flat = labels.flatten()
    return np.sum(pred_flat == labels_flat) / len(labels_flat)

# Takes a time in seconds and returns a string hh:mm:ss
def format_time(elapsed):
    """Format a duration in seconds as 'hh:mm:ss' (rounded to whole seconds)."""
    elapsed_rounded = int(round((elapsed)))
    return str(datetime.timedelta(seconds=elapsed_rounded))

# Parameters:
epochs = 3
#lr = 3e-3 # Learning rate (Adam): 5e-5, 3e-5, 2e-5
lr = 5e-5 # Learning rate (Adam): 5e-5, 3e-5, 2e-5
adam_epsilon = 1e-10  # NOTE(review): unusually small vs the common 1e-8 default — confirm intentional
WARM_UP = 0

optimizer = AdamW(model.parameters(), lr = lr, eps = adam_epsilon)
# Linear decay from `lr` to 0 over all training steps, no warmup.
total_steps = len(train_dataloader) * epochs
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps = WARM_UP, num_training_steps = total_steps)

train_loss_values = []  # per-epoch average training loss
dev_acc_values = []     # per-epoch validation accuracy
model.zero_grad()
t0 = time.time()
for epoch_i in range(0, epochs):
    print("")
    print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
    print('Training...')
    # Training loop adapted from
    # https://github.com/huggingface/transformers/blob/master/examples/run_glue.py
    # (lines 168-183)
    epoch_train_loss = 0 # Cumulative loss
    loss = 0 ; batch_loss = 0
    model.train()
    for step, batch in enumerate(train_dataloader):
        # Progress update every 40 batches.
        if step % 40 == 0 and not step == 0:
            # Calculate elapsed time in minutes.
            elapsed = format_time(time.time() - t0)
            # Report progress.
            print(' Batch {:>5,} of {:>5,}. Loss: {:.3f} Elapsed: {:}.'.format(step, len(train_dataloader), batch_loss, elapsed))
            batch_loss = 0
        b_input_ids = batch[0].to(device)
        b_input_mask = batch[1].to(device)
        b_labels = batch[2].to(device)
        # clear any previously calculated gradients before backward pass
        optimizer.zero_grad()
        outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)
        loss = outputs[0]
        epoch_train_loss += loss.item()
        batch_loss += loss.item()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) # Gradient clipping is not in AdamW anymore (so you can use amp without issue)
        optimizer.step()
        scheduler.step() # Update learning rate schedule
    epoch_train_loss = epoch_train_loss / len(train_dataloader)
    train_loss_values.append(epoch_train_loss)
    print('Average training loss: {0:.2f}'.format(epoch_train_loss))
    # Evaluation on the dev set after each epoch.
    total_eval_accuracy = 0
    model.eval()
    for batch in validation_dataloader:
        input_ids = batch[0].to(device)
        attention_masks = batch[1].to(device)
        labels = batch[2].to('cpu').numpy()
        with torch.no_grad():
            outputs = model(input_ids, token_type_ids=None, attention_mask=attention_masks)
        logits = outputs[0]
        logits = logits.detach().cpu().numpy()
        total_eval_accuracy += flat_accuracy(logits, labels)
    avg_val_accuracy = total_eval_accuracy / len(validation_dataloader)
    dev_acc_values.append(avg_val_accuracy)  # was declared but never filled before
    print(" Accuracy: {0:.4f}".format(avg_val_accuracy))
# + id="CLZ4SO8-7RF2"
# Final inference over the held-out test set: collect per-batch raw logits,
# argmax predictions and gold labels for the report in the next cell.
predicted_labels = [] ; true_labels = []; logits_list = []
for batch in test_dataloader:
    input_ids = batch[0].to(device)
    attention_masks = batch[1].to(device)
    labels = batch[2]
    with torch.no_grad():  # inference only — no gradients needed
        outputs = model(input_ids, token_type_ids=None, attention_mask=attention_masks)
    logits = outputs[0]
    logits = logits.detach().cpu().numpy()
    logits_list.append(logits)  # raw logits kept for any later analysis
    predictions = np.argmax(logits, axis=1).flatten()
    labels = labels.numpy().flatten()
    predicted_labels.extend( predictions )
    true_labels.extend( labels )
# + id="5nHMYnVc7T9t" colab={"base_uri": "https://localhost:8080/"} outputId="bed4d5f3-d93f-43ca-8964-969c8683cb2b"
# Parameters:
# (commented values below are presumably the hyperparameters of an earlier run,
# with the "#0.72-73" line recording its accuracy range — kept for reference)
#epochs = 3
#lr = 2e-5 # Learning rate (Adam): 5e-5, 3e-5, 2e-5
#adam_epsilon = 1e-8
#WARM_UP = 0.1
#0.72-73
from sklearn.metrics import classification_report
# Per-class precision/recall/F1 on the test split collected in the previous cell.
print( classification_report(y_true=true_labels, y_pred=predicted_labels, zero_division=0) )
# + id="aLG6t-Qg7ZBw" colab={"base_uri": "https://localhost:8080/"} outputId="63c196a6-5758-481f-eeb0-44e375eb4c8c"
# Run this cell to mount your Google Drive.
from google.colab import drive
drive.mount('/content/drive')
# + id="kpQ4_cDrFzn3" colab={"base_uri": "https://localhost:8080/"} outputId="46c3a394-e1b0-4ae7-8012-7fbf9f11f046"
import os
from transformers import WEIGHTS_NAME, CONFIG_NAME
# Destination on the mounted Google Drive; assumed to already exist.
output_dir = "/content/drive/My Drive/model_bert_finetuned_3"
# Step 1: Save a model, configuration and vocabulary that you have fine-tuned
# If we have a distributed model, save only the encapsulated model
# (it was wrapped in PyTorch DistributedDataParallel or DataParallel)
model_to_save = model.module if hasattr(model, 'module') else model
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_pretrained(output_dir)
# + id="SV-gxFs_GCcP"
# Step 2: Re-load the saved model and vocabulary
# (round-trips through the directory written above, rebinding `model`/`tokenizer`)
model = BertForSequenceClassification.from_pretrained(output_dir)
tokenizer = BertTokenizer.from_pretrained(output_dir)
# + id="keAArZNeGEyk"
|
filmweb_base_classifier.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="MHNr1zXp4diG" executionInfo={"status": "ok", "timestamp": 1628327509555, "user_tz": -330, "elapsed": 1818, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
from collections import deque
from PIL import Image
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import pandas as pd
import numpy as np
import scipy
import scipy.special
import gym
import gym.spaces as spaces
# + id="zbZbbhdc4mbG" executionInfo={"status": "ok", "timestamp": 1628327541243, "user_tz": -330, "elapsed": 773, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
class SlateSpace(spaces.MultiDiscrete):
    """A MultiDiscrete action space whose samples are slates: ordered,
    duplicate-free selections of `len(nvec)` items out of `nvec[0]` items.

    Every slate position must admit the full item catalogue, so all entries
    of `nvec` are required to be equal.
    """

    def __init__(self, nvec):
        assert np.unique(nvec).size == 1, 'each slate position should allow all available items to display.'
        assert len(nvec) <= nvec[0], f'slate size ({len(nvec)}) should be no larger than the number of items ({nvec[0]}).'
        super().__init__(nvec)

    def sample(self):
        """Draw one slate: a random permutation of all items, cut to slate size."""
        n_items = self.nvec[0]
        slate_len = len(self.nvec)
        permuted = self.np_random.permutation(n_items)
        return permuted[:slate_len].astype(self.dtype)

    def sample_batch(self, batch_size):
        """Draw `batch_size` slates at once, vectorized to avoid a Python loop."""
        n_items = self.nvec[0]
        slate_len = len(self.nvec)
        # One row per sample, each an independent permutation of the catalogue.
        rows = np.tile(np.arange(n_items)[None, :], (batch_size, 1))
        rows = np.apply_along_axis(func1d=self.np_random.permutation, axis=1, arr=rows)
        return rows[:, :slate_len]

    def contains(self, x):
        """A slate is valid iff the parent space accepts it and its items are distinct."""
        has_no_duplicates = (np.unique(x).size == len(x))
        return has_no_duplicates and super().contains(x)

    def __repr__(self):
        return f'SlateSpace({self.nvec})'

    def __eq__(self, other):
        return isinstance(other, SlateSpace) and np.all(self.nvec == other.nvec)
# + id="BUzRa3uo4bIv" executionInfo={"status": "ok", "timestamp": 1628327564775, "user_tz": -330, "elapsed": 1326, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
class Env(gym.Env):
    """Gym environment simulating slate recommendation.

    At each step the agent proposes a slate of item ids; a simulated user
    clicks one item (or a skip pseudo-item) with probability proportional to
    the softmax of rewards predicted by `reward_model_callback`. Observations
    are user-state vectors produced by `user_state_model_callback` from the
    user's recent click history.
    """
    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second' : 3
    }
    reward_range = (-float('inf'), float('inf'))

    def __init__(
        self, user_ids, item_category, item_popularity,
        hist_seq_len, slate_size,
        user_state_model_callback, reward_model_callback,
    ):
        # user_ids: pool of user ids sampled from on reset().
        # item_category / item_popularity: per-item metadata (parallel lists).
        # hist_seq_len: number of recent clicks encoding the user state.
        # slate_size: number of items per recommended slate.
        self.user_ids = user_ids
        assert len(item_category) == len(item_popularity)
        item_category = [str(i) for i in item_category] # enforce str, otherwise visualization won't work well
        self.item_category = item_category
        item_popularity = np.asarray(item_popularity)
        # Popularity rescaled to [0, 1]; used for the EPC plot in _get_img().
        self.scaled_item_popularity = item_popularity/max(item_popularity)
        self.hist_seq_len = hist_seq_len
        self.slate_size = slate_size
        self.user_state_model_callback = user_state_model_callback
        self.reward_model_callback = reward_model_callback
        # Sentinel id for "no item" / the skip pseudo-item.
        self.nan_item_id = -1
        self.user_id = None # enforce calling `env.reset()`
        self.hist_seq = deque([self.nan_item_id]*hist_seq_len, maxlen=hist_seq_len) # FIFO que for user's historical interactions
        assert len(self.hist_seq) == hist_seq_len
        # Probe the callback once to discover the observation dimensionality.
        obs_dim = len(user_state_model_callback(user_ids[0], self.hist_seq))
        self.observation_space = spaces.Box(
            low=-float('inf'),
            high=float('inf'),
            shape=(obs_dim,),
            dtype=np.float32
        )
        # NOTE: do NOT use `gym.spaces.MultiDiscrete`: it does NOT support unique sampling for slate
        # i.e. a sampled action may contain multiple redundant item in the slate!
        self.action_space = SlateSpace((len(item_category),)*slate_size)
        # some loggings for visualization
        self.user_logs = []
        self.rs_logs = []
        self.timestep = 0
        self.viewer = None
        self.fig, self.axes = None, None
        self.seed()

    def seed(self, seed=None):
        """Seed the internal NumPy generator and return the effective seed."""
        self.rng = np.random.default_rng(seed=seed)
        return self.rng.bit_generator._seed_seq.entropy  # in case `seed=None`, system generated seed will be returned

    def step(self, action):
        """Show slate `action` to the user and sample a click (or a skip)."""
        assert action in self.action_space
        assert np.unique(action).size == len(action), 'repeated items in slate are not allowed!'
        # append a skip-item at the end of the slate to allow user to skip the slate
        # pre-trained reward model will give a learned reward for skipping
        action = [*action, self.nan_item_id]
        action_item_reward = self.reward_model_callback(self.user_id, self.hist_seq, action)
        assert action_item_reward.ndim == 1 and len(action_item_reward) == len(action)
        # TODO: customize user choice model as input to the environment constructor
        # for the moment, only sampling in proportion to predicted rewards
        choice_dist = scipy.special.softmax(action_item_reward)
        idx = self.rng.choice(len(action), size=None, p=choice_dist)
        clicked_item_id = action[idx]
        is_click = (clicked_item_id != self.nan_item_id)
        # update user state transition
        # NOTE: when user skips, `hist_seq` will not change.
        # For RL agent training (e.g. DQN), it's important to have exploration!
        # Otherwise, agent might get stuck with suboptimal behavior by repeated observation
        # Also, replay buffer may be dominated by such transitions with identical observations
        if is_click: # user clicked an item in the slate
            self.hist_seq.append(clicked_item_id)
        self.timestep += 1
        # track interactions for visualization
        self.user_logs.append({
            'timestep': self.timestep,
            'clicked_item_id': clicked_item_id, # NOTE: include skip activity
            'choice_dist': choice_dist.tolist()
        })
        self.rs_logs.append({
            'timestep': self.timestep,
            'slate': action # NOTE: include skip pseudo-item
        })
        obs = self._get_obs()
        # Alternative: reward = action_item_reward.min() - 1.*action_item_reward.std()
        reward = action_item_reward[idx]
        # Zero out rewards that don't beat the skip reward (last entry) —
        # presumably "no better than skipping" counts as no reward; confirm intent.
        if reward <= action_item_reward[-1]:
            reward = 0.
        # NOTE: episodes never terminate from inside the env; wrap with a
        # TimeLimit/max-steps wrapper to bound rollouts.
        done = False
        info = {
            'is_click': is_click,
            'clicked_item_id': clicked_item_id,
            'action_item_reward': action_item_reward.tolist(),
            'choice_dist': choice_dist.tolist()
        }
        return obs, reward, done, info

    def _get_obs(self):
        """Encode the current (user_id, click history) into an observation vector."""
        user_state = self.user_state_model_callback(self.user_id, self.hist_seq) # -> [user_state, ]
        assert user_state in self.observation_space
        return user_state

    def reset(self, **kwargs):
        """Start a new episode; optionally pin the user via `user_id` kwarg."""
        if kwargs.get('user_id', None) is not None:
            user_id = kwargs['user_id']
            assert user_id in self.user_ids
            self.user_id = user_id
        else:
            self.user_id = self.rng.choice(self.user_ids, size=None)
        # Fresh, empty click history (all skip sentinels).
        self.hist_seq = deque([self.nan_item_id]*self.hist_seq_len, maxlen=self.hist_seq_len)
        assert len(self.hist_seq) == self.hist_seq_len
        # some loggings for visualization
        self.user_logs = []
        self.rs_logs = []
        self.timestep = 0
        return self._get_obs()

    def _get_img(self):
        """Render the 2x3 diagnostic dashboard of the episode so far as an RGB array."""
        # clear all previous images
        [ax.cla() for ax in self.axes.flatten()]
        # we require strict ordering of the category type in the plot
        # so we use `pd.Categorical` below in `sns.lineplot` to enforce consistent ordering
        categories = np.unique(self.item_category).tolist()
        categories = ['@skip', *categories]
        # enforce str for each category, otherwise `pd.Categorical` breaks with NaN
        categories = [str(c) for c in categories]
        cat_dist_all = pd.Categorical(self.item_category, categories=categories, ordered=True).value_counts()
        cat_dist_all /= cat_dist_all.sum() # `normalize` keyword NOT existed for `pd.Categorical`
        def _barplot_cat_dist_all(cat_dist_all, categories, ax):
            # Faint bars of the catalogue-wide category distribution as a backdrop.
            sns.barplot(x=cat_dist_all.index, y=cat_dist_all.values, order=categories, alpha=.3, ax=ax)
            for patch in ax.patches: # draw dashed edge on top for each true_category, better visual
                x = [patch.get_x(), patch.get_x() + patch.get_width()]
                y = [patch.get_height()]*2
                ax.plot(x, y, ls='--', lw=1.5, c=patch.get_edgecolor(), alpha=1.)
        df_user_logs = pd.DataFrame(self.user_logs).sort_values(by='timestep', ascending=True)
        df_rs_logs = pd.DataFrame(self.rs_logs).sort_values(by='timestep', ascending=True)
        # Map every click (incl. skips) to its category label.
        user_click_cat = df_user_logs['clicked_item_id'].apply(
            lambda item_id: str(self.item_category[item_id]) if item_id != self.nan_item_id else '@skip'
        )
        user_click_cat = pd.Categorical(user_click_cat, categories=categories, ordered=True)
        # figure [0, 0]: Overall User Choices
        cat_dist_user = user_click_cat.value_counts()
        cat_dist_user /= cat_dist_user.sum() # `normalize` keyword NOT existed for `pd.Categorical`
        _barplot_cat_dist_all(cat_dist_all, categories, ax=self.axes[0, 0])
        g = sns.barplot(x=cat_dist_user.index, y=cat_dist_user.values, order=categories, alpha=.8, ax=self.axes[0, 0])
        g.set(title='Overall User Choices', ylim=(0., 1.), xlabel='Category', ylabel='Percent')
        # figure [1, 0]: Overall Recommendations
        cat_dist_rs = df_rs_logs.explode('slate')
        cat_dist_rs = cat_dist_rs[cat_dist_rs['slate'] != self.nan_item_id] # remove skip pseudo-item in slate for visualization
        cat_dist_rs = cat_dist_rs['slate'].apply(
            lambda item_id: str(self.item_category[item_id])
        )
        cat_dist_rs = pd.Categorical(cat_dist_rs, categories=categories, ordered=True).value_counts()
        cat_dist_rs /= cat_dist_rs.sum() # `normalize` keyword NOT existed for `pd.Categorical`
        _barplot_cat_dist_all(cat_dist_all, categories, ax=self.axes[1, 0])
        g = sns.barplot(x=cat_dist_rs.index, y=cat_dist_rs.values, order=categories, alpha=.8, ax=self.axes[1, 0])
        g.set(title='Overall Recommendations', ylim=(0., 1.), xlabel='Category', ylabel='Percent')
        # figure [0, 1]: Sequential User Choices
        g = sns.lineplot(
            x=range(1, self.timestep+1), y=user_click_cat,
            marker='o', markersize=8, linestyle='--', alpha=.8,
            ax=self.axes[0, 1]
        )
        g.set( # gym animation wrapper `Monitor` requires both `yticks` and `yticklabels`
            title='Sequential User Choices', yticks=range(len(categories)), yticklabels=categories,
            xlabel='Timestep', ylabel='Category'
        )
        if self.spec is not None:
            g.set_xlim(1, self.spec.max_episode_steps)
        # figure [1, 1]: Intra-Slate Diversity (Shannon)
        # Per-slate category entropy, normalized to [0, 1] by using base = #categories.
        rs_diversity = df_rs_logs['slate'].apply(lambda slate: list(filter(lambda x: x != self.nan_item_id, slate)))
        rs_diversity = rs_diversity.apply(
            lambda slate: [str(self.item_category[item_id]) for item_id in slate]
        )
        _categories_wo_skip = list(filter(lambda c: c != '@skip', categories))
        rs_diversity = rs_diversity.apply(lambda slate: pd.Categorical(slate, categories=_categories_wo_skip, ordered=True))
        rs_diversity = rs_diversity.apply(lambda slate: slate.value_counts().values)
        rs_diversity = rs_diversity.apply(lambda slate: slate/slate.sum())
        # NOTE(review): uses scipy.stats, but only `scipy` and `scipy.special` are
        # imported at the top of this notebook — confirm scipy.stats is available.
        rs_diversity = rs_diversity.apply(lambda slate: scipy.stats.entropy(slate, base=len(slate)))
        g = sns.lineplot(
            x=range(1, self.timestep+1), y=rs_diversity,
            marker='o', markersize=8, linestyle='--', alpha=.8,
            ax=self.axes[1, 1]
        )
        g.set(
            title='Intra-Slate Diversity (Shannon)',
            xlabel='Timestep', ylabel='Shannon Entropy',
            ylim=(0., 1.)
        )
        if self.spec is not None:
            g.set_xlim(1, self.spec.max_episode_steps)
        # figure [0, 2]: User Choice Distribution
        # make sure the skip pesudo-item is located in the final position
        assert df_rs_logs['slate'].tail(1).item()[-1] == self.nan_item_id
        choice_dist = df_user_logs['choice_dist'].tail(1).item()
        slate_position = list(range(1, self.slate_size+1+1)) # add one more: for skip pseudo-item
        slate_position = [str(i) for i in slate_position]
        slate_position[-1] = '@skip'
        df = pd.DataFrame({'slate_pos': slate_position, 'click_prob': choice_dist})
        g = sns.barplot(
            x='slate_pos', y='click_prob',
            order=slate_position, alpha=.8, color='b', data=df,
            ax=self.axes[0, 2]
        )
        g.set(title='User Choice Distribution', xlabel='Slate Position', ylabel='Click Probability')
        # figure [1, 2]: Expected Popularity Complement (EPC)
        # EPC: measures the ability to recommend long-tail items in top positions
        # formula: Eq. (7) in https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.1089.1342&rep=rep1&type=pdf
        slate_epc = df_rs_logs['slate'].apply(lambda slate: list(filter(lambda x: x != self.nan_item_id, slate)))
        _rank_discount = np.log2(np.arange(1, self.slate_size+1) + 1)
        slate_epc = slate_epc.apply(
            lambda slate: np.asarray([1. - self.scaled_item_popularity[item_id] for item_id in slate])/_rank_discount
        )
        slate_epc = slate_epc.apply(
            lambda slate: np.sum(slate)/np.sum(1./_rank_discount)
        )
        g = sns.lineplot(
            x=range(1, self.timestep+1), y=slate_epc,
            marker='o', markersize=8, linestyle='--', alpha=.8,
            ax=self.axes[1, 2]
        )
        g.set(
            title='Expected Popularity Complement (EPC)',
            xlabel='Timestep', ylabel='EPC',
            ylim=(0., 1.)
        )
        if self.spec is not None:
            g.set_xlim(1, self.spec.max_episode_steps)
        self.fig.suptitle(f'User ID: {self.user_id}, Time step: {self.timestep}', y=1.0, size='x-large')
        self.fig.tight_layout()
        # Rasterize the matplotlib canvas into an RGB numpy array.
        self.fig.canvas.draw()
        img = Image.frombytes('RGB', self.fig.canvas.get_width_height(), self.fig.canvas.tostring_rgb())
        img = np.asarray(img)
        return img

    def render(self, mode='human', **kwargs):
        """Render the dashboard; returns an RGB array or displays it in a viewer."""
        if self.fig is None and self.axes is None:
            # Lazily create the figure the first time render() is called.
            self.fig, self.axes = plt.subplots(2, 3, figsize=(3*2*6, 2*2*4))
            sns.set()
        if self.timestep == 0: # gym Monitor may call `render` at very first step, so return empty image
            self.fig.canvas.draw()
            img = Image.frombytes('RGB', self.fig.canvas.get_width_height(), self.fig.canvas.tostring_rgb())
            img = np.asarray(img)
        else:
            img = self._get_img()
        if mode == 'rgb_array':
            return img
        elif mode == 'human':
            # Imported lazily: requires a display, so keep it out of module import.
            from gym.envs.classic_control.rendering import SimpleImageViewer
            if self.viewer is None:
                maxwidth = kwargs.get('maxwidth', int(4*500))
                self.viewer = SimpleImageViewer(maxwidth=maxwidth)
            self.viewer.imshow(img)
            return self.viewer.isopen

    def close(self):
        """Release the viewer window and all matplotlib resources."""
        if self.viewer is not None:
            self.viewer.close()
            self.viewer = None
        plt.close('all') # close all with matplotlib, free memory
        self.fig = None
        self.axes = None
# + id="bfx3V6IF4sxh" executionInfo={"status": "ok", "timestamp": 1628327979429, "user_tz": -330, "elapsed": 483, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# This describes a list of available user IDs for the simulation.
# Normally, a user ID is an integer.
# user_ids = [0, 1, 2]
# user ID will be taken as an input to user_state_model_callback to generate observations of the user state.
# This describes the categories of a list of available items.
# The data type should be a list of strings.
# The indices of the list is assumed to correspond to item IDs.
# item_category = ['sci-fi', 'romance', 'sci-fi']
# The category information is mainly used for visualization via env.render().
# This describe the popularity measure of a list of available items.
# The data type should be a list (or 1-dim array) of integers.
# The indices of the list is assumed to correspond to item IDs.
# item_popularity = [5, 3, 1]
# The popularity information is used for calculating Expected Popularity Complement (EPC) in the visualization.
# This is an integer describing the number of most recently clicked items by the user to encode as the current state of the user.
# hist_seq = [-1, 2, 0]
# The item ID -1 indicates an empty event. In this case, the user clicked two items in the past, first item ID 2 followed by a second item ID 0.
# The internal FIFO queue hist_seq will be taken as an input to both user_state_model_callback and reward_model_callback to generate observations of the user state.
# This is an integer describing the size of the slate (display list of recommended items).
# slate_size = 2
# It induces a combinatorial action space for the RL agent.
# This is a Python callback function taking user_id and hist_seq as inputs to generate an observation of current user state.
# user_state_model_callback
# Note that it is generic.
# Either pre-defined heuristic computations or pre-trained neural network models using user/item embeddings can be wrapped as a callback function.
# This is a Python callback function taking user_id, hist_seq and action as inputs to generate a reward value for each item in the slate. (i.e. action)
# reward_model_callback
# Note that it is generic.
# Either pre-defined heuristic computations or pre-trained neural network models using user/item embeddings can be wrapped as a callback function.
# + id="7IrHc0js6STN" executionInfo={"status": "ok", "timestamp": 1628328689207, "user_tz": -330, "elapsed": 442, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# First, let us sample random embeddings for one user and five items:
user_features = np.random.randn(1, 10)
item_features = np.random.randn(5, 10)
# Now let us define the category and popularity score for each item:
item_category = ['sci-fi', 'romance', 'sci-fi', 'action', 'sci-fi']
item_popularity = [5, 3, 1, 2, 3]
# Then, we define callback functions for user state and reward values:
def user_state_model_callback(user_id, hist_seq):
    # Toy user model: ignores the click history and returns a static embedding.
    return user_features[user_id]
def reward_model_callback(user_id, hist_seq, action):
    # Toy reward model: dot product of user and item embeddings.
    # NOTE(review): Env.step appends the skip pseudo-item id (-1) to `action`,
    # so item_features[-1] (the LAST real item) doubles as the skip reward —
    # confirm this aliasing is intended.
    return np.inner(user_features[user_id], item_features[action])
# Finally, we are ready to create a simulation environment with OpenAI Gym API:
env_kws = dict(
    user_ids=[0],
    item_category=item_category,
    item_popularity=item_popularity,
    hist_seq_len=3,
    slate_size=2,
    user_state_model_callback=user_state_model_callback,
    reward_model_callback=reward_model_callback
)
env = Env(**env_kws)
# we created the environment with slate size of two items and historical interactions of the recent 3 steps.
# The horizon is 50 time steps.
# + colab={"base_uri": "https://localhost:8080/"} id="1C3Xmvpd-hre" executionInfo={"status": "ok", "timestamp": 1628329110772, "user_tz": -330, "elapsed": 13679, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="73382baa-2392-49f7-d758-f0c2e5592d58"
# Install a virtual display + recorder so the env can render off-screen in Colab.
# !apt-get install -y xvfb python-opengl ffmpeg > /dev/null 2>&1
# !pip install -U colabgymrender
# + colab={"base_uri": "https://localhost:8080/"} id="OhjNN-J1-d8s" outputId="b6234b45-4dff-4be3-ebba-f1ecc197cde4"
from colabgymrender.recorder import Recorder
directory = './video'
env = Recorder(env, directory)
# Roll out one episode with a uniformly random agent and record it to video.
# NOTE(review): Env.step always returns done=False; unless Recorder (or a
# TimeLimit wrapper) terminates the episode, this loop never ends — confirm.
observation = env.reset()
terminal = False
while not terminal:
    action = env.action_space.sample()
    observation, reward, terminal, info = env.step(action)
env.play()
|
_docs/nbs/recsys-gym-simulation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Cesar17188/EDA/blob/master/Regresion_y_evaluacion_de_hipotesis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="XFmPTyTPGq8T"
# # Regresión y evaluación de hipótesis
# + [markdown] id="mkNIQ0HFGv0k"
# * Neural Network Regressior
# * Decision Tree Regression
# * LASSO Regression
# * Ridge Regression
# * ElasticNet Regression
#
# + id="LPrWw-mxGm-b"
import pandas as pd
from sklearn import datasets
import numpy as np
# + id="_Yxzfs27HC_i"
# Load the classic iris dataset (150 samples, 4 features, 3 classes).
iris=datasets.load_iris()
# + id="SG6glMvYHeo5"
# Assemble a DataFrame with named feature columns, the integer target,
# and (next cells) a readable class-name column.
data=pd.DataFrame(iris.data)
data.columns=iris.feature_names
data['target']=iris.target
# + colab={"base_uri": "https://localhost:8080/"} id="fF5i2DWpHfqa" outputId="1da9f423-6144-4195-c794-e0ad7094ecfd"
iris.target_names
# + id="of5dj0mZH1Tg"
data['name_t']=data['target'].map({0:'setosa',1:'versicolor',2:'virginica'})
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="zlNAvcSEIKKC" outputId="ca700049-caf2-4312-a7c5-7fe793a4103b"
data.head()
# + id="Y4iOTIVUIPua"
from sklearn.model_selection import train_test_split
# + id="OD7lvzK9Id3X"
# Descriptor columns = every column that is not a target column.
# NOTE: set arithmetic does not preserve column order.
target=['target','name_t']
desc=list(set(list(data.columns))-set(target))
# + colab={"base_uri": "https://localhost:8080/"} id="lUKgkZRxJEnV" outputId="dad4e431-c6e6-4000-dfe3-5607c5fb0a5b"
desc
# + id="eACIJ6_FJNtu"
x=data[desc].values
y=data['target'].values
# + id="pKXjAaPXJXxn"
# 70/30 train/test split with a fixed seed for reproducibility.
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.30,random_state=40)
# + id="YRzGuiUsKbly"
from sklearn.tree import DecisionTreeRegressor
# + colab={"base_uri": "https://localhost:8080/"} id="VHUgiaO3KiMi" outputId="00be1be0-2b42-4297-81ae-fa6b15edea94"
# NOTE(review): a REGRESSOR is fit on integer class labels and its predictions
# are fed to classification_report below; that only works when every leaf is
# pure (integer-valued predictions). DecisionTreeClassifier would be the
# conventional choice — presumably the regressor is deliberate for this lesson.
reg_1=DecisionTreeRegressor(max_depth=4)
reg_1.fit(x_train,y_train)
# + id="lJeQbDF_KwnU"
predict_train=reg_1.predict(x_train)
predict_test=reg_1.predict(x_test)
# + id="M6r24o1BLGXE"
from sklearn.metrics import classification_report, confusion_matrix
# + colab={"base_uri": "https://localhost:8080/"} id="5UpIYkAxLRv6" outputId="34503825-def5-464a-ec66-a4750c9f8f49"
print(classification_report(y_test,predict_test))
# + colab={"base_uri": "https://localhost:8080/"} id="E5_KCa35Lbei" outputId="80523d56-eb41-4337-8207-b786847099f4"
print(confusion_matrix(y_test,predict_test))
# + id="XjqiWNzwMA4H"
|
Regresion_y_evaluacion_de_hipotesis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The Redistribution Function
#
# **<NAME>**
#
# **January 2021**
#
# **version 2**
import numpy as np
import matplotlib.pyplot as plt
import iadpython
# ## Redistribution function.
#
# The single scattering phase function $p(\nu)$ for a tissue determines the
# amount of light scattered at an angle $\nu=\cos\theta$ from the direction of
# incidence. The
# subtended angle $\nu$ is the dot product
# of the unit vectors $\hat{\bf s}_i$ and $\hat{\bf s}_j$
#
# $$
# \nu=\hat{\bf s}_i\cdot\hat{\bf s}_j=\nu_i\nu_j+\sqrt{1-\nu_i^2}\sqrt{1-\nu_j^2}\cos\phi
# $$
#
# where $\hat{\bf s}_i$ is the incident and $\hat{\bf s}_j$ is the scattered light directions
#
# The redistribution function ${\bf h}_{ij}$ determines the fraction of light
# scattered from an incidence cone with angle $\nu_i$ into a cone with angle
# $\nu_j$. The redistribution function is calculated by averaging the phase
# function over all possible azimuthal angles for fixed angles $\nu_i$ and
# $\nu_j$,
#
# $$
# h(\nu_i,\nu_j) = {1\over2\pi}
# \int_0^{2\pi} p(\nu_i\nu_j+\sqrt{1-\nu_i^2}\sqrt{1-\nu_j^2}\cos\phi)\,d\phi
# $$
#
# Note that the angles $\nu_i$ and $\nu_j$ may also be negative (light
# travelling in the opposite direction). The full redistribution matrix may be
# expressed in terms of a $2\times2$ matrix of $n\times n$ matrices
#
# $$
# \mathbf{h}=\left[\matrix{\mathbf{h}^{--}&\mathbf{h}^{-+}\cr
# \mathbf{h}^{+-}&\mathbf{h}^{++}\cr}
# \right]
# $$
#
# The first plus or minus sign indicates the sign in front of the incident
# angle and the second is the sign of the direction of the scattered light.
#
# When the cosine of the angle of incidence or exitance is unity ($\nu_i=1$ or
# $\nu_j=1$), then the redistribution function $h(1,\nu_j)$ is equivalent to the phase
# function $p(\nu_j)$. In the case of isotropic scattering, the
# redistribution function is a constant
#
# $$
# h(\nu_i,\nu_j) = p(\nu) = {1\over4\pi}.
# $$
#
# Other phase functions require numerical integration of the phase
# function. If the phase function is highly anisotropic, then the
# integration over the azimuthal angle is particularly difficult and care
# must be taken to ensure that the integration is accurate. This is
# important because errors in the redistribution function enter directly
# into the reflection and transmission matrices for thin layers. Any
# errors will be doubled with each successive addition of layers and small
# errors will rapidly increase.
# ## Redistribution Matrix using Legendre Polynomials
#
# One way to calculate the redistribution function is the
# $\delta$-$M$ method of Wiscombe. This method works especially
# well for highly anisotropic phase functions. The number of quadrature
# points is specified by $M$. The $\delta$-$M$ method approximates the
# true phase function by a phase function consisting of a Dirac delta
# function and $M-1$ Legendre polynomials
#
# $$
# p^*(\nu)= 2 g^M\delta(1-\nu) + (1-g^M) \sum_{k=0}^{M-1} (2k+1)\chi_k^* P_k(\nu)
# $$
#
# where
#
# $$
# \chi_k^*={\chi_k-g^M\over 1-g^M}
# \qquad\mbox{and}\qquad
# \chi_k = {1\over2}\int_0^1 p(\nu) P_k(\nu) \,d\nu
# $$
#
# When the $\delta$-$M$ method substitutes $p^*(\nu)\rightarrow p(\nu)$,
# then both the albedo and optical thickness must also be changed,
# $a^*\rightarrow a$ and $\tau^*\rightarrow\tau$. This approximation is
# analogous to the similarity transformation often used to improve the
# diffusion approximation by moving a part ($g^M$) of the scattered light
# into the unscattered component. The new optical
# thickness and albedo are
#
# $$
# \tau^*=(1-ag^M)\tau
# \qquad\mbox{and}\qquad
# a^* = a {1-g^M\over1-ag^M}
# $$
#
# This is equivalent to transforming the scattering coefficient as
# $\mu_s^* = \mu_s(1-g^M)$. The redistribution function can now be written
# as
#
# $$
# h^*(\nu_i,\nu_j) = \sum_{k=0}^{M-1} (2k+1)\chi_k^* P_k(\nu_i)P_k(\nu_j)
# $$
#
# For the special case of a Henyey-Greenstein phase function,
# $$
# \chi_k^*={g^k-g^M\over1-g^M}.
# $$
#
# The current implementation is somewhat inefficient, but it works.
# +
# Strongly forward-scattering sample (g=0.9) with 4 quadrature points;
# compute the redistribution matrices via the delta-M / Legendre expansion.
s = iadpython.Sample(g=0.9, quad_pts=4)
hp, hm = iadpython.hg_legendre(s)
print(hp)
print()
print(hm)
# -
# ## Redistribution Matrix using Elliptic Functions
#
# For Henyey-Greenstein scattering, the redistribution function can be expressed
# in terms of the complete elliptic integral of the second kind $E(x)$
# $$
# h(\nu_i,\nu_j) = {2\over\pi}{1-g^2\over (\alpha-\gamma)\sqrt{\alpha+\gamma} }
# \,E\left(\sqrt{2\gamma\over\alpha+\gamma}\,\right)
# $$
# where $g$ is the average cosine of the Henyey-Greenstein phase function and
# $$
# \alpha=1+g^2-2 g \nu_i \nu_j
# \qquad\mbox{and}\qquad
# \gamma=2 g \sqrt{1-\nu_i^2}\sqrt{1-\nu_j^2}
# $$
# The function $E(x)$ may be calculated using `scipy.special.ellipe()`.
#
# The drawback to this approach is that the $\delta-M$ method cannot be used and therefore it doesn't work well for highly anisotropic phase functions.
# +
# Same sample as above, but using the closed-form elliptic-integral expression
# for the Henyey-Greenstein redistribution function.
s = iadpython.Sample(g=0.9, quad_pts=4)
hpe, hme = iadpython.hg_elliptic(s)
# Bug fix: this cell previously printed `hp`/`hm` — the Legendre results from
# the earlier cell — instead of the elliptic results computed here, so the two
# methods could never actually be compared.
print(hpe)
print()
print(hme)
# -
|
docs/redistribution.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
from dotenv import load_dotenv, find_dotenv
import sys
sys.path.append('../src')
from azure_cognitive import ComputerVision
# load the .env settings
load_dotenv(find_dotenv())
# +
img_ar = 'https://amp.businessinsider.com/images/5acfa583146e711c008b4720-750-375.jpg'
weapon_tags = ['weapon', 'gun', 'knife', 'axe']
def is_weapon(image):
    """Return True if Azure Computer Vision tags the image as weapon-related.

    Parameters
    ----------
    image : str
        URL of the image to analyze.

    Returns
    -------
    bool
        True when any tag in the module-level ``weapon_tags`` list appears in
        the response's description tags.
    """
    vision = ComputerVision(os.getenv('AZURE_KEY'), os.getenv('AZURE_REGION_CODE'))
    # Bug fix: analyze the function argument instead of the module-level
    # ``img_ar`` URL, so the function works for any image passed in.
    resp = vision.submit(image)
    return not set(weapon_tags).isdisjoint(resp['description']['tags'])
# -
is_weapon(img_ar)
type(img_ar) is str
|
notebooks/01_Azure_Vision.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="TBFXQGKYUc4X"
# ##### Copyright 2018 The TensorFlow Authors.
# + cellView="form" colab={} colab_type="code" id="1z4xy2gTUc4a"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="FE7KNzPPVrVV"
# # Image classification
# + [markdown] colab_type="text" id="KwQtSOz0VrVX"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/tutorials/images/classification"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/images/classification.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/images/classification.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/images/classification.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="gN7G9GFmVrVY"
# This tutorial shows how to classify images of flowers. It creates an image classifier using a `keras.Sequential` model, and loads data using `preprocessing.image_dataset_from_directory`. You will gain practical experience with the following concepts:
#
# * Efficiently loading a dataset off disk.
# * Identifying overfitting and applying techniques to mitigate it, including data augmentation and Dropout.
#
# This tutorial follows a basic machine learning workflow:
#
# 1. Examine and understand data
# 2. Build an input pipeline
# 3. Build the model
# 4. Train the model
# 5. Test the model
# 6. Improve the model and repeat the process
# + colab={} colab_type="code" id="MQb2NgWuDgi5"
# !pip install tf-nightly
# + [markdown] colab_type="text" id="zF9uvbXNVrVY"
# ## Import TensorFlow and other libraries
# + colab={} colab_type="code" id="L1WtoaOHVrVh"
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
# + [markdown] colab_type="text" id="UZZI6lNkVrVm"
# ## Download and explore the dataset
# + [markdown] colab_type="text" id="DPHx8-t-VrVo"
# This tutorial uses a dataset of about 3,700 photos of flowers. The dataset contains 5 sub-directories, one per class:
#
# ```
# flower_photo/
# daisy/
# dandelion/
# roses/
# sunflowers/
# tulips/
# ```
# + colab={} colab_type="code" id="57CcilYSG0zv"
import pathlib
dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
# + [markdown] colab_type="text" id="VpmywIlsVrVx"
# After downloading, you should now have a copy of the dataset available. There are 3,670 total images:
# + colab={} colab_type="code" id="SbtTDYhOHZb6"
image_count = len(list(data_dir.glob('*/*.jpg')))
print(image_count)
# + [markdown] colab_type="text" id="PVmwkOSdHZ5A"
# Here are some roses:
# + colab={} colab_type="code" id="N1loMlbYHeiJ"
roses = list(data_dir.glob('roses/*'))
PIL.Image.open(str(roses[0]))
# + colab={} colab_type="code" id="RQbZBOTLHiUP"
PIL.Image.open(str(roses[1]))
# + [markdown] colab_type="text" id="DGEqiBbRHnyI"
# And some tulips:
# + colab={} colab_type="code" id="HyQkfPGdHilw"
tulips = list(data_dir.glob('tulips/*'))
PIL.Image.open(str(tulips[0]))
# + colab={} colab_type="code" id="wtlhWJPAHivf"
PIL.Image.open(str(tulips[1]))
# + [markdown] colab_type="text" id="gIjgz7_JIo_m"
# # Load using keras.preprocessing
#
# Let's load these images off disk using the helpful [image_dataset_from_directory](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image_dataset_from_directory) utility. This will take you from a directory of images on disk to a `tf.data.Dataset` in just a couple lines of code. If you like, you can also write your own data loading code from scratch by visiting the [load images](https://www.tensorflow.org/tutorials/load_data/images) tutorial.
# + [markdown] colab_type="text" id="xyDNn9MbIzfT"
# ## Create a dataset
# + [markdown] colab_type="text" id="anqiK_AGI086"
# Define some parameters for the loader:
# + colab={} colab_type="code" id="H74l2DoDI2XD"
batch_size = 32
img_height = 180
img_width = 180
# + [markdown] colab_type="text" id="pFBhRrrEI49z"
# It's good practice to use a validation split when developing your model. We will use 80% of the images for training, and 20% for validation.
# + colab={} colab_type="code" id="fIR0kRZiI_AT"
# Training subset: 80% of the images (the fixed seed makes the split
# reproducible across runs).
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
# + colab={} colab_type="code" id="iscU3UoVJBXj"
# Validation subset: the complementary 20% (same seed and split fraction,
# so the two subsets do not overlap).
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
# + [markdown] colab_type="text" id="WLQULyAvJC3X"
# You can find the class names in the `class_names` attribute on these datasets. These correspond to the directory names in alphabetical order.
# + colab={} colab_type="code" id="ZHAxkHX5JD3k"
class_names = train_ds.class_names
print(class_names)
# + [markdown] colab_type="text" id="_uoVvxSLJW9m"
# ## Visualize the data
#
# Here are the first 9 images from the training dataset.
# + colab={} colab_type="code" id="wBmEA9c0JYes"
import matplotlib.pyplot as plt

# Show the first 9 images of the first training batch in a 3x3 grid,
# labeled with their class names.
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))  # float tensor -> uint8 pixels
        plt.title(class_names[labels[i]])
        plt.axis("off")
# + [markdown] colab_type="text" id="5M6BXtXFJdW0"
# You will train a model using these datasets by passing them to `model.fit` in a moment. If you like, you can also manually iterate over the dataset and retrieve batches of images:
# + colab={} colab_type="code" id="2-MfMoenJi8s"
for image_batch, labels_batch in train_ds:
print(image_batch.shape)
print(labels_batch.shape)
break
# + [markdown] colab_type="text" id="Wj4FrKxxJkoW"
# The `image_batch` is a tensor of the shape `(32, 180, 180, 3)`. This is a batch of 32 images of shape `180x180x3` (the last dimension refers to the RGB color channels). The `labels_batch` is a tensor of the shape `(32,)`; these are the corresponding labels for the 32 images.
#
# You can call `.numpy()` on the `image_batch` and `labels_batch` tensors to convert them to a `numpy.ndarray`.
#
# + [markdown] colab_type="text" id="4Dr0at41KcAU"
# ## Configure the dataset for performance
#
# Let's make sure to use buffered prefetching so we can yield data from disk without having I/O become blocking. These are two important methods you should use when loading data.
#
# `Dataset.cache()` keeps the images in memory after they're loaded off disk during the first epoch. This will ensure the dataset does not become a bottleneck while training your model. If your dataset is too large to fit into memory, you can also use this method to create a performant on-disk cache.
#
# `Dataset.prefetch()` overlaps data preprocessing and model execution while training.
#
# Interested readers can learn more about both methods, as well as how to cache data to disk in the [data performance guide](https://www.tensorflow.org/guide/data_performance#prefetching).
# + colab={} colab_type="code" id="nOjJSm7DKoZA"
AUTOTUNE = tf.data.experimental.AUTOTUNE

# cache(): keep decoded images in memory after the first epoch;
# shuffle(1000): reshuffle with a 1000-image buffer each epoch;
# prefetch(): overlap data preprocessing with model execution.
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
# + [markdown] colab_type="text" id="8GUnmPF4JvEf"
# ## Standardize the data
# + [markdown] colab_type="text" id="e56VXHMWJxYT"
# The RGB channel values are in the `[0, 255]` range. This is not ideal for a neural network; in general you should seek to make your input values small. Here, we will standardize values to be in the `[0, 1]` by using a Rescaling layer.
# + colab={} colab_type="code" id="PEYxo2CTJvY9"
normalization_layer = layers.experimental.preprocessing.Rescaling(1./255)
# + [markdown] colab_type="text" id="8aGpkwFaIw4i"
# Note: The Keras Preprocessing utilities and layers introduced in this section are currently experimental and may change.
# + [markdown] colab_type="text" id="Bl4RmanbJ4g0"
# There are two ways to use this layer. You can apply it to the dataset by calling map:
# + colab={} colab_type="code" id="X9o9ESaJJ502"
normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
image_batch, labels_batch = next(iter(normalized_ds))
first_image = image_batch[0]
# Notice the pixels values are now in `[0,1]`.
print(np.min(first_image), np.max(first_image))
# + [markdown] colab_type="text" id="XWEOmRSBJ9J8"
# Or, you can include the layer inside your model definition, which can simplify deployment. We will use the second approach here.
# + [markdown] colab_type="text" id="XsRk1xCwKZR4"
# Note: we previously resized images using the `image_size` argument of `image_dataset_from_directory`. If you want to include the resizing logic in your model as well, you can use the [Resizing](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing/Resizing) layer.
# + [markdown] colab_type="text" id="WcUTyDOPKucd"
# # Create the model
#
# The model consists of three convolution blocks with a max pool layer in each of them. There's a fully connected layer with 128 units on top of it that is activated by a `relu` activation function. This model has not been tuned for high accuracy, the goal of this tutorial is to show a standard approach.
# + colab={} colab_type="code" id="QR6argA1K074"
num_classes = 5  # one class per flower sub-directory

# Three conv/max-pool blocks followed by a 128-unit dense head.  The final
# Dense layer emits raw logits (no softmax); the loss used later is
# configured with from_logits=True accordingly.
model = Sequential([
    layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
    layers.Conv2D(16, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(num_classes)
])
# + [markdown] colab_type="text" id="EaKFzz72Lqpg"
# ## Compile the model
#
# For this tutorial, choose the `optimizers.Adam` optimizer and `losses.SparseCategoricalCrossentropy` loss function. To view training and validation accuracy for each training epoch, pass the `metrics` argument.
# + colab={} colab_type="code" id="jloGNS1MLx3A"
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
# + [markdown] colab_type="text" id="aMJ4DnuJL55A"
# ## Model summary
#
# View all the layers of the network using the model's `summary` method:
# + colab={} colab_type="code" id="llLYH-BXL7Xe"
model.summary()
# + [markdown] colab_type="text" id="NiYHcbvaL9H-"
# ## Train the model
# + colab={} colab_type="code" id="5fWToCqYMErH"
epochs=10
history = model.fit(
train_ds,
validation_data=val_ds,
epochs=epochs
)
# + [markdown] colab_type="text" id="SyFKdQpXMJT4"
# ## Visualize training results
# + [markdown] colab_type="text" id="dFvOvmAmMK9w"
# Create plots of loss and accuracy on the training and validation sets.
# + colab={} colab_type="code" id="jWnopEChMMCn"
# Pull the per-epoch metrics recorded by model.fit().
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(epochs)

# Left panel: training vs. validation accuracy; right panel: losses.
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# + [markdown] colab_type="text" id="hO_jT7HwMrEn"
# As you can see from the plots, training accuracy and validation accuracy are off by a large margin, and the model has achieved only around 60% accuracy on the validation set.
#
# Let's look at what went wrong and try to increase the overall performance of the model.
# + [markdown] colab_type="text" id="hqtyGodAMvNV"
# ## Overfitting
# + [markdown] colab_type="text" id="ixsz9XFfMxcu"
# In the plots above, the training accuracy is increasing linearly over time, whereas validation accuracy stalls around 60% in the training process. Also, the difference in accuracy between training and validation accuracy is noticeable—a sign of [overfitting](https://www.tensorflow.org/tutorials/keras/overfit_and_underfit).
#
# When there are a small number of training examples, the model sometimes learns from noises or unwanted details from training examples—to an extent that it negatively impacts the performance of the model on new examples. This phenomenon is known as overfitting. It means that the model will have a difficult time generalizing on a new dataset.
#
# There are multiple ways to fight overfitting in the training process. In this tutorial, you'll use *data augmentation* and add *Dropout* to your model.
# + [markdown] colab_type="text" id="BDMfYqwmM1C-"
# ## Data augmentation
# + [markdown] colab_type="text" id="GxYwix81M2YO"
# Overfitting generally occurs when there are a small number of training examples. [Data augmentation](https://www.tensorflow.org/tutorials/images/data_augmentation) takes the approach of generating additional training data from your existing examples by augmenting them with random transformations that yield believable-looking images. This helps expose the model to more aspects of the data and generalize better.
#
# We will implement data augmentation using experimental [Keras Preprocessing Layers](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing/?version=nightly). These can be included inside your model like other layers, and run on the GPU.
# + colab={} colab_type="code" id="9J80BAbIMs21"
# Random horizontal flips plus small random rotations and zooms
# (factor 0.1), applied on-the-fly during training only.
data_augmentation = keras.Sequential(
    [
        layers.experimental.preprocessing.RandomFlip("horizontal",
                                                     input_shape=(img_height,
                                                                  img_width,
                                                                  3)),
        layers.experimental.preprocessing.RandomRotation(0.1),
        layers.experimental.preprocessing.RandomZoom(0.1),
    ]
)
# + [markdown] colab_type="text" id="PN4k1dK3S6eV"
# Let's visualize what a few augmented examples look like by applying data augmentation to the same image several times:
# + colab={} colab_type="code" id="7Z90k539S838"
plt.figure(figsize=(10, 10))
for images, _ in train_ds.take(1):
for i in range(9):
augmented_images = data_augmentation(images)
ax = plt.subplot(3, 3, i + 1)
plt.imshow(augmented_images[0].numpy().astype("uint8"))
plt.axis("off")
# + [markdown] colab_type="text" id="tsjXCBLYYNs5"
# We will use data augmentation to train a model in a moment.
# + [markdown] colab_type="text" id="ZeD3bXepYKXs"
# ## Dropout
#
# Another technique to reduce overfitting is to introduce [Dropout](https://developers.google.com/machine-learning/glossary#dropout_regularization) to the network, a form of *regularization*.
#
# When you apply Dropout to a layer it randomly drops out (by setting the activation to zero) a number of output units from the layer during the training process. Dropout takes a fractional number as its input value, in the form such as 0.1, 0.2, 0.4, etc. This means dropping out 10%, 20% or 40% of the output units randomly from the applied layer.
#
# Let's create a new neural network using `layers.Dropout`, then train it using augmented images.
# + colab={} colab_type="code" id="2Zeg8zsqXCsm"
# Same architecture as the first model, but with the augmentation pipeline
# in front and a Dropout layer (20% of units zeroed during training) to
# reduce overfitting.
model = Sequential([
    data_augmentation,
    layers.experimental.preprocessing.Rescaling(1./255),
    layers.Conv2D(16, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Dropout(0.2),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(num_classes)
])
# + [markdown] colab_type="text" id="L4nEcuqgZLbi"
# ## Compile and train the model
# + colab={} colab_type="code" id="EvyAINs9ZOmJ"
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
# + colab={} colab_type="code" id="wWLkKoKjZSoC"
model.summary()
# + colab={} colab_type="code" id="LWS-vvNaZDag"
epochs = 15
history = model.fit(
train_ds,
validation_data=val_ds,
epochs=epochs
)
# + [markdown] colab_type="text" id="Lkdl8VsBbZOu"
# ## Visualize training results
#
# After applying data augmentation and Dropout, there is less overfitting than before, and training and validation accuracy are closer aligned.
# + colab={} colab_type="code" id="dduoLfKsZVIA"
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# + [markdown] colab_type="text" id="dtv5VbaVb-3W"
# ## Predict on new data
# + [markdown] colab_type="text" id="10buWpJbcCQz"
# Finally, let's use our model to classify an image that wasn't included in the training or validation sets.
# + [markdown] colab_type="text" id="NKgMZ4bDcHf7"
# Note: Data augmentation and Dropout layers are inactive at inference time.
# + colab={} colab_type="code" id="dC40sRITBSsQ"
# Download a sunflower photo that was not part of the training/validation data.
sunflower_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/592px-Red_sunflower.jpg"
sunflower_path = tf.keras.utils.get_file('Red_sunflower', origin=sunflower_url)

# Load and resize to the model's expected input size.
img = keras.preprocessing.image.load_img(
    sunflower_path, target_size=(img_height, img_width)
)
img_array = keras.preprocessing.image.img_to_array(img)
img_array = tf.expand_dims(img_array, 0)  # Create a batch of size 1

predictions = model.predict(img_array)
# The model outputs logits; convert to class probabilities for reporting.
score = tf.nn.softmax(predictions[0])

print(
    "This image most likely belongs to {} with a {:.2f} percent confidence."
    .format(class_names[np.argmax(score)], 100 * np.max(score))
)
|
site/en/tutorials/images/classification.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"is_executing": false}
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
# %matplotlib inline
# Load the CSV dataset as pandas dataframe
df = pd.read_csv('./data/survey_results_public.csv')
# View head of elements at pandas df
df.head()
# + pycharm={"is_executing": false, "name": "#%%\n"}
def clean_data(df):
    """Prepare the raw survey dataframe for salary modeling.

    INPUT
    df - pandas dataframe of raw survey responses

    OUTPUT
    X - a matrix holding all of the variables to consider when predicting the response
    y - the corresponding response vector (Salary)

    Cleaning steps:
    1. Drop all the rows with no salary
    2. Create y as the Salary column
    3. Drop the Salary, Respondent, and ExpectedSalary columns from X
    4. Fill each numeric column's missing values with the column mean
    5. Create dummy columns for the categorical variables, dropping the originals
    """
    # Rows without the response variable cannot be used for supervised learning.
    df = df.dropna(subset=['Salary'], axis=0)
    y = df['Salary']

    # Respondent is just an identifier; ExpectedSalary would leak the target.
    df = df.drop(['Respondent', 'ExpectedSalary', 'Salary'], axis=1)

    # Impute numeric columns with their mean.  Bug fix: assign the result
    # back instead of calling fillna(inplace=True) on a column selection --
    # that chained-assignment pattern is unreliable and breaks under
    # pandas Copy-on-Write.
    num_vars = df.select_dtypes(include=['float', 'int']).columns
    for col in num_vars:
        df[col] = df[col].fillna(df[col].mean())

    # Dummy-encode the categorical variables; drop_first avoids perfect
    # collinearity between the dummy columns.
    cat_vars = df.select_dtypes(include=['object']).columns
    for var in cat_vars:
        dummies = pd.get_dummies(df[var], prefix=var, prefix_sep='_', drop_first=True)
        df = pd.concat([df.drop(var, axis=1), dummies], axis=1)

    X = df
    return X, y
# + pycharm={"is_executing": false, "name": "#%%\n"}
#Use the function to create X and y
X, y = clean_data(df)
# + pycharm={"name": "#%%\n"}
def find_optimal_lm_mod(X, y, cutoffs, test_size=.30, random_state=42, plot=True):
    """
    INPUT
    X - pandas dataframe, X matrix
    y - pandas dataframe, response variable
    cutoffs - list of ints, cutoff for number of non-zero values in dummy categorical vars
    test_size - float between 0 and 1, default 0.3, determines the proportion of data as test data
    random_state - int, default 42, controls random state for train_test_split
    plot - boolean, default True, True to plot result

    OUTPUT
    r2_scores_test - list of floats of r2 scores on the test data
    r2_scores_train - list of floats of r2 scores on the train data
    lm_model - model object from sklearn, refit using the best-scoring cutoff
    X_train, X_test, y_train, y_test - output from sklearn train test split used for optimal model
    """
    r2_scores_test, r2_scores_train, num_feats, results = [], [], [], dict()

    for cutoff in cutoffs:
        # Keep only the columns with more than `cutoff` non-zero values
        # (boolean mask directly; the original `np.where(... == True)` was
        # a redundant detour).
        reduce_X = X.loc[:, X.sum() > cutoff]
        num_feats.append(reduce_X.shape[1])

        # Split the data into train and test.
        X_train, X_test, y_train, y_test = train_test_split(
            reduce_X, y, test_size=test_size, random_state=random_state)

        # Fit the model and obtain predicted responses.
        # NOTE: `normalize=True` was deprecated in scikit-learn 1.0 and
        # removed in 1.2; for plain (unregularized) OLS it did not change
        # the returned coefficients or predictions, so it is dropped here.
        lm_model = LinearRegression()
        lm_model.fit(X_train, y_train)
        y_test_preds = lm_model.predict(X_test)
        y_train_preds = lm_model.predict(X_train)

        # Record the r2 values for this cutoff.
        r2_scores_test.append(r2_score(y_test, y_test_preds))
        r2_scores_train.append(r2_score(y_train, y_train_preds))
        results[str(cutoff)] = r2_score(y_test, y_test_preds)

    if plot:
        plt.plot(num_feats, r2_scores_test, label="Test", alpha=.5)
        plt.plot(num_feats, r2_scores_train, label="Train", alpha=.5)
        plt.xlabel('Number of Features')
        plt.ylabel('Rsquared')
        plt.title('Rsquared by Number of Features')
        plt.legend(loc=1)
        plt.show()

    # Refit on the cutoff whose test-set r2 was highest.
    best_cutoff = max(results, key=results.get)
    reduce_X = X.loc[:, X.sum() > int(best_cutoff)]
    num_feats.append(reduce_X.shape[1])

    X_train, X_test, y_train, y_test = train_test_split(
        reduce_X, y, test_size=test_size, random_state=random_state)

    lm_model = LinearRegression()
    lm_model.fit(X_train, y_train)

    return r2_scores_test, r2_scores_train, lm_model, X_train, X_test, y_train, y_test
# + pycharm={"name": "#%%\n"}
# Cutoffs here pertains to the number of missing values allowed in the used columns.
# Therefore, lower values for the cutoff provides more predictors in the model.
cutoffs = [5000, 3500, 2500, 1000, 100, 50, 30, 25]
r2_scores_test, r2_scores_train, lm_model, X_train, X_test, y_train, y_test = find_optimal_lm_mod(X, y, cutoffs)
# + pycharm={"name": "#%%\n"}
def coef_weights(coefficients, X_train):
    """
    INPUT:
    coefficients - the coefficients of the linear model
    X_train - the training data, so the column names can be used
    OUTPUT:
    coefs_df - a dataframe holding the coefficient, estimate, and abs(estimate)

    Provides a dataframe that can be used to understand the most influential
    coefficients in a linear model by providing the coefficient estimates
    along with the name of the variable attached to the coefficient, sorted
    by absolute magnitude (largest first).
    """
    coefs_df = pd.DataFrame()
    coefs_df['est_int'] = X_train.columns
    # Bug fix: use the `coefficients` argument rather than reaching out to
    # the global `lm_model.coef_`, so the function works for any model.
    coefs_df['coefs'] = coefficients
    coefs_df['abs_coefs'] = np.abs(coefficients)
    coefs_df = coefs_df.sort_values('abs_coefs', ascending=False)
    return coefs_df
# + pycharm={"name": "#%%\n"}
# Use the function
coef_df = coef_weights(lm_model.coef_, X_train)
# A quick look at the top results
coef_df.head(20)
# -
|
Stackoverflow_Survay_Results_Analytics.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import matplotlib.pyplot as plt
import numpy as np
from pandas.io.parsers import read_csv
from sklearn.utils import shuffle
from IPython.display import clear_output
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD
from keras.layers import Conv2D, MaxPooling2D,Flatten, Dropout
from keras.models import model_from_json
exec(open("utils.py").read())
exec(open("cnn-model.py").read())
exec(open("image_aug.py").read())
FTRAIN = 'training.csv'
FTEST = 'test.csv'
FIdLookup = 'IdLookupTable.csv'
# -
## load data
X, y = load2d()
print("X.shape == {} y.shape == {}".format(X.shape, y.shape))
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
print(X_train.shape)
# +
from keras.preprocessing.image import ImageDataGenerator
generator = ImageDataGenerator()
# FlipPic is defined in one of the exec()'d helper scripts above --
# presumably it mirrors images together with their keypoints; confirm in
# image_aug.py.
modifier = FlipPic()

# Plot up to 9 augmented samples generated from the first two training images.
fig = plt.figure(figsize=(7,7))
count = 1
for batch in generator.flow(X_train[:2],y_train[:2]):
    X_batch, y_batch = modifier.fit(*batch)
    ax = fig.add_subplot(3,3, count,xticks=[],yticks=[])
    plot_sample(X_batch[0],y_batch[0],ax)
    count += 1
    if count == 10:  # generator.flow loops forever; stop after 9 panels
        break
plt.show()
# -
# %%time
model3 = SimpleCNN()
hist3 = fit_model(model3,
modifier,
train=(X_train,y_train),
validation=(X_val,y_val),
batch_size=32,
epochs=2000,
print_every=100)
clear_output()
print("done...", end=" ")
plot_loss(hist3,"model+fliping",plt)
plt.legend()
plt.grid()
plt.yscale("log")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.show()
save_model(model3,"model-fliping")
# +
shiftFlipPic = ShiftFlipPic(prop=0.1)
fig = plt.figure(figsize=(7,7))
count = 1
for batch in generator.flow(X_train[:2],y_train[:2]):
X_batch, y_batch = shiftFlipPic.fit(*batch)
ax = fig.add_subplot(3,3, count,xticks=[],yticks=[])
plot_sample(X_batch[0],y_batch[0],ax)
count += 1
if count == 10:
break
plt.show()
# -
del model3
# %%time
model4 = SimpleCNN()
# Train with the shift+flip augmentation; `patience` suggests early stopping
# inside fit_model -- verify in cnn-model.py.
hist4 = fit_model(model4,
                  shiftFlipPic,
                  train=(X_train,y_train),
                  validation=(X_val,y_val),
                  batch_size=32,
                  epochs=2000,
                  print_every=50,
                  patience=100)
clear_output()
print("done...", end=" ")
# +
plt.figure(figsize=(8,8))
plot_loss(hist3,"model 3",plt)
plot_loss(hist4,"model 4",plt)
plt.legend()
plt.grid()
plt.yscale("log")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.show()
# -
save_model(model4,"model-fliping-shift")
|
02.Facial-keypoint-image-augmentation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from IPython.display import Image
# # CNTK 599A: Sequence to Sequence Networks with Text Data
#
#
# ## Introduction and Background
#
# This hands-on tutorial will take you through both the basics of sequence-to-sequence networks, and how to implement them in the Microsoft Cognitive Toolkit. In particular, we will implement a sequence-to-sequence model to perform grapheme to phoneme translation. We will start with some basic theory and then explain the data in more detail, and how you can download it.
#
# Andrej Karpathy has a [nice visualization](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) of the five paradigms of neural network architectures:
# Figure 1: the five neural-network input/output paradigms (external image).
Image(url="http://cntk.ai/jup/paradigms.jpg", width=750)
# In this tutorial, we are going to be talking about the fourth paradigm: many-to-many, also known as sequence-to-sequence networks. The input is a sequence with a dynamic length, and the output is also a sequence with some dynamic length. It is the logical extension of the many-to-one paradigm in that previously we were predicting some category (which could easily be one of `V` words where `V` is an entire vocabulary) and now we want to predict a whole sequence of those categories.
#
# The applications of sequence-to-sequence networks are nearly limitless. It is a natural fit for machine translation (e.g. English input sequences, French output sequences); automatic text summarization (e.g. full document input sequence, summary output sequence); word to pronunciation models (e.g. character [grapheme] input sequence, pronunciation [phoneme] output sequence); and even parse tree generation (e.g. regular text input, flat parse tree output).
#
# ## Basic theory
#
# A sequence-to-sequence model consists of two main pieces: (1) an encoder; and (2) a decoder. Both the encoder and the decoder are recurrent neural network (RNN) layers that can be implemented using a vanilla RNN, an LSTM, or GRU cells (here we will use LSTM). In the basic sequence-to-sequence model, the encoder processes the input sequence into a fixed representation that is fed into the decoder as a context. The decoder then uses some mechanism (discussed below) to decode the processed information into an output sequence. The decoder is a language model that is augmented with some "strong context" by the encoder, and so each symbol that it generates is fed back into the decoder for additional context (like a traditional LM). For an English to German translation task, the most basic setup might look something like this:
# Figure 2: basic encoder-decoder setup for sequence-to-sequence translation (external image).
Image(url="http://cntk.ai/jup/s2s.png", width=700)
# The basic sequence-to-sequence network passes the information from the encoder to the decoder by initializing the decoder RNN with the final hidden state of the encoder as its initial hidden state. The input is then a "sequence start" tag (`<s>` in the diagram above) which primes the decoder to start generating an output sequence. Then, whatever word (or note or image, etc.) it generates at that step is fed in as the input for the next step. The decoder keeps generating outputs until it hits the special "end sequence" tag (`</s>` above).
#
# A more complex and powerful version of the basic sequence-to-sequence network uses an attention model. While the above setup works well, it can start to break down when the input sequences get long. At each step, the hidden state `h` is getting updated with the most recent information, and therefore `h` might be getting "diluted" in information as it processes each token. Further, even with a relatively short sequence, the last token will always get the last say and therefore the thought vector will be somewhat biased/weighted towards that last word. To deal with this problem, we use an "attention" mechanism that allows the decoder to look not only at all of the hidden states from the input, but it also learns which hidden states, for each step in decoding, to put the most weight on. We will discuss an attention implementation in a later version of this tutorial.
# ## Problem: Grapheme-to-Phoneme Conversion
#
# The [grapheme](https://en.wikipedia.org/wiki/Grapheme) to [phoneme](https://en.wikipedia.org/wiki/Phoneme) problem is a translation task that takes the letters of a word as the input sequence (the graphemes are the smallest units of a writing system) and outputs the corresponding phonemes; that is, the units of sound that make up a language. In other words, the system aims to generate an unambiguous representation of how to pronounce a given input word.
#
# ### Example
#
# The graphemes or the letters are translated into corresponding phonemes:
#
# > **Grapheme** : **|** T **|** A **|** N **|** G **|** E **|** R **|**
# **Phonemes** : **|** ~T **|** ~AE **|** ~NG **|** ~ER **|** null **|** null **|**
#
#
#
# ## Task and Model Structure
#
# As discussed above, the task we are interested in solving is creating a model that takes some sequence as an input, and generates an output sequence based on the contents of the input. The model's job is to learn the mapping from the input sequence to the output sequence that it will generate. The job of the encoder is to come up with a good representation of the input that the decoder can use to generate a good output. For both the encoder and the decoder, the LSTM does a good job at this.
#
# We will use the LSTM implementation from the CNTK Blocks library. This implements the "smarts" of the LSTM and we can more or less think of it as a black box. What is important to understand, however, is that there are two pieces to think of when implementing an RNN: the recurrence, which is the unrolled network over a sequence, and the block, which is the piece of the network run for each element of the sequence. We only need to implement the recurrence.
#
# It helps to think of the recurrence as a function that keeps calling `step(x)` on the block (in our case, LSTM). At a high level, it looks like this:
#
# ```
# class LSTM {
# float hidden_state
#
# init(initial_value):
# hidden_state = initial_value
#
# step(x):
# hidden_state = LSTM_function(x, hidden_state)
# return hidden_state
# }
# ```
#
# So, each call to the `step(x)` function takes some input `x`, modifies the internal `hidden_state`, and returns it. Therefore, with every input `x`, the value of the `hidden_state` evolves. Below we will import some required functionality, and then implement the recurrence that makes use of this mechanism.
# ## Importing CNTK and other useful libraries
#
# CNTK is a Python module that contains several submodules like `io`, `learner`, `graph`, etc. We make extensive use of numpy as well.
# +
from __future__ import print_function
import numpy as np
import os
import cntk as C
# -
# In the block below, we check if we are running this notebook in the CNTK internal test machines by looking for environment variables defined there. We then select the right target device (GPU vs CPU) to test this notebook. In other cases, we use CNTK's default policy to use the best available device (GPU, if available, else CPU).
# Honor the TEST_DEVICE environment variable set on the CNTK test machines;
# when it is absent, fall back to CNTK's default device-selection policy
# (best available device: GPU if present, else CPU).
test_device = os.environ.get('TEST_DEVICE')
if test_device is not None:
    chosen = C.device.cpu() if test_device == 'cpu' else C.device.gpu(0)
    C.device.try_set_default_device(chosen)
# ## Downloading the data
#
# In this tutorial we will use a lightly pre-processed version of the CMUDict (version 0.7b) dataset from http://www.speech.cs.cmu.edu/cgi-bin/cmudict. The CMUDict data is the Carnegie Mellon University Pronouncing Dictionary is an open-source machine-readable pronunciation dictionary for North American English. The data is in the CNTKTextFormatReader format. Here is an example sequence pair from the data, where the input sequence (S0) is in the left column, and the output sequence (S1) is on the right:
#
# ```
# 0 |S0 3:1 |# <s> |S1 3:1 |# <s>
# 0 |S0 4:1 |# A |S1 32:1 |# ~AH
# 0 |S0 5:1 |# B |S1 36:1 |# ~B
# 0 |S0 4:1 |# A |S1 31:1 |# ~AE
# 0 |S0 7:1 |# D |S1 38:1 |# ~D
# 0 |S0 12:1 |# I |S1 47:1 |# ~IY
# 0 |S0 1:1 |# </s> |S1 1:1 |# </s>
# ```
#
# The code below will download the required files (training, the single sequence above for validation, and a small vocab file) and put them in a local folder (the training file is ~34 MB, testing is ~4MB, and the validation file and vocab file are both less than 1KB).
# +
import requests
def download(url, filename):
    """Download `url` and write it to the local file `filename`.

    Streams the body in 64 KB chunks. The original called
    ``iter_content()`` with its default ``chunk_size`` of 1, which yields
    (and writes) one byte at a time -- pathologically slow for the
    multi-megabyte training files fetched here.
    """
    response = requests.get(url, stream=True)
    with open(filename, "wb") as handle:
        for data in response.iter_content(chunk_size=65536):
            handle.write(data)
# Prefer the data directory shipped with the CNTK examples layout...
data_dir = os.path.join('..', 'Examples', 'SequenceToSequence', 'CMUDict', 'Data')
# If above directory does not exist, just use current.
if not os.path.exists(data_dir):
    data_dir = '.'
valid_file = os.path.join(data_dir, 'tiny.ctf')
train_file = os.path.join(data_dir, 'cmudict-0.7b.train-dev-20-21.ctf')
vocab_file = os.path.join(data_dir, 'cmudict-0.7b.mapping')
files = [valid_file, train_file, vocab_file]
for file in files:
    if os.path.exists(file):
        print("Reusing locally cached: ", file)
    else:
        # NOTE(review): `file` is a full path (includes data_dir), so the URL
        # embeds the local path component; this presumably only resolves
        # correctly when data_dir == '.' -- confirm against the repo layout.
        url = "https://github.com/Microsoft/CNTK/blob/release/2.5/Examples/SequenceToSequence/CMUDict/Data/%s?raw=true"%file
        print("Starting download:", file)
        download(url, file)
        print("Download completed")
# -
# ### Select the notebook run mode
#
# There are two run modes:
# - *Fast mode*: `isFast` is set to `True`. This is the default mode for the notebooks, which means we train for fewer iterations or train / test on limited data. This ensures functional correctness of the notebook though the models produced are far from what a completed training would produce.
#
# - *Slow mode*: We recommend the user to set this flag to `False` once the user has gained familiarity with the notebook content and wants to gain insight from running the notebooks for a longer period with different parameters for training.
isFast = True
# ## Reader
#
# To efficiently collect our data, randomize it for training, and pass it to the network, we use the CNTKTextFormat reader. We will create a small function that will be called when training (or testing) that defines the names of the streams in our data, and how they are referred to in the raw training data.
# +
# Helper function to load the model vocabulary file
def get_vocab(path):
    """Load the vocabulary mapping file.

    Each line of the file is one vocabulary token; surrounding whitespace
    is stripped. Returns a tuple ``(vocab, i2w)`` where ``vocab`` is the
    ordered list of tokens and ``i2w`` maps each token's line index to
    the token (used to print output sequences in plaintext).
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original left `open(path)` dangling until GC).
    with open(path) as f:
        vocab = [w.strip() for w in f]
    i2w = { i:ch for i,ch in enumerate(vocab) }
    return (vocab, i2w)
# Read vocabulary data and generate their corresponding indices
vocab, i2w = get_vocab(vocab_file)
# Graphemes and phonemes share a single vocabulary file, so the input and
# label vocabularies have the same size.
input_vocab_size = len(vocab)
label_vocab_size = len(vocab)
# -
# Print vocab and the corresponding mapping to the phonemes
print("Vocabulary size is", len(vocab))
print("First 15 letters are:")
print(vocab[:15])
print()
print("Print dictionary with the vocabulary mapping:")
print(i2w)
# We will use the above to create a reader for our training data. Let's create it now:
# +
def create_reader(path, randomize, size=C.io.INFINITELY_REPEAT):
    """Build a CNTKTextFormat minibatch source over the CTF file at `path`.

    The 'S0' column is exposed as the `features` stream and 'S1' as the
    `labels` stream; both are sparse one-hot vectors over the shared
    vocabulary. `randomize` controls shuffling; `size` caps max_samples.
    """
    stream_defs = C.io.StreamDefs(
        features=C.io.StreamDef(field='S0', shape=input_vocab_size, is_sparse=True),
        labels=C.io.StreamDef(field='S1', shape=label_vocab_size, is_sparse=True),
    )
    deserializer = C.io.CTFDeserializer(path, stream_defs)
    return C.io.MinibatchSource(deserializer, randomize=randomize, max_samples=size)
# Train data reader (shuffled, repeats indefinitely)
train_reader = create_reader(train_file, True)
# Validation/Test data reader (deterministic order for stable evaluation)
valid_reader = create_reader(valid_file, False)
# -
# ### Now let's set our model hyperparameters...
# Our input vocabulary size is 69, and since the graphemes and phonemes share one vocabulary, the label vocabulary has the same size. Additionally we have 1 hidden layer with 128 nodes.
# +
model_dir = "." # we downloaded our data to the local directory above # TODO check me
# model dimensions
input_vocab_dim = input_vocab_size
label_vocab_dim = label_vocab_size
hidden_dim = 128  # width of each LSTM layer's hidden (and cell) state
num_layers = 1    # single-layer encoder/decoder to start; raised later in the exercises
# -
# ## Step 1: setup the input to the network
#
# ### Dynamic axes in CNTK (Key concept)
#
# One of the important concepts in understanding CNTK is the idea of two types of axes:
# - **static axes**, which are the traditional axes of a variable's shape, and
# - **dynamic axes**, which have dimensions that are unknown until the variable is bound to real data at computation time.
#
# The dynamic axes are particularly important in the world of recurrent neural networks. Instead of having to decide a maximum sequence length ahead of time, padding your sequences to that size, and wasting computation, CNTK's dynamic axes allow for variable sequence lengths that are automatically packed in minibatches to be as efficient as possible.
#
# When setting up sequences, there are *two dynamic axes* that are important to consider. The first is the *batch axis*, which is the axis along which multiple sequences are batched. The second is the dynamic axis particular to that sequence. The latter is specific to a particular input because of variable sequence lengths in your data. For example, in sequence to sequence networks, we have two sequences: the **input sequence**, and the **output (or 'label') sequence**. One of the things that makes this type of network so powerful is that the length of the input sequence and the output sequence do not have to correspond to each other. Therefore, both the input sequence and the output sequence require their own unique dynamic axis.
#
# When defining the input to a network, we set up the required dynamic axes and the shape of the input variables. Below, we define the shape (vocabulary size) of the inputs, create their dynamic axes, and finally create input variables that represent input nodes in our network.
# +
# Source and target inputs to the model. Each gets its own dynamic
# (sequence) axis because input and output lengths are independent.
input_seq_axis = C.Axis('inputAxis')
label_seq_axis = C.Axis('labelAxis')
raw_input = C.sequence.input_variable(shape=(input_vocab_dim), sequence_axis=input_seq_axis, name='raw_input')
raw_labels = C.sequence.input_variable(shape=(label_vocab_dim), sequence_axis=label_seq_axis, name='raw_labels')
# -
# ### Questions
#
# 1. Why do the shapes of the input variables correspond to the size of our dictionaries in sequence to sequence networks?
# ## Step 2: define the network
#
# As discussed before, the sequence-to-sequence network is, at its most basic, an RNN encoder followed by an RNN decoder, and a dense output layer. We could do this in a few lines with the layers library, but let's go through things in a little more detail without adding too much complexity. The first step is to perform some manipulations on the input data; let's look at the code below and then discuss what we're doing.
# +
# Instantiate the sequence to sequence translation model
input_sequence = raw_input
# Drop the sentence start token from the label, for decoder training
label_sequence = C.sequence.slice(raw_labels,
                                   1, 0, name='label_sequence') # <s> A B C </s> --> A B C </s>
label_sentence_start = C.sequence.first(raw_labels)   # <s>
# mask that is 1 only at the first step of the label sequence
is_first_label = C.sequence.is_first(label_sequence)  # 1 0 0 0 ...
label_sentence_start_scattered = C.sequence.scatter(  # <s> 0 0 0 ... (up to the length of label_sequence)
    label_sentence_start, is_first_label)
# -
# We have two input variables, `raw_input` and `raw_labels`. Typically, the labels would not have to be part of the network definition because they would only be used in a criterion node when we compare the network's output with the ground truth. However, in sequence-to-sequence networks, the labels themselves form part of the input to the network during training as they are fed as the input into the decoder.
#
# To make use of these input variables, we will pass them through computation nodes. We first set `input_sequence` to `raw_input` as a convenience step. We then perform several modifications to `label_sequence` so that it will work with our network. For now you'll just have to trust that we will make good use of this stuff later.
#
# First, we slice the first element off of `label_sequence` so that it's missing the sentence-start token. This is because the decoder will always first be primed with that token, both during training and evaluation. When the ground truth isn't fed into the decoder, we will still feed in a sentence-start token, so we want to consistently view the input to the decoder as a sequence that starts with an actual value.
#
# Then, we get `label_sequence_start` by getting the `first` element from the sequence `raw_labels`. This will be used to compose a sequence that is the first input to the decoder regardless of whether we're training or decoding. Finally, the last two statements set up an actual sequence, with the correct dynamic axis, to be fed into the decoder. The function `sequence.scatter` takes the contents of `label_sentence_start` (which is `<s>`) and turns it into a sequence with the first element containing the sequence start symbol and the rest of the elements containing 0's.
# ### Let's create the LSTM recurrence
def LSTM_layer(input,
               output_dim,
               recurrence_hook_h=C.sequence.past_value,
               recurrence_hook_c=C.sequence.past_value):
    """Wrap a single LSTM cell in a sequence recurrence.

    input: sequence node supplying the per-step input x.
    output_dim: dimensionality of the hidden state and cell state.
    recurrence_hook_h / recurrence_hook_c: callables that, given a node,
        produce the value fed back as the previous hidden/cell state.
        Defaults to `past_value` (value at t-1); pass `future_value` to
        run the recurrence backwards over the sequence.

    Returns a tuple (h, c) of `combine`d nodes: the hidden-state and
    cell-state output sequences.
    """
    # we first create placeholders for the hidden state and cell state which we don't have yet
    dh = C.placeholder(shape=(output_dim), dynamic_axes=input.dynamic_axes)
    dc = C.placeholder(shape=(output_dim), dynamic_axes=input.dynamic_axes)
    # we now create an LSTM_cell function and call it with the input and placeholders
    LSTM_cell = C.layers.LSTM(output_dim)
    f_x_h_c = LSTM_cell(dh, dc, input)
    h_c = f_x_h_c.outputs
    # we setup the recurrence by specifying the type of recurrence (by default it's `past_value` -- the previous value)
    h = recurrence_hook_h(h_c[0])
    c = recurrence_hook_c(h_c[1])
    # close the recurrence loop: the placeholders become the hooked-back outputs
    replacements = { dh: h.output, dc: c.output }
    f_x_h_c.replace_placeholders(replacements)
    h = f_x_h_c.outputs[0]
    c = f_x_h_c.outputs[1]
    # and finally we return the hidden state and cell state as functions (by using `combine`)
    return C.combine([h]), C.combine([c])
# ### Exercise 1: Create the encoder
#
# We will use the LSTM recurrence that we defined just above. Remember that its function signature is:
#
# `def LSTM_layer(input, output_dim, recurrence_hook_h=sequence.past_value, recurrence_hook_c=sequence.past_value):`
#
# and it returns a tuple `(hidden_state, hidden_cell)`. We will complete the following four exercises below. If possible, try them out before looking at the answers.
#
# 1. Create the encoder (set the `output_dim` and `cell_dim` to `hidden_dim` which we defined earlier).
# 2. Set `num_layers` to something higher than 1 and create a stack of LSTMs to represent the encoder.
# 3. Get the output of the encoder and put it into the right form to be passed into the decoder [hard]
# 4. Reverse the order of the `input_sequence` (this has been shown to help especially in machine translation)
# +
# NOTE(review): exercise code -- each numbered step stands alone, so step 4
# rebuilds a single-layer encoder from the raw input and discards the
# 2-layer stack built in step 2. Only the last assignments feed the rest
# of the notebook.
# 1.
# Create the encoder (set the output_dim to hidden_dim which we defined earlier).
(encoder_output_h, encoder_output_c) = LSTM_layer(input_sequence, hidden_dim)
# 2.
# Set num_layers to something higher than 1 and create a stack of LSTMs to represent the encoder.
num_layers = 2
output_h = C.alias(input_sequence) # get a copy of the input_sequence
for i in range(0, num_layers):
    (output_h, output_c) = LSTM_layer(output_h.output, hidden_dim)
# 3.
# Get the output of the encoder and put it into the right form to be passed into the decoder [hard]
thought_vector_h = C.sequence.first(output_h)
thought_vector_c = C.sequence.first(output_c)
# broadcast the fixed thought vector along the label sequence's dynamic axis
thought_vector_broadcast_h = C.sequence.broadcast_as(thought_vector_h, label_sequence)
thought_vector_broadcast_c = C.sequence.broadcast_as(thought_vector_c, label_sequence)
# 4.
# Reverse the order of the input_sequence (this has been shown to help especially in machine translation)
# (future_value hooks make the recurrence run right-to-left over the input)
(encoder_output_h, encoder_output_c) = LSTM_layer(input_sequence,
                                                  hidden_dim,
                                                  C.sequence.future_value,
                                                  C.sequence.future_value)
# -
# ### Exercise 2: Create the decoder
#
# In our basic version of the sequence-to-sequence network, the decoder generates an output sequence given the input sequence by setting the initial state of the decoder to the final hidden state of the encoder. The hidden state is represented by a tuple `(encoder_h, encoder_c)` where `h` represents the output hidden state and `c` represents the value of the LSTM cell.
#
# Besides setting the initial state of the decoder, we also need to give the decoder LSTM some input. The first element will always be the special sequence start tag `<s>`. After that, there are two ways that we want to wire up the decoder's input: one during training, and the other during evaluation (i.e. generating sequences on the trained network).
#
# For training, the input to the decoder is the output sequence from the training data, also known as the label(s) for the input sequence. During evaluation, we will instead redirect the output from the network back into the decoder as its history. Let's first set up the input for training...
decoder_input = C.element_select(is_first_label, label_sentence_start_scattered, C.sequence.past_value(label_sequence))
# Above, we use the function `element_select` which will return one of two options given the condition `is_first_label`. Remember that we're working with sequences so when the decoder LSTM is run its input will be unrolled along with the network. The above allows us to have a dynamic input that will return a specific element given what time step we're currently processing.
#
# Therefore, the `decoder_input` will be `label_sentence_start_scattered` (which is simply `<s>`) when we are at the first time step, and otherwise it will return the `past_value` (i.e. the previous element given what time step we're currently at) of `label_sequence`.
#
# Next, we need to setup our actual decoder. Before, for the encoder, we did the following:
# Recap of the encoder call (exposition); default past_value hooks spelled out.
(output_h, output_c) = LSTM_layer(input_sequence, hidden_dim,
                                  recurrence_hook_h=C.sequence.past_value,
                                  recurrence_hook_c=C.sequence.past_value)
# To be able to set the first hidden state of the decoder to be equal to the final hidden state of the encoder, we can leverage the parameters `recurrence_hook_h` and `recurrence_hook_c`. The default `past_value` is a function that returns, for time `t`, the element in the sequence at time `t-1`. See if you can figure out how to set that up.
#
# 1. Create the recurrence hooks for the decoder LSTM.
# * Hint: you'll have to create a `lambda operand:` and you will make use of the `is_first_label` mask we used earlier and the `thought_vector_broadcast_h` and `thought_vector_broadcast_c` representations of the output of the encoder.
#
# 2. With your recurrence hooks, create the decoder.
# * Hint: again we'll use the `LSTMP_component_with_self_stabilization()` function and again use `hidden_dim` for the `output_dim` and `cell_dim`.
#
# 3. Create a decoder with multiple layers. Note that you will have to use different recurrence hooks for the lower layers that feed back into the stack of layers.
# +
# 1.
# Create the recurrence hooks for the decoder LSTM: at the first step the
# "previous state" is the encoder's thought vector; afterwards it is the
# decoder's own value at t-1.
recurrence_hook_h = lambda operand: C.element_select(is_first_label,
                                                     thought_vector_broadcast_h,
                                                     C.sequence.past_value(operand))
recurrence_hook_c = lambda operand: C.element_select(is_first_label,
                                                     thought_vector_broadcast_c,
                                                     C.sequence.past_value(operand))
# 2.
# With your recurrence hooks, create the decoder.
(decoder_output_h, decoder_output_c) = LSTM_layer(decoder_input, hidden_dim, recurrence_hook_h, recurrence_hook_c)
# 3.
# Create a decoder with multiple layers.
# Note that you will have to use different recurrence hooks for the lower layers
num_layers = 3
decoder_output_h = C.alias(decoder_input)
for i in range(0, num_layers):
    # only layer 0 (i == 0) is primed with the thought vector; upper layers
    # use the plain past_value recurrence
    if (i > 0):
        recurrence_hook_h = C.sequence.past_value
        recurrence_hook_c = C.sequence.past_value
    else:
        recurrence_hook_h = lambda operand: C.element_select(
            is_first_label, thought_vector_broadcast_h, C.sequence.past_value(operand))
        recurrence_hook_c = lambda operand: C.element_select(
            is_first_label, thought_vector_broadcast_c, C.sequence.past_value(operand))
    (decoder_output_h, decoder_output_c) = LSTM_layer(decoder_output_h.output, hidden_dim,
                                                      recurrence_hook_h, recurrence_hook_c)
# -
# ### Exercise 3: Fully connected layer (network output)
#
# Now we're almost at the end of defining the network. All we need to do is take the output of the decoder, and run it through a linear layer. Ultimately it will be put into a `softmax` to get a probability distribution over the possible output words. However, we will include that as part of our criterion nodes (below).
#
# 1. Add the linear layer (a weight matrix, a bias parameter, a times, and a plus) to get the final output of the network
# +
# 1.
# Add the linear layer: a learned projection of the decoder hidden state to
# vocabulary-sized logits (softmax is applied later, inside the criterion).
W = C.parameter(shape=(decoder_output_h.shape[0], label_vocab_dim), init=C.glorot_uniform())
B = C.parameter(shape=(label_vocab_dim), init=0)
z = C.plus(B, C.times(decoder_output_h, W))
# -
# ## Putting the model together
#
# With the above we have defined some of the network and asked you to define parts of it as exercises. Here let's put the whole thing into a function called `create_model()`. Remember, all this does is create a skeleton of the network that defines how data will flow through it. No data is running through it yet.
def create_model():
    """Build the full encoder-decoder graph and return the logits node.

    Reads module-level hyperparameters (input_vocab_dim, label_vocab_dim,
    hidden_dim, num_layers). The 'label_sequence' and
    'decoder_history_hook' nodes are named so they can be found (and, for
    the hook, re-wired to the network's own output) after construction.
    """
    # Source and target inputs to the model
    input_seq_axis = C.Axis('inputAxis')
    label_seq_axis = C.Axis('labelAxis')
    raw_input = C.sequence.input_variable(
        shape=(input_vocab_dim), sequence_axis=input_seq_axis, name='raw_input')
    raw_labels = C.sequence.input_variable(
        shape=(label_vocab_dim), sequence_axis=label_seq_axis, name='raw_labels')
    # Instantiate the sequence to sequence translation model
    input_sequence = raw_input
    # Drop the sentence start token from the label, for decoder training
    label_sequence = C.sequence.slice(raw_labels, 1, 0,
                                      name='label_sequence') # <s> A B C </s> --> A B C </s>
    label_sentence_start = C.sequence.first(raw_labels)        # <s>
    # Setup primer for decoder
    is_first_label = C.sequence.is_first(label_sequence)       # 1 0 0 0 ...
    label_sentence_start_scattered = C.sequence.scatter(
        label_sentence_start, is_first_label)
    # Encoder: future_value hooks make each layer run right-to-left, so
    # sequence.first of its output summarizes the whole input sequence.
    stabilize = C.layers.Stabilizer()
    encoder_output_h = stabilize(input_sequence)
    for i in range(0, num_layers):
        (encoder_output_h, encoder_output_c) = LSTM_layer(
            encoder_output_h.output, hidden_dim, C.sequence.future_value, C.sequence.future_value)
    # Prepare encoder output to be used in decoder
    thought_vector_h = C.sequence.first(encoder_output_h)
    thought_vector_c = C.sequence.first(encoder_output_c)
    thought_vector_broadcast_h = C.sequence.broadcast_as(
        thought_vector_h, label_sequence)
    thought_vector_broadcast_c = C.sequence.broadcast_as(
        thought_vector_c, label_sequence)
    # Decoder: during training the history is the ground-truth labels; the
    # named alias lets evaluation re-wire it to the network's own output.
    decoder_history_hook = C.alias(label_sequence, name='decoder_history_hook') # copy label_sequence
    decoder_input = C.element_select(is_first_label, label_sentence_start_scattered, C.sequence.past_value(
        decoder_history_hook))
    decoder_output_h = stabilize(decoder_input)
    for i in range(0, num_layers):
        # only the bottom layer is primed with the encoder's thought vector
        if (i > 0):
            recurrence_hook_h = C.sequence.past_value
            recurrence_hook_c = C.sequence.past_value
        else:
            recurrence_hook_h = lambda operand: C.element_select(
                is_first_label, thought_vector_broadcast_h, C.sequence.past_value(operand))
            recurrence_hook_c = lambda operand: C.element_select(
                is_first_label, thought_vector_broadcast_c, C.sequence.past_value(operand))
        (decoder_output_h, decoder_output_c) = LSTM_layer(
            decoder_output_h.output, hidden_dim, recurrence_hook_h, recurrence_hook_c)
    # Linear output layer (softmax is applied in the criterion nodes)
    W = C.parameter(shape=(decoder_output_h.shape[0], label_vocab_dim), init=C.glorot_uniform())
    B = C.parameter(shape=(label_vocab_dim), init=0)
    z = C.plus(B, C.times(stabilize(decoder_output_h), W))
    return z
# ## Training
#
# Now that we've created the model, we are ready to train the network and learn its parameters. For sequence-to-sequence networks, the loss we use is cross-entropy. Note that we have to find the `label_sequences` node from the model because it was defined in our network and we want to compare the model's predictions specifically to the outputs of that node.
# +
model = create_model()
# fetch the named label node from inside the graph -- the criterion compares
# the model's predictions against exactly this node
label_sequence = model.find_by_name('label_sequence')
# Criterion nodes
ce = C.cross_entropy_with_softmax(model, label_sequence)
errs = C.classification_error(model, label_sequence)
# let's show the required arguments for this model
print([x.name for x in model.arguments])
# -
# Next, we'll setup a bunch of parameters to drive our learning, we'll create the learner, and finally create our trainer:
# training parameters
lr_per_sample = C.learning_parameter_schedule_per_sample(0.007)
minibatch_size = 72
# momentum given as a per-minibatch schedule; the odd constant is a
# per-sample time constant converted for this minibatch size
momentum_schedule = C.momentum_schedule(0.9366416204111472, minibatch_size=minibatch_size)
clipping_threshold_per_sample = 2.3
gradient_clipping_with_truncation = True
learner = C.momentum_sgd(model.parameters,
                         lr_per_sample, momentum_schedule,
                         gradient_clipping_threshold_per_sample=clipping_threshold_per_sample,
                         gradient_clipping_with_truncation=gradient_clipping_with_truncation)
trainer = C.Trainer(model, (ce, errs), learner)
# And now we bind the features and labels from our `train_reader` to the inputs that we setup in our network definition. First however, we'll define a convenience function to help find an argument name when pointing the reader's features to an argument of our model.
# +
# helper function to find variables by name
def find_arg_by_name(name, expression):
    """Return the unique argument of `expression` whose `.name` equals `name`.

    Asserts that exactly one such argument exists, so a typo'd or
    ambiguous name fails loudly instead of silently binding the wrong
    reader stream.
    """
    # 'matches' rather than 'vars': avoid shadowing the built-in vars()
    matches = [arg for arg in expression.arguments if arg.name == name]
    assert len(matches) == 1, "expected exactly one argument named %r, found %d" % (name, len(matches))
    return matches[0]
# Map the reader's feature/label streams onto the model's input arguments.
train_bind = {
    find_arg_by_name('raw_input' , model) : train_reader.streams.features,
    find_arg_by_name('raw_labels', model) : train_reader.streams.labels
}
# -
# Finally, we define our training loop and start training the network!
# +
training_progress_output_freq = 100
# isFast trims the run to 100 minibatches for a quick functional check
max_num_minibatch = 100 if isFast else 1000
for i in range(max_num_minibatch):
    # get next minibatch of training data
    mb_train = train_reader.next_minibatch(minibatch_size, input_map=train_bind)
    trainer.train_minibatch(mb_train)
    # collect epoch-wide stats
    if i % training_progress_output_freq == 0:
        print("Minibatch: {0}, Train Loss: {1:.3f}, Train Evaluation Criterion: {2:2.3f}".format(i,
            trainer.previous_minibatch_loss_average, trainer.previous_minibatch_evaluation_average))
# -
# ## Model evaluation: greedy decoding
#
# Once we have a trained model, we of course then want to make use of it to generate output sequences! In this case, we will use greedy decoding. What this means is that we will run an input sequence through our trained network, and when we generate the output sequence, we will do so one element at a time by taking the `hardmax()` of the output of our network. This is obviously not optimal in general. Given the context, some word may always be the most probable at the first step, but another first word may be preferred given what is output later on. Decoding the optimal sequence is intractable in general. But we can do better doing a beam search where we keep around some small number of hypotheses at each step. However, greedy decoding can work surprisingly well for sequence-to-sequence networks because so much of the context is kept around in the RNN.
#
# To do greedy decoding, we need to hook in the previous output of our network as the input to the decoder. During training we passed the `label_sequences` (ground truth) in. You'll notice in our `create_model()` function above the following lines:
# Exposition: these mirror the lines inside create_model() that make the
# decoder history replaceable after training.
decoder_history_hook = C.alias(label_sequence, name='decoder_history_hook') # copy label_sequence
decoder_input = C.element_select(is_first_label, label_sentence_start_scattered,
                                 C.sequence.past_value(decoder_history_hook))
# This gives us a way to modify the `decoder_history_hook` after training to something else. We've already trained our network, but now we need a way to evaluate it without using a ground truth. We can do that like this:
# +
model = create_model()
# get some references to the new model
label_sequence = model.find_by_name('label_sequence')
decoder_history_hook = model.find_by_name('decoder_history_hook')
# and now replace the output of decoder_history_hook with the hardmax output of the network
def clone_and_hook():
    """Clone the graph (sharing parameters), feeding the network's own
    hardmax output back as the decoder history instead of the labels."""
    # network output for decoder history
    net_output = C.hardmax(model)
    # make a clone of the graph where the ground truth is replaced by the network output
    return model.clone(C.CloneMethod.share, {decoder_history_hook.output : net_output.output})
# get a new model that uses the past network output as input to the decoder
new_model = clone_and_hook()
# -
# The `new_model` now contains a version of the original network that shares parameters with it but that has a different input to the decoder. Namely, instead of feeding the ground truth labels into the decoder, it will feed in the history that the network has generated!
#
# Finally, let's see what it looks like if we train, and keep evaluating the network's output every `100` iterations by running a word's graphemes ('A B A D I') through our network. This way we can visualize the progress learning the best model... First we'll define a more complete `train()` action. It is largely the same as above but has some additional training parameters included; some additional smarts for printing out statistics as we go along; we now see progress over our data as epochs (one epoch is one complete pass over the training data); and we setup a reader for the single validation sequence we described above so that we can visually see our network's progress on that sequence as it learns.
# +
########################
# train action #
########################
def train(train_reader, valid_reader, vocab, i2w, model, max_epochs):
    """Train `model` with momentum SGD, periodically greedy-decoding one
    validation sequence through a hooked clone of the network.

    Returns the final epoch's error rate as a percentage.
    """
    # do some hooks that we won't need in the future
    label_sequence = model.find_by_name('label_sequence')
    decoder_history_hook = model.find_by_name('decoder_history_hook')
    # Criterion nodes
    ce = C.cross_entropy_with_softmax(model, label_sequence)
    errs = C.classification_error(model, label_sequence)
    def clone_and_hook():
        # network output for decoder history
        net_output = C.hardmax(model)
        # make a clone of the graph where the ground truth is replaced by the network output
        return model.clone(C.CloneMethod.share, {decoder_history_hook.output : net_output.output})
    # get a new model that uses the past network output as input to the decoder
    new_model = clone_and_hook()
    # Instantiate the trainer object to drive the model training
    lr_per_sample = C.learning_rate_schedule(0.007, C.UnitType.sample)
    minibatch_size = 72
    momentum_time_constant = C.momentum_as_time_constant_schedule(1100)
    clipping_threshold_per_sample = 2.3
    gradient_clipping_with_truncation = True
    learner = C.momentum_sgd(model.parameters,
                             lr_per_sample, momentum_time_constant,
                             gradient_clipping_threshold_per_sample=clipping_threshold_per_sample,
                             gradient_clipping_with_truncation=gradient_clipping_with_truncation)
    trainer = C.Trainer(model, (ce, errs), learner)
    # Get minibatches of sequences to train with and perform model training
    i = 0    # samples seen so far
    mbs = 0  # minibatches seen so far
    # Set epoch size to a larger number for lower training error
    epoch_size = 5000 if isFast else 908241
    training_progress_output_freq = 100
    # bind inputs to data from readers
    train_bind = {
        find_arg_by_name('raw_input' , model) : train_reader.streams.features,
        find_arg_by_name('raw_labels', model) : train_reader.streams.labels
    }
    valid_bind = {
        find_arg_by_name('raw_input' , new_model) : valid_reader.streams.features,
        find_arg_by_name('raw_labels', new_model) : valid_reader.streams.labels
    }
    for epoch in range(max_epochs):
        loss_numer = 0
        metric_numer = 0
        denom = 0
        while i < (epoch+1) * epoch_size:
            # get next minibatch of training data
            mb_train = train_reader.next_minibatch(minibatch_size, input_map=train_bind)
            trainer.train_minibatch(mb_train)
            # collect epoch-wide stats (weighted by samples per minibatch)
            samples = trainer.previous_minibatch_sample_count
            loss_numer += trainer.previous_minibatch_loss_average * samples
            metric_numer += trainer.previous_minibatch_evaluation_average * samples
            denom += samples
            # every N MBs evaluate on a test sequence to visually show how we're doing; also print training stats
            # (indentation reconstructed: the validation decode runs inside this branch — TODO confirm)
            if mbs % training_progress_output_freq == 0:
                print("Minibatch: {0}, Train Loss: {1:2.3f}, Train Evaluation Criterion: {2:2.3f}".format(mbs,
                    trainer.previous_minibatch_loss_average, trainer.previous_minibatch_evaluation_average))
                mb_valid = valid_reader.next_minibatch(minibatch_size, input_map=valid_bind)
                e = new_model.eval(mb_valid)
                print_sequences(e, i2w)
            i += mb_train[find_arg_by_name('raw_labels', model)].num_samples
            mbs += 1
        print("--- EPOCH %d DONE: loss = %f, errs = %f ---" % (epoch, loss_numer/denom, 100.0*(metric_numer/denom)))
    return 100.0*(metric_numer/denom)
# -
# Now that we have our two important functions defined -- `create_model()` and `train()` -- let's make use of them:
# +
# Given a vocab and tensor, print the output
def print_sequences(sequences, i2w):
    """Print each sequence as a list of vocabulary tokens.

    Every element of a sequence is a score/one-hot vector; it is decoded
    to the word whose index has the highest score, via the `i2w` map.
    """
    for seq in sequences:
        decoded = [i2w[np.argmax(scores)] for scores in seq]
        print(decoded, sep=" ")
# hook up data
# (second argument to create_reader presumably toggles randomization/repeat
#  for training vs. validation — defined earlier in the notebook)
train_reader = create_reader(train_file, True)
valid_reader = create_reader(valid_file, False)
vocab, i2w = get_vocab(vocab_file)  # vocabulary plus index->word mapping
# create model
model = create_model()
# train (a single epoch here, for speed)
error = train(train_reader, valid_reader, vocab, i2w, model, max_epochs=1)
# -
# Print the training error
print(error)
# ## Task
# Note the error is very high. This is largely due to the minimal training we have done so far. Please change the `epoch_size` to be a much higher number and re-run the `train` function. This might take considerably longer, but you will see a marked reduction in the error.
# ## Next steps
#
# An important extension to sequence-to-sequence models, especially when dealing with long sequences, is to use an attention mechanism. The idea behind attention is to allow the decoder, first, to look at any of the hidden state outputs from the encoder (instead of using only the final hidden state), and, second, to learn how much attention to pay to each of those hidden states given the context. This allows the outputted word at each time step `t` to depend not only on the final hidden state and the word that came before it, but instead on a weighted combination of *all* of the input hidden states!
#
# In the next version of this tutorial, we will talk about how to include attention in your sequence to sequence network.
|
Tutorials/CNTK_599A_Sequence_To_Sequence.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import os
import random
import numpy as np
# +
# Seed every RNG source (Python, NumPy, PyTorch CPU and GPU) and force
# deterministic cuDNN kernels so runs are reproducible.
SEED = 1234
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
# -
# Resize MNIST digits from 28x28 to the 32x32 input LeNet-5 expects,
# convert to tensors, and normalize with the standard MNIST mean/std.
train_transforms = transforms.Compose([
    transforms.Resize(32),
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])
# +
# BUG FIX: the transform pipeline defined above is named `train_transforms`;
# the original referenced an undefined `data_transforms`, raising NameError.
train_data = datasets.MNIST('data', train=True, download=True, transform=train_transforms)
test_data = datasets.MNIST('data', train=False, download=True, transform=train_transforms)
# Hold out 10% of the training set for validation.
n_train_examples = int(len(train_data)*0.9)
n_valid_examples = len(train_data) - n_train_examples
train_data, valid_data = torch.utils.data.random_split(train_data, [n_train_examples, n_valid_examples])
# -
print(f'Number of training examples: {len(train_data)}')
print(f'Number of validation examples: {len(valid_data)}')
print(f'Number of testing examples: {len(test_data)}')
# +
# Wrap the datasets in batch iterators; only the training set is shuffled.
BATCH_SIZE = 64
train_iterator = torch.utils.data.DataLoader(train_data, shuffle=True, batch_size=BATCH_SIZE)
valid_iterator = torch.utils.data.DataLoader(valid_data, batch_size=BATCH_SIZE)
test_iterator = torch.utils.data.DataLoader(test_data, batch_size=BATCH_SIZE)
# -
class LeNet(nn.Module):
    """Classic LeNet-5 for 32x32 single-channel images, 10 output classes."""

    def __init__(self):
        super().__init__()
        # Two 5x5 convolutions: 1 -> 6 channels, then 6 -> 16 channels.
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Classifier head: 16*5*5 -> 120 -> 84 -> 10.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # conv -> relu -> 2x2 max-pool, twice: (32,32) -> (14,14) -> (5,5).
        out = F.max_pool2d(F.relu(self.conv1(x)), 2)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2)
        # Flatten every sample's feature maps into a single vector.
        out = torch.flatten(out, 1)
        # Fully connected classifier; the final layer returns raw logits.
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = LeNet().to(device)
model
# Adam with default hyperparameters; CrossEntropyLoss expects raw logits.
optimizer = optim.Adam(model.parameters())
criterion = nn.CrossEntropyLoss()
def calculate_accuracy(fx, y):
    """Fraction of predictions (row-wise argmax of `fx`) that match labels `y`."""
    top_class = fx.max(1, keepdim=True)[1]
    n_correct = top_class.eq(y.view_as(top_class)).sum()
    return n_correct.float() / top_class.shape[0]
def train(model, device, iterator, optimizer, criterion):
    """Run one training epoch; return (mean loss, mean accuracy) over batches."""
    model.train()
    total_loss, total_acc = 0, 0
    for x, y in iterator:
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()
        logits = model(x)
        batch_loss = criterion(logits, y)
        batch_acc = calculate_accuracy(logits, y)
        batch_loss.backward()
        optimizer.step()
        total_loss += batch_loss.item()
        total_acc += batch_acc.item()
    n_batches = len(iterator)
    return total_loss / n_batches, total_acc / n_batches
def evaluate(model, device, iterator, criterion):
    """Evaluate without gradient tracking; return (mean loss, mean accuracy)."""
    model.eval()
    total_loss, total_acc = 0, 0
    with torch.no_grad():
        for x, y in iterator:
            x, y = x.to(device), y.to(device)
            logits = model(x)
            total_loss += criterion(logits, y).item()
            total_acc += calculate_accuracy(logits, y).item()
    n_batches = len(iterator)
    return total_loss / n_batches, total_acc / n_batches
# +
EPOCHS = 10
SAVE_DIR = 'models'
MODEL_SAVE_PATH = os.path.join(SAVE_DIR, 'lenet-mnist.pt')
# Track the best validation loss so only the best checkpoint is kept.
best_valid_loss = float('inf')
if not os.path.isdir(f'{SAVE_DIR}'):
    os.makedirs(f'{SAVE_DIR}')
for epoch in range(EPOCHS):
    train_loss, train_acc = train(model, device, train_iterator, optimizer, criterion)
    valid_loss, valid_acc = evaluate(model, device, valid_iterator, criterion)
    # Checkpoint whenever validation loss improves (early-stopping style).
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), MODEL_SAVE_PATH)
    print(f'| Epoch: {epoch+1:02} | Train Loss: {train_loss:.3f} | Train Acc: {train_acc*100:05.2f}% | Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:05.2f}% |')
# +
# Restore the best checkpoint before the final evaluation.
model.load_state_dict(torch.load(MODEL_SAVE_PATH))
# BUG FIX: this cell reported "Test" metrics but evaluated the validation
# iterator; evaluate the held-out test set instead.
test_loss, test_acc = evaluate(model, device, test_iterator, criterion)
print(f'| Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:05.2f}% |')
|
2 - LeNet-5 MNIST.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
from statsmodels.nonparametric.smoothers_lowess import lowess
from matplotlib import collections as matcoll
from mplot_plots import Mplot
# +
#import warnings
#warnings.filterwarnings('ignore')
# read in the Mroz (1987) labor-supply dataset from the Wooldridge archive
df = pd.read_stata('http://fmwww.bc.edu/ec-p/data/wooldridge/mroz.dta')
# use a few columns
cols = ['lwage', 'exper', 'expersq', 'educ', 'age', 'kidslt6', 'kidsge6']
df = df[cols]
# drop rows with missing data
df = df.dropna(how = 'any')
# fitting the model using statsmodels.formula.api
OLS_model = smf.ols('lwage ~ exper + educ + age + kidslt6 + kidsge6', data = df).fit()
# Generating plots
# NOTE(review): indices 1..7 presumably select which diagnostic plot Mplot
# draws — verify against mplot_plots
for i in range(1,8):
    Mplot(OLS_model, i).plot()
# -
|
mplot_example.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Exercise 4 - Random vector
# **The content of this script is only as a supplementary illustration to the exercise, it is not necessary to know at the exam. It is important to be able to calculate manually.**
#
# ## <NAME>, <NAME>, <NAME>
#
#
# # Example
#
# Random vector $Z =(Y;X)^T$ has a probability function specified by the table </br>
# 
#
# ## a) Determine the missing value of the combined probability function,
#
#
# Joint probability table P(X=x, Y=y); one entry is left as 0 and is
# recovered below from the constraint that all probabilities sum to 1.
data = c(0.01, 0.04, 0.12,
         0.02, 0.16, 0.07,
         0.03, 0, 0.06,
         0.25, 0.05, 0.01)
P = matrix(data , nrow=3, ncol=4) # possibly byrow=...
X = c(3, 5, 7)     # values of X (rows)
Y = c(1, 2, 3, 4)  # values of Y (columns)
dimnames(P) = list(X,Y)
P
sum(P)
# do not run this cell twice, otherwise you will set the value back to 0,
# Do you know why?
# (after the first run sum(P) is already 1, so 1 - sum(P) becomes 0)
p_5_3 = 1 - sum(P)
P["5","3"] = p_5_3
P
# ## b) Specify the distribution function
#
# **Attention! The vector Z is $(Y,X)^T$ so the first parameter is the value Y and the second value X.**
#
#
# F(2.8; 7.1)
# P(Y<2.8, X<7.1)
# logical indexing picks the sub-table with X below 7.1 and Y below 2.8
P[X<7.1, Y<2.8]
sum(P[X<7.1, Y<2.8])
# Joint CDF tabulated over the intervals between the support points.
# (NOTE: this shadows base R's alias F for FALSE within this session.)
F = matrix(rep(0,4*5), nrow=4, ncol=5)
dimnames(F) = list(c('(-inf,3>', '(3,5>', '(5,7>', '(7,inf)'),
                   c('(-inf,1>', '(1,2>', '(2,3>', '(3,4>', '(4,inf)'))
F
# we go through the rows and columns, we always take one value
# from the relevant row or column
# (8 and 5 act as stand-ins for +inf beyond the largest support points)
x_vals = c(3,5,7,8)
y_vals = c(1,2,3,4,5)
for(i in 1:4){
    for(j in 1:5){
        x = x_vals[i]
        y = y_vals[j]
        F[i,j] = sum(P[X<x, Y<y])
    }
}
F
# ## c) Determine the marginal distribution
#
#
# Marginal probability function of X: sum the joint table over Y.
P_x = rowSums(P)
P_x
# Marginal CDF of X (prepend 0 for the interval below the smallest value).
F_x = c(0, cumsum(P_x))
F_x
# Marginal probability function of Y: sum the joint table over X.
P_y = colSums(P)
P_y
F_y = c(0, cumsum(P_y))
F_y
# ## d) Conditional probabilities and conditional probability functions $P(x|y), P(y|x)$
#
#
# P(Y>2.1|X<5.3)
# P(Y>2.1 ∧ X<5.3)/P(X<5.3)
sum(P[X<5.3, Y>2.1])
sum(P[X<5.3,])
sum(P[X<5.3, Y>2.1])/sum(P[X<5.3,])
# P(X=5|Y=1)
# P(X=5 ∧ Y=1)/P(Y=1)  (the two forms below are equivalent)
P['5','1']/sum(P[,'1'])
P['5','1']/sum(P_y['1'])
# **$P(x|y)=\frac{P(X=x,Y=y)}{P_Y(y)}$**
#
#
P_xy = P # it's the same size, so we'll steal the formatting
X_lab = c('3', '5', '7')
Y_lab = c('1', '2', '3', '4')
# conditional probability function P(x|y): divide each column by P_Y(y)
for(x in X_lab){
    for(y in Y_lab){
        P_xy[x, y] = P[x, y]/P_y[y]
    }
}
P_xy
colSums(P_xy)  # sanity check: each column must sum to 1
# **$P(y|x)$**
#
#
P_yx = P # it's the same size, so we'll steal the formatting
# conditional probability function P(y|x): divide each row by P_X(x)
for(x in X_lab){
    for(y in Y_lab){
        P_yx[x, y] = P[x, y]/P_x[x]
    }
}
P_yx
rowSums(P_yx)  # sanity check: each row must sum to 1
# ## e) basic characteristics of random variables X and Y
#
#
# E(X) = sum of x * P_X(x)
E_X = sum(X*P_x)
E_X
# Var(X) = E(X^2) - E(X)^2
E_XX = sum(X*X*P_x)
D_X = E_XX - E_X^2
D_X
E_Y = sum(Y*P_y)
E_Y
E_YY = sum(Y*Y*P_y)
D_Y = E_YY - E_Y^2
D_Y
# ## f) conditional mean E(X|Y=2)
#
#
# P(x|Y=2)
P_xy[,'2']
# E(X|Y=2) = sum of x * P(x|Y=2)
E_X_Y2 = sum(X*P_xy[,'2'])
E_X_Y2
# ## g) covariance and correlation
#
#
# Matrix of products x*y, aligned with the joint table's layout.
X_Y = P # matrix where in each column is the value x * y
for(x in X){
    for(y in Y){
        X_Y[x==X, y==Y] = x*y
    }
}
X_Y
# mean value of E(X * Y)
E_XY = sum(X_Y*P)
E_XY
# covariance: cov(X,Y) = E(XY) - E(X)E(Y)
cov_XY = E_XY-E_X*E_Y
cov_XY
# correlation: cov(X,Y) / sqrt(Var(X) * Var(Y))
cov_XY/sqrt(D_X*D_Y)
|
support_files/en/.ipynb_checkpoints/T6_random_vector-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import pandas as pd
import seaborn as sns
import scipy.io as sio
import models as md
# scratch cell: demo of negative-index slicing
a = [1, 2, 2, 3, 4]
a[-2:]
# Load the MATLAB export of L/V values and subtended angles at response.
data = sio.loadmat('../data/external/LVsVersusSubtendedAngle.mat')
data.keys()
data['subtendedAngleAtResponse'].shape
# squeeze() drops the singleton MATLAB dimensions so each column is 1-D
clean_dict = {'lv': np.squeeze(data['LVs']), 'resp_angle': np.squeeze(data['subtendedAngleAtResponse'])}
df = pd.DataFrame(clean_dict)
df.describe()
sns.set('poster')
g = sns.pairplot(df, size=8, diag_kws={'bins':50})
# Global matplotlib styling for publication-sized figures.
import matplotlib as mpl
mpl.rcParams['font.size'] = 40
mpl.rcParams['xtick.labelsize'] = 40
mpl.rcParams['ytick.labelsize'] = 40
mpl.rcParams['axes.labelsize'] = 40
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['axes.edgecolor'] = 'k'
mpl.rcParams['axes.grid'] = False
mpl.rcParams['axes.spines.top'] = False
mpl.rcParams['axes.spines.right'] = False
mpl.rcParams['axes.linewidth'] = 3
mpl.rcParams['axes.labelpad'] = 10
# Scatter of response angle vs L/V, saved in both vector and raster form.
fig = plt.figure(figsize=(10,10))
plt.plot(df['lv'], df['resp_angle'], 'o', color='#00d4f9', ms=18)
plt.xlabel('L/V (s)')
plt.ylabel('Response angle ($\degree$)')
plt.gca().set_facecolor('w')
plt.xticks(np.arange(7)*0.2, [0, '', 0.4, '', 0.8, '', 1.2])
plt.yticks(np.arange(8)*25, np.arange(8)*25)
plt.savefig('../figures/expm_theta_vs_lv.eps', bbox_inches='tight')
plt.savefig('../figures/expm_theta_vs_lv.jpg', bbox_inches='tight')
# Bin L/V values into groups (md.lv_map is project-defined) and show the
# per-group distribution as jittered points over boxplots.
df['lv_grouped'] = df['lv'].map(md.lv_map)
fig = plt.figure(figsize=(10, 10))
ax = sns.stripplot(x='lv_grouped', y='resp_angle', jitter=0.05, color='k', alpha=0.2, data=df, size=10)
ax = sns.boxplot(x='lv_grouped', y='resp_angle', hue='lv_grouped', palette='inferno', data=df)
|
code/analysis_expm_data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline
def sinplot(flip=1):
    """Draw six phase-shifted sine curves with decreasing amplitude.

    `flip` multiplies every curve (pass -1 to mirror vertically).
    """
    xs = np.linspace(0, 14, 100)
    for k in range(1, 7):
        plt.plot(xs, np.sin(xs + k * 0.5) * (7 - k) * flip)
sinplot()
# +
# Seaborn themes: five built-in style presets
# -
sns.set()
sinplot()
sns.set_style('whitegrid')
data = np.random.normal(size=(20,6)) + np.arange(6) / 2
sns.boxplot(data=data)
sns.set_style('dark')
sinplot()
sns.set_style('white')
sinplot()
sns.set_style('ticks')
sinplot()
sinplot()
sns.despine()  # remove the top and right spines
sns.violinplot(data)
sns.despine(offset=10)
sns.set_style('whitegrid')
sns.boxplot(data=data, palette='deep')
sns.despine(left=True)
# temporarily switch styles for individual subplots
with sns.axes_style('darkgrid'):
    plt.subplot(311)
    sinplot()
    plt.subplot(313)
    sinplot(-1)
sns.set()
# contexts scale fonts/lines for different output media
sns.set_context('paper')
plt.figure(figsize=(8, 6))
sinplot()
sns.set_context('talk')
plt.figure(figsize=(8, 6))
sinplot()
sns.set_context('poster')
plt.figure(figsize=(8, 6))
sinplot()
sns.set_context('notebook', font_scale=1.5, rc={"lines.linewidth":2.5})
sinplot()
sns.set_context('notebook', font_scale=2.5, rc={"lines.linewidth":2.5})
sinplot()
# +
# Colors: palettes
# -
sns.set(rc={'figure.figsize':(6, 6)})
current_palette = sns.color_palette()
# six default palettes: deep, muted, pastel, bright, dark, colorblind
sns.palplot(current_palette)
sns.palplot(sns.color_palette('hls', 8))
sns.palplot(sns.color_palette('hls', 80))
sns.set_style('whitegrid')
a = np.random.random(size=(20, 8)) + np.arange(8) / 2
sns.boxplot(data=a, palette=sns.color_palette('hls', 8))
sns.palplot(sns.hls_palette(8, l=.3, s=.8)) # l = lightness, s = saturation
sns.palplot(sns.color_palette('Paired', 10)) # strongly contrasting colors, grouped in pairs
# +
# xkcd named colors: 954 crowd-sourced RGB color names
# -
plt.plot([0,1], [0, 1], sns.xkcd_rgb['pale red'], lw=3)
plt.plot([0,1], [0, 2], sns.xkcd_rgb['medium green'], lw=3)
plt.plot([0,1], [0, 3], sns.xkcd_rgb['denim blue'], lw=3)
colors = ['windows blue', 'amber', 'greyish', 'faded green', 'dusty purple']
sns.palplot(sns.xkcd_palette(colors))
# +
# Sequential palettes: color intensity follows the data (e.g. higher value -> darker)
# -
sns.palplot(sns.color_palette('Blues'))
sns.palplot(sns.color_palette('Blues_r'))
sns.palplot(sns.color_palette('BuGn_r', 10))
# +
# cubehelix_palette(): linear change in hue and brightness
# -
sns.palplot(sns.color_palette('cubehelix', 8))
sns.palplot(sns.cubehelix_palette(8, start=.5, rot=.75))
sns.palplot(sns.light_palette('green'))
sns.palplot(sns.dark_palette('purple'))
sns.palplot(sns.dark_palette('purple', reverse=True))
x, y = np.random.multivariate_normal([0,0],[[1,-.5], [-.5,1]], size=300).T
pal = sns.light_palette('green', as_cmap=True)
sns.kdeplot(x, y, cmap=pal)
|
Seaborn.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Demo 1: Spreadsheets
#
# ## USAspending
# [](http://www.usaspending.gov)
#
# ## Federal accounts
# [](https://www.usaspending.gov/#/federal_account)
#
# ## Federal accounts as spreadsheet
# [](https://docs.google.com/spreadsheets/d/1C7YusuHvSR0o0pIBvPA1AcwqYDeSVwrS1Ed8MWZMOTo)
# ## Pandas: create dataframe from spreadsheet
# +
import pandas as pd
# tweak the pandas display settings so dollar amounts are more readable
pd.options.display.float_format = '{:,.0f}'.format
acct = pd.read_excel('data/federal_accounts.xlsx')
# treat identifier columns as strings so they are not formatted as numbers
acct.account_id = acct.account_id.astype(str)
acct.agency_identifier = acct.agency_identifier.astype(str)
acct.head(5)
# -
# ## Do spreadsheet-y things
#
# ### Sort
acct = acct.sort_values(['fiscal_year', 'account_name'])
acct.head(5)
# ### Filter
# Federal accounts with budgetary resources > $100,000,000,000
big_acct = acct[acct['budgetary_resources']>=100000000000]
print(f'\n{len(big_acct)} accounts had more than $100,000,000,000 in fiscal year 2017:')
big_acct.sort_values(by='budgetary_resources', ascending=False).head(3)
# ### Functions
# +
# Show dollar amounts as per-capita
# U.S. population estimate on 1/1/18: 326,971,407
# source: https://www.census.gov/newsroom/press-releases/2017/new-years-2018.html
acct['budgetary_resources_per_capita'] = acct['budgetary_resources']/326971407
acct[['account_name', 'fiscal_year', 'budgetary_resources', 'budgetary_resources_per_capita']].head(5)
# -
# ### Reshape and Pivot
pd.pivot_table(
    acct,
    values='budgetary_resources',
    index=['managing_agency', 'account_number', 'account_name', 'account_id', 'managing_agency_acronym'],
    columns=['fiscal_year']
)
# Sum budgetary resources by agency
acct_group = acct.groupby(['managing_agency', 'fiscal_year']).sum()
acct_group
|
demos/01-Spreadsheets.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jahidhasan299/jahidhasan299-Deep-Learning-With-7-Real-Projects/blob/main/3D_Unet.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="91j4KQKXl8Pr"
# This code uses 3D Unet to train a network on 3D subvolumes (64x64x64).
# It also segments a large volume and outputs a multidimensional OMETIFF file.<p>
# Custom dataset is used for this code but it should work on any dataset.
# <p>
# + id="5iIMqTEEOuUZ"
# Building Unet by dividing encoder and decoder into blocks
#This is exactly the same unet used in earlier exercises for 2D images.
#Except here we change conv2D to conv3D, Maxpooling2D to Maxpooling3D
#upsampling2D to upsampling3D and Conv2DTranspose to Conv3DTranspose.
#We also change the filter size and pooling sizes from 3x3 and 2x2 to
# 3x3x3 and 2x2x2, respectively.
from keras.models import Model
from keras.layers import Input, Conv3D, MaxPooling3D, UpSampling3D, concatenate, Conv3DTranspose, BatchNormalization, Dropout, Lambda
from tensorflow.keras.optimizers import Adam
from keras.layers import Activation, MaxPool2D, Concatenate
def conv_block(input, num_filters):
    """Two (Conv3D 3x3x3 -> BatchNorm -> ReLU) stages at `num_filters` channels."""
    out = Conv3D(num_filters, 3, padding="same")(input)
    out = BatchNormalization()(out)  # BatchNorm is an addition over the original U-Net
    out = Activation("relu")(out)
    out = Conv3D(num_filters, 3, padding="same")(out)
    out = BatchNormalization()(out)
    out = Activation("relu")(out)
    return out
# Encoder level: convolutions, then 2x downsampling.
def encoder_block(input, num_filters):
    """Return (skip_features, downsampled) for one contracting-path level."""
    features = conv_block(input, num_filters)
    downsampled = MaxPooling3D((2, 2, 2))(features)
    return features, downsampled
# Decoder level: upsample, merge the matching encoder skip, convolve.
def decoder_block(input, skip_features, num_filters):
    """Upsample `input` 2x, concatenate `skip_features`, then run conv_block."""
    upsampled = Conv3DTranspose(num_filters, (2, 2, 2), strides=2, padding="same")(input)
    merged = Concatenate()([upsampled, skip_features])
    return conv_block(merged, num_filters)
#Build Unet using the blocks
def build_unet(input_shape, n_classes):
    """Assemble the 3-D U-Net.

    input_shape: one patch's shape, e.g. (64, 64, 64, channels).
    n_classes:   1 -> sigmoid output (binary), otherwise softmax.
    """
    inputs = Input(input_shape)
    # Contracting path: each level yields (skip features, pooled output).
    s1, p1 = encoder_block(inputs, 64)
    s2, p2 = encoder_block(p1, 128)
    s3, p3 = encoder_block(p2, 256)
    s4, p4 = encoder_block(p3, 512)
    b1 = conv_block(p4, 1024) #Bridge
    # Expanding path mirrors the encoder, consuming the skip connections.
    d1 = decoder_block(b1, s4, 512)
    d2 = decoder_block(d1, s3, 256)
    d3 = decoder_block(d2, s2, 128)
    d4 = decoder_block(d3, s1, 64)
    if n_classes == 1: #Binary
        activation = 'sigmoid'
    else:
        activation = 'softmax'
    outputs = Conv3D(n_classes, 1, padding="same", activation=activation)(d4) #Change the activation based on n_classes
    print(activation)
    model = Model(inputs, outputs, name="U-Net")
    return model
# + colab={"base_uri": "https://localhost:8080/"} id="dWrKrCXtPLon" outputId="dcb15ac5-ea59-4455-951b-b99c37d24984"
my_model = build_unet((64,64,64,3), n_classes=4)
# + id="xFiKt9hRPlSQ" colab={"base_uri": "https://localhost:8080/"} outputId="0ed970a1-5e39-4d78-ce7b-1133d0c424f6"
print(my_model.summary())
# + colab={"base_uri": "https://localhost:8080/"} id="uusLfD-VzEN-" outputId="b1806f00-af93-4670-9965-b618da7629bc"
my_model.input_shape
# + id="DFJYJc1p_LRy" colab={"base_uri": "https://localhost:8080/"} outputId="08a2ff4b-f02c-491c-b15b-5f2ba44938a8"
#Use patchify to break large volumes into smaller for training
#and also to put patches back together after prediction.
# !pip install patchify
# + id="foXdS4Uil4WP" colab={"base_uri": "https://localhost:8080/"} outputId="6a39a81f-6ac7-4cf3-d4b1-6d706c5f1f64"
import tensorflow as tf
import keras
print(tf.__version__)
print(keras.__version__)
# + id="45aZsgManbdG" colab={"base_uri": "https://localhost:8080/"} outputId="481702b1-9f04-4c47-a023-9ffa5a4fee8f"
#Make sure the GPU is available.
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
# + id="d7hKivzw-0V9"
from skimage import io
from patchify import patchify, unpatchify
import numpy as np
from matplotlib import pyplot as plt
from keras import backend as K
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
# + colab={"base_uri": "https://localhost:8080/"} id="bdE84tORQ7vP" outputId="964a7981-8a3c-43a5-f2ed-553dc9bfed2d"
from google.colab import drive
drive.mount('/content/drive')
# + id="HWZiqrMK-kVb"
#Load input images and masks.
#Here we load 256x256x256 pixel volume. We will break it into patches of 64x64x64 for training.
image = io.imread('/content/drive/MyDrive/Colab Notebooks/datasets/sandstone_data_for_ML 2/data_for_3D_Unet/train_images_256_256_256.tif')
img_patches = patchify(image, (64, 64, 64), step=64) #Step=64 for 64 patches means no overlap
mask = io.imread('/content/drive/MyDrive/Colab Notebooks/datasets/sandstone_data_for_ML 2/data_for_3D_Unet/train_masks_256_256_256.tif')
mask_patches = patchify(mask, (64, 64, 64), step=64)
# + id="A8a8BumA_qpa" colab={"base_uri": "https://localhost:8080/", "height": 286} outputId="52bebfb3-cdba-4cc4-abd3-c339435ae9a9"
plt.imshow(img_patches[1,2,3,:,:,32])
#
# + colab={"base_uri": "https://localhost:8080/", "height": 286} id="JfyLfO5-z5f-" outputId="91fe1880-0b1c-43d7-f7b8-31efb62ac94a"
plt.imshow(mask_patches[1,2,3,:,:,32])
# + id="K08wiQ0y-ogu" colab={"base_uri": "https://localhost:8080/"} outputId="db5d80ca-2954-459b-d6ce-daddaf4f2239"
input_img = np.reshape(img_patches, (-1, img_patches.shape[3], img_patches.shape[4], img_patches.shape[5]))
input_mask = np.reshape(mask_patches, (-1, mask_patches.shape[3], mask_patches.shape[4], mask_patches.shape[5]))
print(input_img.shape) # n_patches, x, y, z
# + id="3FqzxzsX_4YQ"
n_classes=4
# + id="av8BBXU0-rdw"
#Convert grey image to 3 channels by copying channel 3 times.
#We do this as our unet model expects 3 channel input.
train_img = np.stack((input_img,)*3, axis=-1)
train_img = train_img / 255.
train_mask = np.expand_dims(input_mask, axis=4)
train_mask_cat = to_categorical(train_mask, num_classes=n_classes)
X_train, X_test, y_train, y_test = train_test_split(train_img, train_mask_cat, test_size = 0.10, random_state = 0)
# + id="Higavd5lmj6A"
# Loss function and metric used during training.
def dice_coefficient(y_true, y_pred):
    """Soft Dice score between flattened masks, smoothed to avoid 0/0."""
    smooth = 1
    t = K.flatten(y_true)
    p = K.flatten(y_pred)
    intersection = K.sum(t * p)
    return (2. * intersection + smooth) / (K.sum(t) + K.sum(p) + smooth)
def dice_coefficient_loss(y_true, y_pred):
    """Loss to minimize: 1 - Dice score."""
    return 1 - dice_coefficient(y_true, y_pred)
# + id="tfAFB3uPtYVx"
from tensorflow.python.keras import Sequential
from tensorflow.python.keras.layers import Dense
# + id="mWiQsv6IqGGF"
#Define parameters for our model.
patch_size = 64
channels=3
LR = 0.0001
optim = Adam(LR)
# + colab={"base_uri": "https://localhost:8080/"} id="SYR2h6i3SPLU" outputId="0a7d01af-67ae-4897-83ad-64ecd0811a6e"
model = build_unet((patch_size,patch_size,patch_size,channels), n_classes=n_classes)
# + id="BS6ojei4SfyF" colab={"base_uri": "https://localhost:8080/"} outputId="ae1ded7f-3efd-4631-f088-127d6d8ac869"
model.compile(optimizer = optim, loss=dice_coefficient_loss, metrics=dice_coefficient)
print(model.summary())
# + colab={"base_uri": "https://localhost:8080/"} id="sivlad34SyKP" outputId="12696885-0bc5-4211-f093-b8fce0f8ea16"
print(model.input_shape)
print(X_train.shape)
print(model.output_shape)
print(y_train.shape)
print("-------------------")
print(X_train.max()) #Should be 1 after scaling. If it shows 255, go back and normalize/scale inputs
# + id="f69gK8DoAQ1C" colab={"base_uri": "https://localhost:8080/"} outputId="851114b7-6b0d-4698-9c80-9365824b373f"
#Fit the model
history=model.fit(X_train,
y_train,
batch_size=8,
epochs=120,
verbose=1,
validation_data=(X_test, y_test))
# + id="LVTe3TrrE6DS"
#Save model for future use
model.save('/content/drive/MyDrive/Colab Notebooks/models/sandstone_3D_120epochs.h5')
# + id="5Nk4OFMzAoLk" colab={"base_uri": "https://localhost:8080/", "height": 573} outputId="767b4dce-a99f-4d68-e9ea-8a9bbe2903ed"
###
#plot the training and validation IoU and loss at each epoch
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'y', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
acc = history.history['dice_coefficient']
val_acc = history.history['val_dice_coefficient']
plt.plot(epochs, acc, 'y', label='Training Dice')
plt.plot(epochs, val_acc, 'r', label='Validation Dice')
plt.title('Training and validation Dice')
plt.xlabel('Epochs')
plt.ylabel('Dice')
plt.legend()
plt.show()
# + id="lwjlR25nF54w"
#Load the pretrained model for testing and predictions.
from keras.models import load_model
my_model = load_model('/content/drive/MyDrive/Colab Notebooks/models/sandstone_3D_100epochs.h5', compile=False)
#If you load a different model do not forget to preprocess accordingly.
# + id="kTAQpcUQA7SN"
#Predict on the test data
y_pred=my_model.predict(X_test)
y_pred_argmax=np.argmax(y_pred, axis=4)
y_test_argmax = np.argmax(y_test, axis=4)
# + id="FjHYiOqYBifK" colab={"base_uri": "https://localhost:8080/"} outputId="244135a1-bdc5-45f1-f9d9-e38bb604fb58"
print(y_pred_argmax.shape)
print(y_test_argmax.shape)
print(np.unique(y_pred_argmax))
# + id="3lv4EhPCHsrz" colab={"base_uri": "https://localhost:8080/"} outputId="8c262a40-51ba-4423-b99c-b04524a98e57"
#Using built in keras function for IoU
#Only works on TF > 2.0
from keras.metrics import MeanIoU
from keras.metrics import MeanIoU
n_classes = 4
IOU_keras = MeanIoU(num_classes=n_classes)
IOU_keras.update_state(y_test_argmax, y_pred_argmax)
print("Mean IoU =", IOU_keras.result().numpy())
# + id="BPcRSfobCBrE" colab={"base_uri": "https://localhost:8080/"} outputId="d2773810-4a15-4806-94d4-cf1031177479"
#Test some random images
import random
test_img_number = random.randint(0, len(X_test))
test_img = X_test[test_img_number]
ground_truth=y_test[test_img_number]
test_img_input=np.expand_dims(test_img, 0)
test_pred = my_model.predict(test_img_input)
test_prediction = np.argmax(test_pred, axis=4)[0,:,:,:]
ground_truth_argmax = np.argmax(ground_truth, axis=3)
print(ground_truth_argmax.shape)
# + id="7W9XsGK7FMuN" colab={"base_uri": "https://localhost:8080/", "height": 261} outputId="864dd4ee-b5b9-40a8-df07-5bd5a9659f4c"
#Plot individual slices from test predictions for verification
slice = random.randint(0, ground_truth_argmax.shape[2]-1)
plt.figure(figsize=(12, 8))
plt.subplot(231)
plt.title('Testing Image')
plt.imshow(test_img[slice,:,:,0], cmap='gray')
plt.subplot(232)
plt.title('Testing Label')
plt.imshow(ground_truth_argmax[slice,:,:])
plt.subplot(233)
plt.title('Prediction on test image')
plt.imshow(test_prediction[slice,:,:])
plt.show()
# + [markdown] id="CNi7_zeC7SmG"
# Now segment the full volume using the trained model.
# + id="C1eYttiN7tH4" colab={"base_uri": "https://localhost:8080/"} outputId="4a9feb36-7e4c-4ec5-deed-f5a5292b73a2"
#Break the large image (volume) into patches of same size as the training images (patches)
large_image = io.imread('/content/drive/MyDrive/Colab Notebooks/datasets/sandstone_data_for_ML 2/data_for_3D_Unet/448_images_512x512.tif')
patches = patchify(large_image, (64, 64, 64), step=64) #Step=256 for 256 patches means no overlap
print(large_image.shape)
print(patches.shape)
# + id="G6KXGtea89wi"
# Predict each 3D patch
predicted_patches = []
for i in range(patches.shape[0]):
for j in range(patches.shape[1]):
for k in range(patches.shape[2]):
#print(i,j,k)
single_patch = patches[i,j,k, :,:,:]
single_patch_3ch = np.stack((single_patch,)*3, axis=-1)
single_patch_3ch = single_patch_3ch/255.
single_patch_3ch_input = np.expand_dims(single_patch_3ch, axis=0)
single_patch_prediction = my_model.predict(single_patch_3ch_input)
single_patch_prediction_argmax = np.argmax(single_patch_prediction, axis=4)[0,:,:,:]
predicted_patches.append(single_patch_prediction_argmax)
# + id="yxX9vrPMAvK2" colab={"base_uri": "https://localhost:8080/"} outputId="5f148153-7089-4b8b-fe12-fb57bc1cc5b6"
#Convert list to numpy array
predicted_patches = np.array(predicted_patches)
print(predicted_patches.shape)
# + id="qYQi_STcA6gw" colab={"base_uri": "https://localhost:8080/"} outputId="3c222238-36ac-4c59-b334-1daa258e37fd"
#Reshape to the shape we had after patchifying
predicted_patches_reshaped = np.reshape(predicted_patches,
(patches.shape[0], patches.shape[1], patches.shape[2],
patches.shape[3], patches.shape[4], patches.shape[5]) )
print(predicted_patches_reshaped.shape)
# + id="SQcYSuaeBP2i" colab={"base_uri": "https://localhost:8080/"} outputId="9cf40450-897b-4fc7-9843-3a71b950caf5"
#Repach individual patches into the orginal volume shape
reconstructed_image = unpatchify(predicted_patches_reshaped, large_image.shape)
print(reconstructed_image.shape)
# + id="rFF2FwCGDR1A" colab={"base_uri": "https://localhost:8080/"} outputId="c8392218-c085-4f63-dc57-39c31555b55f"
print(reconstructed_image.dtype)
# + id="6VF_g8oLDjKp" colab={"base_uri": "https://localhost:8080/"} outputId="9c742b9e-ced2-4902-95da-0552edeff999"
#Convert to uint8 so we can open image in most image viewing software packages
reconstructed_image=reconstructed_image.astype(np.uint8)
print(reconstructed_image.dtype)
# + id="OdjWCDYoKLgM"
#Now save it as segmented volume.
from tifffile import imsave
imsave('/content/drive/MyDrive/Colab Notebooks/datasets/sandstone_data_for_ML 2/data_for_3D_Unet/448_images_segmented.tif', reconstructed_image)
# + id="SsBu290SNNSD"
#If you would like to save the volume as multichannel dataset....
# + id="EMFGGwT8FpEl" colab={"base_uri": "https://localhost:8080/"} outputId="ee1b1183-d18e-451a-fc27-2fe2222f459a"
print(np.unique(reconstructed_image))
# + id="USCSJT4rGBDt"
#Seperate each channel/segment to be combined as multiple channels.
num_segments=4
segm0 = (reconstructed_image == 0)
segm1 = (reconstructed_image == 1)
segm2 = (reconstructed_image == 2)
segm3 = (reconstructed_image == 3)
final = np.empty((reconstructed_image.shape[0], reconstructed_image.shape[1], reconstructed_image.shape[2], num_segments))
final[:,:,:,0] = segm0
final[:,:,:,1] = segm1
final[:,:,:,2] = segm2
final[:,:,:,3] = segm3
# + id="F4ZfPm41J4yk" colab={"base_uri": "https://localhost:8080/"} outputId="3e227f5a-0e85-4fa7-aa55-e54af400e88f"
#Use APEER OMETIFF library to read and write multidimensional images
# !pip install apeer-ometiff-library
# + id="enE_X9SCJoYA"
from apeer_ometiff_library import io
# + id="bj0QC8OWINAD"
# Expand image array to 5D of order (T, Z, C, X, Y)
# This is the convention for OMETIFF format as written by APEER library
final = np.expand_dims(final, axis=0)
final=np.swapaxes(final, 2, 4)
final = final.astype(np.int8)
# + id="StgpqkJbIa2F" colab={"base_uri": "https://localhost:8080/"} outputId="71c62eab-8ef0-43c6-9bbf-33d8fd0d8e70"
print("Shape of the segmented volume is: T, Z, C, X, Y ", final.shape)
print(final.dtype)
# + id="oT2L6880WY1k"
# Write dataset as multi-dimensional OMETIFF *image*
io.write_ometiff("/content/drive/MyDrive/Colab Notebooks/datasets/sandstone_data_for_ML 2/data_for_3D_Unet/448_segmented_multi_channel.ome.tiff", final)
# + [markdown] id="cFuW0zJBnXqd"
# You can view multichannel images for free on: www.apeer.com <p>
|
3D_Unet.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python
# language: python
# name: python
# ---
# + [markdown] nbgrader={"grade": false, "grade_id": "jupyter", "locked": true, "schema_version": 3, "solution": false}
# For this problem set, we'll be using the Jupyter notebook:
#
# 
# -
# ---
# ## Part A (2 points)
#
# Write a function that returns a list of numbers, such that $x_i=i^2$, for $1\leq i \leq n$. Make sure it handles the case where $n<1$ by raising a `ValueError`.
# + nbgrader={"grade": false, "grade_id": "squares", "locked": false, "schema_version": 3, "solution": true}
def squares(n):
    """Compute the squares of numbers from 1 to n, such that the
    ith element of the returned list equals i^2.
    """
    ### BEGIN SOLUTION
    if n < 1:
        raise ValueError("n must be greater than or equal to 1")
    result = []
    for value in range(1, n + 1):
        result.append(value * value)
    return result
    ### END SOLUTION
# -
# Your function should print `[1, 4, 9, 16, 25, 36, 49, 64, 81, 100]` for $n=10$. Check that it does:
squares(10)
# + nbgrader={"grade": true, "grade_id": "correct_squares", "locked": false, "points": 1.0, "schema_version": 3, "solution": false}
"""Check that squares returns the correct output for several inputs"""
assert squares(1) == [1]
assert squares(2) == [1, 4]
assert squares(10) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
assert squares(11) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121]
# + nbgrader={"grade": true, "grade_id": "squares_invalid_input", "locked": false, "points": 1.0, "schema_version": 3, "solution": false}
"""Check that squares raises an error for invalid inputs"""
try:
squares(0)
except ValueError:
pass
else:
raise AssertionError("did not raise")
try:
squares(-4)
except ValueError:
pass
else:
raise AssertionError("did not raise")
# -
# ---
#
# ## Part B (1 point)
#
# Using your `squares` function, write a function that computes the sum of the squares of the numbers from 1 to $n$. Your function should call the `squares` function -- it should NOT reimplement its functionality.
# + nbgrader={"grade": false, "grade_id": "sum_of_squares", "locked": false, "schema_version": 3, "solution": true}
def sum_of_squares(n):
    """Compute the sum of the squares of numbers from 1 to n."""
    ### BEGIN SOLUTION
    # Delegates to squares() as the assignment requires, then accumulates.
    total = 0
    for square in squares(n):
        total += square
    return total
    ### END SOLUTION
# -
# The sum of squares from 1 to 10 should be 385. Verify that this is the answer you get:
sum_of_squares(10)
# + nbgrader={"grade": true, "grade_id": "correct_sum_of_squares", "locked": false, "points": 0.5, "schema_version": 3, "solution": false}
"""Check that sum_of_squares returns the correct answer for various inputs."""
assert sum_of_squares(1) == 1
assert sum_of_squares(2) == 5
assert sum_of_squares(10) == 385
assert sum_of_squares(11) == 506
# + nbgrader={"grade": true, "grade_id": "sum_of_squares_uses_squares", "locked": false, "points": 0.5, "schema_version": 3, "solution": false}
"""Check that sum_of_squares relies on squares."""
orig_squares = squares
del squares
try:
sum_of_squares(1)
except NameError:
pass
else:
raise AssertionError("sum_of_squares does not use squares")
finally:
squares = orig_squares
# -
# ---
# ## Part C (1 point)
#
# Using LaTeX math notation, write out the equation that is implemented by your `sum_of_squares` function.
# + [markdown] nbgrader={"grade": true, "grade_id": "sum_of_squares_equation", "locked": false, "points": 1.0, "schema_version": 3, "solution": true}
# $\sum_{i=1}^n i^2$
# -
# ---
# ## Part D (2 points)
#
# Find a usecase for your `sum_of_squares` function and implement that usecase in the cell below.
# + nbgrader={"grade": true, "grade_id": "sum_of_squares_application", "locked": false, "points": 2.0, "schema_version": 3, "solution": true}
def pyramidal_number(n):
    """Return the n-th square pyramidal number: the number of stacked
    spheres in a square-based pyramid with n layers, which equals the
    sum of the first n squares."""
    return sum_of_squares(n)
|
nbgrader/tests/apps/files/test.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Use Python function to recognize hand-written digits with `ibm-watson-machine-learning`
#
# Create and deploy a function that receives HTML canvas image data from a web app and then processes and sends that data to a model trained to recognize handwritten digits.
# See: <a href="https://dataplatform.cloud.ibm.com/docs/content/analyze-data/ml-deployed-func-mnist-tutorial.html" target="_blank">MNIST function deployment tutorial</a>
#
# This notebook runs on Python 3.8.
#
# ## Learning goals
#
# The learning goals of this notebook are:
#
# - AI function definition
# - Store AI function
# - Deployment creation
#
# ## Contents
#
# This notebook contains the following parts:
# 1. [Setup](#setup)
# 2. [Get an ID for a model deployment](#step4)
# 3. [Get sample canvas data](#step5)
# 4. [Create a deployable function](#step6)
# 5. [Store and deploy the function](#step7)
# 6. [Test the deployed function](#step8)
# 7. [Clean up](#cleanup)
# <a id="setup"></a>
# ## 1. Set up the environment
#
# Before you use the sample code in this notebook, you must perform the following setup tasks:
#
# - Contact your Cloud Pack for Data administrator and ask them for your account credentials
# ### Connection to WML
#
# Authenticate the Watson Machine Learning service on IBM Cloud Pack for Data. You need to provide platform `url`, your `username` and `api_key`.
username = 'PASTE YOUR USERNAME HERE'
api_key = 'PASTE YOUR API_KEY HERE'
url = 'PASTE THE PLATFORM URL HERE'
wml_credentials = {
"username": username,
"apikey": api_key,
"url": url,
"instance_id": 'openshift',
"version": '4.0'
}
# Alternatively you can use `username` and `password` to authenticate WML services.
#
# ```
# wml_credentials = {
# "username": ***,
# "password": ***,
# "url": ***,
# "instance_id": 'openshift',
# "version": '4.0'
# }
#
# ```
# ### Install and import the `ibm-watson-machine-learning` package
# **Note:** `ibm-watson-machine-learning` documentation can be found <a href="http://ibm-wml-api-pyclient.mybluemix.net/" target="_blank" rel="noopener no referrer">here</a>.
# !pip install -U ibm-watson-machine-learning
# +
from ibm_watson_machine_learning import APIClient
client = APIClient(wml_credentials)
# -
# ### Working with spaces
#
# First of all, you need to create a space that will be used for your work. If you do not have space already created, you can use `{PLATFORM_URL}/ml-runtime/spaces?context=icp4data` to create one.
#
# - Click New Deployment Space
# - Create an empty space
# - Go to space `Settings` tab
# - Copy `space_id` and paste it below
#
# **Tip**: You can also use SDK to prepare the space for your work. More information can be found [here](https://github.com/IBM/watson-machine-learning-samples/blob/master/cpd4.0/notebooks/python_sdk/instance-management/Space%20management.ipynb).
#
# **Action**: Assign space ID below
space_id = 'PASTE YOUR SPACE ID HERE'
# You can use `list` method to print all existing spaces.
client.spaces.list(limit=10)
# To be able to interact with all resources available in Watson Machine Learning, you need to set **space** which you will be using.
client.set.default_space(space_id)
# ## 2. <a id="step4"></a> Get an ID for a model deployment
#
# The deployed function created in this notebook is designed to send payload data to a TensorFlow model created in the <a href="https://dataplatform.cloud.ibm.com/docs/content/analyze-data/ml-mnist-tutorials.html" target="_blank" rel="noopener noreferrer">MNIST tutorials</a>.
import os, wget, json
import numpy as np
import matplotlib.pyplot as plt
import requests
# ### Option 1: Use your own, existing model deployment
#
# If you already deployed a model while working through one of the following MNIST tutorials, you can use that model deployment:
# - <a href="https://dataplatform.cloud.ibm.com/docs/content/analyze-data/ml_dlaas_tutorial_tensorflow_experiment-builder.html" target="_blank" rel="noopener noreferrer">Experiment builder MNIST tutorial</a>
# - <a href="https://dataplatform.cloud.ibm.com/docs/content/analyze-data/ml_dlaas_tutorial_tensorflow_experiment-builder_hpo.html" target="_blank" rel="noopener noreferrer">Experiment builder (HPO) MNIST tutorial</a>
# - <a href="https://dataplatform.cloud.ibm.com/docs/content/analyze-data/ml-python-mnist-tutorial.html" target="_blank" rel="noopener noreferrer">Python client (notebook) MNIST tutorial</a>
# - <a href="https://dataplatform.cloud.ibm.com/docs/content/analyze-data/ml_dlaas_tutorial_tensorflow_cli.html" target="_blank" rel="noopener noreferrer">CLI MNIST tutorial</a>
# - <a href="https://dataplatform.cloud.ibm.com/docs/content/analyze-data/ml_dlaas_cli_with_hpo.html" target="_blank" rel="noopener noreferrer">CLI (HPO) MNIST tutorial</a>
#
# Paste the model deployment ID in the following cell.
#
# See: <a href="https://dataplatform.cloud.ibm.com/docs/content/analyze-data/ml-get-endpoint-url.html" target="_blank" rel="noopener noreferrer">Looking up an online deployment ID</a>
#
# Paste the ID of your existing MNIST model deployment here (see the
# "Looking up an online deployment ID" link above), or leave it empty and
# run Option 2 below to deploy the sample model instead.
# NOTE(review): a leftover loop that searched the space for an unrelated
# deployment named 'Scikit German Risk Deployment WML V4' (storing its ID in
# an unused variable) was removed — it made a needless API call and never
# affected model_deployment_id.
model_deployment_id = ""
# ### Option 2: Download, store, and deploy a sample model
# You can deploy a sample model and get its deployment ID by running the code in the following four cells.
# +
# Download a sample model to the notebook working directory
sample_saved_model_filename = 'mnist-tf-hpo-saved-model.tar.gz'
url = 'https://github.com/IBM/watson-machine-learning-samples/raw/master/cpd4.0/models/tensorflow/mnist/' + sample_saved_model_filename
if not os.path.isfile(sample_saved_model_filename):
wget.download(url)
# +
# Look up software specification for the MNIST model
sofware_spec_uid = client.software_specifications.get_id_by_name("default_py3.8")
# +
# Store the sample model in your Watson Machine Learning repository
metadata = {
client.repository.ModelMetaNames.NAME: 'Saved MNIST model',
client.repository.ModelMetaNames.TYPE: 'tensorflow_2.4',
client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: sofware_spec_uid
}
model_details = client.repository.store_model(
model=sample_saved_model_filename,
meta_props=metadata
)
# -
model_details
# +
# Get published model ID
published_model_uid = client.repository.get_model_uid(model_details)
# +
# Deploy the stored model
metadata = {
client.deployments.ConfigurationMetaNames.NAME: "MNIST saved model deployment",
client.deployments.ConfigurationMetaNames.ONLINE: {}
}
model_deployment_details = client.deployments.create(published_model_uid, meta_props=metadata)
# +
# Get the ID of the model deployment just created
model_deployment_id = client.deployments.get_uid(model_deployment_details)
print(model_deployment_id)
# -
# ## <a id="step5"></a> 3. Get sample canvas data
#
# The deployed function created in this notebook is designed to accept RGBA image data from an HTML canvas object in one of these sample apps:
#
# - <a href="https://dataplatform.cloud.ibm.com/docs/content/analyze-data/ml-nodejs-mnist-tutorial.html" target="_blank" rel="noopener noreferrer">Node.js MNIST sample app</a>
# - <a href="https://dataplatform.cloud.ibm.com/docs/content/analyze-data/ml-python-flask-mnist-tutorial.html" target="_blank" rel="noopener noreferrer">Python Flask MNIST sample app</a>
#
# Run the following cells to download and view sample canvas data for testing the deployed function.
# ### 3.1 Download sample data file
# +
# Download the file containing the sample data
sample_canvas_data_filename = 'mnist-html-canvas-image-data.json'
url = 'https://github.com/IBM/watson-machine-learning-samples/raw/master/cpd4.0/data/mnist/' + sample_canvas_data_filename
if not os.path.isfile(sample_canvas_data_filename):
wget.download(url)
# +
# Load the sample data from the file into a variable
with open(sample_canvas_data_filename) as data_file:
sample_cavas_data = json.load(data_file)
# -
# ### 3.2 View sample data
# +
# View the raw contents of the sample data
print("Height (n): " + str(sample_cavas_data["height"]) + " pixels\n")
print("Num image data entries: " + str(len( sample_cavas_data["data"])) + " - (n * n * 4) elements - RGBA values\n")
print(json.dumps(sample_cavas_data, indent=3)[:75] + "...\n" + json.dumps(sample_cavas_data, indent=3)[-50:])
# +
# See what hand-drawn digit the sample data represents
rgba_arr = np.asarray(sample_cavas_data["data"]).astype('uint8')
n = sample_cavas_data["height"]
plt.figure()
plt.imshow( rgba_arr.reshape(n, n, 4))
plt.xticks([])
plt.yticks([])
plt.show()
# -
# ## <a id="step6"></a> 4. Create a deployable function
#
# The basics of creating and deploying functions in Watson Machine Learning are given here:
# - <a href="https://dataplatform.cloud.ibm.com/docs/content/analyze-data/ml-deploy-functions.html" target="_blank" rel="noopener noreferrer">Creating and deploying functions</a>
# - <a href="https://dataplatform.cloud.ibm.com/docs/content/analyze-data/ml-functions.html" target="_blank" rel="noopener noreferrer">Implementation details of deployable functions</a>
#
# ### 4.1 Define the function
# 1. Define a Python closure with an inner function named "score".
# 2. Use default parameters to save your Watson Machine Learning credentials and the model deployment ID with the deployed function.
# 3. Process the canvas data (reshape and normalize) and then send the processed data to the model deployment.
# 4. Process the results from the model deployment so the deployed function returns simpler results.
# 5. Implement error handling so the function will behave gracefully if there is an error.
# +
# Credentials, space ID and model deployment ID are captured as the closure's
# default argument so they are stored alongside the deployed function.
ai_parms = {"wml_credentials": wml_credentials, "space_id": space_id, "model_deployment_id": model_deployment_id}
def my_deployable_function( parms=ai_parms ):
    """Build and return a `score` closure for WML deployment.

    The returned score(payload) converts HTML canvas RGBA data into a
    1 x 784 normalized grayscale vector and forwards it to the MNIST model
    deployment identified by parms["model_deployment_id"].
    """
    def getRGBAArr(canvas_data):
        # Rebuild the flat RGBA byte list into an (n, n, 4) uint8 image array.
        import numpy as np
        dimension = canvas_data["height"]
        rgba_data = canvas_data["data"]
        rgba_arr = np.asarray(rgba_data).astype('uint8')
        return rgba_arr.reshape(dimension, dimension, 4)
    def getNormAlphaList(img):
        # Keep only the alpha channel (the drawn strokes) and normalize to [0, 1].
        import numpy as np
        alpha_arr = np.array(img.split()[-1])
        norm_alpha_arr = alpha_arr / 255
        norm_alpha_list = norm_alpha_arr.reshape(1, 784).tolist()
        return norm_alpha_list
    def score(function_payload):
        try:
            from PIL import Image
            canvas_data = function_payload["input_data"][0]["values"][0] # Read the payload received by the function
            rgba_arr = getRGBAArr(canvas_data) # Create an array object with the required shape
            img = Image.fromarray(rgba_arr, 'RGBA') # Create an image object that can be resized
            sm_img = img.resize((28, 28), Image.LANCZOS) # Resize the image to 28 x 28 pixels
            alpha_list = getNormAlphaList(sm_img) # Create a 1 x 784 array of values between 0 and 1
            model_payload = {"input_data": [{"values" : alpha_list}]} # Create a payload to be sent to the model
            #print( "Payload for model:" ) # For debugging purposes
            #print( model_payload ) # For debugging purposes
            from ibm_watson_machine_learning import APIClient
            client = APIClient(parms["wml_credentials"])
            client.set.default_space(parms["space_id"])
            # Return the raw scoring response; the predicted class is available
            # to callers at model_result["predictions"][0]["values"][0].
            # (A previously unused local that extracted it was removed.)
            model_result = client.deployments.score(parms["model_deployment_id"], model_payload)
            return model_result
        except Exception as e:
            # Deployed functions must always return a valid scoring payload,
            # so errors are wrapped in the response instead of re-raised.
            return {'predictions': [{'values': [repr(e)]}]}
    return score
# -
# ### 4.2 Test locally
# You can test your function in the notebook before deploying the function.
#
# To see debugging info:
# 1. Uncomment the print statements inside the score function
# 2. Rerun the cell defining the function
# 3. When you rerun this cell, you will see the debugging info
# +
# Pass the sample canvas data to the function as a test
func_result = my_deployable_function()({"input_data": [{"values": [sample_cavas_data]}]})
print(func_result)
# -
# ## <a id="step7"></a> 5. Store and deploy the function
# Before you can deploy the function, you must store the function in your Watson Machine Learning repository.
# +
# Look up software specification for the deployable function
sofware_spec_uid = client.software_specifications.get_id_by_name("default_py3.8")
# +
# Store the deployable function in your Watson Machine Learning repository
meta_data = {
client.repository.FunctionMetaNames.NAME: 'MNIST deployable function',
client.repository.FunctionMetaNames.SOFTWARE_SPEC_UID: sofware_spec_uid
}
function_details = client.repository.store_function(meta_props=meta_data, function=my_deployable_function)
# +
# Get published function ID
function_uid = client.repository.get_function_uid(function_details)
# +
# Deploy the stored function
metadata = {
client.deployments.ConfigurationMetaNames.NAME: "MNIST function deployment",
client.deployments.ConfigurationMetaNames.ONLINE: {}
}
function_deployment_details = client.deployments.create(function_uid, meta_props=metadata)
# -
# ## <a id="step8"></a> 6. Test the deployed function
#
# You can use the Watson Machine Learning Python client or REST API to send data to your function deployment for processing in exactly the same way you send data to model deployments for processing.
# +
# Get the endpoint URL of the function deployment just created
function_deployment_id = client.deployments.get_uid(function_deployment_details)
function_deployment_endpoint_url = client.deployments.get_scoring_href(function_deployment_details)
print(function_deployment_id)
print(function_deployment_endpoint_url)
# -
payload = {"input_data": [{"values": [sample_cavas_data]}]}
# ### 6.1 Watson Machine Learning Python client
result = client.deployments.score(function_deployment_id, payload)
if "error" in result:
print(result["error"])
else:
print(result)
# ## <a id="cleanup"></a> 7. Clean up
# If you want to clean up all created assets:
# - experiments
# - trainings
# - pipelines
# - model definitions
# - models
# - functions
# - deployments
#
# please follow up this sample [notebook](https://github.com/IBM/watson-machine-learning-samples/blob/master/notebooks/python_sdk/instance-management/Machine%20Learning%20artifacts%20management.ipynb).
# ## Summary and next steps
# In this notebook, you created a Python function that receives HTML canvas image data and then processes and sends that data to a model trained to recognize handwritten digits.
#
# To learn how you can use this deployed function in a web app, see:
# - <a href="https://dataplatform.cloud.ibm.com/docs/content/analyze-data/ml-nodejs-mnist-tutorial.html" target="_blank" rel="noopener noreferrer">Sample Node.js app that recognizes hand-drawn digits</a>
# - <a href="https://dataplatform.cloud.ibm.com/docs/content/analyze-data/ml-python-flask-mnist-tutorial.html" target="_blank" rel="noopener noreferrer">Sample Python Flask app that recognizes hand-drawn digits</a>
# ### <a id="authors"></a>Authors
#
# **<NAME>** is a member of the IBM Watson Studio Content Design team in Canada.
#
# <hr>
# Copyright © IBM Corp. 2018-2021. This notebook and its source code are released under the terms of the MIT License.
|
cpd4.0/notebooks/python_sdk/deployments/python_function/Use function to recognize hand-written digits.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# #%matplotlib notebook
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
fig = plt.figure()
# fig.set_dpi(100)
fig.set_size_inches(6, 6)
ax = plt.axes(xlim=(0, 10), ylim=(0, 10))
patch = plt.Circle((5, -5), 0.75, fc='y')
def init():
    # Reset the circle to the center and attach it to the axes. FuncAnimation
    # calls this once before the first frame; returning the artist tuple is
    # required for blit=True.
    patch.center = (5, 5)
    ax.add_patch(patch)
    return patch,
def animate(i):
    """Frame callback: move the circle along a radius-3 circular path
    centered at (5, 5); i*4 degrees per frame gives one revolution
    every 90 frames."""
    # (A dead `x, y = patch.center` unpack was removed — both values were
    # immediately overwritten below.)
    x = 5 + 3 * np.sin(np.radians(i * 4))
    y = 5 + 3 * np.cos(np.radians(i * 4))
    patch.center = (x, y)
    return patch,
anim = animation.FuncAnimation(fig, animate,
init_func=init,
frames=360,
interval=20,
blit=True)
plt.show()
# -
|
drafts/Animation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # <NAME>'s First Ascend to Everest
#
# The question was how close the GPX track from the Strava activity https://www.strava.com/activities/1022238076 got to the summit of Mt. Everest.
# This repository holds the gpx track, downloaded with the bookmarklet from https://mapstogpx.com/strava/ and this notebook does some processing on it.
#
# ## TL;DR
#
# With [WGS84](https://en.wikipedia.org/wiki/World_Geodetic_System) and distance calculation by [pyproj.Geod.inv](https://jswhit.github.io/pyproj/pyproj.Geod-class.html#inv) the calculated distance is 54 meters.
# +
# %matplotlib inline
import gpxpy #.parser as parser
from pandas import DataFrame
import geopandas as gpd
import shapely.wkt
from shapely.geometry import Point
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('bmh')
# +
gpx_file = open('../../../data//strava_full_activities-1022238076.gpx', 'r')
gpx = gpxpy.parse(gpx_file)
# +
# create a Pandas DataFrame as a starter
data = []
for track in gpx.tracks:
for segment in track.segments:
for point in segment.points:
data.append([point.longitude, point.latitude, point.elevation, point.time, point.name])
columns = ['Longitude', 'Latitude', 'Altitude', 'Time', 'Name']
df = DataFrame(data, columns=columns)
df.head()
# -
# Now create a GeoPandas DataFrame from it
#https://gis.stackexchange.com/a/174168/87254 et al
gdf = gpd.GeoDataFrame(df.drop(['Longitude', 'Latitude'], axis=1),
crs={'init': 'epsg:4326'},
geometry=df.apply(lambda row: shapely.geometry.Point((row.Longitude, row.Latitude)), axis=1))
gdf.head()
gdf.plot()
# +
from shapely.ops import nearest_points
gdfmp = gdf.unary_union
everest = Point(86.925278, 27.988056)
nearestpoints = nearest_points(everest, gdfmp)
# -
import pyproj
geod = pyproj.Geod(ellps='WGS84')
angle1,angle2,distance = geod.inv(nearestpoints[0].x, nearestpoints[0].y, nearestpoints[1].x, nearestpoints[1].y)
print("The nearest points are located {0:8.4f} meters from each other.".format(distance))
# ## appendix
#
# Not sure if a more local CRS would bring a much different result, but my morning train commute comes to an end.
# https://gis.stackexchange.com/questions/126546/transformation-parameters-for-everest-datum-to-wgs84 might hold the next step.
#
|
application/physical/notebooks/MrJornetEverest.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# %matplotlib inline
from __future__ import division
from os.path import join, isfile
import pandas as pd
import numpy as np
conditions = ['Physics', 'NonPhysics']
tr = 2.
n_vols = 167
cutoff = .3333333333333333333333
hrf = np.array([0, 0.0866, 0.3749, 0.3849, 0.2161, 0.0769,
0.0016, -0.0306, -0.0373, -0.0308, -0.0205,
-0.0116, -0.0058, -0.0026, -0.0011, -0.0004, -0.0001])
sum_f = '/home/data/nbc/physics-learning/physics-learning/fci-motion-censor-summary.txt'
sum_df = pd.read_csv(sum_f, sep='\t')
sum_df = pd.melt(sum_df, value_vars=['fci-0', 'fci-1', 'fci-2'],
value_name='prop_bad',
id_vars=['subject', 'session'], var_name='run')
# For every subject/session/run, measure what fraction of each condition's
# HRF-predicted task volumes were censored for motion, and report runs that
# exceed the cutoff.
for sub_num in sum_df['subject'].unique():
    sub = str(sub_num)
    sub_df = sum_df.loc[sum_df['subject']==sub_num]
    for ss in sub_df['session'].unique():
        sess = 'session-{0}'.format(int(ss))
        sess_df = sub_df.loc[sub_df['session']==ss]
        for run in sess_df['run'].unique():
            fold = join('/home/data/nbc/physics-learning/data/behavioral-data/vectors',
                        sub, sess, 'fci/')
            fold2 = join('/home/data/nbc/physics-learning/data/first-level',
                         sub, sess, 'fci/', run)
            f2 = join(fold2, '{0}-{1}-motion-outliers-censored.txt'.format(sub, sess))
            if isfile(f2):
                censor_data = np.loadtxt(f2)
                # BUG FIX: the original `censor_data.shape == 2` compared a
                # tuple to an int and was always False, so 2-D censor matrices
                # (one column per outlier) were never collapsed. Use ndim to
                # detect them and merge into a single censor vector.
                if censor_data.ndim == 2:
                    censor_data = np.sum(censor_data, axis=1)
                censor_vols = np.where(censor_data)[0]
                for c in conditions:
                    f = join(fold, '{0}-{1}.txt'.format(run, c))
                    df = pd.read_csv(f, sep='\t', names=['onset', 'duration', 'amplitude'])
                    temp_dat = np.zeros(n_vols)
                    # convert vectors from secs to trs
                    df['onset'] = np.round(df['onset'] / tr).astype(int)
                    df['duration'] = np.round(df['duration'] / tr).astype(int)
                    # Build a boxcar regressor for the condition, then convolve
                    # with the HRF to find which volumes carry task signal.
                    for i in range(df.shape[0]):
                        s = df.loc[i]['onset']
                        e = s + df.iloc[i]['duration']
                        temp_dat[s:e] = 1
                    pred_dat = np.convolve(temp_dat, hrf)[:n_vols]
                    cond_vols = np.where(pred_dat!=0)[0]
                    # Fraction of this condition's volumes that were censored.
                    inter = np.intersect1d(cond_vols, censor_vols)
                    prop_bad = len(inter) / len(cond_vols)
                    if prop_bad >= cutoff:
                        print('{0} {1} {2} {3}: {4}'.format(sub, sess, run, c, prop_bad))
            else:
                # No censor file means no motion outliers for this run; skip.
                pass
# for c in conditions:
# This shouldn't be here. If the censored vols file doesn't
# exist, it should skip that run, since there are no
# outliers. Currently, it is comparing the prop_bad from
# the *last* run to the cutoff
# if prop_bad >= cutoff:
# print('{0} {1} {2} {3}: 0'.format(sub, sess, run, c))
# +
|
FCI/fmri-processing-scripts/identify_bad_fci_blocks.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # array 对象
#
# 2.1 ndarray 对象
# 2.1.1 创建 array
# +
import numpy as np
from __future__ import print_function
#print(help(print))
debug = True
#debug = False
def printd(*args):
    """Debug print helper.

    Prints the received arguments *as a tuple* (note: `print(args)`, not
    `print(*args)`, so output is parenthesised) only when the module-level
    `debug` flag is truthy.
    """
    if debug:
        print(args)
a = np.array((1,2,3,4), dtype = np.int16)
b = np.array([5,6,7,8], dtype = np.int32)
c = np.array([[1,2,3,4], [5,6,7,8], [9, 10, 11, 12]])
printd(a.shape, b.shape, c.shape)
c.shape = (4,3) # 按照元素在内存中的存储顺序重新分配行和列
c.shape = (1, -1) # 第二个维度自动推算
d = c.reshape(2,-1) # 使用reshape()方法可以创建指定形状的新数组,而原数组的形状保持不变.需要注意的是c和d会共享存储空间.
printd('c:\n',c,'\nd:\n',d)
# -
# 2.1.2 元素类型
# +
l = [key for key,val in np.typeDict.items() if val is np.float64]
print(l)
s = [0,1,2,3,4,5,6,7,8,9]
l = [i for i in s if i % 2== 0]
l = [(key, val) for key,val in np.typeDict.items()]
print(l)
print(set(np.typeDict.values()))
printd(a.dtype.type, b.dtype.type, c.dtype.type)
n = np.int16(200) # 创建一个16位的符号整数对象
printd(n*n)
v1 = 3.14
v2 = np.float64(v1) # 创建一个16位的符号整数对象
# %timeit v1*v1
# %timeit v2*v2
# 使用 astype() 方法可以对数组的元素类型进行转换
t1 = np.array([1,2,3,4], dtype = np.float)
t2 = np.array([1,2,3,4], dtype = np.complex)
t3 = t1.astype(np.int32)
t4 = t2.astype(np.complex64)
printd(t1.dtype.type, t2.dtype.type, t3.dtype.type, t4.dtype.type)
# -
# 2.1.3 自动生成数组
# (1) arange(), linspace(), logspace()
# logspace()中默认的 base 为 10;
# (2) zeros() ones() empty() full()
# 其中 empty() 只分配数组所使用的内存, 不对数组元素进行初始化操作, 因此其运行速度速度是最快的.
# zeros()将数组初始化为 0;
# ones()将数组初始化为 1;
# full() 将数组指定为特定值;
# (3) zeros_like() ones_like() empty_like() full_like()
# 这些函数创建与参数数组形状和类型相同的数组, 因此, zeros_like(a) 和 zeros(a.shape, a.dtype)的效果相同.
# (4) fromebuffer() fromstring() fromfile()等函数可以从字节序列或文件创建数组.
# fromstring() 会复制字符串的字节序列的一份副本;
# frombuffer() 创建的数组和字符串共享存储空间;
# 由于字符串是只读的(不可变)对象,因此不能对数组元素进行修改.
# (5) fromfunction()
# 可以先定义一个使用下标计算数值的函数,然后用fromfunction()调用此函数创建数组.
# +
# arange(), linspace(), logspace()
a = np.arange(0,1,0.1)
b = np.linspace(0,1,10,endpoint = False)
c = np.logspace(0,2,10, base = 2) # 默认的 base 为 10, 这里重写为 2, 从 10^0 到 10^2
printd(a, '\n', b, '\n', c)
# zeros() ones() empty()
# 其中 empty() 只分配数组所使用的内存, 不对数组元素进行初始化操作, 因此其运行速度速度是最快的.
a = np.empty((2,3), np.int)
b = np.zeros((4,5), np.int)
c = np.ones((3,4), np.int)
d = np.full(4, np.pi) # np.pi 就是 π
printd(a,'\n', b, '\n', c, '\n', d)
# zeros_like() ones_like() empty_like() full_like()
# 等函数创建与参数数组形状和类型相同的数组, 因此, zeros_like(a) 和 zeros(a.shape, a.dtype)的效果相同
# fromebuffer() fromstring() fromfile()等函数可以从字节序列或文件创建数组.
s = "abcdefgh"
s1 = np.fromstring(s, dtype = np.int8) # 创建一个 8 位的整数数组, 每个字符表示一个元素
s2 = np.fromstring(s, dtype = np.int16) # 创建一个 16 位的整数数组, 两个字符表示一个元素
printd(s1, s2)
# fromfunction()
def func(i, j):
    """Multiplication-table entry for zero-based indices: (i+1) * (j+1).

    Also works element-wise when `np.fromfunction` passes index arrays,
    since `+` and `*` broadcast.
    """
    row = i + 1
    col = j + 1
    return row * col
a = np.fromfunction(func, (9,9))
printd(a)
# -
# 2.1.4 存取元素
# (1) 切片存取;
# (2) 整数列表存取, 使用列表中的每个元素作为下标;
# (3) 整数数组存取, 当数组为一维数组时,效果和整数列表相同. 当下标数组为多维数组时. 得到的也是和下标数组形状相同的数组;
# (4) 布尔数组存取, 注意区别于布尔列表.它获得的数组不和原始数组共享数据内存;
# 在numpy 1.10 之后的版本中, 布尔列表会被当做布尔数组.
# 布尔数组一般不是手工产生,而是使用布尔运算函数ufunc()函数产生.
# +
a = np.arange(10)
# 切片存取
print(a[:-1]) # 选取除最后一个元素之外的所有元素
print(a[::2]) # 正向每隔一个元素选择
print(a[::-2]) # 逆向每隔一个元素选择
print(a[4:1:-2]) # 从第五个元素开始切片选择
a[2:4] = 10, 12 # 左闭右开
# 整数列表存取
idx1 = [3,3,1,8]
idx2 = [3,-3,1,8]
print(a[idx1], a[idx2])
# 布尔数组存取
x = np.random.randint(0, 10, 6) # 生成数组长度为 6 , 元素值为 0 到 9 的随机数组.
print(x, x[x > 5])
# -
# python 的广播原理
a = np.arange(0, 60, 10).reshape(-1,1)
b = np.arange(0, 6)
c = a + b
printd(a, "\n", b, "\n", c)
printd(c[1,2], c[(1,2)])
print(c[0, 3:5]) # 第一行, 第四个到第五个元素
print(c[4:, 4:]) # 从第五行和第五列的元素开始到右下角
print(c[2::2, ::2]) # 从第三行开始每隔一行选取,从第一列开始,每隔一列选取.
# 切片对象
# (1) 单独生成切片对象要使用slice()来创建, 三个参数分别为开始值, 结束值, 间隔步长.
# 当这些值需要省略时,可以使用 None.
# 例如 (slice(None, None, None), 2) 和 (:, 2) 相同.
# (2) 使用 s_ 对象来帮助我们创建数组下标, s_ 是 IndexExpression 类的一个对象.
# np.s_[::2, 2:]
#
#
#
np.s_[::2, 2:]
# 结构数组
# ```python
# person_type = np.dtype({
# 'names': ['name', 'age', 'weight'],
# 'formats': ['|S30', '<i4', '<f4']
#
# }, align = True)
# ```
# 这里使用类型字符串定义字段类型:
# - 'S30': 长度为30个字节的字符串类型;
# - 'i': 32 位的整数类型;
# - 'f': 32 位的单精度浮点类型, 相当于np.float32;
# 还可以使用字节顺序字段:
# - '|': 忽视字节顺序;
# - '>': 高位字节在前, 即大端模式(big-endian);
# - '<': 低位字节在前, 即小端模式(little-endian);
# (2) a.tostring() 和 a.tofile() 可以将数组 a 以二进制的方式转换成字符串或写入文件.
# (3) %%file 为 IPython 的魔法指令, 它将该单元格中的文本保存成文件 read_srtuct_array.c .
# +
# 使用类型字符串定义字段类型
person_type = np.dtype({
'names': ['name', 'age', 'weight'],
'formats': ['|S30', '<i4', '<f2']
}, align = True)
a = np.array([("zhang", 23, 75.5), ("Wang", 24, 65.2)], dtype=person_type)
print(a.dtype, a)
# 访问字段值
b = a[0]
print(b['name'], b['age'], b['weight'])
b = a[1]
print(b['name'], b['age'], b['weight'])
# 保存到二进制文件
a.tofile("test.bin")
# +
# %%file read_struct_array.c
#include <stdio.h>
struct person{
char name[30];
int age;
float weight;
};
struct person p[3];
void main()
{
FILE *fp = NULL;
int i = 0;
fp = fopen("test.bin", "rb");
fread(p, sizeof(struct person), 2, fp);
fclose(fp);
for(i = 0; i < 2; i++){
printf("%s, %d, %.4f\n", p[i].name, p[i].age, p[i].weight);
}
}
# -
# ## ufunc 函数
# ufunc 是 universal function 函数的缩写, 它是一种能对数组的每个元素进行运算的函数.
# 1. np.sin() 函数
# np.sin() 是一个 ufunc 函数, 可以通过 out 参数来指定计算结果的保存变量.
# 对于数组, np.sin() 函数比 math.sin() 函数快 10 倍多.
# 对于单个数值, np.sin() 的计算速度只有 math.sin() 的 1/6.
# math.sin() 返回的是标准的 float 类型; 而 np.sin() 的返回值是 np.float64.
# 通过使用数组的 item() 方法可以直接以标准的 python 数值类型返回数组中的单个元素.
# 2. np.<op>.reduce(array, axis = 0, dtype = None)
# 沿着参数 axis 指定的轴对数组进行操作,相当于将 <op> 运算符插入到沿着 axis 轴的所有元素之间.
# 3. accumulate()
# 和 reduce() 类似, 只是它返回的数组和输入数组的形状相同,保存所有的中间计算结果.
# 4. outer()
# 是通过广播的方式计算出来的.
#
# +
# sin()
import math
x = np.linspace(0, 2*np.pi, 10)
# %time y = np.sin(x)
# %time y = [math.sin(t) for t in x]
print(x)
y is x
print(x.item(1), type(x.item(1)), type(x[1]))
## reduce()
x = np.linspace(0, 9, 10)
y = np.array([[1,2,3], [4,5,6]], dtype = np.int)
r1 = np.add.reduce(x)
r2 = np.add.reduce(y, axis = 1) #(1+2+3), (4+5+6)
print(r1, "\n", r2)
## accumulate
r1 = np.add.accumulate(x)
r2 = np.add.accumulate(y, axis = 1) #(1+2+3), (4+5+6)
print(r1, "\n", r2)
# -
# ## 四则运算
#
# ### 1. ufunc 函数
#
# np.add() # np.add(a, b, a) => a += b
# np.substract()
# np.multiply()
# np.divide()
# np.true_divide()
# np.floor_divide()
# np.negtive()
# np.power()
# np.remainder() = np.mod() 取模.
x = np.linspace(0, 9, 10)
print(x, np.power(x,x))
print(np.remainder(x,x))
# ## 布尔数组作为下标
#
# 当使用布尔数组直接作为下标对象或者元组下标对象中有布尔数组时, 都相当于用 nonzero()将布尔数组转换为一组整数数组, 然后使用整数数组进行下标运算.
#
# nonzero(a) 返回 a 中值不为 0 的元素的下标, 他的返回值是一个长度为 a.ndim(数组 a 的轴的个数)的元组. 元组的每一个元素都是一个整数数组, 其值为非零元素的下标在对应轴上的值.
#
# 因此, 当布尔数组直接作为下标时, 相当于使用由 nonzero()转换之后的元组作为下标对象. 对于下标对象是元组并且其中有布尔数组时, 相当于将布尔数组展开为由 nonzero() 转换之后的各个整数数组.
b1 = np.array([True, False, True, False])
np.nonzero(b1)
print(type(np.nonzero(b1)[0]))
# +
# b[0][0], b[2][0], b[1][0] 这三个元素不为 0
b2 = np.array([[True, False, True], [True, True, False]])
np.nonzero(b2)
a = np.arange(2*3).reshape(2,3)
print(a)
print(a[b2])
print(a[np.nonzero(b2)])
# 区分 np.nonzero(a) 和 np.nonzero(b2) 之间的区别
print(a[np.nonzero(a)])
# -
|
develop_language/python/doc/science_compute/ipynb_files/chapter2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp core
# -
# # Core
#
# > API details.
#hide
from nbdev.showdoc import *
# export
from attrdict import AttrDict
from fastcore.basics import Path
import subprocess
# ## Running bash commands in Python
# export
def run_bash(cmd, return_output=True):
    """Run a shell command, print its stdout, and optionally return it.

    Parameters
    ----------
    cmd : str
        Command line; split on whitespace (no shell-quoting support).
    return_output : bool
        If True, return the decoded output as a string.

    NOTE(review): stderr is not piped, so `error` from communicate() is
    always None here and the first branch is the one normally taken.
    """
    process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    if not error:
        print(output.decode('utf-8'))
        if return_output:
            return output.decode('utf-8')
    else:
        print(error.decode('utf-8'))
        if return_output:
            # Bug fix: was `error.decoe('utf-8')` (AttributeError typo).
            return error.decode('utf-8')
out = run_bash('ls')
# ## Setting up in colab
#
# If you're in colab, you may not have the proper packages installed.
# Running this function will set you up to work in colab.
# export
def colab_setup():
    """
    Sets up for development in Google Colab.
    Repo must be cloned in drive/colab/ directory.

    When not running in Colab (i.e. `google.colab` is not importable),
    simply reports the working directory and returns.
    """
    # Bug fix: the original wrapped the *entire* setup in a bare `except:`,
    # so a failure in drive.mount/chdir/pip was silently reported as
    # "Running locally". Only the import is expected to fail locally.
    try:
        from google.colab import drive
    except ImportError:
        import os
        print("Working in", os.getcwd())
        print('Running locally')
        return
    print('Running in colab')
    drive.mount('/content/drive', force_remount=True)
    _ = run_bash("pip install -Uqq nbdev")
    import os
    os.chdir('/content/drive/MyDrive/colab/dessiccate/')
    print("Working in", os.getcwd())
    _ = run_bash('pip install -e . --quiet')
colab_setup()
# ## Path
#
# Often, you want to create a new directory.
# Even if all you have is a file path, you can now call `mkdir_if_not_exists` to create the parent directory.
# +
# export
def mkdir_if_not_exists(self, parents=True):
    """
    Create the directory of the path if it doesn't exist.

    If the path is a file, will not make the file itself,
    but will create the parent directory.

    Parameters
    ----------
    parents : bool
        Also create missing intermediate directories (default True).
    """
    # Bug fix: the body referenced an undefined name `path`; the
    # monkey-patched method receives the Path instance as `self`.
    p = self if self.is_dir() else self.parent
    if not p.exists():
        p.mkdir(parents=parents)
Path.mkdir_if_not_exists = mkdir_if_not_exists
# -
path = Path('testdir/test.txt')
assert not path.exists()
path.mkdir_if_not_exists()
assert not path.exists()
assert path.parent.exists()
path.parent.rmdir()
assert not path.parent.exists()
|
00_core.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # DataFrames
#
# DataFrames are the workhorse of pandas and are directly inspired by the R programming language. We can think of a DataFrame as a bunch of Series objects put together to share the same index. Let's use pandas to explore this topic!
import pandas as pd
import numpy as np
from numpy.random import randn
np.random.seed(101)
df = pd.DataFrame(randn(5,4),index='A B C D E'.split(),columns='W X Y Z'.split())
df
# ## Selection and Indexing
#
# Let's learn the various methods to grab data from a DataFrame
df['W']
# Pass a list of column names
df[['W','Z']]
# SQL Syntax (NOT RECOMMENDED!)
df.W
# DataFrame Columns are just Series
type(df['W'])
# **Creating a new column:**
df['new'] = df['W'] + df['Y']
df
# ** Removing Columns**
df.drop('new',axis=1)
# Not inplace unless specified!
df
df.drop('new',axis=1,inplace=True)
df
# Can also drop rows this way:
df.drop('E',axis=0)
# ** Selecting Rows**
df.loc['A']
# Or select based off of position instead of label
df.iloc[2]
# ** Selecting subset of rows and columns **
df.loc['B','Y']
df.loc[['A','B'],['W','Y']]
# ### Conditional Selection
#
# An important feature of pandas is conditional selection using bracket notation, very similar to numpy:
df
df>0
df[df>0]
df[df['W']>0]
df[df['W']>0]['Y']
df[df['W']>0][['Y','X']]
# For two conditions you can use | and & with parenthesis:
df[(df['W']>0) & (df['Y'] > 1)]
# ## More Index Details
#
# Let's discuss some more features of indexing, including resetting the index or setting it something else. We'll also talk about index hierarchy!
df
# Reset to default 0,1...n index
df.reset_index()
newind = 'CA NY WY OR CO'.split()
df['States'] = newind
df
df.set_index('States')
df
df.set_index('States',inplace=True)
df
# ## Multi-Index and Index Hierarchy
#
# Let us go over how to work with Multi-Index, first we'll create a quick example of what a Multi-Indexed DataFrame would look like:
# Index Levels
outside = ['G1','G1','G1','G2','G2','G2']
inside = [1,2,3,1,2,3]
hier_index = list(zip(outside,inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
hier_index
outside
df = pd.DataFrame(np.random.randn(6,2),index=hier_index,columns=['A','B'])
df
# Now let's show how to index this! For index hierarchy we use df.loc[], if this was on the columns axis, you would just use normal bracket notation df[]. Calling one level of the index returns the sub-dataframe:
df.loc['G1']
df.loc['G1'].loc[1]
df.index.names
df.index.names = ['Group','Num']
df
df.xs('G1')
df.xs(['G1',1])
df.xs(1,level='Num')
# # Great Job!
|
Day3&4/Python-Part1/Pandas/DataFrames.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the MIT License.
# 
# # Automated Machine Learning
#
# _**Forecasting with grouping using Pipelines**_
#
# ## Contents
#
# 1. [Introduction](#Introduction)
# 2. [Setup](#Setup)
# 3. [Data](#Data)
# 4. [Compute](#Compute)
# 4. [AutoMLConfig](#AutoMLConfig)
# 5. [Pipeline](#Pipeline)
# 5. [Train](#Train)
# 6. [Test](#Test)
#
#
# ## Introduction
# In this example we use Automated ML and Pipelines to train, select, and operationalize forecasting models for multiple time-series.
#
# If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) first if you haven't already to establish your connection to the AzureML Workspace.
#
# In this notebook you will learn how to:
#
# * Create an Experiment in an existing Workspace.
# * Configure AutoML using AutoMLConfig.
# * Use our helper script to generate pipeline steps to split, train, and deploy the models.
# * Explore the results.
# * Test the models.
#
# It is advised you ensure your cluster has at least one node per group.
#
# An Enterprise workspace is required for this notebook. To learn more about creating an Enterprise workspace or upgrading to an Enterprise workspace from the Azure portal, please visit our [Workspace page.](https://docs.microsoft.com/azure/machine-learning/service/concept-workspace#upgrade)
#
# ## Setup
# As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments.
# +
import json
import logging
import warnings
import numpy as np
import pandas as pd
import azureml.core
from azureml.core.workspace import Workspace
from azureml.core.experiment import Experiment
from azureml.train.automl import AutoMLConfig
# -
# Accessing the Azure ML workspace requires authentication with Azure.
#
# The default authentication is interactive authentication using the default tenant. Executing the ws = Workspace.from_config() line in the cell below will prompt for authentication the first time that it is run.
#
# If you have multiple Azure tenants, you can specify the tenant by replacing the ws = Workspace.from_config() line in the cell below with the following:
# ```
# from azureml.core.authentication import InteractiveLoginAuthentication
# auth = InteractiveLoginAuthentication(tenant_id = 'mytenantid')
# ws = Workspace.from_config(auth = auth)
# ```
# If you need to run in an environment where interactive login is not possible, you can use Service Principal authentication by replacing the ws = Workspace.from_config() line in the cell below with the following:
# ```
# from azureml.core.authentication import ServicePrincipalAuthentication
# auth = auth = ServicePrincipalAuthentication('mytenantid', 'myappid', '<PASSWORD>')
# ws = Workspace.from_config(auth = auth)
# ```
# For more details, see aka.ms/aml-notebook-auth
# +
ws = Workspace.from_config()
ds = ws.get_default_datastore()
# choose a name for the run history container in the workspace
experiment_name = 'automl-grouping-oj'
# project folder
project_folder = './sample_projects/{}'.format(experiment_name)
experiment = Experiment(ws, experiment_name)
output = {}
output['SDK version'] = azureml.core.VERSION
output['Subscription ID'] = ws.subscription_id
output['Workspace'] = ws.name
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Project Directory'] = project_folder
output['Run History Name'] = experiment_name
pd.set_option('display.max_colwidth', -1)
outputDf = pd.DataFrame(data = output, index = [''])
outputDf.T
# -
# ## Data
# Upload data to your default datastore and then load it as a `TabularDataset`
from azureml.core.dataset import Dataset
# upload data to your default datastore
ds = ws.get_default_datastore()
ds.upload(src_dir='./data', target_path='groupdata', overwrite=True, show_progress=True)
# +
# load data from your datastore
data = Dataset.Tabular.from_delimited_files(path=ds.path('groupdata/dominicks_OJ_2_5_8_train.csv'))
data_test = Dataset.Tabular.from_delimited_files(path=ds.path('groupdata/dominicks_OJ_2_5_8_test.csv'))
data.take(5).to_pandas_dataframe()
# -
# ## Compute
#
# #### Create or Attach existing AmlCompute
#
# You will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.
# #### Creation of AmlCompute takes approximately 5 minutes.
# If the AmlCompute with that name is already in your workspace this code will skip the creation process.
# As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this article on the default limits and how to request more quota.
# +
from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget
# Choose a name for your cluster.
amlcompute_cluster_name = "cpu-cluster-11"
found = False
# Check if this compute target already exists in the workspace.
cts = ws.compute_targets
if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == 'AmlCompute':
found = True
print('Found existing compute target.')
compute_target = cts[amlcompute_cluster_name]
if not found:
print('Creating a new compute target...')
provisioning_config = AmlCompute.provisioning_configuration(vm_size = "STANDARD_D2_V2", # for GPU, use "STANDARD_NC6"
#vm_priority = 'lowpriority', # optional
max_nodes = 6)
# Create the cluster.
compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, provisioning_config)
print('Checking cluster status...')
# Can poll for a minimum number of nodes and for a specific timeout.
# If no min_node_count is provided, it will use the scale settings for the cluster.
compute_target.wait_for_completion(show_output = True, min_node_count = None, timeout_in_minutes = 20)
# For a more detailed view of current AmlCompute status, use get_status().
# -
# ## AutoMLConfig
# #### Create a base AutoMLConfig
# This configuration will be used for all the groups in the pipeline.
target_column = 'Quantity'
time_column_name = 'WeekStarting'
grain_column_names = ['Brand']
group_column_names = ['Store']
max_horizon = 20
automl_settings = {
"iteration_timeout_minutes" : 5,
"experiment_timeout_minutes" : 15,
"primary_metric" : 'normalized_mean_absolute_error',
"time_column_name": time_column_name,
"grain_column_names": grain_column_names,
"max_horizon": max_horizon,
"drop_column_names": ['logQuantity'],
"max_concurrent_iterations": 2,
"max_cores_per_iteration": -1
}
base_configuration = AutoMLConfig(task = 'forecasting',
path = project_folder,
n_cross_validations=3,
**automl_settings
)
# ## Pipeline
# We've written a script to generate the individual pipeline steps used to create each automl step. Calling this script will return a list of PipelineSteps that will train multiple groups concurrently and then deploy these models.
#
# This step requires an Enterprise workspace to gain access to this feature. To learn more about creating an Enterprise workspace or upgrading to an Enterprise workspace from the Azure portal, please visit our [Workspace page.](https://docs.microsoft.com/azure/machine-learning/service/concept-workspace#upgrade).
#
# ### Call the method to build pipeline steps
#
# `build_pipeline_steps()` takes as input:
# * **automlconfig**: This is the configuration used for every automl step
# * **df**: This is the dataset to be used for training
# * **target_column**: This is the target column of the dataset
# * **compute_target**: The compute to be used for training
# * **deploy**: The option on to deploy the models after training, if set to true an extra step will be added to deploy a webservice with all the models (default is `True`)
# * **service_name**: The service name for the model query endpoint
# * **time_column_name**: The time column of the data
# +
from azureml.core.webservice import Webservice
from azureml.exceptions import WebserviceException
service_name = 'grouped-model'
try:
# if you want to get existing service below is the command
# since aci name needs to be unique in subscription deleting existing aci if any
# we use aci_service_name to create azure aci
service = Webservice(ws, name=service_name)
if service:
service.delete()
except WebserviceException as e:
pass
# +
from build import build_pipeline_steps
steps = build_pipeline_steps(
base_configuration,
data,
target_column,
compute_target,
group_column_names=group_column_names,
deploy=True,
service_name=service_name,
time_column_name=time_column_name
)
# -
# ## Train
# Use the list of steps generated from above to build the pipeline and submit it to your compute for remote training.
# +
from azureml.pipeline.core import Pipeline
pipeline = Pipeline(
description="A pipeline with one model per data group using Automated ML.",
workspace=ws,
steps=steps)
pipeline_run = experiment.submit(pipeline)
# -
from azureml.widgets import RunDetails
RunDetails(pipeline_run).show()
pipeline_run.wait_for_completion(show_output=False)
# ## Test
#
# Now we can use the holdout set to test our models and ensure our web-service is running as expected.
from azureml.core.webservice import AciWebservice
service = AciWebservice(ws, service_name)
X_test = data_test.to_pandas_dataframe()
# Drop the column we are trying to predict (target column)
x_pred = X_test.drop(target_column, inplace=False, axis=1)
x_pred.head()
# Get Predictions
test_sample = X_test.drop(target_column, inplace=False, axis=1).to_json()
predictions = service.run(input_data=test_sample)
print(predictions)
# Convert predictions from JSON to DataFrame
pred_dict =json.loads(predictions)
X_pred = pd.read_json(pred_dict['predictions'])
X_pred.head()
# +
# Fix the index
PRED = 'pred_target'
X_pred[time_column_name] = pd.to_datetime(X_pred[time_column_name], unit='ms')
X_pred.set_index([time_column_name] + grain_column_names, inplace=True, drop=True)
X_pred.rename({'_automl_target_col': PRED}, inplace=True, axis=1)
# Drop all but the target column and index
X_pred.drop(list(set(X_pred.columns.values).difference({PRED})), axis=1, inplace=True)
# -
X_test[time_column_name] = pd.to_datetime(X_test[time_column_name])
X_test.set_index([time_column_name] + grain_column_names, inplace=True, drop=True)
# Merge predictions with raw features
pred_test = X_test.merge(X_pred, left_index=True, right_index=True)
pred_test.head()
# +
from sklearn.metrics import mean_absolute_error, mean_squared_error
def MAPE(actual, pred):
    """
    Mean absolute percentage error.

    Entries that are NaN in either array, or where `actual` is close to
    zero, are dropped before averaging.
    """
    keep = ~(np.isnan(actual) | np.isnan(pred))
    keep &= ~np.isclose(actual, 0.0)
    ratios = (actual[keep] - pred[keep]) / actual[keep]
    return np.mean(100 * np.abs(ratios))
def get_metrics(actuals, preds):
    """Bundle forecast accuracy metrics into a single pandas Series:
    RMSE, range-normalised RMSE, MAE, and MAPE."""
    rmse = np.sqrt(mean_squared_error(actuals, preds))
    value_range = np.abs(actuals.max() - actuals.min())
    return pd.Series({
        "RMSE": rmse,
        "NormRMSE": rmse / value_range,
        "MAE": mean_absolute_error(actuals, preds),
        "MAPE": MAPE(actuals, preds),
    })
# -
get_metrics(pred_test[PRED].values, pred_test[target_column].values)
|
how-to-use-azureml/automated-machine-learning/forecasting-grouping/auto-ml-forecasting-grouping.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="bc0bc872-20c1-4f39-891c-e1ea6eeebf3a" _uuid="3a43324855ed8243dfe5181008325402bfadf2b4"
# This exists to test the learntools implementation of the exercise defined in ex3.py
from learntools.core import binder
binder.bind(globals())
from learntools.python.ex3 import *
# + [markdown] _cell_guid="fb04726c-7f68-4527-a4ff-6a0f1de0f31c" _uuid="67f747ce4d929a0713af0d02ee23bd7f03fa7be9"
# # Exercises
#
#
#
# -
# ## 1.
#
# Many programming languages have [sign](https://en.wikipedia.org/wiki/Sign_function) available as a built-in function. Python doesn't, but we can define our own!
#
# In the cell below, define a function called `sign` which takes a numerical argument and returns -1 if it's negative, 1 if it's positive, and 0 if it's 0.
# +
# Your code goes here. Define a function called 'sign'
q1.check()
# +
# Your code goes here. Define a function called 'sign'
def sign(x):
    """Return -1 for a negative number, 1 for a positive one, and 0 for zero."""
    if x > 0:
        return 1
    if x < 0:
        return -1
    return 0
q1.check()
# -
q1.hint()
q1.solution()
# ## 2.
#
# We've decided to add some simple logging to our `to_smash` function from yesterday's exercise.
# +
def to_smash(total_candies):
    """Return the number of candies left over (to be smashed) after
    splitting `total_candies` evenly between 3 friends.

    >>> to_smash(91)
    1
    """
    print("Splitting %s candies" % total_candies)
    return total_candies % 3
to_smash(91)
# -
# But wait, what happens if we call it with `total_candies = 1`?
to_smash(1)
# How embarrassing!
#
# Modify the definition in the cell below to correct the grammar of our print statement. (If there's only one candy, we should use the singular "candy" instead of the plural "candies")
#
# **Bonus**: * Can you fix the function without increasing the number of lines of code?
# +
def to_smash(total_candies):
    """Return the number of leftover candies that must be smashed after
    distributing the given number of candies evenly between 3 friends.

    >>> to_smash(91)
    1
    """
    singular = total_candies == 1
    if singular:
        print("Splitting 1 candy")
    else:
        print("Splitting %s candies" % total_candies)
    return total_candies % 3
to_smash(91)
to_smash(1)
# +
def to_smash(total_candies):
    """Return the number of leftover candies that must be smashed after
    distributing the given number of candies evenly between 3 friends.

    >>> to_smash(91)
    1
    """
    if total_candies == 1:
        unit = "candy"
    else:
        unit = "candies"
    print("Splitting", total_candies, unit)
    return total_candies % 3
to_smash(91)
to_smash(1)
# -
q2.hint()
q2.solution()
# ## 3. *
#
# In the main lesson we talked about deciding whether we're prepared for the weather. I said that I'm safe from today's weather if...
# - I have an umbrella...
# - or if the rain isn't too heavy and I have a hood...
# - otherwise, I'm still fine unless it's raining *and* it's a workday
#
# The function below uses our first attempt at turning this logic into a Python expression. I claimed that there was a bug in that code. Can you find it?
#
# To prove that `prepared_for_weather` is buggy, come up with a set of inputs where it returns the wrong answer.
# + _cell_guid="30dc347b-c820-4cf8-bf4a-2d310e3efb35" _uuid="cb4589bb1552f33e578457a86cc014841807364f"
def prepared_for_weather(have_umbrella, rain_level, have_hood, is_workday):
    # Don't change this code. Our goal is just to find the bug, not fix it!
    # NOTE(review): `and` binds tighter than `or`, and `not rain_level > 0`
    # parses as `(not (rain_level > 0)) and is_workday` rather than the
    # intended `not (raining and workday)` — so e.g. a dry non-workday
    # with no umbrella/hood wrongly returns False.
    return have_umbrella or rain_level < 5 and have_hood or not rain_level > 0 and is_workday
# Change the values of these inputs so they represent a case where prepared_for_weather
# returns the wrong answer.
have_umbrella = True
rain_level = 0.0
have_hood = True
is_workday = True
# Check what the function returns given the current values of the variables above
actual = prepared_for_weather(have_umbrella, rain_level, have_hood, is_workday)
print(actual)
q3.check()
# +
def prepared_for_weather(have_umbrella, rain_level, have_hood, is_workday):
# Don't change this code. Our goal is just to find the bug, not fix it!
return have_umbrella or rain_level < 5 and have_hood or not rain_level > 0 and is_workday
# Change the values of these inputs so they represent a case where prepared_for_weather
# returns the wrong answer.
have_umbrella = False
rain_level = 5.0
have_hood = True
is_workday = False
# Check what the function returns given the current values of the variables above
actual = prepared_for_weather(have_umbrella, rain_level, have_hood, is_workday)
print(actual)
q3.check()
# + _cell_guid="ed71b22d-e81e-4ccc-83e8-0658ebce4580" _uuid="374d3001b55e4e3f2645641ddd61e70817fb943d"
q3.hint()
# -
q3.solution()
# + [markdown] _cell_guid="efb7dff3-e69f-4569-856b-a9052dcb1a24" _uuid="b0fed3fb9730d2079a1c77a7dad14de9b7d42129"
# ## 4.
#
# The function `is_negative` below is implemented correctly - it returns True if the given number is negative and False otherwise.
#
# However, it's more verbose than it needs to be. We can actually reduce the number of lines of code in this function by *75%* while keeping the same behaviour.
#
# See if you can come up with an equivalent body that uses just **one line** of code, and put it in the function `concise_is_negative`.
# + _cell_guid="0f83686b-132f-4faa-9f6c-d0e788c7e829" _uuid="a160b0a301c6ef13745fff22e15cec8a959cfd16"
def is_negative(number):
    """Return True if the given number is negative and False otherwise."""
    if number < 0:
        return True
    else:
        return False

def concise_is_negative(number):
    """One-line equivalent of `is_negative`."""
    # The comparison already evaluates to a bool; wrapping it in
    # `True if ... else False` was redundant.
    return number < 0
q4.check()
# + _cell_guid="fdb3be14-30f7-493f-99da-33e36ad5fb7f" _uuid="066fabb3474f89218e1b188963a6fe226b477cd7"
q4.hint()
q4.solution()
# + [markdown] _cell_guid="2f1a1ba7-c560-476e-aea8-cc1c9fb9dbe8" _uuid="372b160a41585d1297048488347bd5350a6250c9"
# ## 5.
#
# The boolean variables `ketchup`, `mustard` and `onion` represent whether a customer wants a particular topping on their hot dog. We want to implement a number of boolean functions that correspond to some yes-or-no questions about the customer's order. For example:
# + _cell_guid="bdddb297-eb61-4d83-b49d-de3cc0fec9b3" _uuid="0d5f9904c03e2bb5f0a843ec1c715909dd078a6b"
def onionless(ketchup, mustard, onion):
    """Return True when the customer's order excludes onions.
    """
    has_onion = bool(onion)
    return not has_onion
# + [markdown] _cell_guid="95ad8530-cb19-4a1d-adfa-ecc383467818" _uuid="bedb8dd9ecca30e69ad76d280e140e9a0b4cfb8b"
# For each of the remaining functions, fill in the body to match the English description in the docstring.
# + _cell_guid="d1f47be9-d476-454d-8ba2-fe5bde676ed3" _uuid="51a6f7a13bafac75670fd5bab56e2af0834996c8"
def wants_all_toppings(ketchup, mustard, onion):
"""Return whether the customer wants "the works" (all 3 toppings)
"""
pass
q5.a.check()
# +
def wants_all_toppings(ketchup, mustard, onion):
    """Return whether the customer wants "the works" (all 3 toppings)
    """
    # Bug fix: the original omitted `onion`, so a ketchup-and-mustard-only
    # order was wrongly reported as "the works".
    return ketchup and mustard and onion
q5.a.check()
# -
q5.a.hint()
q5.a.solution()
# + _cell_guid="edb075f3-61ec-4c45-8f50-d5f94809b4a8" _uuid="13a2171d08a1ed65251802271dcb5421f3b7c833"
def wants_plain_hotdog(ketchup, mustard, onion):
"""Return whether the customer wants a plain hot dog with no toppings.
"""
pass
q5.b.check()
# +
def wants_plain_hotdog(ketchup, mustard, onion):
    """Return whether the customer wants a plain hot dog with no toppings.
    """
    # NOTE(review): this first attempt ignores `onion`; the corrected
    # definition in the following cell supersedes it.
    return not ketchup and not mustard
q5.b.check()
# +
def wants_plain_hotdog(ketchup, mustard, onion):
    """Return whether the customer wants a plain hot dog with no toppings.
    """
    # De Morgan: "none of the toppings" == "not any topping".
    return not (ketchup or mustard or onion)
q5.b.check()
# -
q5.b.hint()
q5.b.solution()
# + _cell_guid="1cd23cdb-d656-4b55-92f8-b342f285d796" _uuid="f0bae57d7b158f9cbec73f1954d1993a02fc68fb"
def exactly_one_sauce(ketchup, mustard, onion):
"""Return whether the customer wants either ketchup or mustard, but not both.
(You may be familiar with this operation under the name "exclusive or")
"""
pass
q5.c.check()
# -
q5.c.hint()
q5.c.solution()
# +
def exactly_one_sauce(ketchup, mustard, onion):
    """Return whether the customer wants either ketchup or mustard, but not both.
    (You may be familiar with this operation under the name "exclusive or")
    """
    # bools behave as ints (True == 1), so the sum counts requested sauces.
    sauce_count = ketchup + mustard
    return sauce_count == 1
q5.c.check()
# + [markdown] _cell_guid="bec383c3-90de-4038-b53e-34cd974b1a17" _uuid="2d3dbe30733bf5593a4a3a19574588a50da8b7c8"
# ## 6. *
#
# We’ve seen that calling `bool()` on an integer returns `False` if it’s equal to 0 and True otherwise. What happens if we call `int()` on a bool? Try it out in the console or a new code cell. **TODO: Actually, no, we haven't seen that.**
#
# Can you take advantage of this to write a succinct function that corresponds to the English sentence "does the customer want exactly one topping?"?
# + _cell_guid="41472367-0095-48ed-953b-9d8a77d579b1" _uuid="8dd3ecea1d5d44eb847f963d310398c285ce1c31"
def exactly_one_topping(ketchup, mustard, onion):
    """Return whether the customer wants exactly one of the three available toppings
    on their hot dog.
    """
    # Exercise stub: intentionally left unimplemented (returns None);
    # solved in a later cell.
    pass
q6.check()
# + _cell_guid="202a54bb-e0ae-48cc-b93f-fcfc56198ff1" _uuid="4a236006ae92f6ac81fbd79c37025af3cc3448ec"
q6.hint()
q6.solution()
# +
def exactly_one_topping(ketchup, mustard, onion):
    """Return whether the customer wants exactly one of the three available toppings
    on their hot dog.
    """
    # bool -> int coercion lets sum() count the requested toppings.
    return sum([ketchup, mustard, onion]) == 1
q6.check()
# + [markdown] _cell_guid="0d67a127-339f-497d-a079-81a0ed474c9d" _uuid="212c67201cebf9c173064d8adf88231a0496c808"
# ## 7. **
#
# In this problem we'll be working with a simplified version of [blackjack] (aka twenty-one). In this version there is one player (who you'll control) and a dealer. Play proceeds as follows:
#
# - The player is dealt two face-up cards. The dealer is dealt one face-up card.
# - The player may ask to be dealt another card ('hit') as many times as they wish. If the sum of their cards (counting Aces as 1 and Jack/Queen/King as 10) exceeds 21, they lose the round immediately.
# - The dealer then deals additional cards to himself until one of the following happens:
# - the sum of the dealer's cards exceeds 21, in which case the player wins the round
# - the sum of the dealer's cards is greater than or equal to the player's total (without exceeding 21), in which case the player loses.
#
# For this problem, you'll write a function representing the player's decision-making strategy in this game. We've provided a very unintelligent implementation below:
# + _cell_guid="c8b2cb95-ed07-4435-9e19-d4b7e1595816" _uuid="a15f4d15d5807da3c1f32bd81c4a3cfef00ff610"
def should_hit(player_total, dealer_total, player_aces):
    """Return True if the player should hit (request another card) given the current game
    state, or False if the player should stay.
    """
    # Deliberately naive baseline for the tutorial: always stand on the
    # initial hand (all three game-state arguments are ignored).
    return False
# + [markdown] _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# This very conservative agent *always* sticks with the hand of two cards that they're dealt.
#
# We'll be simulating games between your player agent and our own dealer agent by calling your function.
#
# Try running the function below to see an example of a simulated game:
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
q7.simulate_one_game()
# -
# The real test of your agent's mettle is their average win rate over many games. Try calling the function below to simulate 1000 games of blackjack:
q7.simulate(n_games=1000)
# Our dumb agent that completely ignores the game state still manages to win every once in a while! A stopped clock is right twice a day.
#
# Try adding some more smarts to the `should_hit` function and see how high you can get your win rate.
# +
def should_hit(player_total, dealer_total, player_aces):
    """Return True if the player should hit (request another card) given the current game
    state, or False if the player should stay.
    """
    # Baseline strategy repeated unchanged so the larger simulation run
    # below has a reference win rate to compare against.
    return False
q7.simulate(n_games=10000)
# +
def should_hit(player_total, dealer_total, player_aces):
    """Return True if the player should hit (request another card) given the current game
    state, or False if the player should stay.
    """
    # Threshold strategy: below 12 the next card cannot bust us, so draw.
    hit_below = 12
    if player_total < hit_below:
        return True
    return False
q7.simulate(n_games=100000)
# +
def should_hit(player_total, dealer_total, player_aces):
    """Return True if the player should hit (request another card) given the current game
    state, or False if the player should stay.
    """
    # Always draw below 12 (busting is impossible); with at least one ace
    # counted as 1 the hand is "soft", so keep drawing up to 16.
    if player_total < 12:
        return True
    return player_aces > 0 and player_total < 17
q7.simulate(n_games=100000)
|
learntools/python/nbs/ch3-testing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# ## Prism 是甚麼?
# 在[Prism 6 的首頁](https://github.com/PrismLibrary/Prism/blob/master/README.md)提到:
# ```
# Prism is a framework for building loosely coupled, maintainable, and testable XAML applications in WPF, Windows 10 UWP, and Xamarin Forms. Separate releases are available for each platform and those will be developed on independent timelines. Prism provides an implementation of a collection of design patterns that are helpful in writing well-structured and maintainable XAML applications, including MVVM, dependency injection, commands, EventAggregator, and others. Prism's core functionality is a shared code base in a Portable Class Library targeting these platforms. Those things that need to be platform specific are implemented in the respective libraries for the target platform. Prism also provides great integration of these patterns with the target platform. For example, Prism for UWP and Xamarin Forms allows you to use an abstraction for navigation that is unit testable, but that layers on top of the platform concepts and APIs for navigation so that you can fully leverage what the platform itself has to offer, but done in the MVVM way.
# ```
# 看起來,實在非常吸引人。
# ## 學習曲線陡峭
# 但要利用Prism須先了解一大堆概念。這些概念,不管Microsoft網站上的[Prism 5說明文件](https://msdn.microsoft.com/en-us/library/gg406140.aspx),或是[Prism 6 說明文件](https://github.com/PrismLibrary/Prism/blob/master/Documentation/README.md),都很龐大、嚇人。雖然有一些樣本程式,如[WPF樣本](https://github.com/PrismLibrary/Prism-Samples-Wpf)、[Windows樣本](https://github.com/PrismLibrary/Prism-Samples-Windows)、[Forms樣本](https://github.com/PrismLibrary/Prism-Samples-Forms)可供參考,但每一樣本程式都牽涉到很多周邊,讓人很難快速了解體會。
#
# ## 曙光
# 最近Prism 6的管理者提供了可在Visual Studio上使用的樣板,所謂的Prism Template Pack,大大的降低學習曲線的坡度。
# 該樣板放在Visual Studio Gallery裡,在Visual Studio中,可在Visual Studio -> Tools -> Extensions and Updates --> Online --> Visual Studio Gallery中搜尋Prism Template Pack的方式下載安裝。
#
# 該套件包含:
# * Snippets:
# * propp - Property, with a backing field, that depends on BindableBase
# * cmd - Creates a DelegateCommand property with private setter
# * cmdg - Creates a generic DelegateCommand<T> property
# * ItemTemplates
# * Cross Platform
# * Prism ViewModel - A ViewModel that derives from BindableBase and has a default constructor.
# * WPF
# * Prism UserControl - UserControl with ViewModelLocator
# * Prism Window - Window with ViewModelLocator
# * Xamarin.Forms
# * Prism ContentPage - ContentPage with ViewModelLocator
# * Prism NavigationPage - NavigationPage with ViewModelLocator
# * Prism MasterDetailPage - MasterDetailPage with ViewModelLocator
# * Prism TabbedPage - TabbedPage with ViewModelLocator
# * Prism CarouselPage - CarouselPage with ViewModelLocator
# * Project Templates
# * WPF
# * Prism Unity App - A project template for building WPF applications with Prism and Unity.
# * Prism Module - A project to add a module to a WPF Prism application.
# * Xamarin.Forms
#     * Prism Unity App - XF application that uses Unity as its container.
#
# ## 本學習系列的目的
# 希望用範例的方法,倚靠Prism Template Pack,大幅減少使用者的苦工,一步一步的了解Prism的概念與使用方法。要跟隨本系列,需先安裝曙光一節中所提到的
# Prism Template Pack。
#
# ## Unity 容器初體驗
# Unity 容器是Prism程式的頭,整個程式由Unity綜合管理。
#
# ### UnityFirstExample,一個用Unity容器的空白程式
# 這個例子介紹了說明文件中的Shell(主畫面)、Region等概念與命名規矩。
#
# #### 步驟一
# Visual Studio --> File --> New --> Project > Installed > Templates > Visual C# > Prism > WPF 中,選取 Prism Unity App(WPF),
# Name欄位填上 UnityFirstExample -> OK
# #### 步驟二
# 按F5執行程式,出現一個空白的WPF程式畫面,大功告成。
# ### 說明
# #### App.xaml.cs
# ```
# var bootstrapper = new Bootstrapper();
# bootstrapper.Run();
# ```
# * 用 `var bootstrapper = new Bootstrapper();` 起始Unity容器。
# * 用 `bootstrapper.Run();` 開始執行程式。
#
# #### Bootstrapper.cs
# ```
# protected override DependencyObject CreateShell()
# {
# return Container.Resolve<MainWindow>();
# }
#
# protected override void InitializeShell()
# {
# Application.Current.MainWindow.Show();
# }
# ```
# * Unity 容器用`Container.Resolve<MainWindow>();`找到所需的主畫面(MainWindow)
# * Unity 容器用`Application.Current.MainWindow.Show();`開始顯示主畫面
#
# #### MainWindow.xaml
# ```
# xmlns:prism="http://prismlibrary.com/"
# prism:ViewModelLocator.AutoWireViewModel="True"
# ...
# <ContentControl prism:RegionManager.RegionName="ContentRegion" />
# ```
# * 用`<ContentControl prism:RegionManager.RegionName="ContentRegion" />`定義了一個Region,取名ContentRegion,供以後的模組(Module)使用。
# * 用
# ```
# xmlns:prism="http://prismlibrary.com/"
# prism:ViewModelLocator.AutoWireViewModel="True"
# ```
# 準備做View與ViewModel的對應(詳下面的命名規矩)。
#
# #### Unity 容器的命名規矩
# 在MVVM模式的程式中,View與ViewModel的對應絕對是一件大事。
#
# 在Unity 容器中,View與ViewModel的對應可以只靠命名規矩。在本例中,MainWindow與MainWindowViewModel單純的靠命名規矩來對應。
#
# 要用命名規矩來對應,有兩個要件
# * 在 View 的 xaml檔(本例是MainWindow.xaml)裡,要有下面兩行來向容器聲明要採用命名規矩來對應
# ```
# xmlns:prism="http://prismlibrary.com/"
# prism:ViewModelLocator.AutoWireViewModel="True"
# ```
# * View 與 ViewModel 用下面規矩來名命。
#     * 假如View的名字是 Views.*Blog*(本例是Views.*MainWindow*),則對應ViewModel的名字是ViewModels.*Blog*ViewModel(本例是ViewModels.*MainWindow*ViewModel),其中*Blog*是自己選定的部分(本例是*MainWindow*)。這裡的Views.Blog中的Views是命名空間(namespace),ViewModels.BlogViewModel裡的ViewModels也是命名空間。
# * 假如View的名字結尾是View,如 Views.*BlogView*,則對應ViewModel的名字是ViewModels.*Blog*ViewModel,其中BlogView是自己選定的部分。
#     * 可以從本例裡,MainWindow.xaml落在UnityFirstExample\Views裡,MainWindowViewModel.cs落在UnityFirstExample\ViewModels裡,得到體驗。
#
#
#
#
|
Shell-Prism6-Tutorial-1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Building 1D Rydberg Crystals
# The following notebook shows a study of many-body dynamics on a 1D system. It is based on [1707.04344](https://arxiv.org/abs/1707.04344). The authors of that paper studied the preparation of symmetry-breaking states in antiferromagnetic Ising chains, by tuning the interaction and driving parameters accross the phase diagram. In this notebook, we reproduce some results of this paper. Since this is a particular experiment not based on certified devices, we will use the `MockDevice` class to allow for a wide range of configuration settings.
# +
import numpy as np
import matplotlib.pyplot as plt
import qutip
from pulser import Pulse, Sequence, Register
from pulser.simulation import Simulation
from pulser.waveforms import CompositeWaveform, RampWaveform, ConstantWaveform
from pulser.devices import MockDevice
# -
# ## 1. Rydberg Blockade at Resonant Driving
# We first consider clusters of $1, 2$ and $3$ atoms under resonant ($\delta = 0$) driving. If all the atoms are placed within each other's blockade volume, only one excitation per group will be possible at a time. The Rabi frequency will be enhanced by $\sqrt{N}$
def occupation(reg, j):
    """Return the |r><r| projector on qubit j (identity on every other qubit)
    as a full tensor-product operator over the register."""
    rydberg = qutip.basis(2, 0)
    projector = rydberg * rydberg.dag()
    n_qubits = len(reg.qubits)
    ops = [projector if k == j else qutip.qeye(2) for k in range(n_qubits)]
    return qutip.tensor(ops)
# Given a value of the maximum Rabi Frequency applied to the atoms, we can calculate the corresponding blockade radius using the `rydberg_blockade_radius()` method from `MockDevice`. We use this to arrange clusters of atoms which will experience this blockade effect:
# +
Omega_max = 2 * 2*np.pi  # peak Rabi frequency, rad/µs (2 MHz x 2π)
R_blockade = MockDevice.rydberg_blockade_radius(Omega_max)  # in µm
print(f'Blockade Radius is: {R_blockade}µm.')
groups = 3  # number of clusters simulated side by side
def blockade_cluster(N):
    """Simulate resonant driving of `groups` clusters of N atoms each.

    Atoms within a cluster are spaced R_blockade/N apart (all inside one
    blockade volume); successive clusters are offset by 40 µm.  Returns the
    time traces of the summed Rydberg occupation for each cluster.
    Depends on the module-level globals Omega_max, R_blockade and groups.
    """
    atom_coords = [((R_blockade/N)*x+40*group,0) for group in range(groups) for x in range(1,N+1)]
    reg = Register.from_coordinates(atom_coords, prefix='q')
    reg.draw(blockade_radius=R_blockade, draw_half_radius=True)
    # 1500 ns resonant pulse: amplitude Omega_max, zero detuning and phase.
    resonant_pulse = Pulse.ConstantPulse(1500, Omega_max, 0., 0.)
    seq = Sequence(reg, MockDevice)
    seq.declare_channel('ising', 'rydberg_global')
    seq.add(resonant_pulse, 'ising')
    simul = Simulation(seq, sampling_rate=.7)
    # One observable per cluster: sum of |r><r| projectors over its N atoms.
    obs = [sum(occupation(reg, j) for j in range(i,i+N)) for i in range(0,groups*N,N)]
    res = simul.run(progress_bar=True, method='bdf')
    return res.expect(obs)
# -
# Next we run `blockade_cluster(N)`, which runs the simulation, for clusters of sizes $N \in \{1,2,3\}$:
data = [blockade_cluster(N) for N in [1,2,3]]
# We now plot the probability that a Rydberg state within the cluster is occupied (by summing the expectation values of the $|r\rangle\langle r|_i$ operators for each cluster) as it evolves in time, revealing the Rabi frequency of each configuration:
# One figure per cluster size N = 1, 2, 3: average the per-cluster traces
# and plot them against time in µs (samples are 1 ns apart).
for N, expectation in enumerate(data):
    plt.xlabel('Time (µs)', fontsize=14)
    plt.ylabel('Probability of Rydberg state', fontsize=14)
    plt.title(f'Atoms per cluster N = {N+1}', fontsize = 15)
    avg = sum(expectation)/groups  # mean over the `groups` clusters
    plt.plot(np.arange(len(avg))/1000, avg)
    plt.show()
# Only one excitation will be shared between the atoms on each cluster. Notice how the Rabi frequency increases by a factor of $\sqrt{N}$
# ## 2. Ordered Crystalline phases
# The pulse sequence that we will prepare is based on the following parameters:
# Parameters in rad/µs and ns
delta_0 = -6 * 2*np.pi   # initial detuning (rad/µs)
delta_f = 10 * 2*np.pi   # final detuning (rad/µs)
Omega_max = 2 * 2*np.pi  # Rabi frequency during the sweep (rad/µs)
t_rise = 500             # ns: duration of the hold at delta_0
t_stop = 4500            # ns: end of the detuning ramp
t_end = 5000             # ns: total sequence duration
# We calculate the blockade radius from the maximal applied Rabi frequency:
# +
R_blockade = MockDevice.rydberg_blockade_radius(Omega_max)
a = 7.  # interatomic spacing (µm)
reg = Register.rectangle(1,11, spacing=a, prefix='q')  # 1x11 atom chain
print(f'Blockade Radius is: {R_blockade}µm.')
reg.draw(blockade_radius=R_blockade, draw_half_radius=True)
# -
# Create the pulses using Pulser objects:
hold = ConstantWaveform(t_rise, delta_0)  # hold detuning at delta_0
excite = RampWaveform(t_stop - t_rise, delta_0, delta_f)  # linear sweep
# Constant amplitude Omega_max while the detuning follows hold + ramp.
sweep = Pulse.ConstantAmplitude(Omega_max, CompositeWaveform(hold, excite), 0.)
# Final segment: drive off (amplitude 0) while holding delta_f.
stay = Pulse.ConstantPulse(t_end - t_stop, 0., delta_f, 0.)
# +
seq = Sequence(reg, MockDevice)
seq.declare_channel('ising', 'rydberg_global')
seq.add(sweep, 'ising')
seq.add(stay, 'ising')
#print(seq)
seq.draw()
# -
# The pulse sequence we just created corresponds to a path in the phase space of the ground state, which we represent schematically with the following function:
def phase_diagram(seq):
    """Schematically plot the path traced by `seq` in the (δ/Ω, R_b/a) plane.

    Collects the detuning samples of every Pulse on the 'ising' channel,
    normalises them by the module-level Omega_max, and overlays that path
    as a red line at the current R_blockade/a on top of hand-drawn phase
    lobes.  The three fill_between regions are schematic illustrations
    only -- their polynomial coefficients are hand-tuned, not computed.
    Relies on the module-level globals Omega_max, R_blockade and a, and on
    the private Sequence attribute `_schedule`.
    """
    ratio = []
    for x in seq._schedule['ising']:
        if isinstance(x.type,Pulse):
            ratio += list(x.type.detuning.samples/Omega_max)
    fig, ax = plt.subplots()
    ax.grid(True, which='both')
    ax.set_ylabel(r"Interaction Range $R_b/a$", fontsize=14)
    ax.set_xlabel(r"Detuning $\delta/\Omega$", fontsize=14)
    ax.set_xlim(-4,6)
    ax.set_ylim(0,3.2)
    ax.axhline(y=0, color='k')
    ax.axvline(x=0, color='k')
    # Three schematic ordered-phase lobes (hand-tuned parabolas in y).
    y = np.arange(0.0, 5, 0.01)
    x = 2*(0.6+8*(y-1.2)**2)
    ax.fill_between(x, y, alpha=0.4)
    y = np.arange(0.0, 5, 0.01)
    x = 2*(0.8+50*(y-2.45)**2)
    ax.fill_between(x, y, alpha=0.4)
    y = np.arange(0.0, 5, 0.01)
    x = 2*(1.+170*(y-3.06)**2)
    ax.fill_between(x, y, alpha=0.4)
    ax.plot(np.array(ratio), np.full(len(ratio), R_blockade/a),'red',lw=2)
    plt.show()
phase_diagram(seq)
# ### 2.1 Simulation
# We run our simulation, for a list of observables corresponding to $|r\rangle \langle r|_j$ for each atom in the register:
# +
# Build one |r><r| observable per atom and run the full-sequence simulation.
simul = Simulation(seq, sampling_rate=.1)
occup_list = [occupation(reg, j) for j in range(len(reg.qubits))]
res = simul.run(progress_bar=True)
occupations = res.expect(occup_list)
# -
# The following function plots the evolution of the expectation values with respect to time:
def plot_evolution(results):
    """Plot each atom's Rydberg occupation probability versus time.

    Parameters
    ----------
    results : iterable of 1D arrays
        One expectation-value trace per atom, sampled every nanosecond;
        the time axis is converted to microseconds for display.
    """
    plt.figure(figsize=(10,5))
    plt.xlabel('Time (µs)', fontsize=14)
    # Fixed axis-label typo: 'Probabilty' -> 'Probability'.
    plt.ylabel('Rydberg Occupation Probability', fontsize=14)
    for expv in results:
        plt.plot(np.arange(len(expv))/1000,expv)  # ns samples -> µs axis
    plt.show()
plot_evolution(occupations)
# We finally plot the probability of occupation of the Rydberg level with respect to the values of detuning, for each atom in the array:
def heat_detuning(data,start,end):
    """Heat map of per-atom Rydberg occupation versus instantaneous detuning.

    data  : per-atom expectation-value traces (one row per atom).
    start : lower bound of the detuning window to display (rad/µs).
    end   : upper bound of the detuning window (rad/µs).

    NOTE(review): reads the notebook globals `reg` and `simul`, so it must
    be called right after the matching simulation has been run.
    """
    N = len(reg.qubits)
    time_window = []
    x =[]
    # Detuning value at each sampled simulation time (times are in µs,
    # samples are indexed in ns, hence the 1000x conversion).
    detunings = simul.samples['Global']['ground-rydberg']['det'][[int(1000*t) for t in simul._times]]
    for t,d in enumerate(detunings):
        if start <= d <= end:
            time_window.append(t)
            x.append(d/(2*np.pi))  # display detuning in MHz (value / 2π)
    y = np.arange(1,N+1)
    X, Y = np.meshgrid(x, y)
    Z = np.array(data)[:,time_window]
    plt.figure(figsize=(14,3))
    plt.pcolormesh(X,Y,Z, cmap='hot', shading='auto')
    plt.xlabel('Detuning/2π (MHz)', fontsize=14)
    plt.ylabel('Atom in array', fontsize=14)
    plt.yticks(range(1,N+1), [f'q{i}' for i in range(N)], va='center')
    plt.colorbar(fraction=0.047, pad=0.015)
    plt.show()
heat_detuning(occupations, delta_0, delta_f)
# ### 2.2 Rydberg Crystals: $Z_3$ Order
# To arrive at a different phase, we reduce the interatomic distance $a$, thus increasing the interaction range between the atoms. This will lead to a $Z_3$ ordered phase:
# +
# Z_3 order: same sweep shape as the Z_2 cell above, but with a smaller
# interatomic spacing (a = 3.5 µm) to increase the interaction range.
a = 3.5
reg = Register.rectangle(1, 10, spacing=a, prefix='q')
delta_0 = -4 * 2*np.pi
delta_f = 10 * 2*np.pi
Omega_max = 2.0 * 2*np.pi # btw 1.8-2 * 2pi MHz
t_rise = 600
t_stop = 2500
t_end = 3000
R_blockade = MockDevice.rydberg_blockade_radius(Omega_max)
reg.draw(blockade_radius=R_blockade, draw_half_radius=True)
#
# Same hold -> ramp -> stay pulse construction as in section 2.
hold = ConstantWaveform(t_rise, delta_0)
excite = RampWaveform(t_stop - t_rise, delta_0, delta_f)
sweep = Pulse.ConstantAmplitude(Omega_max, CompositeWaveform(hold, excite), 0.)
stay = Pulse.ConstantPulse(t_end - t_stop, 0., delta_f, 0.)
#
seq = Sequence(reg, MockDevice)
seq.declare_channel('ising', 'rydberg_global')
seq.add(sweep, 'ising')
seq.add(stay, 'ising')
#print(seq)
#seq.draw()
phase_diagram(seq)
simul = Simulation(seq, sampling_rate=0.1)
occup_list = [occupation(reg, j) for j in range(simul._size)]
res = simul.run(progress_bar=True, method='bdf')
occupations = res.expect(occup_list)
plot_evolution(occupations)
heat_detuning(occupations, delta_0, delta_f)
# -
# ### 2.3 Rydberg Crystals: $Z_4$ Order
# Decreasing even more the interatomic distance leads to a $Z_4$ order. The magnitude of the Rydberg interaction with respect to that of the applied pulses means our solver has to control terms with a wider range, thus leading to longer simulation times:
# +
# Z_4 order: spacing reduced further (a = 2.8 µm); the stronger interaction
# terms make the ODE stiffer, hence the 'bdf' method and longer run times.
a = 2.8
reg = Register.rectangle(1,9, spacing=a, prefix='q')
# Parameters in rad/µs and ns
delta_0 = -4 * 2*np.pi
delta_f = 10 * 2*np.pi
Omega_max = 2.0 * 2*np.pi # btw 1.8-2 2pi*MHz
t_rise = 600
t_stop = 2500
t_end = 3000
R_blockade = MockDevice.rydberg_blockade_radius(Omega_max)
reg.draw(blockade_radius=R_blockade, draw_half_radius=True)
#
# Same hold -> ramp -> stay pulse construction as in section 2.
hold = ConstantWaveform(t_rise, delta_0)
excite = RampWaveform(t_stop - t_rise, delta_0, delta_f)
sweep = Pulse.ConstantAmplitude(Omega_max, CompositeWaveform(hold, excite), 0.)
stay = Pulse.ConstantPulse(t_end - t_stop, 0., delta_f, 0.)
#
seq = Sequence(reg, MockDevice)
seq.declare_channel('ising', 'rydberg_global')
seq.add(sweep, 'ising')
seq.add(stay, 'ising')
phase_diagram(seq)
simul = Simulation(seq, sampling_rate=0.4)
occup_list = [occupation(reg, j) for j in range(simul._size)]
#
res = simul.run(progress_bar=True, method='bdf')
occupations = res.expect(occup_list)
plot_evolution(occupations)
heat_detuning(occupations,delta_0,delta_f)
|
tutorials/applications/Building 1D Rydberg Crystals.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # EDA about companies creation
# +
# %run "../config/notebook.ipynb"
# %run "../config/files.ipynb"
# load preparation from cache
FROM_CACHE=True
# import communes of the Haute-Garonne: will be imported while importing sirenes
# import the Insee staffing ranges
# %run "../prepare/prepare_insee_staffing_ranges.ipynb"
# import Haute-Garonne sirenes
# %run "../prepare/prepare_companies_sirene_registry.ipynb"
# %run "../prepare/prepare_map_hautegaronne_townships.ipynb"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import geopandas as gpd
from matplotlib.patches import Patch
# %matplotlib inline
# -
# #### Companies by code_insee
# +
companies_by_code_insee = df_sirenes_hautegaronne.groupby('code_insee')['code_insee'].count()
def classify_creation(value):
    """Bucket a raw company count into 10 000-wide classes via floor division."""
    return value // 10000
companies_creationclass_by_code_insee = companies_by_code_insee.apply(classify_creation)
# -
# NOTE(review): `companies_creationclass_by_postalcode` is not defined at
# this point in the notebook (the cell above builds
# `companies_creationclass_by_code_insee`), and the merged frame is assigned
# to a name that is never reused -- confirm against the prepare notebooks.
gdf_hautegaronne_townships_companies = map_hautegaronne_postalcodes.merge(companies_creationclass_by_postalcode, left_on='ID', right_on='codepostal')
# +
# NOTE(review): `map_hautegaronne_companies` is only defined in the 2019
# section below, so running this notebook top-to-bottom raises a NameError
# here.  Legend thresholds are hard-coded 10 000-wide classes; three labels
# also contain a doubled apostrophe ("d''entreprises").
ax = map_hautegaronne_companies.plot(figsize=(16,16),column='creation', cmap='RdPu')
# Turns off-grid on the left Axis.
ax.grid(False)
ax.axis('off')
legends = [Patch(facecolor='#49006a', label="plus de 70 000 créations d'entreprises"),
           Patch(facecolor='#f988ad', label="entre 30 000 et 39 999 créations d''entreprises"),
           Patch(facecolor='#fbbabd', label="entre 20 000 et 29 999 créations d''entreprises"),
           Patch(facecolor='#fdddd9', label="entre 10 000 et 19 999 créations d''entreprises"),
           Patch(facecolor='#fff7f3', label="moins de 10 000 créations d'entreprises")]
ax.legend(handles=legends, loc='upper right')
# -
map_hautegaronne_companies.sort_values(by='creation', ascending=False)
map_hautegaronne_companies.sort_values(by='creation', ascending=False)['creation'].unique()
# ## 2019 analysis
# +
# Count 2019 company creations per postal code.
companies_by_postalcode = df_companies[df_companies['year'] == '2019'].groupby('codepostal')['creation'].count()

# Fixed class width for the choropleth (one legend class per `step`
# creations).  An adaptive width (max // 4) was previously computed and
# immediately overwritten -- the dead assignment is removed and kept here
# as a comment for reference:
# step = companies_by_postalcode.max() // 4
step = 1000

def classify_creation(value):
    """Map a creation count to its 0-based class index (`step`-wide buckets)."""
    return value // step

companies_creationclass_by_postalcode = companies_by_postalcode.apply(classify_creation)
# -
# Join the 2019 class indices onto the postal-code polygons for the choropleth.
map_hautegaronne_companies = map_hautegaronne_postalcodes.merge(
    companies_creationclass_by_postalcode, left_on='ID', right_on='codepostal')
# +
ax = map_hautegaronne_companies.plot(figsize=(16,16),column='creation', cmap='RdPu')
# Hide the grid and axes: only the map itself is meaningful here.
ax.grid(False)
ax.axis('off')
# Legend entries mirror the `step`-wide classes produced by classify_creation.
# Fixed label typo: "d''entreprises" -> "d'entreprises".
legends = [Patch(facecolor='#49006a', label="plus de {} créations d'entreprises".format(step*3)),
           Patch(facecolor='#f988ad', label="entre {} et {} créations d'entreprises".format(step*2, step*3 - 1)),
           Patch(facecolor='#fbbabd', label="entre {} et {} créations d'entreprises".format(step, step*2 - 1)),
           Patch(facecolor='#fff7f3', label="moins de {} créations d'entreprises".format(step))]
ax.legend(handles=legends, loc='upper right')
# -
|
notebooks/eda/eda_companies_creation_in_france.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="b4812c1b"
# ! git clone https://github.com/jonasitzmann/pix2latent.git
# %cd pix2latent
# !pip install -r requirements.txt
# ! pip install .
% cd examples
# + id="V-WnT_R7SiHr" outputId="7904da0a-ebda-411b-c82c-365e98319d7b" colab={"base_uri": "https://localhost:8080/"}
import nltk
nltk.download('wordnet')
import os, os.path as osp
import numpy as np
import torch
import torch.nn as nn
from pix2latent.model import BigGAN
from pix2latent.utils import image, video
from pix2latent import VariableManager, save_variables
from pix2latent.transform import TransformBasinCMAOptimizer, SpatialTransform
import pix2latent.loss_functions as LF
import pix2latent.utils.function_hooks as hook
import pix2latent.distribution as dist
# + id="w0si66KiFa77"
# Target image/mask and optimization settings for the BigGAN projection.
fp = './images/dog-example-153.jpg'            # target image path
mask_fp = './images/dog-example-153-mask.jpg'  # foreground mask path
class_lbl = 153        # ImageNet class id of the target
method = 'hybrid'      # selects the latent-optimization branch below
ng_method = 'CMA'      # NOTE(review): not referenced later in this cell -- confirm
lr = 0.05              # learning rate for the latent variable z
latent_noise = 0.05    # NOTE(review): not referenced later in this cell -- confirm
truncate = 2.0         # truncation bound for the latent distribution
make_video = True      # also log frames and write mp4s of the optimization
max_minibatch = 1
num_samples = 1
### ---- initialize necessary --- ###
# (1) pretrained generative model
model = BigGAN().cuda().eval()
# (2) variable creator
var_manager = VariableManager()
# (3) default l1 + lpips loss function
loss_fn = LF.ProjectionLoss()
target = image.read(fp, as_transformed_tensor=True, im_size=256)
weight = image.read(mask_fp, as_transformed_tensor=True, im_size=256)
# Rescale mask from [-1, 1] to [0, 1] and floor it at 0.3.
weight = ((weight + 1.) / 2.).clamp_(0.3, 1.0)
fn = fp.split('/')[-1].split('.')[0]  # basename without extension
save_dir = f'./results/biggan_256/{method}_{fn}_w_transform'
# NOTE(review): VariableManager is re-created here; the instance built in
# step (2) above is discarded unused.
var_manager = VariableManager()
# (4) define input output variable structure. the variable name must match
# the argument name of the model and loss function call
var_manager.register(
    variable_name='z',
    shape=(128,),
    distribution=dist.TruncatedNormalModulo(sigma=1.0, trunc=truncate),
    var_type='input',
    learning_rate=lr,
    hook_fn=hook.Clamp(truncate),
)
var_manager.register(
    variable_name='c',
    shape=(128,),
    default=model.get_class_embedding(class_lbl)[0],
    var_type='input',
    learning_rate=0.01,
)
var_manager.register(
    variable_name='target',
    shape=(3, 256, 256),
    requires_grad=False,
    default=target,
    var_type='output'
)
var_manager.register(
    variable_name='weight',
    shape=(3, 256, 256),
    requires_grad=False,
    default=weight,
    var_type='output'
)
### ---- optimize (transformation) ---- ####
# Spatial alignment stage: search for a transform `t` that best aligns the
# generated image with the masked target before latent optimization.
target_transform_fn = SpatialTransform(pre_align=weight)
weight_transform_fn = SpatialTransform(pre_align=weight)
tranform_params = target_transform_fn.get_default_param(as_tensor=True)
var_manager.register(
    variable_name='t',
    shape=tuple(tranform_params.size()),
    requires_grad=False,
    var_type='transform',
    grad_free=True,
)
t_opt = TransformBasinCMAOptimizer(
    model, var_manager, loss_fn, max_batch_size=1, log=make_video)
# this tells the optimizer to apply transformation `target_transform_fn`
# with parameter `t` on the variable `target`
t_opt.register_transform(target_transform_fn, 't', 'target')
t_opt.register_transform(weight_transform_fn, 't', 'weight')
# (highly recommended) speeds up optimization by propagating information
t_opt.set_variable_propagation('z')
t_vars, (t_out, t_target, t_candidate), t_loss = \
    t_opt.optimize(meta_steps=50, grad_steps=10)
# Persist the transform-stage results (videos only when make_video is set).
os.makedirs(save_dir, exist_ok=True)
if make_video:
    video.make_video(osp.join(save_dir, 'transform_out.mp4'), t_out)
    video.make_video(osp.join(save_dir, 'transform_target.mp4'), t_target)
image.save(osp.join(save_dir, 'transform_out.jpg'), t_out[-1])
image.save(osp.join(save_dir, 'transform_target.jpg'), t_target[-1])
image.save(osp.join(save_dir, 'transform_candidate.jpg'), t_candidate)
np.save(osp.join(save_dir, 'transform_tracked.npy'),
        {'t': t_opt.transform_tracked})
# Freeze the best transform and restore the latent learning rate for the
# next optimization stage, then free the transform-stage memory.
t = t_opt.get_candidate()
var_manager.edit_variable('t', {'default': t, 'grad_free': False})
var_manager.edit_variable('z', {'learning_rate': lr})
del t_opt, t_vars, t_out, t_target, t_candidate, t_loss
model.zero_grad()
torch.cuda.empty_cache()
### ---- optimize (latent) ---- ###
if method == 'adam':
    var_manager.edit_variable('z', {'grad_free': False})
# NOTE(review): GradientOptimizer is never imported in this notebook
# (e.g. `from pix2latent.optimizer import GradientOptimizer`), and the lines
# below look like they belong inside the `if method == 'adam':` branch --
# confirm against the upstream pix2latent example script.
opt = GradientOptimizer(
    model, var_manager, loss_fn,
    # Bug fix: this cell has no `args` namespace -- the settings were
    # defined above as plain variables (max_minibatch, make_video, ...).
    max_batch_size=max_minibatch,
    log=make_video
)
opt.register_transform(target_transform_fn, 't', 'target')
opt.register_transform(weight_transform_fn, 't', 'weight')
# Renamed `vars` -> `latent_vars` to avoid shadowing the builtin.
latent_vars, out, loss = opt.optimize(num_samples=num_samples, grad_steps=500)
# + id="FI2Mukx_Fb_o"
NUM_CLASSES = 1000
IMAGENET = {1440764: 0, 1443537: 1, 1484850: 2, 1491361: 3, 1494475: 4, 1496331: 5, 1498041: 6, 1514668: 7, 1514859: 8, 1518878: 9, 1530575: 10, 1531178: 11, 1532829: 12, 1534433: 13, 1537544: 14, 1558993: 15, 1560419: 16, 1580077: 17, 1582220: 18, 1592084: 19, 1601694: 20, 1608432: 21, 1614925: 22, 1616318: 23, 1622779: 24, 1629819: 25, 1630670: 26, 1631663: 27, 1632458: 28, 1632777: 29, 1641577: 30, 1644373: 31, 1644900: 32, 1664065: 33, 1665541: 34, 1667114: 35, 1667778: 36, 1669191: 37, 1675722: 38, 1677366: 39, 1682714: 40, 1685808: 41, 1687978: 42, 1688243: 43, 1689811: 44, 1692333: 45, 1693334: 46, 1694178: 47, 1695060: 48, 1697457: 49, 1698640: 50, 1704323: 51, 1728572: 52, 1728920: 53, 1729322: 54, 1729977: 55, 1734418: 56, 1735189: 57, 1737021: 58, 1739381: 59, 1740131: 60, 1742172: 61, 1744401: 62, 1748264: 63, 1749939: 64, 1751748: 65, 1753488: 66, 1755581: 67, 1756291: 68, 1768244: 69, 1770081: 70, 1770393: 71, 1773157: 72, 1773549: 73, 1773797: 74, 1774384: 75, 1774750: 76, 1775062: 77, 1776313: 78, 1784675: 79, 1795545: 80, 1796340: 81, 1797886: 82, 1798484: 83, 1806143: 84, 1806567: 85, 1807496: 86, 1817953: 87, 1818515: 88, 1819313: 89, 1820546: 90, 1824575: 91, 1828970: 92, 1829413: 93, 1833805: 94, 1843065: 95, 1843383: 96, 1847000: 97, 1855032: 98, 1855672: 99, 1860187: 100, 1871265: 101, 1872401: 102, 1873310: 103, 1877812: 104, 1882714: 105, 1883070: 106, 1910747: 107, 1914609: 108, 1917289: 109, 1924916: 110, 1930112: 111, 1943899: 112, 1944390: 113, 1945685: 114, 1950731: 115, 1955084: 116, 1968897: 117, 1978287: 118, 1978455: 119, 1980166: 120, 1981276: 121, 1983481: 122, 1984695: 123, 1985128: 124, 1986214: 125, 1990800: 126, 2002556: 127, 2002724: 128, 2006656: 129, 2007558: 130, 2009229: 131, 2009912: 132, 2011460: 133, 2012849: 134, 2013706: 135, 2017213: 136, 2018207: 137, 2018795: 138, 2025239: 139, 2027492: 140, 2028035: 141, 2033041: 142, 2037110: 143, 2051845: 144, 2056570: 145, 2058221: 146, 2066245: 147, 2071294: 148, 2074367: 
149, 2077923: 150, 2085620: 151, 2085782: 152, 2085936: 153, 2086079: 154, 2086240: 155, 2086646: 156, 2086910: 157, 2087046: 158, 2087394: 159, 2088094: 160, 2088238: 161, 2088364: 162, 2088466: 163, 2088632: 164, 2089078: 165, 2089867: 166, 2089973: 167, 2090379: 168, 2090622: 169, 2090721: 170, 2091032: 171, 2091134: 172, 2091244: 173, 2091467: 174, 2091635: 175, 2091831: 176, 2092002: 177, 2092339: 178, 2093256: 179, 2093428: 180, 2093647: 181, 2093754: 182, 2093859: 183, 2093991: 184, 2094114: 185, 2094258: 186, 2094433: 187, 2095314: 188, 2095570: 189, 2095889: 190, 2096051: 191, 2096177: 192, 2096294: 193, 2096437: 194, 2096585: 195, 2097047: 196, 2097130: 197, 2097209: 198, 2097298: 199, 2097474: 200, 2097658: 201, 2098105: 202, 2098286: 203, 2098413: 204, 2099267: 205, 2099429: 206, 2099601: 207, 2099712: 208, 2099849: 209, 2100236: 210, 2100583: 211, 2100735: 212, 2100877: 213, 2101006: 214, 2101388: 215, 2101556: 216, 2102040: 217, 2102177: 218, 2102318: 219, 2102480: 220, 2102973: 221, 2104029: 222, 2104365: 223, 2105056: 224, 2105162: 225, 2105251: 226, 2105412: 227, 2105505: 228, 2105641: 229, 2105855: 230, 2106030: 231, 2106166: 232, 2106382: 233, 2106550: 234, 2106662: 235, 2107142: 236, 2107312: 237, 2107574: 238, 2107683: 239, 2107908: 240, 2108000: 241, 2108089: 242, 2108422: 243, 2108551: 244, 2108915: 245, 2109047: 246, 2109525: 247, 2109961: 248, 2110063: 249, 2110185: 250, 2110341: 251, 2110627: 252, 2110806: 253, 2110958: 254, 2111129: 255, 2111277: 256, 2111500: 257, 2111889: 258, 2112018: 259, 2112137: 260, 2112350: 261, 2112706: 262, 2113023: 263, 2113186: 264, 2113624: 265, 2113712: 266, 2113799: 267, 2113978: 268, 2114367: 269, 2114548: 270, 2114712: 271, 2114855: 272, 2115641: 273, 2115913: 274, 2116738: 275, 2117135: 276, 2119022: 277, 2119789: 278, 2120079: 279, 2120505: 280, 2123045: 281, 2123159: 282, 2123394: 283, 2123597: 284, 2124075: 285, 2125311: 286, 2127052: 287, 2128385: 288, 2128757: 289, 2128925: 290, 2129165: 291, 
2129604: 292, 2130308: 293, 2132136: 294, 2133161: 295, 2134084: 296, 2134418: 297, 2137549: 298, 2138441: 299, 2165105: 300, 2165456: 301, 2167151: 302, 2168699: 303, 2169497: 304, 2172182: 305, 2174001: 306, 2177972: 307, 2190166: 308, 2206856: 309, 2219486: 310, 2226429: 311, 2229544: 312, 2231487: 313, 2233338: 314, 2236044: 315, 2256656: 316, 2259212: 317, 2264363: 318, 2268443: 319, 2268853: 320, 2276258: 321, 2277742: 322, 2279972: 323, 2280649: 324, 2281406: 325, 2281787: 326, 2317335: 327, 2319095: 328, 2321529: 329, 2325366: 330, 2326432: 331, 2328150: 332, 2342885: 333, 2346627: 334, 2356798: 335, 2361337: 336, 2363005: 337, 2364673: 338, 2389026: 339, 2391049: 340, 2395406: 341, 2396427: 342, 2397096: 343, 2398521: 344, 2403003: 345, 2408429: 346, 2410509: 347, 2412080: 348, 2415577: 349, 2417914: 350, 2422106: 351, 2422699: 352, 2423022: 353, 2437312: 354, 2437616: 355, 2441942: 356, 2442845: 357, 2443114: 358, 2443484: 359, 2444819: 360, 2445715: 361, 2447366: 362, 2454379: 363, 2457408: 364, 2480495: 365, 2480855: 366, 2481823: 367, 2483362: 368, 2483708: 369, 2484975: 370, 2486261: 371, 2486410: 372, 2487347: 373, 2488291: 374, 2488702: 375, 2489166: 376, 2490219: 377, 2492035: 378, 2492660: 379, 2493509: 380, 2493793: 381, 2494079: 382, 2497673: 383, 2500267: 384, 2504013: 385, 2504458: 386, 2509815: 387, 2510455: 388, 2514041: 389, 2526121: 390, 2536864: 391, 2606052: 392, 2607072: 393, 2640242: 394, 2641379: 395, 2643566: 396, 2655020: 397, 2666196: 398, 2667093: 399, 2669723: 400, 2672831: 401, 2676566: 402, 2687172: 403, 2690373: 404, 2692877: 405, 2699494: 406, 2701002: 407, 2704792: 408, 2708093: 409, 2727426: 410, 2730930: 411, 2747177: 412, 2749479: 413, 2769748: 414, 2776631: 415, 2777292: 416, 2782093: 417, 2783161: 418, 2786058: 419, 2787622: 420, 2788148: 421, 2790996: 422, 2791124: 423, 2791270: 424, 2793495: 425, 2794156: 426, 2795169: 427, 2797295: 428, 2799071: 429, 2802426: 430, 2804414: 431, 2804610: 432, 2807133: 433, 2808304: 
434, 2808440: 435, 2814533: 436, 2814860: 437, 2815834: 438, 2817516: 439, 2823428: 440, 2823750: 441, 2825657: 442, 2834397: 443, 2835271: 444, 2837789: 445, 2840245: 446, 2841315: 447, 2843684: 448, 2859443: 449, 2860847: 450, 2865351: 451, 2869837: 452, 2870880: 453, 2871525: 454, 2877765: 455, 2879718: 456, 2883205: 457, 2892201: 458, 2892767: 459, 2894605: 460, 2895154: 461, 2906734: 462, 2909870: 463, 2910353: 464, 2916936: 465, 2917067: 466, 2927161: 467, 2930766: 468, 2939185: 469, 2948072: 470, 2950826: 471, 2951358: 472, 2951585: 473, 2963159: 474, 2965783: 475, 2966193: 476, 2966687: 477, 2971356: 478, 2974003: 479, 2977058: 480, 2978881: 481, 2979186: 482, 2980441: 483, 2981792: 484, 2988304: 485, 2992211: 486, 2992529: 487, 2999410: 488, 3000134: 489, 3000247: 490, 3000684: 491, 3014705: 492, 3016953: 493, 3017168: 494, 3018349: 495, 3026506: 496, 3028079: 497, 3032252: 498, 3041632: 499, 3042490: 500, 3045698: 501, 3047690: 502, 3062245: 503, 3063599: 504, 3063689: 505, 3065424: 506, 3075370: 507, 3085013: 508, 3089624: 509, 3095699: 510, 3100240: 511, 3109150: 512, 3110669: 513, 3124043: 514, 3124170: 515, 3125729: 516, 3126707: 517, 3127747: 518, 3127925: 519, 3131574: 520, 3133878: 521, 3134739: 522, 3141823: 523, 3146219: 524, 3160309: 525, 3179701: 526, 3180011: 527, 3187595: 528, 3188531: 529, 3196217: 530, 3197337: 531, 3201208: 532, 3207743: 533, 3207941: 534, 3208938: 535, 3216828: 536, 3218198: 537, 3220513: 538, 3223299: 539, 3240683: 540, 3249569: 541, 3250847: 542, 3255030: 543, 3259280: 544, 3271574: 545, 3272010: 546, 3272562: 547, 3290653: 548, 3291819: 549, 3297495: 550, 3314780: 551, 3325584: 552, 3337140: 553, 3344393: 554, 3345487: 555, 3347037: 556, 3355925: 557, 3372029: 558, 3376595: 559, 3379051: 560, 3384352: 561, 3388043: 562, 3388183: 563, 3388549: 564, 3393912: 565, 3394916: 566, 3400231: 567, 3404251: 568, 3417042: 569, 3424325: 570, 3425413: 571, 3443371: 572, 3444034: 573, 3445777: 574, 3445924: 575, 3447447: 576, 
3447721: 577, 3450230: 578, 3452741: 579, 3457902: 580, 3459775: 581, 3461385: 582, 3467068: 583, 3476684: 584, 3476991: 585, 3478589: 586, 3481172: 587, 3482405: 588, 3483316: 589, 3485407: 590, 3485794: 591, 3492542: 592, 3494278: 593, 3495258: 594, 3496892: 595, 3498962: 596, 3527444: 597, 3529860: 598, 3530642: 599, 3532672: 600, 3534580: 601, 3535780: 602, 3538406: 603, 3544143: 604, 3584254: 605, 3584829: 606, 3590841: 607, 3594734: 608, 3594945: 609, 3595614: 610, 3598930: 611, 3599486: 612, 3602883: 613, 3617480: 614, 3623198: 615, 3627232: 616, 3630383: 617, 3633091: 618, 3637318: 619, 3642806: 620, 3649909: 621, 3657121: 622, 3658185: 623, 3661043: 624, 3662601: 625, 3666591: 626, 3670208: 627, 3673027: 628, 3676483: 629, 3680355: 630, 3690938: 631, 3691459: 632, 3692522: 633, 3697007: 634, 3706229: 635, 3709823: 636, 3710193: 637, 3710637: 638, 3710721: 639, 3717622: 640, 3720891: 641, 3721384: 642, 3724870: 643, 3729826: 644, 3733131: 645, 3733281: 646, 3733805: 647, 3742115: 648, 3743016: 649, 3759954: 650, 3761084: 651, 3763968: 652, 3764736: 653, 3769881: 654, 3770439: 655, 3770679: 656, 3773504: 657, 3775071: 658, 3775546: 659, 3776460: 660, 3777568: 661, 3777754: 662, 3781244: 663, 3782006: 664, 3785016: 665, 3786901: 666, 3787032: 667, 3788195: 668, 3788365: 669, 3791053: 670, 3792782: 671, 3792972: 672, 3793489: 673, 3794056: 674, 3796401: 675, 3803284: 676, 3804744: 677, 3814639: 678, 3814906: 679, 3825788: 680, 3832673: 681, 3837869: 682, 3838899: 683, 3840681: 684, 3841143: 685, 3843555: 686, 3854065: 687, 3857828: 688, 3866082: 689, 3868242: 690, 3868863: 691, 3871628: 692, 3873416: 693, 3874293: 694, 3874599: 695, 3876231: 696, 3877472: 697, 3877845: 698, 3884397: 699, 3887697: 700, 3888257: 701, 3888605: 702, 3891251: 703, 3891332: 704, 3895866: 705, 3899768: 706, 3902125: 707, 3903868: 708, 3908618: 709, 3908714: 710, 3916031: 711, 3920288: 712, 3924679: 713, 3929660: 714, 3929855: 715, 3930313: 716, 3930630: 717, 3933933: 718, 3935335: 
719, 3937543: 720, 3938244: 721, 3942813: 722, 3944341: 723, 3947888: 724, 3950228: 725, 3954731: 726, 3956157: 727, 3958227: 728, 3961711: 729, 3967562: 730, 3970156: 731, 3976467: 732, 3976657: 733, 3977966: 734, 3980874: 735, 3982430: 736, 3983396: 737, 3991062: 738, 3992509: 739, 3995372: 740, 3998194: 741, 4004767: 742, 4005630: 743, 4008634: 744, 4009552: 745, 4019541: 746, 4023962: 747, 4026417: 748, 4033901: 749, 4033995: 750, 4037443: 751, 4039381: 752, 4040759: 753, 4041544: 754, 4044716: 755, 4049303: 756, 4065272: 757, 4067472: 758, 4069434: 759, 4070727: 760, 4074963: 761, 4081281: 762, 4086273: 763, 4090263: 764, 4099969: 765, 4111531: 766, 4116512: 767, 4118538: 768, 4118776: 769, 4120489: 770, 4125021: 771, 4127249: 772, 4131690: 773, 4133789: 774, 4136333: 775, 4141076: 776, 4141327: 777, 4141975: 778, 4146614: 779, 4147183: 780, 4149813: 781, 4152593: 782, 4153751: 783, 4154565: 784, 4162706: 785, 4179913: 786, 4192698: 787, 4200800: 788, 4201297: 789, 4204238: 790, 4204347: 791, 4208210: 792, 4209133: 793, 4209239: 794, 4228054: 795, 4229816: 796, 4235860: 797, 4238763: 798, 4239074: 799, 4243546: 800, 4251144: 801, 4252077: 802, 4252225: 803, 4254120: 804, 4254680: 805, 4254777: 806, 4258138: 807, 4259630: 808, 4263257: 809, 4264628: 810, 4265275: 811, 4266014: 812, 4270147: 813, 4273569: 814, 4275548: 815, 4277352: 816, 4285008: 817, 4286575: 818, 4296562: 819, 4310018: 820, 4311004: 821, 4311174: 822, 4317175: 823, 4325704: 824, 4326547: 825, 4328186: 826, 4330267: 827, 4332243: 828, 4335435: 829, 4336792: 830, 4344873: 831, 4346328: 832, 4347754: 833, 4350905: 834, 4355338: 835, 4355933: 836, 4356056: 837, 4357314: 838, 4366367: 839, 4367480: 840, 4370456: 841, 4371430: 842, 4371774: 843, 4372370: 844, 4376876: 845, 4380533: 846, 4389033: 847, 4392985: 848, 4398044: 849, 4399382: 850, 4404412: 851, 4409515: 852, 4417672: 853, 4418357: 854, 4423845: 855, 4428191: 856, 4429376: 857, 4435653: 858, 4442312: 859, 4443257: 860, 4447861: 861, 
4456115: 862, 4458633: 863, 4461696: 864, 4462240: 865, 4465501: 866, 4467665: 867, 4476259: 868, 4479046: 869, 4482393: 870, 4483307: 871, 4485082: 872, 4486054: 873, 4487081: 874, 4487394: 875, 4493381: 876, 4501370: 877, 4505470: 878, 4507155: 879, 4509417: 880, 4515003: 881, 4517823: 882, 4522168: 883, 4523525: 884, 4525038: 885, 4525305: 886, 4532106: 887, 4532670: 888, 4536866: 889, 4540053: 890, 4542943: 891, 4548280: 892, 4548362: 893, 4550184: 894, 4552348: 895, 4553703: 896, 4554684: 897, 4557648: 898, 4560804: 899, 4562935: 900, 4579145: 901, 4579432: 902, 4584207: 903, 4589890: 904, 4590129: 905, 4591157: 906, 4591713: 907, 4592741: 908, 4596742: 909, 4597913: 910, 4599235: 911, 4604644: 912, 4606251: 913, 4612504: 914, 4613696: 915, 6359193: 916, 6596364: 917, 6785654: 918, 6794110: 919, 6874185: 920, 7248320: 921, 7565083: 922, 7579787: 923, 7583066: 924, 7584110: 925, 7590611: 926, 7613480: 927, 7614500: 928, 7615774: 929, 7684084: 930, 7693725: 931, 7695742: 932, 7697313: 933, 7697537: 934, 7711569: 935, 7714571: 936, 7714990: 937, 7715103: 938, 7716358: 939, 7716906: 940, 7717410: 941, 7717556: 942, 7718472: 943, 7718747: 944, 7720875: 945, 7730033: 946, 7734744: 947, 7742313: 948, 7745940: 949, 7747607: 950, 7749582: 951, 7753113: 952, 7753275: 953, 7753592: 954, 7754684: 955, 7760859: 956, 7768694: 957, 7802026: 958, 7831146: 959, 7836838: 960, 7860988: 961, 7871810: 962, 7873807: 963, 7875152: 964, 7880968: 965, 7892512: 966, 7920052: 967, 7930864: 968, 7932039: 969, 9193705: 970, 9229709: 971, 9246464: 972, 9256479: 973, 9288635: 974, 9332890: 975, 9399592: 976, 9421951: 977, 9428293: 978, 9468604: 979, 9472597: 980, 9835506: 981, 10148035: 982, 10565667: 983, 11879895: 984, 11939491: 985, 12057211: 986, 12144580: 987, 12267677: 988, 12620546: 989, 12768682: 990, 12985857: 991, 12998815: 992, 13037406: 993, 13040303: 994, 13044778: 995, 13052670: 996, 13054560: 997, 13133613: 998, 15075141: 999}
# + id="Ip8hjHS2OzlW"
def one_hot_from_names(class_name_or_list):
    """Resolve ImageNet class name(s) to a class embedding via WordNet.

    Uses NLTK's WordNet to map a human-readable class name ('tennis ball',
    'daisy', ...) to an ImageNet synset offset through the module-level
    IMAGENET mapping, taking the first matching synset. If the name cannot
    be found directly, its hypernyms and hyponyms are tried as a fallback.

    Params:
        class_name_or_list: a class-name string, or a list/tuple of such
            strings (only the first resolved class is actually used below).
    Output:
        the embedding returned by ``model.get_class_embedding`` for the first
        resolved class, or None if a name has no noun synset at all.
        NOTE(review): the original docstring claimed a (batch_size, 1000)
        one-hot output, but only ``classes[0]`` is ever used — confirm the
        intended batch behavior.
    """
    try:
        from nltk.corpus import wordnet as wn
    except ImportError:
        raise ImportError("You need to install nltk to use this function")
    if not isinstance(class_name_or_list, (list, tuple)):
        class_name_or_list = [class_name_or_list]
    classes = []
    for class_name in class_name_or_list:
        # WordNet lemma names use underscores instead of spaces.
        class_name = class_name.replace(" ", "_")
        original_synsets = wn.synsets(class_name)
        # Keep only noun synsets — ImageNet classes are all nouns.
        original_synsets = [s for s in original_synsets if s.pos() == 'n']
        if not original_synsets:
            return None
        possible_synsets = [s for s in original_synsets if s.offset() in IMAGENET]
        if possible_synsets:
            classes.append(IMAGENET[possible_synsets[0].offset()])
        else:
            # Fall back to the hypernyms and hyponyms of every candidate synset.
            possible_synsets = sum([s.hypernyms() + s.hyponyms() for s in original_synsets], [])
            possible_synsets = [s for s in possible_synsets if s.offset() in IMAGENET]
            if possible_synsets:
                classes.append(IMAGENET[possible_synsets[0].offset()])
    return model.get_class_embedding(classes[0])
# + id="v88lM5_sSOWv"
# NOTE(review): this stores the *shape* of the class embedding, yet `new_c`
# is passed to model.forward below as the class conditioning — confirm
# whether `.shape` was intended here.
new_c = one_hot_from_names(['cat'])[0].shape
# Default value of the latent variable 'z' from the variable manager.
z = var_manager.variable_info['z']['default']
z
# + id="6e0LjuVxSR7c" outputId="0caf18d2-647b-4006-f890-ac80c3a6347f" colab={"base_uri": "https://localhost:8080/", "height": 298}
# NOTE(review): `v` is not defined in this chunk — presumably a latent
# vector created elsewhere; verify before running.
img = model.forward(v, new_c)
# + id="HMerdhQ6Tqhx"
|
notebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [source](../../api/alibi_detect.od.vaegmm.rst)
# # Variational Auto-Encoding Gaussian Mixture Model
# ## Overview
#
#
# The Variational Auto-Encoding Gaussian Mixture Model (VAEGMM) Outlier Detector follows the [Deep Autoencoding Gaussian Mixture Model for Unsupervised Anomaly Detection](https://openreview.net/forum?id=BJJLHbb0-) paper but with a [VAE](https://arxiv.org/abs/1312.6114) instead of a regular Auto-Encoder. The encoder compresses the data while the reconstructed instances generated by the decoder are used to create additional features based on the reconstruction error between the input and the reconstructions. These features are combined with encodings and fed into a Gaussian Mixture Model ([GMM](https://en.wikipedia.org/wiki/Mixture_model#Gaussian_mixture_model)). The VAEGMM outlier detector is first trained on a batch of unlabeled, but normal (*inlier*) data. Unsupervised or semi-supervised training is desirable since labeled data is often scarce. The sample energy of the GMM can then be used to determine whether an instance is an outlier (high sample energy) or not (low sample energy). The algorithm is suitable for tabular and image data.
# ## Usage
#
# ### Initialize
#
# Parameters:
#
# * `threshold`: threshold value for the sample energy above which the instance is flagged as an outlier.
#
# * `latent_dim`: latent dimension of the VAE.
#
# * `n_gmm`: number of components in the GMM.
#
# * `encoder_net`: `tf.keras.Sequential` instance containing the encoder network. Example:
#
# ```python
# encoder_net = tf.keras.Sequential(
# [
# InputLayer(input_shape=(n_features,)),
# Dense(60, activation=tf.nn.tanh),
# Dense(30, activation=tf.nn.tanh),
# Dense(10, activation=tf.nn.tanh),
# Dense(latent_dim, activation=None)
# ])
# ```
#
# * `decoder_net`: `tf.keras.Sequential` instance containing the decoder network. Example:
#
# ```python
# decoder_net = tf.keras.Sequential(
# [
# InputLayer(input_shape=(latent_dim,)),
# Dense(10, activation=tf.nn.tanh),
# Dense(30, activation=tf.nn.tanh),
# Dense(60, activation=tf.nn.tanh),
# Dense(n_features, activation=None)
# ])
# ```
#
# * `gmm_density_net`: layers for the GMM network wrapped in a `tf.keras.Sequential` class. Example:
#
# ```python
# gmm_density_net = tf.keras.Sequential(
# [
# InputLayer(input_shape=(latent_dim + 2,)),
# Dense(10, activation=tf.nn.tanh),
# Dense(n_gmm, activation=tf.nn.softmax)
# ])
# ```
#
# * `vaegmm`: instead of using a separate encoder, decoder and GMM density net, the VAEGMM can also be passed as a `tf.keras.Model`.
#
# * `samples`: number of samples drawn during detection for each instance to detect.
#
# * `beta`: weight on the KL-divergence loss term following the $\beta$-[VAE](https://openreview.net/forum?id=Sy2fzU9gl) framework. Default equals 1.
#
# * `recon_features`: function to extract features from the reconstructed instance by the decoder. Defaults to a combination of the mean squared reconstruction error and the cosine similarity between the original and reconstructed instances by the VAE.
#
# * `data_type`: can specify data type added to metadata. E.g. *'tabular'* or *'image'*.
#
# Initialized outlier detector example:
#
# ```python
# from alibi_detect.od import OutlierVAEGMM
#
# od = OutlierVAEGMM(
# threshold=7.5,
# encoder_net=encoder_net,
# decoder_net=decoder_net,
# gmm_density_net=gmm_density_net,
# latent_dim=4,
# n_gmm=2,
# samples=10
# )
# ```
# ### Fit
#
# We then need to train the outlier detector. The following parameters can be specified:
#
# * `X`: training batch as a numpy array of preferably normal data.
#
# * `loss_fn`: loss function used for training. Defaults to the custom VAEGMM loss which is a combination of the [elbo](https://en.wikipedia.org/wiki/Evidence_lower_bound) loss, sample energy of the GMM and a loss term penalizing small values on the diagonals of the covariance matrices in the GMM to avoid trivial solutions. It is important to balance the loss weights below so no single loss term dominates during the optimization.
#
# * `w_recon`: weight on elbo loss term. Defaults to 1e-7.
#
# * `w_energy`: weight on sample energy loss term. Defaults to 0.1.
#
# * `w_cov_diag`: weight on covariance diagonals. Defaults to 0.005.
#
# * `optimizer`: optimizer used for training. Defaults to [Adam](https://arxiv.org/abs/1412.6980) with learning rate 1e-4.
#
# * `cov_elbo`: dictionary with covariance matrix options in case the elbo loss function is used. Either use the full covariance matrix inferred from X (*dict(cov_full=None)*), only the variance (*dict(cov_diag=None)*) or a float representing the same standard deviation for each feature (e.g. *dict(sim=.05)*) which is the default.
#
# * `epochs`: number of training epochs.
#
# * `batch_size`: batch size used during training.
#
# * `verbose`: boolean whether to print training progress.
#
# * `log_metric`: additional metrics whose progress will be displayed if verbose equals True.
#
#
# ```python
# od.fit(
# X_train,
# epochs=10,
# batch_size=1024
# )
# ```
#
# It is often hard to find a good threshold value. If we have a batch of normal and outlier data and we know approximately the percentage of normal data in the batch, we can infer a suitable threshold:
#
# ```python
# od.infer_threshold(
# X,
# threshold_perc=95
# )
# ```
# ### Detect
#
# We detect outliers by simply calling `predict` on a batch of instances `X` to compute the instance level sample energies. We can also return the instance level outlier score by setting `return_instance_score` to True.
#
# The prediction takes the form of a dictionary with `meta` and `data` keys. `meta` contains the detector's metadata while `data` is also a dictionary which contains the actual predictions stored in the following keys:
#
# * `is_outlier`: boolean whether instances are above the threshold and therefore outlier instances. The array is of shape *(batch size,)*.
#
# * `instance_score`: contains instance level scores if `return_instance_score` equals True.
#
#
# ```python
# preds = od.predict(
# X,
# return_instance_score=True
# )
# ```
# ## Examples
#
# ### Tabular
#
# [Outlier detection on KDD Cup 99](../../examples/od_aegmm_kddcup.ipynb)
|
doc/source/od/methods/vaegmm.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
# +
# One-hot encode tokenized sentences with the "hashing trick": each word is
# mapped to a pseudo-random column via hash() instead of a learned vocabulary.
# NOTE: CPython salts str hashes per process (PYTHONHASHSEED), so the chosen
# columns differ between runs, but shapes and per-row counts are stable.
samples = ["the cat sat on the mat.", "the dog ate my homework."]
dimensionality = 1000  # size of the hashing space (number of columns)
max_length = 10        # keep at most the first 10 words of each sample
results = np.zeros(shape=(len(samples), max_length, dimensionality))
for i, sample in enumerate(samples):
    # Slice first, then enumerate — avoids materializing an intermediate list.
    for j, word in enumerate(sample.split()[:max_length]):
        index = abs(hash(word)) % dimensionality
        results[i, j, index] = 1.
# -
results
|
one_hot encoding with hashing trick in numpy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pyEX
from perspective import PerspectiveWidget
# Live-updating line chart of bid/ask prices rendered inside the notebook.
psp = PerspectiveWidget([], 'y_line', columns=['bidPrice', 'askPrice'])
psp
c = pyEX.Client()
# Stream IEX TOPS quotes for AAPL over server-sent events; each incoming
# message is pushed into the widget via psp.update.
c.topsSSE(symbols='AAPL', on_data=psp.update)
|
examples/sse.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# Suppose we have a list of N numbers and repeat the following operation until we are left with only a single number:
# +
# Choose any two numbers and replace them with their sum.
# +
# Moreover, we associate a penalty with each operation equal to the value of the new number,
# and call the penalty for the entire list the sum of the penalties of each operation.
# For example, given the list [1,2,3,4,6], we could choose 2 and 3 for the
# -
#first operation, which would transform the list into [1,5,4,6] and incur a
# penalty of 5. The goal in this problem is to find the highest possible penalty for a given input
# +
# Signature:
#int getTotalTime(int[] arr)
# +
#Input:
# An array arr containing N integers denoting the numbers in the list
# -
# Output:
# an int representing the highest possible total penalty
#Constraints:
# 1 <= N <= 10^6
# 1 <= Ai <= 10^7, where Ai denotes the i-th initial element of the array.
# The sum of values of N over all the test cases will not exceed 5*10^6
# +
# Example
# arr [4,2,1,3]
# output = 26
# first add 4+3 for a penalty of 7. Now the array is [7,2,1]
# add 7+2 for a penalty of 9. Now the array is [9,1]
# add 9+1 for a penalty of 10. The penalties sum to 26
# +
# -------------- Start of program
debug = False
# Repeatedly sort the array, replace its two largest elements with their sum,
# and accumulate each sum into the module-level `penaltysum`.  Summing the two
# largest elements at every step yields the maximum possible total penalty.
penaltysum = 0
def func_sum_of_largest_two_numbers(arr):
    """Reduce `arr` in place until one element remains, accumulating the
    sum of the two largest elements at each step into the global
    `penaltysum`.  Prints and returns the final penalty.

    Note: `penaltysum` must be reset to 0 before a fresh run, and `arr`
    is mutated by this function.
    """
    if(debug):
        print(arr)
    arr.sort()
    if(debug):
        print(arr)
    # BUG FIX: the original read `arr[len(arr1)-2]`, indexing with the length
    # of the *global* arr1 instead of the local arr — it only worked by
    # accident because the function was always called with arr1 itself.
    sumoflargetwonumbers = arr[len(arr)-1] + arr[len(arr)-2]
    global penaltysum
    penaltysum = sumoflargetwonumbers + penaltysum
    # Drop the two largest elements and append their sum.
    arr.pop()
    arr.pop()
    arr.append(sumoflargetwonumbers)
    if(debug):
        print(arr)
    if len(arr)>1:
        return func_sum_of_largest_two_numbers(arr)
    else:
        print(f" max Penalty sum is: {penaltysum}")
        return penaltysum
# -
arr1 = [4,2,1,3]
# Expected output: 26 (see the worked example above). Note arr1 is mutated.
func_sum_of_largest_two_numbers(arr1)
# +
# running the same function with the debug toggle to see the output of the recursive function
debug = True
# need to reset the global variable accumulated by the previous run
penaltysum = 0
# -
arr1 = [4,2,1,3]
func_sum_of_largest_two_numbers(arr1)
|
PythonJupyterNotebooks/Week3-Day1-Challenge.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ele9996/Temperature-and-Humidity-forecasting/blob/main/main.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="PgFFC76SNVvD"
# #Temperature and humidity forecasting
# + id="t0FttZjuTB08"
import tensorflow as tf
import numpy as np
from tensorflow import keras
import pandas as pd
import matplotlib.pyplot as plt
import argparse
import os
from zipfile import ZipFile
# + id="fo34_B25dfCU"
# Fix both TensorFlow's and NumPy's RNG seeds for reproducible runs.
seed = 42
tf.random.set_seed(seed)
np.random.seed(seed)
# + id="Te56lo9wmebe"
# CLI argument parsing is disabled in the notebook; values are hard-coded below.
#parser = argparse.ArgumentParser()
#parser.add_argument('--model', type=str, required=True, help='model name')
#parser.add_argument('--labels', type=int, required=True, help='model output')
#args = parser.parse_args()
# for testing (hard-coded instead of CLI args);
# label_to_predict = 2 means "predict both temperature and humidity".
label_to_predict= 2
model_name= "my_model"
# + [markdown] id="JEvUcAkrRcf_"
# ##Import Dataset
# + colab={"base_uri": "https://localhost:8080/", "height": 620} id="JzQev5UmNTTy" outputId="95b91480-0a6f-4b55-9168-5f93d9e240c3"
uri = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip"
zip_path = keras.utils.get_file(origin=uri, fname="jena_climate_2009_2016.csv.zip")
zip_file = ZipFile(zip_path)
zip_file.extractall()
csv_path = "jena_climate_2009_2016.csv"
df = pd.read_csv(csv_path)
df
# + id="XUTkTmfkRlXw" colab={"base_uri": "https://localhost:8080/", "height": 402} outputId="c580a518-24ee-426f-c537-9c095da82dd6"
selected_feat=['T (degC)','rh (%)']
selected_df=df[selected_feat] #df.loc[:,selected_feat].values
selected_df
# + colab={"base_uri": "https://localhost:8080/"} id="Nr_5hYCUShRr" outputId="9bb31bf5-fb32-42d9-a6f9-9f1581204eb6"
#Data conversion to a 32-bit float numpy array
selected_array=selected_df.to_numpy(dtype=np.float32) #.astype(np.float32)
selected_array
# + id="kpST-ZOSj1Nw"
#I define training (70%),validation (10%) and test data set (20%)
n=len(selected_array)
train_data=selected_array[0:int(n*0.7)]
val_data=selected_array[int(n*0.7):int(n*0.8)]
test_data = selected_array[int(n*0.8):]
# + id="TFmfO9jglbgE"
#I define mean and standard deviation (for normalization)
mean = train_data.mean(axis=0)
std = train_data.std(axis=0)
#I define the size of the window and Label Options (which is a number)
#LABEL_OPTIONS= is a number used to make my model understand if I'm predicting only temperature, only humidity or humidity and temperature together.
# is 0 for temperature, 1 for humidity, 2 for both
input_width = 6
LABEL_OPTIONS =label_to_predict
#LABEL_OPTIONS = args.labels
# + [markdown] id="4GHPN7iXS00v"
# ##Window Generation
#
# + id="vteVFROJWOvA"
#features: temperature, humidity (x6 values)
#one temperature value (the one corresponding to the next time interval)
class WindowGenerator:
    """Builds windowed tf.data datasets of (inputs, labels) pairs.

    Each window is `input_width` timesteps of the 2 features (temperature,
    humidity); the label is the value(s) at the following timestep.
    `label_options`: 0 = temperature only, 1 = humidity only, 2 = both.
    """

    def __init__(self, input_width, label_options, mean, std):
        self.input_width = input_width
        self.label_options = label_options
        # Reshape to (1, 1, 2) so normalization broadcasts over
        # (batch, time, feature).
        self.mean = tf.reshape(tf.convert_to_tensor(mean), [1, 1, 2])
        self.std = tf.reshape(tf.convert_to_tensor(std), [1, 1, 2])

    def split_window(self, features):
        """Split a (batch, input_width+1, 2) window into inputs and labels."""
        # Inputs: all but the last timestep; labels: the last timestep.
        inputs = features[:, :-1, :]
        if self.label_options < 2:
            # Single-feature label: keep a trailing axis of size 1.
            labels = features[:, -1, self.label_options]
            labels = tf.expand_dims(labels, -1)
            num_labels = 1
        else:
            labels = features[:, -1, :]
            num_labels = 2
        inputs.set_shape([None, self.input_width, 2])
        labels.set_shape([None, num_labels])
        return inputs, labels

    def normalize(self, features):
        """Standardize features with the training-set mean/std."""
        features = (features - self.mean) / (self.std + 1.e-6)
        return features

    def preprocess(self, features):
        """Split a raw window and normalize only the inputs (not the labels)."""
        inputs, labels = self.split_window(features)
        inputs = self.normalize(inputs)
        return inputs, labels

    def make_dataset(self, data, train):
        """Build a batched, cached (and, for training, shuffled) dataset."""
        # BUG FIX: sequence_length previously read the module-level
        # `input_width` global; use the instance's own configuration.
        ds = tf.keras.preprocessing.timeseries_dataset_from_array(
            data=data,
            targets=None,
            sequence_length=self.input_width + 1,
            sequence_stride=1,
            batch_size=32)
        ds = ds.map(self.preprocess)
        ds = ds.cache()
        if train is True:
            ds = ds.shuffle(100, reshuffle_each_iteration=True)
        return ds
# + id="iqqVtJv4rfDk"
# Build the three windowed datasets; only the training set is shuffled.
generator = WindowGenerator(input_width, LABEL_OPTIONS, mean, std)
train_ds = generator.make_dataset(train_data, True)
val_ds = generator.make_dataset(val_data, False)
test_ds = generator.make_dataset(test_data, False)
# + [markdown] id="vPuVpKDQTCF4"
# ##Dataset check
# + colab={"base_uri": "https://localhost:8080/"} id="LTTnYu0V20bt" outputId="153faf55-deda-4286-cd98-8ad1d6811bc5"
# Sanity-check one batch from each split: inputs should be (32, 6, 2) and
# labels (32, num_labels). Note x_test/y_test from the last loop are reused
# later for the per-model prediction printouts.
#train
for x_train,y_train in train_ds.take(1):
    print(x_train.shape)
    print(y_train.shape)
#validation
for x_val,y_val in val_ds.take(1):
    print(x_val.shape)
    print(y_val.shape)
#test
for x_test,y_test in test_ds.take(1):
    print(x_test.shape)
    print(y_test.shape)
# + [markdown] id="UICNDxMVtZfc"
# ##Class to measure the MAE separately on the two outputs
# + id="SQLbxTg0tsxN"
class MultiOutputMAE(tf.keras.metrics.Metric):
    """Mean absolute error tracked separately for each of the two outputs."""

    def __init__(self, name='mean_absolute_error', **kwargs):
        super().__init__(name=name, **kwargs)
        # Running sum of per-output batch MAE, and the number of batches seen.
        self.total = self.add_weight('total', initializer='zeros', shape=(2,))
        self.count = self.add_weight('count', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulate the per-output MAE of one batch (sample_weight unused)."""
        batch_mae = tf.reduce_mean(tf.abs(y_pred - y_true), axis=0)
        self.total.assign_add(batch_mae)
        self.count.assign_add(1.)

    def reset_states(self):
        """Zero both accumulators at the start of a new evaluation."""
        self.count.assign(tf.zeros_like(self.count))
        self.total.assign(tf.zeros_like(self.total))

    def result(self):
        """Average of the per-batch MAEs; 0 when no batches have been seen."""
        return tf.math.divide_no_nan(self.total, self.count)
# + [markdown] id="0TVo1c1KTGs9"
# ##MPL Model
# + id="JLyLije4vQbS"
#MLP Model definition
# Flattens the (6, 2) input window, then two hidden ReLU layers and a linear
# output layer with one unit per predicted quantity.
mlp_model = keras.Sequential([
    keras.layers.Flatten(),
    keras.layers.Dense(units=128, activation='relu'),
    keras.layers.Dense(units=128, activation='relu'),
    keras.layers.Dense(units=2) # use 2 units to predict both quantities, 1 to predict only one
])
# + colab={"base_uri": "https://localhost:8080/"} id="0eCHf8kh83q9" outputId="65328652-fa1c-41db-ed83-9f34dae8529f"
#Training, evaluation and testing with the MLP model
mlp_model.compile(loss='mse', optimizer='adam', metrics=['mse'])
mlp_model.fit(train_ds, epochs=20)
# + colab={"base_uri": "https://localhost:8080/"} id="EIE9p4f0_ksc" outputId="6a8517d3-dbd4-47ba-cbcd-95d1ceb080e8"
#model evaluation on validation set
# NOTE(review): re-compiling switches the loss to MAE but also resets the
# optimizer state; evaluate() here reports MAE instead of the training loss.
mlp_model.compile(loss='mae', optimizer='adam')
val_loss= mlp_model.evaluate(val_ds)
# + colab={"base_uri": "https://localhost:8080/"} id="Gw-5aR4EKEdM" outputId="b7da1edc-c113-43d0-ca4a-6140984edc47"
#test the model with the custom per-output MAE metric
mlp_model.compile(loss='mse', optimizer='adam', metrics=[MultiOutputMAE()])
#mlp_model.compile(loss='mae', optimizer='adam')
test_loss= mlp_model.evaluate(test_ds)
# + colab={"base_uri": "https://localhost:8080/"} id="Iray07evP3w3" outputId="886663b6-de61-4290-cc58-9c950acb86f0"
#check predictions on the single test batch captured earlier
predictions = mlp_model.predict(x_test)
# summarize the first 5 cases
for i in range(5):
    print('%s => %s (expected %s)' % (x_test[i].numpy().tolist(), predictions[i], y_test[i]))
# + id="vwmzZrebNmj5" colab={"base_uri": "https://localhost:8080/"} outputId="b4e58680-68fb-460c-c89d-211eb048ff3c"
#number of parameters VS mae
mlp_model.summary()
# + [markdown] id="hh4ThJ5IciM1"
# #CNN 1D
# + id="GMsWLPIS0tyN"
#CNN_1D Model definition
# 1-D convolution over the 6-step window, then a small dense head.
cnn_1d_model = keras.Sequential([
    keras.layers.Conv1D(filters=64,kernel_size=3, activation='relu'),
    keras.layers.Flatten(),
    keras.layers.Dense(units=64, activation='relu'),
    keras.layers.Dense(units=2)
])
# + id="1o0nawfugCAg" colab={"base_uri": "https://localhost:8080/"} outputId="6ec534ff-b46e-4fce-81c6-661dc4beb05f"
#Training with the CNN-1D model
cnn_1d_model.compile(loss='mse', optimizer='adam', metrics=['mse'])
cnn_1d_model.fit(train_ds, epochs=20)
# + id="WuEGq6spgB11" colab={"base_uri": "https://localhost:8080/"} outputId="d89f27ad-1a30-4ae7-e0af-6c82447cdd18"
#model evaluation on validation set (re-compile switches the loss to MAE)
cnn_1d_model.compile(loss='mae', optimizer='adam')
val_loss= cnn_1d_model.evaluate(val_ds)
# + id="vPmEEOdwgS30" colab={"base_uri": "https://localhost:8080/"} outputId="c67cb657-3ac8-4b5b-f788-3dce875e1deb"
#test the model with the custom per-output MAE metric
cnn_1d_model.compile(loss='mse', optimizer='adam', metrics=[MultiOutputMAE()])
#cnn_1d_model.compile(loss='mae', optimizer='adam')
test_loss= cnn_1d_model.evaluate(test_ds)
# + id="Uzygry5mg_q8" colab={"base_uri": "https://localhost:8080/"} outputId="01269454-14bc-416b-9788-6b6321644b2a"
#check predictions on the single test batch captured earlier
predictions = cnn_1d_model.predict(x_test)
# summarize the first 5 cases
for i in range(5):
    print('%s => %s (expected %s)' % (x_test[i].numpy().tolist(), predictions[i], y_test[i]))
# + id="6XOpqTI4scZP" colab={"base_uri": "https://localhost:8080/"} outputId="04c04bc0-0077-4bf0-f801-be10b0d18979"
# number of parameters for the CNN-1D model
cnn_1d_model.summary()
# + [markdown] id="OKUnLyEYiLPh"
# #LSTM
# + id="ctVc9Z6o2e6O"
#LSTM Model definition
# 64-unit LSTM over the 6-step window, then a linear 2-unit output layer.
lstm_model = keras.Sequential([
    keras.layers.LSTM(units=64),
    keras.layers.Flatten(),
    keras.layers.Dense(units=2)
])
# + id="7ShNPdmMiGF3" colab={"base_uri": "https://localhost:8080/"} outputId="c5d28620-0023-42c6-c6c1-9716d7f63c5c"
#Training with the LSTM model
lstm_model.compile(loss='mse', optimizer='adam', metrics=['mse'])
lstm_model.fit(train_ds, epochs=20)
# + id="rPWwiXd4iZTt" colab={"base_uri": "https://localhost:8080/"} outputId="44451c66-317c-40f5-902c-0d44c44a3bb6"
#model evaluation on validation set (re-compile switches the loss to MAE)
lstm_model.compile(loss='mae', optimizer='adam')
val_loss= lstm_model.evaluate(val_ds)
# + id="NXv3oe7qiZH8" colab={"base_uri": "https://localhost:8080/"} outputId="15aecbe9-918c-458c-8c89-9d2bb8919415"
#test the model with the custom per-output MAE metric
lstm_model.compile(loss='mse', optimizer='adam', metrics=[MultiOutputMAE()])
#lstm_model.compile(loss='mae', optimizer='adam')
test_loss= lstm_model.evaluate(test_ds)
# + id="vT14RaTqiY9F" colab={"base_uri": "https://localhost:8080/"} outputId="1c11b7e2-07bf-4e4f-9dc8-0f2aeaf43889"
#check predictions on the single test batch captured earlier
predictions = lstm_model.predict(x_test)
# summarize the first 5 cases
for i in range(5):
    print('%s => %s (expected %s)' % (x_test[i].numpy().tolist(), predictions[i], y_test[i]))
# + id="WHX00zSousEc" colab={"base_uri": "https://localhost:8080/"} outputId="7d7557f6-a446-4ba9-9646-2d0f681446a0"
# NOTE(review): this summarizes cnn_1d_model again inside the LSTM section —
# probably intended to be lstm_model.summary(); confirm before relying on the
# parameter counts plotted below (two identical x values there suggest the same
# copy-paste slip).
cnn_1d_model.summary()
# + [markdown] id="7eYgO8vdrkZu"
# #Save models
# + id="C9EYHdMsn_Tt" outputId="476d77e3-7c83-45fe-d061-37cfe5a1f747" colab={"base_uri": "https://localhost:8080/"}
# Export each model in SavedModel format with a fixed serving signature:
# batch 1, window of 6 timesteps, 2 features.
#mlp_model.save('./models/mpl_model.h5')
#cnn_1d_model.save('./models/cnn_1d_model.h5')
#lstm_model.save('./models/lstm_model.h5')
#mlp model save
run_model = tf.function(lambda x: mlp_model(x))
concrete_func = run_model.get_concrete_function(tf.TensorSpec([1, 6, 2],tf.float32))
mlp_model.save("./models/mlp_model", signatures=concrete_func)
#cnn_1d model save
run_model = tf.function(lambda x: cnn_1d_model(x))
concrete_func = run_model.get_concrete_function(tf.TensorSpec([1, 6, 2],tf.float32))
cnn_1d_model.save("./models/cnn_1d_model", signatures=concrete_func)
#lstm model save
run_model = tf.function(lambda x: lstm_model(x))
concrete_func = run_model.get_concrete_function(tf.TensorSpec([1, 6, 2],tf.float32))
lstm_model.save("./models/lstm_model", signatures=concrete_func)
# + [markdown] id="yatSVws9rikA"
# ##Plot #Params vs MAE
# + id="Eht3MWf8ZbFb" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="601b973f-b114-4e6e-a252-499ca35da2fc"
# Hard-coded parameter counts (x) vs MAE (y) for the three models.
# NOTE(review): two identical x values (17026) — consistent with the duplicated
# cnn_1d_model.summary() call above; verify the LSTM's true parameter count.
x=[18434, 17026, 17026]
y=[0.9371, 1.0920 , 0.3967]
plt.plot(x,y,'o--')
# + id="OhGDjao6M8S8" outputId="d17085e4-9e69-4101-e927-c329a7b67ae9" colab={"base_uri": "https://localhost:8080/", "height": 353}
# !zip -r model.zip {"./models"}
# Download the archive only when running inside Google Colab; elsewhere the
# ImportError is deliberately swallowed (best-effort, no-op locally).
try:
    from google.colab import files
    files.download('./model.zip')
except ImportError:
    pass
|
main.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Navigation using Double DQN
#
# ---
#
# In this notebook, I will implement Double DQN and see how well this technique performs.
#
# Note that the same parameters are applied to all the techniques I have used in this notebook as well as in other notebooks.
#
# #### different techniques which can be used to improve DQN
# '''
# 1 - Double DQN
# 2 - Prioritized experience replay
# 3 - Dueling DQN
# 4 - multi-step bootstrap targets (A3C)
# 5 - Distributional DQN
# 6 - Noisy DQN
#
# *****
# Rainbow # combination of these six different techniques
# *****
# '''
# ### 1. Start the Environment
#
from unityagents import UnityEnvironment
import numpy as np
# Next, we will start the environment!
# NOTE: this points at the Windows x86_64 build of the Banana environment;
# adjust the path for other platforms.
env = UnityEnvironment(file_name="Banana_Windows_x86_64/Banana_Windows_x86_64/Banana.exe")
# Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python.
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# ### 2. Examine the State and Action Spaces
#
# The simulation contains a single agent that navigates a large environment. At each time step, it has four actions at its disposal:
# - `0` - walk forward
# - `1` - walk backward
# - `2` - turn left
# - `3` - turn right
#
# The state space has `37` dimensions and contains the agent's velocity, along with ray-based perception of objects around agent's forward direction. A reward of `+1` is provided for collecting a yellow banana, and a reward of `-1` is provided for collecting a blue banana.
#
# Run the code cell below to print some information about the environment.
# +
# reset the environment (train_mode=True runs the simulation at training speed)
env_info = env.reset(train_mode=True)[brain_name]
# number of agents in the environment
print('Number of agents:', len(env_info.agents))
# number of actions
action_size = brain.vector_action_space_size
print('Number of actions:', action_size)
# examine the state space (first agent's observation vector)
state = env_info.vector_observations[0]
print('States look like:', state)
state_size = len(state)
print('States have length:', state_size)
# -
# ### 3. Implementation
#
# Now we will implement the idea of Double DQN.
# +
# import required packages
import sys
import os
import random
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from collections import deque,namedtuple
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# Determine if I want to train the agent on GPU (if available) or CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Our Environment: re-read the action/state dimensions from a fresh reset
env_info = env.reset(train_mode=False)[brain_name]
action_size = brain.vector_action_space_size
state = env_info.vector_observations[0]
state_size = len(state)
# +
# The model consists of 4 fully connected layers and an output layer
class DQN(nn.Module):
    """Feed-forward Q-network: four hidden fully connected layers plus an
    action-value head mapping a state to one Q-value per action."""
    def __init__(self, state_size, action_size, seed):
        super(DQN, self).__init__()
        self.state_size = state_size    # size of env states (37 for Banana)
        self.action_size = action_size  # number of available actions (4)
        self.seed = torch.manual_seed(seed)
        # hidden stack: state_size -> 32 -> 64 -> 64 -> 64 -> action_size
        self.fc1 = nn.Linear(self.state_size, 32)
        self.fc2 = nn.Linear(32, 64)
        self.fc3 = nn.Linear(64, 64)
        self.fc4 = nn.Linear(64, 64)
        self.actions = nn.Linear(64, self.action_size)

    def forward(self, state):
        """Return Q-values for every action given a batch of states."""
        hidden = state
        for layer in (self.fc1, self.fc2, self.fc3, self.fc4):
            hidden = F.relu(layer(hidden))
        return self.actions(hidden)
# -
class RLAgent():
    """Double-DQN agent that interacts with and learns from the environment.

    Keeps a trained *local* Q-network and a slowly-tracked *target* network.
    Actions are chosen epsilon-greedily from the local network; learning
    targets are Double-DQN style: the local network selects the next action,
    the target network evaluates it.
    """
    def __init__(self,state_size,action_size,buffer_size,batch_size,UPDATE_EVERY,
                 seed,gamma,TAU,alpha=5e-4):
        """Params
        ======
            state_size (int): dimension of each state
            action_size (int): number of discrete actions
            buffer_size (int): replay buffer capacity
            batch_size (int): minibatch size used for learning
            UPDATE_EVERY (int): learn once every this many environment steps
            seed (int): random seed
            gamma (float): discount factor
            TAU (float): soft-update interpolation factor
            alpha (float): Adam learning rate
        """
        self.state_size = state_size
        self.action_size = action_size
        self.buffer_size = buffer_size
        self.batch_size = batch_size
        self.UPDATE_EVERY = UPDATE_EVERY
        # BUG FIX: random.seed() returns None, so the original
        # `self.seed = random.seed(seed)` stored None and then forwarded None
        # to the ReplayBuffer, discarding the seed. Seed once, keep the int.
        random.seed(seed)
        self.seed = seed
        self.gamma = gamma
        self.alpha = alpha
        self.TAU = TAU
        # Q-Networks: local is optimized, target tracks it via soft updates.
        self.DQN_local = DQN(state_size, action_size, seed).to(device)
        #self.DQN_local.apply(self.weights_init)
        self.DQN_target = DQN(state_size, action_size, seed).to(device)
        self.optimizer = optim.Adam(self.DQN_local.parameters(), lr=self.alpha)
        # Replay memory
        self.memory = ReplayBuffer(self.action_size, self.buffer_size, self.batch_size, self.seed)
        # Initialize time step (for updating every UPDATE_EVERY steps)
        self.t_step = 0
    # Xavier weight initializer (currently unused; see commented-out apply() above)
    def weights_init(self, m):
        classname = m.__class__.__name__
        if classname.find('Linear') != -1:
            # FIX: torch.nn.init.xavier_uniform is deprecated; use the
            # in-place variant xavier_uniform_.
            torch.nn.init.xavier_uniform_(m.weight)
    def act(self,state,eps):
        """Return an epsilon-greedy action for `state` using the local network."""
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        self.DQN_local.eval()
        with torch.no_grad():
            action_values = self.DQN_local(state)
        self.DQN_local.train()
        # Epsilon-greedy action selection
        if random.random() > eps:
            return np.argmax(action_values.cpu().data.numpy()).astype(np.int32)
        else:
            return random.choice(np.arange(self.action_size))
    def step(self, state, action, reward, next_state, done):
        """Record one transition and trigger learning every UPDATE_EVERY steps."""
        # Save experience in replay memory
        self.memory.add(state, action, reward, next_state, done)
        # Learn every UPDATE_EVERY time steps.
        self.t_step = (self.t_step + 1) % self.UPDATE_EVERY
        if self.t_step == 0:
            # If enough samples are available in memory, get random subset and learn
            if len(self.memory) > self.batch_size:
                experiences = self.memory.sample()
                self.learn(experiences, self.gamma)
    def learn(self, experiences, gamma):
        """Update value parameters using given batch of experience tuples.
        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) batches
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences
        # Get expected Q values from local model for the actions actually taken
        Q_expected = self.DQN_local(states).gather(1, actions)
        # FIX: compute targets without building a graph — the original let
        # gradients accumulate on the (never-optimized) target network.
        with torch.no_grad():
            # Double DQN: the *local* network selects the next actions...
            next_actions = self.DQN_local(next_states).max(dim=1)[1].view(-1, 1).long().to(device)
            # ...and the *target* network evaluates them.
            Q_targets_next = self.DQN_target(next_states)
            Q_targets = rewards + (gamma * Q_targets_next.gather(1, next_actions) * (1 - dones))
        # Compute loss
        loss = F.mse_loss(Q_expected, Q_targets)
        # Minimize the loss
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        # ------------------- update target network ------------------- #
        self.soft_update(self.DQN_local, self.DQN_target, self.TAU)
    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.
        θ_target = τ*θ_local + (1 - τ)*θ_target
        Params
        ======
            local_model (PyTorch model): weights will be copied from
            target_model (PyTorch model): weights will be copied to
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class ReplayBuffer:
    """Fixed-size cyclic buffer of experience tuples for experience replay."""
    def __init__(self, action_size, buffer_size, batch_size, seed):
        """Initialize a ReplayBuffer object.
        Params
        ======
            action_size (int): dimension of each action
            buffer_size (int): maximum size of buffer
            batch_size (int): size of each training batch
            seed (int): random seed
        """
        self.action_size = action_size
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
        self.seed = random.seed(seed)
    def add(self, state, action, reward, next_state, done):
        """Add a new experience to memory."""
        self.memory.append(self.experience(state, action, reward, next_state, done))
    def sample(self):
        """Randomly sample a batch of experiences and return them as device tensors."""
        batch = [e for e in random.sample(self.memory, k=self.batch_size) if e is not None]
        def _stack(field):
            # stack one field of every sampled experience into a 2-D array
            return np.vstack([getattr(e, field) for e in batch])
        states = torch.from_numpy(_stack("state")).float().to(device)
        actions = torch.from_numpy(_stack("action")).long().to(device)
        rewards = torch.from_numpy(_stack("reward")).float().to(device)
        next_states = torch.from_numpy(_stack("next_state")).float().to(device)
        dones = torch.from_numpy(_stack("done").astype(np.uint8)).float().to(device)
        return (states, actions, rewards, next_states, dones)
    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory)
def train_agent(EPISODES=2000,max_ts=1000,eps_start=1.0,eps_decay=0.99,
               eps_min=0.01,gamma=0.99,alpha=5e-4,seed=6):
    """Train the Double-DQN agent on the Banana environment.

    Params
    ======
        EPISODES (int): maximum number of training episodes
        max_ts (int): maximum timesteps per episode
        eps_start/eps_decay/eps_min (float): epsilon-greedy schedule
        gamma (float): discount factor
        alpha (float): learning rate passed to the agent
        seed (int): random seed

    Returns the list of per-episode scores. Saves a checkpoint and stops as
    soon as the 100-episode average score reaches 13.0.
    """
    buffer_size = int(1e5)
    batch_size = 64
    UPDATE_EVERY = 4
    TAU = 1e-3
    Agent = RLAgent(state_size,action_size,buffer_size,batch_size,UPDATE_EVERY,
                   seed,gamma,TAU,alpha)
    scores = []
    scores_window = deque(maxlen=100)  # rolling window for the 100-episode average
    eps = eps_start
    solved = False
    for i_episode in range(1,EPISODES + 1):
        env_info = env.reset(train_mode=True)[brain_name] # reset the environment
        state = env_info.vector_observations[0]            # get the current state
        score = 0
        for time_step in range(max_ts):
            action = Agent.act(state,eps)
            #print(type(action))
            env_info = env.step(action)[brain_name]        # send the action to the environment
            next_state = env_info.vector_observations[0]   # get the next state
            reward = env_info.rewards[0]                   # get the reward
            done = env_info.local_done[0]                  # see if episode has finished
            Agent.step(state, action, reward, next_state, done)
            state = next_state
            score += reward
            if done:
                break
        scores_window.append(score)       # save most recent score
        scores.append(score)              # save most recent score
        eps = max(eps_min, eps_decay*eps) # decrease epsilon
        print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="")
        if i_episode % 100 == 0:
            print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
        # NOTE(review): since we break immediately on solving, the `solved`
        # flag can never be True here — it is redundant as written.
        if np.mean(scores_window)>=13.0 and solved !=True:
            print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
            torch.save(Agent.DQN_local.state_dict(), 'checkpoints/DoubleDQN.pth')
            solved = True
            break
    return scores
# Run training, shut the Unity environment down, and plot per-episode scores.
Double_DQN_scores = train_agent()
env.close()
fig = plt.figure()
plt.plot(np.arange(len(Double_DQN_scores)), Double_DQN_scores)
plt.ylabel("Double DQN Score")
plt.xlabel("Episode")
plt.show()
|
Project-1_Navigation/Navigation/02.Navigation-Double_DQN.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: basedatascience
# language: python
# name: basedatascience
# ---
# # Testing Notebook 03
#
# This notebook is where I'm developing the containment methods for multivariate functions in $\mathbb{R}^n$
# +
import pandas as pd
import numpy as np
import plotly.express as px
import matplotlib.pyplot as plt
from statdepth.depth._containment import _is_in_simplex
from statdepth.depth._depthcalculations import _subsequences
# -
# First, create some multidimensional data
# Seven random integer-valued "functions" in R^3, each observed at 30 shared time indices.
data = [pd.DataFrame(np.random.randint(0,5,size=(30, 3)), columns=list('ABC')) for _ in range(7)]
# +
from scipy.special import binom
from statdepth.depth._containment import _is_in_simplex
from statdepth.depth._depthcalculations import _subsequences
from typing import List
def _simplex_containment(data: List[pd.DataFrame], curve: pd.DataFrame, J=2, relax=False):
    """Row-wise simplex containment of `curve` in the sample `data`.

    At every time index the curve's point is tested against the simplex whose
    vertices are the corresponding rows of the functions in `data`. If `relax`,
    return the proportion of contained rows; otherwise integer-divide so the
    result is 1 only when every row is contained.
    """
    n_rows, _ = data[0].shape
    contained = sum(
        _is_in_simplex(
            simplex_points=np.array([df.loc[t, :] for df in data]),
            point=np.array(curve.loc[t, :]),
        )
        for t in curve.index
    )
    return contained / n_rows if relax else contained // n_rows
# +
def _simplex_depth(data: list, curve: pd.DataFrame, J=2, relax=False):
    """Simplicial-style depth of `curve` with respect to the sample `data`.

    Averages the containment of `curve` over all subsets of d+1 functions
    drawn from `data` (the vertices of each simplex).
    """
    l, d = data[0].shape
    n = len(data)
    S_nj = 0
    subseq = _subsequences([i for i in range(n)], d + 1)
    for seq in subseq:
        # BUG FIX: the original built `cdata` but then passed the full `data`
        # to _simplex_containment, so every term of the sum was identical.
        cdata = [data[i] for i in seq]
        S_nj += _simplex_containment(data=cdata, curve=curve, relax=relax)
    # BUG FIX: the original did `depth += S_nj / binom(...)` inside the loop
    # on a running total, summing partial sums; normalize once at the end
    # (this matches the structure of pointwise_depth below).
    return S_nj / binom(n, d + 1)
def simplexdepth(data: list, J=2, relax=False):
    """Leave-one-out simplex depth of every function in `data`."""
    labels = list(range(len(data)))
    depths = [
        _simplex_depth(
            data=[other for other in data if other is not cdf],
            curve=cdf,
            J=J,
            relax=relax,
        )
        for cdf in data
    ]
    return pd.Series(index=labels, data=depths)
# -
# %%timeit
simplexdepth(data)
def pointwise_depth(data: pd.DataFrame, J=2, containment='simplex'):
    """Simplicial depth of every row (a point in R^d) of `data`.

    NOTE(review): normalizes by binom(n, d+1) even though simplices are drawn
    from the n-1 *other* points — confirm the intended denominator.
    """
    n, d = data.shape
    depths = []
    for t in data.index:
        point = np.array(data.loc[t, :])
        other_idx = list(data.drop(t, axis=0).index)
        hits = 0
        for seq in _subsequences(other_idx, d + 1):
            hits += _is_in_simplex(simplex_points=np.array(data.loc[seq, :]), point=point)
        depths.append(hits / binom(n, d + 1))
    return pd.Series(index=data.index, data=depths)
# 12 random points in the unit cube, scattered in 3-D for a visual sanity check.
df = pd.DataFrame(np.random.rand(12, 3), columns=list('ABC'))
px.scatter_3d(x=df['A'], y=df['B'], z=df['C'])
# depths = pointwise_depth(data=df).sort_values(ascending=False)
# +
def _plot(df, deep_or_outlying: pd.Series) -> None:
    """Scatter all points in blue and the selected (deep/outlying) points in red.

    Only 2-D and 3-D data are drawn; any other dimensionality is silently skipped.
    """
    dim = len(df.columns)
    cols = df.columns
    select = df.loc[deep_or_outlying.index, :]
    if dim == 3:
        traces = [
            go.Scatter3d(x=df[cols[0]], y=df[cols[1]], z=df[cols[2]], mode='markers', marker_color='blue', name=''),
            go.Scatter3d(x=select[cols[0]], y=select[cols[1]], z=select[cols[2]], mode='markers',
                         marker_color='red', name=''),
        ]
    elif dim == 2:
        traces = [
            go.Scatter(x=df[cols[0]], y=df[cols[1]], mode='markers', marker_color='blue', name=''),
            go.Scatter(x=select[cols[0]], y=select[cols[1]], mode='markers',
                       marker_color='red', name=''),
        ]
    else:
        # 1-D or more than 3-D: nothing to draw
        return
    fig = go.Figure(data=traces)
    fig.update_layout(showlegend=False)
    fig.show()
# _plot(df, depths[0:1])
# -
# Exercise the packaged PointwiseDepth API on a fresh random sample.
from statdepth import PointwiseDepth
df = pd.DataFrame(np.random.rand(20, 3), columns=list('ABC'))
d = PointwiseDepth(df)
d
d.median()
# Sanity check: re-importing yields the very same function object.
from statdepth.depth._containment import _is_in_simplex
s = _is_in_simplex
s == _is_in_simplex
df
# +
def _pointwisedepth(data: pd.DataFrame, points: pd.Index=None, J=2, containment='simplex'):
    """Compute pointwise depth for n points in R^p, where data is an nxp matrix of points.

    If `points` is not None, only compute depth for the given points
    (should be a subset of data.index).
    """
    n, d = data.shape
    depths = []
    to_compute = data.index if points is None else points
    if containment == 'simplex':
        for time in to_compute:
            S_nj = 0
            point = data.loc[time, :]
            # all (d+1)-subsets of the *other* points form candidate simplices
            subseq = _subsequences(list(data.drop(time, axis=0).index), d + 1)
            # FIX: removed leftover debug print of len(subseq)
            for seq in subseq:
                S_nj += _is_in_simplex(simplex_points=np.array(data.loc[seq, :]),
                                       point=np.array(point))
            depths.append(S_nj / binom(n, d + 1))
    else:
        # FIX: an unknown containment method used to fall through with an empty
        # `depths`, producing a broken Series; fail loudly instead.
        raise ValueError(f'containment method {containment!r} is not supported')
    return pd.Series(index=to_compute, data=depths)
def _samplepointwisedepth(data: pd.DataFrame, points: pd.Index=None, K=2, J=2, containment='simplex'):
    """Approximate pointwise depth by averaging depths over random subsamples.

    Params
    ======
        data: n x d matrix of points
        points: optional subset of data.index to compute depth for
        K: number of blocks; each subsample holds n // K points
        J, containment: forwarded to _pointwisedepth
    """
    n, d = data.shape
    to_compute = data.index
    depths = []
    if points is not None:
        to_compute = points
    # K blocks of points (indices)
    ss = n // K
    for time in to_compute:
        cd = []
        # NOTE(review): this draws `ss` subsamples rather than K — kept as-is;
        # confirm whether K subsamples was intended.
        for _ in range(ss):
            sdata = data.sample(n=ss, axis=0)
            # If our current datapoint isnt in the sampled data, just append it since we need to sample it
            # for _is_in_simplex()
            if not time in sdata.index:
                # FIX: DataFrame.append was deprecated and removed in pandas 2.0;
                # concatenate a one-row slice instead.
                sdata = pd.concat([sdata, data.loc[[time], :]])
            cd.append(_pointwisedepth(data=sdata, points=[time], J=J, containment=containment))
        # FIX: removed leftover debug print of the depths list
        depths.append(np.mean(cd))
    return pd.Series(index=to_compute, data=depths)
# Run the subsampled depth approximation and inspect the ranking.
ds = _samplepointwisedepth(data=df, K=2)
# -
ds.sort_values(ascending=False)
# +
import plotly.graph_objects as go
# Highlight the three deepest points from the packaged PointwiseDepth result.
_plot(df, d.ordered()[0:3])
|
testing_notebooks/jlehrer_testing_notebook_03.ipynb
|