code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# %config InlineBackend.figure_format='retina'
# +
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.mlab
import os
import sys
import time
import numpy as np
import h5py
from librosa.feature import melspectrogram
from librosa import logamplitude
from matplotlib import mlab
from matplotlib import gridspec
from scipy.interpolate import interp1d
mpl.rcParams['agg.path.chunksize'] = 10000
# +
def get_psd(real_strain, sampling_rate=4096):
    """Estimate the noise power spectral density (PSD) of a strain series.

    Args:
        real_strain: 1D array of detector strain samples.
        sampling_rate: Sampling rate of the strain in Hz (default 4096).

    Returns:
        A callable psd(frequency) that linearly interpolates the Welch
        PSD estimate over the computed frequency grid.
    """
    # Define some constants
    nfft = 2 * sampling_rate  # Bigger values yield better resolution?
    # Use matplotlib.mlab to calculate the PSD from the real strain
    P_xx, freqs = mlab.psd(real_strain, NFFT=nfft, Fs=sampling_rate)
    # Interpolate it linearly, so we can re-sample the spectrum arbitrarily
    psd = interp1d(freqs, P_xx)
    return psd
def apply_psd(signal_t, psd, sampling_rate=4096):
    """
    Take a signal in the time domain, and a precalculated Power Spectral
    Density, and color the signal according to the given PSD.

    Args:
        signal_t: A signal in time domain (i.e. a 1D numpy array)
        psd: A Power Spectral Density, e.g. calculated from the detector
            noise. Should be a function: psd(frequency)
        sampling_rate: Sampling rate of signal_t

    Returns: color_signal_t, the colored signal in the time domain.
    """
    num_samples = len(signal_t)
    sample_spacing = 1.0 / sampling_rate

    # Move to the frequency domain and evaluate the PSD on the matching
    # one-sided FFT frequency grid.
    freq_grid = np.fft.rfftfreq(num_samples, sample_spacing)
    spectrum = np.fft.rfft(signal_t)

    # Divide each bin by sqrt(PSD / (2 * dt)) -- the usual whitening
    # ("coloring") normalization.
    whitening_factor = np.sqrt(psd(freq_grid) / sample_spacing / 2.0)
    colored_spectrum = spectrum / whitening_factor

    # Back to the time domain, keeping the original sample count.
    return np.fft.irfft(colored_spectrum, n=num_samples)
# +
# Path to the directory where all data is stored
data_path = '../data'

# Read in the HDF file with the raw H1 strain.
# NOTE: open explicitly read-only; older h5py versions defaulted to
# append mode ('a'), which could create/modify the data file.
with h5py.File(os.path.join(data_path, 'strain', 'H1_2017_4096.hdf5'), 'r') as file:
    strain = np.array(file['strain/Strain'])
# -
# Whiten the raw strain with its own estimated PSD, then cut out a
# 10-second window centered on t = 2048.6 s (at 4096 Hz).
psd = get_psd(strain)
whitened_strain = apply_psd(strain, psd)
signal = whitened_strain[int(4096*(2048.6-5)):int(4096*(2048.6+5))]
plt.plot(signal)
# Mark the central second of the window (4.5 s to 5.5 s) in red.
plt.axvline(x=4.5*4096, ls='--', color='red')
plt.axvline(x=5.5*4096, ls='--', color='red')
plt.gcf().set_size_inches(18, 4, forward=True)
plt.show()
def make_spectrogram(strain):
    """Compute a log-scaled (dB) mel spectrogram of a strain chunk.

    Returns a (n_mels, n_frames) array of dB values.
    """
    # Pass the signal as a keyword argument: recent librosa versions made
    # the arguments of melspectrogram keyword-only.
    result = melspectrogram(y=strain, sr=4096, n_fft=1024, hop_length=64,
                            n_mels=64, fmin=0, fmax=400)
    # librosa.logamplitude was removed in librosa 0.6; power_to_db is the
    # documented replacement for power spectrograms.
    from librosa import power_to_db
    return power_to_db(result)
# Plot the mel spectrogram twice: once with interpolation disabled and once
# with the default (smoothed) rendering.
plt.figure(1)
plt.imshow(make_spectrogram(signal), origin="lower", interpolation="none")
plt.gcf().set_size_inches(18, 4, forward=True)
# Red lines approximately mark the 4.5 s / 5.5 s points of the window
# (hop_length=64 at 4096 Hz gives 64 spectrogram frames per second).
plt.axvline(x=288.45, ls='--', color='red')
plt.axvline(x=352.55, ls='--', color='red')
plt.figure(2)
plt.imshow(make_spectrogram(signal), origin="lower")
plt.gcf().set_size_inches(18, 4, forward=True)
plt.axvline(x=288.45, ls='--', color='red')
plt.axvline(x=352.55, ls='--', color='red')
plt.show()
# +
# Matplotlib spectrogram of the whitened 10-second window.
fs = 4096
deltat = 5

# pick a shorter FTT time interval, like 1/8 of a second:
NFFT = int(fs/8)
# and with a lot of overlap, to resolve short-time features:
NOVL = int(NFFT*15./16)
# and choose a window that minimizes "spectral leakage"
# (https://en.wikipedia.org/wiki/Spectral_leakage)
# NOTE: the original code assigned np.blackman(NFFT) here and then
# immediately overwrote it with the Bartlett window; only Bartlett was ever
# used, so the dead assignment has been removed.
window = np.bartlett(NFFT)

# the right colormap is all-important! See:
# http://matplotlib.org/examples/color/colormaps_reference.html
# viridis seems to be the best for our purposes, but it's new; if you don't have it, you can settle for ocean.
spec_cmap='viridis'
#spec_cmap='ocean'

# Plot the H1 spectrogram:
plt.figure(figsize=(10,6))
spec_H1, freqs, bins, im = plt.specgram(signal, NFFT=NFFT, Fs=fs, window=window,
                                        noverlap=NOVL, cmap=spec_cmap, xextent=[-deltat,deltat])
plt.xlabel('time (s)')
plt.ylabel('Frequency (Hz)')
plt.colorbar()
plt.axis([-deltat, deltat, 0, 500])
plt.title('aLIGO H1 strain data')
plt.gcf().set_size_inches(18, 4, forward=True)
plt.show()
# -
| quantum gravity/convwave/src/jupyter/real_signal_in_detector_recording.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: firstEnv
# language: python
# name: firstenv
# ---
# Generate synthetic polyp images from pretrained SinGAN checkpoints.
from singan_polyp_aug import generate_data, prepare_requirements
import pandas as pd
# Inspect the library API interactively.
help(prepare_requirements.prepare_checkpoints)
help(generate_data)
# Directory where the generated samples are written.
output_dir = "/work/vajira/DATA/sinGAN_polyps/singan_out"
# Collect checkpoint paths (and matching real images) for the four linked
# checkpoint sets.
paths, reals = prepare_requirements.prepare_checkpoints("/work/vajira/DATA/sinGAN_polyps/singan_test_2", link_keys=["link1", "link2", "link3", "link4"], real_data=True)
len(reals)
len(paths)
# +
#paths
# +
#generate_data.generate_from_single_checkpoint(output_dir, paths[101], gen_start_scale=0, num_samples=5)
# -
# Generate samples for every checkpoint, starting from scale 1.
generate_data.generate_simple(output_dir, "/work/vajira/DATA/sinGAN_polyps/singan_test_2", gen_start_scale=1, num_samples=5)
# +
#reals
# +
#pd.DataFrame(reals)
# -
# natsorted gives a human/numeric ordering of the checkpoint paths.
from natsort import natsorted
str(paths[0])
paths_str = [str(p) for p in paths ]
# +
#paths_str
# -
natsorted(list(paths_str))
help(generate_data)
# NOTE(review): output dir and checkpoint dir are the same in this call, so
# samples are written next to the checkpoints -- confirm this is intended.
generate_data.generate_simple("/work/vajira/DATA/sinGAN_polyps/singan_test_2", "/work/vajira/DATA/sinGAN_polyps/singan_test_2")
| notebooks/generate_singan_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hypothesis testing
#
# In this notebook we demonstrate formal hypothesis testing using the [NHANES](https://www.cdc.gov/nchs/nhanes/index.htm) data.
#
# It is important to note that the NHANES data are a "complex survey". The data are not an independent and representative sample from the target population. Proper analysis of complex survey data should make use of additional information about how the data were collected. Since complex survey analysis is a somewhat specialized topic, we ignore this aspect of the data here, and analyze the NHANES data as if it were an independent and identically distributed sample from a population.
#
# First we import the libraries that we will need.
# %matplotlib inline
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.api as sm
import scipy.stats.distributions as dist
# Below we read the data, and convert some of the integer codes to text values. The NHANES codebooks for
# [SMQ020](https://wwwn.cdc.gov/Nchs/Nhanes/2015-2016/SMQ_I.htm#SMQ020),
# [RIAGENDR](https://wwwn.cdc.gov/Nchs/Nhanes/2015-2016/DEMO_I.htm#RIAGENDR), and
# [DMDCITZN](https://wwwn.cdc.gov/Nchs/Nhanes/2015-2016/DEMO_I.htm#DMDCITZN) describe the meanings of the numerical
# codes.
# +
da = pd.read_csv("nhanes_2015_2016.csv")
da["SMQ020x"] = da.SMQ020.replace({1: "Yes", 2: "No", 7: np.nan, 9: np.nan}) # np.nan represents a missing value
da["RIAGENDRx"] = da.RIAGENDR.replace({1: "Male", 2: "Female"})
da["DMDCITZNx"] = da.DMDCITZN.replace({1: "Yes", 2: "No", 7: np.nan, 9: np.nan})
# -
# ### Hypothesis tests for one proportion
#
# The most basic hypothesis test may be the one-sample test for a proportion. This test is used if we have specified a particular value as the null value for the proportion, and we wish to assess if the data are compatible with the true parameter value being equal to this specified value. One-sample tests are not used very often in practice, because it is not very common that we have a specific fixed value to use for comparison.
#
# For illustration, imagine that the rate of lifetime smoking in another country was known to be 40%, and we wished to assess whether the rate of lifetime smoking in the US were different from 40%. In the following notebook cell, we carry out the (two-sided) one-sample test that the population proportion of smokers is 0.4, and obtain a p-value of 0.43. This indicates that the NHANES data are compatible with the proportion of (ever) smokers in the US being 40%.
# One-sample, two-sided z-test that the population smoking proportion is 0.4.
x = da.SMQ020x.dropna() == "Yes"  # Boolean ever-smoker indicator
p = x.mean()  # observed sample proportion
null_prop = 0.4
# Standard error computed under the null hypothesis.
se = np.sqrt(null_prop * (1 - null_prop) / len(x))
test_stat = (p - null_prop) / se
# Two-sided p-value from the standard normal tail.
pvalue = 2 * dist.norm.cdf(-np.abs(test_stat))
print(test_stat, pvalue)
# The following cell carries out the same test as performed above using the Statsmodels library. The results in the first (default) case below are slightly different from the results obtained above because Statsmodels by default uses the sample proportion instead of the null proportion when computing the standard error. This distinction is rarely consequential, but we can specify that the null proportion should be used to calculate the standard error, and the results agree exactly with what we calculated above. The first two lines below carry out tests using the normal approximation to the sampling distribution of the test statistic, and the third line below uses the exact binomial sampling distribution. We can see here that the p-values are nearly identical in all three cases. This is expected when the sample size is large, and the proportion is not close to either 0 or 1.
# +
# Same one-proportion test done three ways with Statsmodels.
# Prints test statistic, p-value
print(sm.stats.proportions_ztest(x.sum(), len(x), 0.4)) # Normal approximation with estimated proportion in SE
print(sm.stats.proportions_ztest(x.sum(), len(x), 0.4, prop_var=0.4)) # Normal approximation with null proportion in SE
# Prints the p-value
print(sm.stats.binom_test(x.sum(), len(x), 0.4)) # Exact binomial p-value
# -
# ### Hypothesis tests for two proportions
#
# Comparative tests tend to be used much more frequently than tests comparing one population to a fixed value. A two-sample test of proportions is used to assess whether the proportion of individuals with some trait differs between two sub-populations. For example, we can compare the smoking rates between females and males. Since smoking rates vary strongly with age, we do this in the subpopulation of people between 20 and 25 years of age. In the cell below, we carry out this test without using any libraries, implementing all the test procedures covered elsewhere in the course using Python code. We find that the smoking rate for men is around 10 percentage points greater than the smoking rate for females, and this difference is statistically significant (the p-value is around 0.01).
# +
# Two-sample test: smoking rate of females vs. males, restricted to ages 20-25.
dx = da[["SMQ020x", "RIDAGEYR", "RIAGENDRx"]].dropna()  # Drop missing values
dx = dx.loc[(dx.RIDAGEYR >= 20) & (dx.RIDAGEYR <= 25), :]  # Restrict to people between 20 and 25 years old

# Summarize the data by calculating the proportion of yes responses and the sample size
p = dx.groupby("RIAGENDRx")["SMQ020x"].agg([lambda z: np.mean(z=="Yes"), "size"])
p.columns = ["Smoke", "N"]
print(p)

# The pooled rate of yes responses, and the standard error of the estimated difference of proportions
p_comb = (dx.SMQ020x == "Yes").mean()
va = p_comb * (1 - p_comb)
se = np.sqrt(va * (1 / p.N.Female + 1 / p.N.Male))

# Calculate the test statistic and its p-value
test_stat = (p.Smoke.Female - p.Smoke.Male) / se
pvalue = 2*dist.norm.cdf(-np.abs(test_stat))
print(test_stat, pvalue)
# -
# -
# Essentially the same test as above can be conducted by converting the "Yes"/"No" responses to numbers (Yes=1, No=0) and conducting a two-sample t-test, as below:
# Recode Yes/No to 1/0 and run the equivalent two-sample t-test.
dx_females = dx.loc[dx.RIAGENDRx=="Female", "SMQ020x"].replace({"Yes": 1, "No": 0})
dx_males = dx.loc[dx.RIAGENDRx=="Male", "SMQ020x"].replace({"Yes": 1, "No": 0})
sm.stats.ttest_ind(dx_females, dx_males) # prints test statistic, p-value, degrees of freedom
# ### Hypothesis tests comparing means
#
# Tests of means are similar in many ways to tests of proportions. Just as with proportions, for comparing means there are one and two-sample tests, z-tests and t-tests, and one-sided and two-sided tests. As with tests of proportions, one-sample tests of means are not very common, but we illustrate a one sample test in the cell below. We compare systolic blood pressure to the fixed value 120 (which is the lower threshold for "pre-hypertension"), and find that the mean is significantly different from 120 (the point estimate of the mean is 126).
# One-sample z-test: mean systolic BP of men aged 40-50 against the fixed value 120.
dx = da[["BPXSY1", "RIDAGEYR", "RIAGENDRx"]].dropna()
dx = dx.loc[(dx.RIDAGEYR >= 40) & (dx.RIDAGEYR <= 50) & (dx.RIAGENDRx == "Male"), :]
print(dx.BPXSY1.mean()) # prints mean blood pressure
sm.stats.ztest(dx.BPXSY1, value=120) # prints test statistic, p-value
# In the cell below, we carry out a formal test of the null hypothesis that the mean blood pressure for women between the ages of 50 and 60 is equal to the mean blood pressure of men between the ages of 50 and 60. The results indicate that while the mean systolic blood pressure for men is slightly greater than that for women (129 mm/Hg versus 128 mm/Hg), this difference is not statistically significant.
#
# There are a number of different variants on the two-sample t-test. Two often-encountered variants are the t-test carried out using the t-distribution, and the t-test carried out using the normal approximation to the reference distribution of the test statistic, often called a z-test. Below we display results from both these testing approaches. When the sample size is large, the difference between the t-test and z-test is very small.
# Two-sample comparison of mean systolic BP, females vs. males aged 50-60,
# using both the z-test and the t-test.
dx = da[["BPXSY1", "RIDAGEYR", "RIAGENDRx"]].dropna()
dx = dx.loc[(dx.RIDAGEYR >= 50) & (dx.RIDAGEYR <= 60), :]
bpx_female = dx.loc[dx.RIAGENDRx=="Female", "BPXSY1"]
bpx_male = dx.loc[dx.RIAGENDRx=="Male", "BPXSY1"]
print(bpx_female.mean(), bpx_male.mean()) # prints female mean, male mean
print(sm.stats.ztest(bpx_female, bpx_male)) # prints test statistic, p-value
print(sm.stats.ttest_ind(bpx_female, bpx_male)) # prints test statistic, p-value, degrees of freedom
# Another important aspect of two-sample mean testing is "heteroscedasticity", meaning that the variances within the two groups being compared may be different. While the goal of the test is to compare the means, the variances play an important role in calibrating the statistics (deciding how big the mean difference needs to be to be declared statistically significant). In the NHANES data, we see that there are moderate differences between the amount of variation in BMI for females and for males, looking within 10-year age bands. In every age band, females have greater variation than males.
# Standard deviation of BMI within 10-year age bands, split by gender.
# NOTE(review): the dx computed here is never used below -- the grouping runs
# on the full da frame; this looks like a leftover line.
dx = da[["BMXBMI", "RIDAGEYR", "RIAGENDRx"]].dropna()
da["agegrp"] = pd.cut(da.RIDAGEYR, [18, 30, 40, 50, 60, 70, 80])
da.groupby(["agegrp", "RIAGENDRx"])["BMXBMI"].agg(np.std).unstack()
# The standard error of the mean difference (e.g. mean female blood pressure minus mean male blood pressure) can be estimated in at least two different ways. In the statsmodels library, these approaches are referred to as the "pooled" and the "unequal" approach to estimating the variance. If the variances are equal (i.e. there is no heteroscedasticity), then there should be little difference between the two approaches. Even in the presence of moderate heteroscedasticity, as we have here, we can see that the results for the two methods are quite similar. Below we have a loop that considers each 10-year age band and assesses the evidence for a difference in mean BMI for women and for men. The results printed in each row of output are the test-statistic and p-value.
# For each 10-year age band, compare mean BMI of women vs. men with both the
# pooled-variance and the unequal-variance z-test.
for k, v in da.groupby("agegrp"):
    bmi_female = v.loc[v.RIAGENDRx=="Female", "BMXBMI"].dropna()
    bmi_female = sm.stats.DescrStatsW(bmi_female)
    bmi_male = v.loc[v.RIAGENDRx=="Male", "BMXBMI"].dropna()
    bmi_male = sm.stats.DescrStatsW(bmi_male)
    print(k)
    # Each line prints (test statistic, p-value) for one variance treatment.
    print("pooled: ", sm.stats.CompareMeans(bmi_female, bmi_male).ztest_ind(usevar='pooled'))
    print("unequal:", sm.stats.CompareMeans(bmi_female, bmi_male).ztest_ind(usevar='unequal'))
    print()
# ### Paired tests
#
# A common situation in applied research is to measure the same quantity multiple times on each unit of analysis. For example, in NHANES, systolic blood pressure is measured at least two times (sometimes there is a third measurement) on each subject. Although the measurements are repeated, there is no guarantee that the mean is the same each time, i.e. the mean blood pressure may be slightly lower on the second measurement compared to the first, since people are a bit more nervous the first time they are measured. A paired test is a modified form of mean test that can be used when we are comparing two repeated measurements on the same unit.
#
# A paired t-test for means is equivalent to taking the difference between the first and second measurement, and using a one-sample test to compare the mean of these differences to zero. Below we see that in the entire NHANES sample, the first measurement of systolic blood pressure is on average 0.67 mm/Hg greater than the second measurement. While this difference is not large, it is strongly statistically significant. That is, there is strong evidence that the mean values for the first and second blood pressure measurement differ.
# Paired test: difference between first and second systolic BP measurements,
# tested against a mean difference of zero.
dx = da[["BPXSY1", "BPXSY2"]].dropna()
db = dx.BPXSY1 - dx.BPXSY2
print(db.mean())
sm.stats.ztest(db)
# To probe this effect further, we can divide the population into 10 year wide age bands and also stratify by gender, then carry out the paired t-test within each of the resulting 12 strata. We see that the second systolic blood pressure measurement is always lower on average than the first. The difference is larger for older people and for males. The difference is statistically significant for females over 30, and for males over 60.
#
# Conducting many hypothesis tests and "cherry picking" the interesting results is usually a bad practice. Here we are doing such "multiple testing" for illustration, and acknowledge that the strongest differences may be over-stated. Nevertheless, there is a clear and consistent trend with age -- older people tend to have greater differences between their first and second blood pressure measurements than younger people. There is also a difference between the genders, with older men having a stronger difference between the first and second blood pressure measurements than older women. The gender difference for younger people is less clear.
# Paired test of BPXSY1 - BPXSY2 within each gender x 10-year age band stratum.
dx = da[["RIAGENDRx", "BPXSY1", "BPXSY2", "RIDAGEYR"]].dropna()
dx["agegrp"] = pd.cut(dx.RIDAGEYR, [18, 30, 40, 50, 60, 70, 80])
for k, g in dx.groupby(["RIAGENDRx", "agegrp"]):
    db = g.BPXSY1 - g.BPXSY2
    # print stratum definition, mean difference, sample size, test statistic, p-value
    print(k, db.mean(), db.size, sm.stats.ztest(db.values, value=0))
# ## Power and sample size for hypothesis tests
#
# Like nearly any analysis with data, hypothesis tests will yield more sensitive and confident results when there are larger amounts of data. Here we will study the relationship between the sample size (amount of data), and the behavior of a hypothesis test. For illustration, we will use the paired t-test for assessing whether the first and second systolic blood pressure measurement have different means, restricting to people between 50 and 60 years of age.
#
# The simulation below randomly subsamples sets of 100, 200, 400, and 800 people from the 50-60 year old subset of NHANES, and conducts a paired z-test on each subsample. This process is repeated 500 times for each sample size. We report the fraction of these 500 trials where the p-value was smaller than 0.05. A higher fraction of trials where p<0.05 implies that it is easier to detect a difference between the first and second blood pressure measurements. We see that this proportion (called "statistical power") is only around 0.2 when the sample size is 100, and grows to essentially 100% when the sample size is 800. Note that all of these subsamples are drawn from the same parent population, so there is no change from subsample to subsample in terms of whether the means are truly different. The only thing that changes is our evidence that the difference is real. When the sample size is small, we rarely amass such evidence.
# Power simulation: for each sample size n, draw 500 subsamples of the 50-60
# year olds, run the paired z-test on each, and report the fraction of
# p-values below 0.05 (the empirical power).
all_p = []
dy = dx.loc[(dx.RIDAGEYR >= 50) & (dx.RIDAGEYR <= 60), :]
for n in 100, 200, 400, 800:
    pv = []
    for i in range(500):
        dz = dy.sample(n)
        db = dz.BPXSY1 - dz.BPXSY2
        _, p = sm.stats.ztest(db.values, value=0)
        pv.append(p)
    pv = np.asarray(pv)
    all_p.append(pv)  # keep all p-values for the histograms below
    print((pv <= 0.05).mean())
# In the simulation code above, we saved all the p-values to the array named `all_p` so we can consider them further here. The histogram below shows the distribution of p-values for 500 subsamples of data of size 100. We see that there are more smaller p-values than larger ones, but not to a dramatic degree. Some of the p-values are almost 1, indicating that the subsample mean differences were almost zero in those cases.
# seaborn.distplot was deprecated in seaborn 0.11 and later removed;
# histplot with a KDE overlay is the documented replacement.
sns.histplot(all_p[0], kde=True, stat="density");
# Next we inspect the distribution of p-values for subsamples of size 400. We see that the p-values are much more concentrated close to zero compared to when we had only 100 data points. If we obtain a sample of size 400 from this population, we have around a 70% chance of obtaining a p-value smaller than 0.05, and are very unlikely to obtain a p-value greater than 0.4.
sns.histplot(all_p[2], kde=True, stat="density");
| nhanes_hypothesis_testing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
# %matplotlib inline
# ## 1, Examples on Images
# +
# reading in an image
image = mpimg.imread('test_images/solidWhiteRight.jpg')
# print out the info of image
print('This image is: ', type(image), ' with dimensions: ', image.shape)
# -
# if you wanted to show a single color channel image called 'gray'
# (cmap only affects single-channel data; this image is RGB)
plt.imshow(image, cmap='gray')
plt.imshow(image)
# ### Ideas for Lane Detection Pipeline
# **Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**
#
# `cv2.inRange()` for color selection
# `cv2.fillPoly()` for regions selection
# `cv2.line()` to draw lines on an image given endpoints
# `cv2.addWeighted()` to coadd / overlay two images
# `cv2.cvtColor()` to grayscale or change color
# `cv2.imwrite()` to output images to file
# `cv2.bitwise_and()` to apply a mask to an image
#
# **Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**
# ## 2, Functions for Canny Edge Detection
# #### 第一步:图片灰度化, cv2.cvtColor(img, cv2.RGB2GARY)
# opencv 灰度转换
def grayscale(image):
    """Convert an RGB image to a single-channel grayscale image.

    NOTE: to display the returned array as grayscale with matplotlib,
    call plt.imshow(gray, cmap='gray').
    """
    # Images read with cv2.imread() are BGR; use cv2.COLOR_BGR2GRAY for those.
    return cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# #### 第二步:高斯平滑, cv2.GaussianBlur
# 高斯平滑
def gaussian_blur(gray_image, kernel_size):
    """Applies a Gaussian Noise kernel.

    kernel_size must be a positive odd integer; sigma=0 lets OpenCV derive
    the standard deviation from the kernel size.
    """
    blur_gray = cv2.GaussianBlur(gray_image, (kernel_size, kernel_size), 0)
    return blur_gray
# #### 第三步:Canny Edge Detection 边缘检测,cv2.Canny(blur_gray, low_threshold, high_threshold)
def canny_dege_detection(gray_blur, low_threshold, high_threshold):
    """Applies the Canny transform to a blurred grayscale image.

    NOTE(review): the name is a typo for "canny_edge_detection"; it is kept
    unchanged because later cells call it by this name.
    """
    edges = cv2.Canny(gray_blur, low_threshold, high_threshold)
    return edges
# #### 第四步:Region of Masking 感兴趣区域选定 cv2.fillPoly() for regions selection
def region_of_interest(image, vertices):
    """
    Applies an image mask.

    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    `vertices` should be a numpy array of integer points.
    """
    mask = np.zeros_like(image)

    # Fill color for the mask: one value per channel for multi-channel
    # images, a single scalar for grayscale input.
    if len(image.shape) > 2:
        # BUG FIX: the original referenced the undefined name `img` here,
        # which raised NameError for any multi-channel input.
        channel_count = image.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255, ) * channel_count
    else:
        ignore_mask_color = 255

    # filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, ignore_mask_color)

    # returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(image, mask)
    return masked_image
# #### 第五步:Hough Transform 霍夫变换,cv2.HoughLinesP
def hough_transform_lines(masked_image, rho, theta, threshold, min_line_len, max_line_gap):
    """
    `masked_image` should be the output of a Canny transform.

    Returns a black RGB image of the same height/width with the detected
    Hough line segments drawn on it.
    """
    lines = cv2.HoughLinesP(masked_image, rho, theta, threshold, np.array([]),
                            min_line_len, max_line_gap)
    # BUG FIX: size the output canvas from the input image instead of the
    # notebook-global `image`, so the function works for inputs of any size
    # and does not depend on hidden notebook state.
    line_image = np.zeros((masked_image.shape[0], masked_image.shape[1], 3), dtype=np.uint8)
    draw_lines(line_image, lines)
    return line_image
# #### 第六步:Draw Lines 画线 cv2.line() to draw lines on an image given endpoints
def draw_lines(line_img, lines, color=[255, 0, 0], thickness=2):
    """
    NOTE: this is the function you might want to use as a starting point once you want to
    average/extrapolate the line segments you detect to map out the full
    extent of the lane (going from the result shown in raw-lines-example.mp4
    to that shown in P1_example.mp4).

    Think about things like separating line segments by their
    slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
    line vs. the right line. Then, you can average the position of each of
    the lines and extrapolate to the top and bottom of the lane.

    This function draws `lines` with `color` and `thickness`.
    Lines are drawn on the image inplace (mutates the image).
    If you want to make the lines semi-transparent, think about combining
    this function with the weighted_img() function below
    """
    # NOTE(review): the mutable default `color=[255, 0, 0]` is shared across
    # calls; harmless while never mutated, but a tuple would be safer.
    # Each entry of `lines` is a 1-element array of (x1, y1, x2, y2) segments,
    # matching the shape returned by cv2.HoughLinesP.
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(line_img, (x1, y1), (x2, y2), color, thickness)
# #### 第七步:overlay two images 图像重叠 cv2.addWeighted() to coadd / overlay two images
def weighted_image(initial_image, image, apha, beta, rho):
    """
    `image` is the output of the hough_lines(), An image with lines drawn on it.
    Should be a blank image (all black) with lines drawn on it.
    `initial_img` should be the image before any processing.

    The result image is computed as follows:
        initial_img * apha + img * beta + rho

    NOTE: initial_img and img must be the same shape!

    NOTE(review): `apha` and `rho` look like misspellings of cv2.addWeighted's
    `alpha` and `gamma` parameters; names are kept to avoid breaking callers.
    """
    return cv2.addWeighted(initial_image, apha, image, beta, rho)
# ## 3, Testing on Images
import os
# List the available test images.
os.listdir('test_images/')
# ### Build a Lane Finding Pipeline 车道检测流水线
# Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.
#
# **Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.**
# create a directory to save processed images
test_images_output = "test_images_output"
if not os.path.exists(test_images_output):
    os.mkdir(test_images_output)
# +
# Build a pipeline that will draw lane lines on the test_images then save them to the test_images_output directory.
# Setting the parameters for functions

# 1. kernel_size for gaussian blur (must be odd)
kernel_size = 5

# 2. thresholds for canny edge
low_threshold = 60
high_threshold = 140

# 3. constants for Hough transformation
rho = 2 # distance resolution in pixels of the Hough grid
theta = np.pi/180 # angular resolution in radians of the Hough grid
threshold = 20 # minimum number of votes (intersections in Hough grid cell)
min_line_len = 30 # minimum number of pixels making up a line
max_line_gap = 150 # maximum gap in pixels between connectable line segments

# 4. vertices for polygon with area of interest
# NOTE(review): this triangle is never used -- `vertices` is overwritten with
# a quadrilateral numpy array before region_of_interest is called.
left_bottom = [50, 539]
right_bottom = [900, 539]
apex = [470, 320]
vertices = [left_bottom, right_bottom, apex]
# 5.
# -
# Run the pipeline step by step on one test image, visualizing each stage.
image = mpimg.imread('test_images/solidWhiteRight.jpg')
image_grayscale = grayscale(image)
plt.imshow(image_grayscale, cmap='gray')
gary_blur = gaussian_blur(image_grayscale, kernel_size)
plt.imshow(gary_blur, cmap='gray')
image_canny_edge = canny_dege_detection(gary_blur, low_threshold, high_threshold)
plt.imshow(image_canny_edge, cmap='gray')
# Region of interest: a quadrilateral covering the road ahead.
vertices = np.array([[(0,image.shape[0]),(450, 320), (490, 320), (image.shape[1],image.shape[0])]], dtype=np.int32)
image_mask = region_of_interest(image_canny_edge, vertices)
plt.imshow(image_mask, cmap='gray')
line_image = hough_transform_lines(image_mask, rho, theta, threshold, min_line_len, max_line_gap)
plt.imshow(line_image, cmap='gray')
# Overlay the detected lines on the edge image, then on the original image.
color_edges = np.dstack((image_canny_edge, image_canny_edge, image_canny_edge))
image_lines_and_edges = weighted_image(color_edges, line_image, 0.8, 1.0, 0)
plt.imshow(image_lines_and_edges, cmap='gray')
image_lines_and_edges = weighted_image(image, line_image, 0.8, 1.0, 0)
plt.imshow(image_lines_and_edges)
# ### 构建车道检测处理流程
def finding_lane_pipe(image):
    """Run the full lane-detection pipeline on one RGB image.

    Relies on the module-level tuning parameters (kernel_size, thresholds,
    Hough constants) defined in the cells above.
    """
    # 1, Convert to grayscale
    image_grayscale = grayscale(image)
    # 2, Gaussian blurring
    gary_blur = gaussian_blur(image_grayscale, kernel_size)
    # 3, Canny edge detection
    image_canny_edge = canny_dege_detection(gary_blur, low_threshold, high_threshold)
    # 4, Mask edges to area of interest
    vertices = np.array([[(0,image.shape[0]),(450, 320), (490, 320), (image.shape[1],image.shape[0])]], dtype=np.int32)
    image_mask = region_of_interest(image_canny_edge, vertices)
    # 5, Detect Hough lines and draw them on a blank canvas
    line_image = hough_transform_lines(image_mask, rho, theta, threshold, min_line_len, max_line_gap)
    # 6, Overlay the line image on the original input
    # Create a "color" binary image to combine with line image
    # color_edges = np.dstack((image_canny_edge, image_canny_edge, image_canny_edge))
    image_lines_and_edges = weighted_image(image, line_image, 0.8, 1.0, 0)
    return image_lines_and_edges
# testing on images
# Run the pipeline over every test image and save the annotated results.
images = os.listdir('test_images/')
for raw_image in images:
    image = mpimg.imread("test_images/" + raw_image)
    image_with_line = finding_lane_pipe(image)
    # show the processed image (only the last one remains visible inline)
    plt.imshow(image_with_line)
    # save the processed image to test_images_output dir
    mpimg.imsave(os.path.join(test_images_output, "processed_"+raw_image), image_with_line)
# ## 4, Testing on Videos
def process_image(image):
    """Video-frame adapter around the lane-finding pipeline.

    NOTE: The output returned should be a color image (3 channel) for
    processing video below.
    """
    return finding_lane_pipe(image)
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
print(clip1)
# create a directory to save processed videos
test_videos_output = "test_videos_output"
if not os.path.exists(test_videos_output):
    os.mkdir(test_videos_output)
# Process the white-lane video frame by frame and render the result inline.
white_output = 'test_videos_output/solidWhiteRight.mp4'
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
# %time white_clip.write_videofile(white_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
# Same treatment for the yellow-lane video ...
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
yellow_clip = clip2.fl_image(process_image)
# %time yellow_clip.write_videofile(yellow_output, audio=False)
#%%
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(yellow_output))
# ... and for the harder challenge video.
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_output = 'test_videos_output/challenge.mp4'
challenge_clip = clip3.fl_image(process_image)
# %time challenge_clip.write_videofile(challenge_output, audio=False)
#%%
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(challenge_output))
| udacity-program_self_driving_car_engineer_v1.0/project03-lane_detection_basic/Finding Lane Lines.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ___
#
# <a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>
# ___
# # Missing Data
#
# Let's show a few convenient methods to deal with Missing Data in pandas:
# Demonstration of pandas' basic tools for handling missing (NaN) data.
import numpy as np
import pandas as pd
# Toy frame: column A has one missing value, B has two, C is complete.
df = pd.DataFrame({'A':[1,2,np.nan],
                  'B':[5,np.nan,np.nan],
                  'C':[1,2,3]})
df
# Drop every ROW containing at least one NaN (only the first row survives).
df.dropna()
# Drop every COLUMN containing at least one NaN (only C survives).
df.dropna(axis=1)
# Keep rows having at least `thresh` (here 2) non-NaN values.
df.dropna(thresh=2)
# Replace every NaN with a fixed placeholder value.
df.fillna(value='FILL VALUE')
# Replace NaNs in A with the mean of A's non-missing values.
df['A'].fillna(value=df['A'].mean())
# # Great Job!
| 03-Python-for-Data-Analysis-Pandas/04-Missing Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Housing-price regression: explore the data, fit linear and SVR models,
# and predict the (log) price of a new sample.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline

# --- Load and explore ------------------------------------------------------
df = pd.read_csv("housing.csv")
df.head()
df.shape
df.describe()
# Visualize missing values: a contrasting cell in the heatmap marks a NaN.
plt.figure(figsize=(16, 8))
sns.heatmap(df.isnull(), cbar=False)
df.dtypes
# 'Address' is free text with (near-)unique values, so it is dropped.
len(df['Address'].unique())
df = df.drop(['Address'], axis=1)
df.head()
df.hist()
# Pairwise correlation of the remaining numeric columns.
corr = df.corr()
sns.heatmap(corr, annot=True)

# --- Features / target -----------------------------------------------------
y = df['Price']
x = df.drop(['Price'], axis=1)
# Scale all features to [0, 1].
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(x)
# BUGFIX: the original line was `X=scaler.transform(x)x` - the stray
# trailing `x` made this cell a SyntaxError.
X = scaler.transform(x)
# Log-transform the target to reduce skew; predictions are in log-space.
y = np.log(y)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# NOTE(review): the original also called `train_test_split(y, shuffle=False)`
# and discarded the result; that no-op statement was removed.

# --- Linear regression baseline --------------------------------------------
from sklearn.linear_model import LinearRegression
model_linear = LinearRegression().fit(X_train, y_train)
y_pred = model_linear.predict(X_test)
from sklearn.metrics import mean_squared_error
mean_squared_error(y_test, y_pred)
from sklearn.metrics import mean_absolute_error
mean_absolute_error(y_test, y_pred)

# --- Support-vector regression ---------------------------------------------
from sklearn.svm import SVR
clf = SVR(gamma='scale', C=1.0, epsilon=0.2)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
from sklearn.metrics import mean_squared_error
mean_squared_error(y_test, y_pred)
from sklearn.svm import SVR
clf = SVR(gamma='scale', C=1, epsilon=0)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
from sklearn.metrics import mean_squared_error
mean_squared_error(y_test, y_pred)

# --- Predict on a new sample -----------------------------------------------
from sklearn.preprocessing import MinMaxScaler
z = [[70000.0, 6.0, 8.0, 3.0, 30000.0]]
z = np.asarray(z)
# BUGFIX: the model was trained on MinMax-scaled features, so a new sample
# must be passed through the SAME fitted scaler before predicting.
# The prediction is in log-space (y was log-transformed above); apply
# np.exp(y1) to recover a price in the original units.
y1 = model_linear.predict(scaler.transform(z))
y1
| Fuel_efficiency_prediction/.ipynb_checkpoints/Housing_price-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.1 64-bit (''deep_learning'': conda)'
# name: python371jvsc74a57bd0c850e6f77ff7c9cbece5364f8526ec42dd183cf59251b1cfd7b71a0467b242c1
# ---
# + [markdown] id="Ng63tDwZSSm5"
# # Using Our Margin-MSE trained ColBERT Checkpoint
#
# We provide a fully retrieval trained (with Margin-MSE using a 3 teacher Bert_Cat Ensemble on MSMARCO-Passage) DistilBert-based instance on the HuggingFace model hub here: https://huggingface.co/sebastian-hofstaetter/colbert-distilbert-margin_mse-T2-msmarco
#
# This instance can be used to **re-rank a candidate set** or **directly for a vector index based dense retrieval**. The architecure is a 6-layer DistilBERT, with an additional single linear layer at the end.
#
# If you want to know more about our simple, yet effective knowledge distillation method for efficient information retrieval models for a variety of student architectures, check out our paper: https://arxiv.org/abs/2010.02666 🎉
#
# This notebook gives you a minimal usage example of downloading our ColBERT checkpoint to encode passages and queries to create a (term-x-term dot-product & max-pool & sum) score of their relevance.
#
#
#
# ---
#
#
# Let's get started by installing the awesome *transformers* library from HuggingFace:
#
# + id="r2WyNOE2R2rW"
# BUGFIX: a bare `pip install ...` line is not valid Python in the jupytext
# py:light representation; this file escapes notebook magics as comments
# (cf. `# %matplotlib inline`, `# %time ...`), so use the %pip magic form.
# %pip install transformers
# + [markdown] id="YqkWDa_jWu7c"
# The next step is to download our checkpoint and initialize the tokenizer and models:
#
# + id="oTYEtziISSDl"
from transformers import AutoTokenizer,AutoModel, PreTrainedModel,PretrainedConfig
from typing import Dict
import torch
class ColBERTConfig(PretrainedConfig):
    """HuggingFace config object for the ColBERT model defined below."""
    model_type = "ColBERT"
    # Name/path of the underlying HuggingFace transformer (a DistilBERT here).
    bert_model: str
    # Output dimension of the linear layer applied on top of each token vector.
    compression_dim: int = 768
    # NOTE(review): declared but not referenced by the ColBERT class in this
    # notebook - presumably used elsewhere in the original training code.
    dropout: float = 0.0
    return_vecs: bool = False
    # If False, the underlying transformer's weights are frozen (see __init__).
    trainable: bool = True
class ColBERT(PreTrainedModel):
    """
    ColBERT model from: https://arxiv.org/pdf/2004.12832.pdf
    We use a dot-product instead of cosine per term (slightly better)

    Scoring = per-term dot products between query and document token vectors,
    max-pooled over document terms, then summed over query terms.
    """
    config_class = ColBERTConfig
    base_model_prefix = "bert_model"

    def __init__(self,
                 cfg) -> None:
        super().__init__(cfg)

        self.bert_model = AutoModel.from_pretrained(cfg.bert_model)

        # Optionally freeze the transformer (cfg.trainable == False).
        for p in self.bert_model.parameters():
            p.requires_grad = cfg.trainable

        # Project each contextualized token vector down to cfg.compression_dim.
        self.compressor = torch.nn.Linear(self.bert_model.config.hidden_size, cfg.compression_dim)

    def forward(self,
                query: Dict[str, torch.LongTensor],
                document: Dict[str, torch.LongTensor]):
        """Score a batch of (query, document) pairs; returns one score per pair."""
        query_vecs = self.forward_representation(query)
        document_vecs = self.forward_representation(document)

        score = self.forward_aggregation(query_vecs, document_vecs,
                                         query["attention_mask"], document["attention_mask"])
        return score

    def forward_representation(self,
                               tokens,
                               sequence_type=None) -> torch.Tensor:
        """Encode one side (query or document) into compressed per-token vectors."""
        vecs = self.bert_model(**tokens)[0]  # assuming a distilbert model here
        vecs = self.compressor(vecs)

        # if encoding only, zero-out the mask values so we can compress storage
        if sequence_type == "doc_encode" or sequence_type == "query_encode":
            # BUGFIX: the original indexed tokens["tokens"]["mask"], but `tokens`
            # is a flat HuggingFace encoding dict in this notebook (forward()
            # above reads tokens["attention_mask"]), so the nested lookup would
            # raise KeyError. Use the attention mask directly.
            vecs = vecs * tokens["attention_mask"].unsqueeze(-1)

        return vecs

    def forward_aggregation(self, query_vecs, document_vecs, query_mask, document_mask):
        """Aggregate term-x-term similarities into a single score per pair."""
        # create initial term-x-term scores (dot-product)
        score = torch.bmm(query_vecs, document_vecs.transpose(2, 1))

        # mask out padding on the doc dimension (mask by -1000, because max
        # should not select those, setting it to 0 might select them)
        exp_mask = document_mask.bool().unsqueeze(1).expand(-1, score.shape[1], -1)
        score[~exp_mask] = -10000

        # max pooling over document dimension
        score = score.max(-1).values
        # mask out padding query values
        score[~(query_mask.bool())] = 0
        # sum over query values
        score = score.sum(-1)

        return score
#
# init the model & tokenizer (using the distilbert tokenizer)
#
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased") # honestly not sure if that is the best way to go, but it works :)
model = ColBERT.from_pretrained("sebastian-hofstaetter/colbert-distilbert-margin_mse-T2-msmarco")
# + [markdown] id="EOGT8YQQX1Ot"
# Now we are ready to use the model to encode two sample passage and query pairs (this would be the re-ranking mode, where we have a candidate list - for indexing or pre-compute mode you need to call forward_representation and forward_aggregation independently):
# + colab={"base_uri": "https://localhost:8080/"} id="Rzt9Ix9UYMLy" outputId="529e338e-b4e7-4251-cf9b-4363ac8a3ed8"
# our relevant example
# (tokenized straight to PyTorch tensors with a batch dimension of 1)
passage1_input = tokenizer("We are very happy to show you the 🤗 Transformers library for pre-trained language models. We are helping the community work together towards the goal of advancing NLP 🔥.",return_tensors="pt")
# a non-relevant example
passage2_input = tokenizer("Hmm I don't like this new movie about transformers that i got from my local library. Those transformers are robots?",return_tensors="pt")
# the user query -> which should give us a better score for the first passage
# (no return_tensors here: input_ids/attention_mask stay plain Python lists,
# which the list-concatenation augmentation below relies on)
query_input = tokenizer("what is the transformers library")
# adding the mask augmentation, we used 8 as the fixed number for training regardless of batch-size
# it has a somewhat (although not huge) positive impact on effectiveness, we hypothesize that might be due to the increased
# capacity of the query encoding, not so much because of the [MASK] pre-training, but who knows :)
query_input.input_ids += [103] * 8 # [MASK]
query_input.attention_mask += [1] * 8
# Convert the augmented lists to tensors and add the batch dimension.
query_input["input_ids"] = torch.LongTensor(query_input.input_ids).unsqueeze(0)
query_input["attention_mask"] = torch.LongTensor(query_input.attention_mask).unsqueeze(0)
#print("Passage 1 Tokenized:",passage1_input)
#print("Passage 2 Tokenized:",passage2_input)
#print("Query Tokenized:",query_input)
# note how we call the bert model for pairs, can be changed to: forward_representation and forward_aggregation
score_for_p1 = model.forward(query_input,passage1_input).squeeze(0)
score_for_p2 = model.forward(query_input,passage2_input).squeeze(0)
# The relevant passage should receive the higher score.
print("---")
print("Score passage 1 <-> query: ",float(score_for_p1))
print("Score passage 2 <-> query: ",float(score_for_p2))
# + [markdown] id="_1bY5qB9b-AI"
# As we see the model gives the first passage a higher score than the second - these scores would now be used to generate a list (if we run this comparison on all passages in our collection or candidate set). The scores are in the 100+ range (as we create a dot-product of 768 dimensional vectors, which naturally gives a larger score)
#
# - If you want to look at more complex usages and training code we have a library for that: https://github.com/sebastian-hofstaetter/transformer-kernel-ranking 👏
#
# - If you use our model checkpoint please cite our work as:
#
# ```
# @misc{hofstaetter2020_crossarchitecture_kd,
# title={Improving Efficient Neural Ranking Models with Cross-Architecture Knowledge Distillation},
# author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
# year={2020},
# eprint={2010.02666},
# archivePrefix={arXiv},
# primaryClass={cs.IR}
# }
# ```
#
# Thank You 😊 If you have any questions feel free to reach out to Sebastian via mail (email in the paper).
#
| minimal_colbert_usage_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %reload_ext autoreload
# %autoreload 2
from recon.augmentation import ent_label_sub
from recon.dataset import Dataset
from recon.types import Example, Span
# +
# One example containing two identical "ENTITY" spans.
example = Example(
    text="This is a first sentence with entity. This is an entity in the 2nd sentence.",
    spans=[
        Span(text="entity", start=30, end=36, label="ENTITY"),
        Span(text="entity", start=49, end=55, label="ENTITY"),
    ],
)
ds = Dataset("test_dataset", data=[example])
# Substitute every ENTITY span (sub_prob=1.0) with "new entity"; the asserts
# below show the augmented copy is appended, growing the dataset from 1 to 2.
ds.apply_("recon.v1.augment.ent_label_sub", label="ENTITY", subs=["new entity"], sub_prob=1.0)
assert len(ds) == 2
# The original example is kept unchanged...
assert ds.data[0] == example
# ...and the augmented copy has the substituted text with recomputed offsets.
assert ds.data[1] == Example(
    text="This is a first sentence with new entity. This is an new entity in the 2nd sentence.",
    spans=[
        Span(text="new entity", start=30, end=40, label="ENTITY"),
        Span(text="new entity", start=53, end=63, label="ENTITY"),
    ],
)
# -
# The augmentation function can also be called directly on a single example.
ent_label_sub(example, "ENTITY", ["new entity"], sub_prob=1.0)
ds = Dataset("test_dataset", data=[example])
ds.apply_("recon.v1.augment.ent_label_sub", label="ENTITY", subs=["new entity"], sub_prob=1.0)
ds.data
| examples/6.2_augmentations_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.10.0a6 64-bit (''3.10.0a6'': pyenv)'
# language: python
# name: python3
# ---
# ## Kompletny przykład
# ### Przykładowy wycinek aplikacji wykonującej podsumowanie transakcji pobranych z jakiegoś źródła zewnętrznego
# ### Klient bazodanowy
# +
from __future__ import annotations
from typing import List
from decimal import Decimal
from dataclasses import dataclass
from enum import Enum
from reader import Reader
from utils import cmap, cfilter, pipe
from base import DomainError
import result
@dataclass
class DbClient:
    """Stub database client that serves canned transaction tuples."""
    # Canned (id, value, type) tuples standing in for real DB rows.
    dummy_transactions: List[tuple]
    def get_transactions(self) -> result.Result[List[tuple], str]:
        # In a real application an error could potentially occur here -
        # an Error would then be returned. (translated from Polish)
        return result.Ok(self.dummy_transactions)
# -
# ### Definicja dziedziny
# +
@dataclass(frozen=True, repr=False)
class ValidationError(DomainError): pass
@dataclass(frozen=True)
class Transaction:
    """Domain model of one transaction; instances are built through the validating create() constructors."""
    @dataclass(frozen=True)
    class Id:
        # Raw identifier string; create() requires it to start with "#".
        value: str
        @staticmethod
        @result.from_generator
        def create(value: str) -> result.Result[Transaction.Id, ValidationError]:
            # NOTE(review): on success this returns the raw string, not
            # Transaction.Id(value); presumably result.from_generator wraps the
            # return - confirm whether an Id instance was intended here.
            if not value.startswith("#"):
                return ValidationError.create("Id should start with #")
            return value
    class Type(Enum):
        CASH = 'CASH'
        CARD = 'CARD'
        @staticmethod
        @result.from_generator
        def create(_type: str) -> result.Result[Transaction.Type, ValidationError]:
            # EAFP: an unknown name raises KeyError, mapped to a ValidationError.
            try:
                return Transaction.Type[_type]
            except KeyError:
                return ValidationError.create(f"Invalid type of transaction given: {_type}")
    id: Transaction.Id
    value: Decimal
    type: Transaction.Type
    @staticmethod
    @result.from_generator
    def create(_id: str, value: Decimal, _type: str) -> result.Result[Transaction, ValidationError]:
        # Validate id and type together so the failures are aggregated into a
        # single ValidationError carrying the raw input for debugging.
        t_id, t_type = yield result.aggregate([
            Transaction.Id.create(_id),
            Transaction.Type.create(_type)
        ]).flat_map_error(ValidationError.join_errors(
            "Transaction validation errors. "
            f"Raw data (id={_id!r}, value={value!r}, type={_type!r})"
        ))
        return Transaction(t_id, value, t_type)
# -
# ### Utworzenie procesu zawierającego logikę aplikacji
# **To jest kod definiowany w obszarze logiki biznesowej.
# Na tym etapie wszystkie funkcje są czyste, nieświadome zależności i gotowe na obsługę błędów powstałych w trakcie wykonania**
# +
@Reader.create
def fetch_transactions(trans_type: str):
    """Build a Reader that fetches transactions of the given type via the injected db_client."""
    # The yielded lambda asks the Reader environment for the "db_client" dependency.
    db_client = yield lambda env: env["db_client"]
    return (
        db_client.get_transactions()
        # Parse every raw (id, value, type) tuple into a validated Transaction.
        | cmap(lambda trans_data: Transaction.create(*trans_data))
        # Aggregate the individual Results (presumably one failure fails the batch).
        | result.aggregate()
        # Keep only transactions of the requested type.
        | cfilter(lambda t: t.type == Transaction.Type[trans_type])
        | list
    )
def summarize(transactions: List[Transaction]) -> str:
    """Produce a one-line report containing the total value of the transactions."""
    total = sum(transaction.value for transaction in transactions)
    return f"REPORT RESULT. TRANSACTION SUM IS: {total}"
complete_process = (
fetch_transactions("CASH")
| result.flat_map(summarize)
| result.map_error(lambda e: f"Could not produce report because of error: {e}")
)
# -
# ### Uruchomienie procesu
# **W górnych warstwach aplikacji, gdzie dostępne są zależności wymagane przez proces,
# tworzony jest obiekt środowiska i podawany do procesu, co powoduje jego uruchomienie
# Na koniec badany jest wynik działania procesu przez dopasowanie do obu wariantów**
# +
# Composition root: build the concrete dependencies and run the process.
env = {
    "db_client": DbClient([
        ("#1234", Decimal("11.23"), "CASH"),
        # ("1234", Decimal("-12.34"), "PAYPAL"),
        ("#1234", Decimal("12.44"), "CASH")
    ])
}
process_result = complete_process.run(env)
# Pattern-match on the success / failure variants of the Result.
process_result.match(
    lambda report: print("Process was successful.", report),
    lambda error: print("Process failed.", error)
)
| example_complete.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as pl
import cvxpy as cvx

# +
# Convex example: a parabola with a single global minimum.
pl.xkcd()  # hand-drawn "xkcd" plot style
x = np.linspace(0, 4, 500)
pl.figure(0, figsize=(3, 2))
ax = pl.subplot(111)
# BUGFIX: set_axis_bgcolor() was deprecated in matplotlib 2.0 and removed in
# 2.2; set_facecolor() is the supported replacement.
ax.set_facecolor('white')
ax.plot(x, (x - 2)**2 - 5, c='r')
ax.set_xlabel("x")
ax.set_ylabel("f(x)")
ax.set_ylim(-5.2, -1.0)
ax.set_xticks(range(5))
ax.set_yticks(range(-5, 0))

# +
# Non-convex example: a quartic with more than one local minimum.
pl.xkcd()
x = np.linspace(0, 4, 500)
pl.figure(0, figsize=(3, 2))
ax = pl.subplot(111)
ax.set_facecolor('white')
ax.plot(x, (x - 2)**4 - 3*(x - 2)**2 + x - 1, c='r')
ax.set_xlabel("x")
ax.set_ylabel("f(x)")
#ax.set_ylim(-5.2, -1.0)
ax.set_xticks(range(5))
ax.set_yticks(range(-5, 9, 2))
# -
| 2016 PyCologne CVXPY/Plots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # About `pandas`<br>`pandas` 소개
#
#
# ## 참고문헌<br>References
#
#
# * 맥키니 저, 김영근 역, 파이썬 라이브러리를 활용한 데이터 분석, 2판, 한빛미디어, 2019, ISBN 979-11-6224-190-5 ([코드와 데이터](https://github.com/wesm/pydata-book/)) <br><NAME>, Python for Data Analysis, 2nd Ed., O'Reilly, 2017. ([Code and data](https://github.com/wesm/pydata-book/))
#
#
# * https://pandas.pydata.org/pandas-docs/stable/user_guide/10min.html
#
#
# `pandas` is one of python libraries to handle and analyze data.<br>`pandas`는 데이터 취급과 분석을 위한 파이썬 라이브러리 가운데 하나이다.
#
#
# `pandas` mostly store data in `Series` or `DataFrame`, similar to `dict` of python or a table, respectively.<br>주로 *시리즈* `Series` 또는 *데이터 프레임* `DataFrame`에 데이터를 저장하는데, 각각 파이썬의 `dict` 또는 표와 비슷하다.
#
#
# In general, it is imported as follows.<br>일반적으로 다음과 같이 불러들인다.
#
#
#
# +
# Import pandas for tables
import pandas as pd
# -
# ## Series
# Let's make a series as follows.<br>다음과 같이 `Series`를 하나 만들어 보자.
#
#
# +
x_series_int = pd.Series(range(0, 10+1), name='x')
x_series_int
# -
# If you prefer a `Series` of `float` values, `dtype` argument is available.<br>
# `float` 값의 `Series`를 선호한다면, `dtype` 매개변수로 지정할 수 있다.
#
#
# +
x_series_float = pd.Series(range(0, 10+1), name='x float', dtype=float)
x_series_float
# -
# Or you can also generate one from an existing `Series`.<br>기존의 `Series`로 부터 생성하는 것도 가능하다.
#
#
# +
x_series_float_from_int = x_series_int.astype(float)
x_series_float_from_int
# -
# Two `Series` are identical.<br>두 `Series`는 동일하다.
#
#
# +
x_series_float.equals(x_series_float_from_int)
# -
# Because all corresponding elements of two `Series` are identical.<br>왜냐하면 두 `Series`의 대응되는 원소가 모두 동일하기 때문이다.
#
#
# +
x_series_float.eq(x_series_float_from_int)
# -
# ## DataFrame
#
#
# Let's make a `DataFrame` by combining a few `Series` above.<br>위 여러 `Series`를 조합하여 `DataFrame`을 만들어 보자.
#
#
# +
# Build a one-column DataFrame from the float Series, indexed by the int Series.
df = pd.DataFrame({
    "float": x_series_float
    },
    index=x_series_int, columns=['float']
)
df
# -
# We can choose a column as follows.<br>열 선택은 다음과 같이 가능하다.
#
#
# +
df.float
# +
df["float"]
# -
# Adding a new column looks similar to another key-value pair of a `dict`.<br>열을 더 추가하는 것은 `dict`에 새로운 키-밸류 쌍과 비슷하다.
#
#
# +
# New columns are added like dict entries; .add() and * act elementwise.
df["twice"] = df.float.add(df.float)
df["square"] = df["float"] * df.float
df
# -
# Also available is the `shift` method.<br>`shift` 메소드로 열을 하나씩 앞으로 당기거나 뒤로 미는 것도 가능하다.
#
#
# +
# shift(1) moves values down one row (NaN at the top); shift(-1) moves them up.
df["shift by one"] = df.float.shift(1)
df["shift by minus one"] = df.float.shift(-1)
df
# -
# ## Vectorization<br>벡터화
#
#
# Let's think about calculating sum of a sequence using a `DataFrame`.<br>`DataFrame`으로 수열의 합을 계산하는 경우를 생각해 보자.
#
#
# +
# Running sum of 0..10 built with an explicit accumulator, then collected
# into a DataFrame (column "for" holds the loop-computed cumulative sums).
n = list(range(10 + 1))
s = []
running_total = 0
for value in n:
    running_total += value
    s.append(running_total)
df_sum = pd.DataFrame({'n': n, "for": s}, index=n)
df_sum
# -
# Also, we may utilize followng formula.<br>또한 아래 식을 사용할 수도 있을 것이다.
#
#
# $$
# \sum_{i=1}^{n}i = \frac{n(n+1)}{2}
# $$
#
#
# +
# Closed-form sum n(n+1)/2, computed vectorized over the whole column.
df_sum["vec"] = df_sum.n * (df_sum.n + 1) // 2
df_sum
# -
# ## Pandas in 10 minutes<br> 10분 Pandas 사용법
#
#
# English subtitle (10m27s)<br>
# 영문 자막 (10m27s)
#
#
# [](https://youtube.com/watch?v=_T8LGqJtuGc)
#
#
# ## Final Bell<br>마지막 종
#
#
# +
# Ring the terminal bell to signal that the notebook finished running.
# stackoverflow.com/a/24634221
import os
os.system("printf '\a'");
# -
| 00_introduction/50_pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
# # Temoa's model
#
# ## Hydrogen cost:
# * \$ 13.11/kg [1](https://cafcp.org/content/cost-refill): 2019 price of hydrogen in Californian re-fuelling stations.
# * \$ 6/kg [2](https://www.powermag.com/how-much-will-hydrogen-based-power-cost/): hydrogen cost from electrolysis in 2020 (region not specified).
# * \$ 4/kg [3](https://www.energy.gov/sites/prod/files/2017/11/f46/HPTT%20Roadmap%20FY17%20Final_Nov%202017.pdf): DOE's target.
#
# ## Energy equivalence:
#
# equivalence = alternative_economy [mpge]/ conventional_economy [mpg] * to_gge
#
# alternative_economy [mpge] = range/capacity/to_gge
#
# * conventional_economy [Gasoline]: 30 mpg [1](https://theicct.org/sites/default/files/publications/EV_cost_2020_2030_20190401.pdf)
# * EVs: range = 250 mi, battery pack = 75 kWh, to_gge = 0.031 gallons/kWh [1](https://theicct.org/sites/default/files/publications/EV_cost_2020_2030_20190401.pdf)
# * FCEVs (based on Toyota 2021 Mirai): range = 650 km, tank = 5.6 kg, to_gge = 1.0 gallons/kg [2](https://h2.live/en/wasserstoffautos/toyota-mirai#:~:text=The%20carbon%20fibre%2Dreinforced%20plastic,a%20range%20of%20500%20km)
# EV fuel economy in mpge: range [mi] / battery capacity [kWh] / 0.031 [gal/kWh].
ev_economy = 250/75/0.031
print(ev_economy, ' mpge')
# FCEV (Toyota Mirai): 650 km range / 1.61 [km per mi] / 5.6 [kg tank] / 1.0 [gal/kg].
fcev_economy = 650/1.61/5.6/1.0
print(fcev_economy, ' mpge')
# * The value for EVs is an average. Some vehicles have higher fuel economies. For example: Tesla S Performance: range = 348 miles, pattery pack = 100 kWh. [1](https://www.fueleconomy.gov/feg/pdfs/guides/FEG2021.pdf)
#
# * The calculated fuel economy of the Toyota Mirai is close to the official number.
# However, the official fuel economy of the Toyota 2021 Mirai is lower. It is 67/64 mpge.
# We will adopt 67 mpge for the rest of the calculations. [2](https://www.toyota.com/rde/mlp/mirai/2021) (Accessed on 12/21/20)
# +
# Gasoline-gallon equivalence relative to a 30 mpg conventional car
# (formula given in the markdown above).
ev_equivalence = ev_economy/30 * 0.031
print('1 kWh of electricity makes {0} gasoline gallons'.format(ev_equivalence))
fcev_equivalence = fcev_economy/30 * 1
print('1 kg of H2 makes {0} gasoline gallons'.format(fcev_equivalence))
# -
# To clarify:
# * 1 kWh produces the same amount of energy than 0.031 gallons of gasoline.
# * An EV covers the same number of miles with 1 kWh than 0.11 gallon of gasoline.
# * 1 kg of H2 produces the same amount of energy than 1.0 gallon of gasoline.
# * An FCEV covers the same number of miles with 1 kg than 2.4 gallon of gasoline.
# ## EVs capital investment:
# * 2018 Sedan, BEV-250 (Battery Electric Vehicles w/ 250-mile range): \$ 58,000 / 75 kWh [1](https://theicct.org/sites/default/files/publications/EV_cost_2020_2030_20190401.pdf).
#
# * 2018 SUV, BEV-250: \$ 83,000 / 128 kWh [1](https://theicct.org/sites/default/files/publications/EV_cost_2020_2030_20190401.pdf).
#
# ## FCEVs capital investment:
# * 2017 Toyota Mirai: \$ 57,000 [1](https://www.hydrogen.energy.gov/pdfs/progress17/v_e_5_james_2017.pdf) / 5.6 kg [2](https://h2.live/en/wasserstoffautos/toyota-mirai#:~:text=The%20carbon%20fibre%2Dreinforced%20plastic,a%20range%20of%20500%20km.)
#
# * 2020 Hyundai Nexo (SUV): \$ 58,735 [1](https://www.hyundaiusa.com/us/en/vehicles/nexo) / 6.33 kg [2](https://h2.live/en/wasserstoffautos/toyota-mirai#:~:text=The%20carbon%20fibre%2Dreinforced%20plastic,a%20range%20of%20500%20km.)
#
# * Nikola Motor (pickup truck): \$ 80,000 [3](https://www.forbes.com/sites/alanohnsman/2020/06/29/hydrogen-truckmaker-nikola-opens-badger-pickup-reservationsahead-of-production-plans/?sh=259f195332ba) / 8 kg [4](https://nikolamotor.com/badger). Not sure about the last one, it seems like they will never make a single vehicle.
# Capital cost per unit of energy-carrier capacity, averaged over two vehicles
# (sources quoted in the markdown above).
evcost = []
evcost.append(58000/75)   # 2018 Sedan BEV-250: $58,000 / 75 kWh
evcost.append(83000/128)  # 2018 SUV BEV-250:   $83,000 / 128 kWh
print(np.average(np.array(evcost))/1e6, 'M$/kWh')
print(np.average(np.array(evcost))/1e6/0.031, 'M$/GGE')
fcevcost = []
fcevcost.append(57000/5.6)   # 2017 Toyota Mirai: $57,000 / 5.6 kg
# BUGFIX: the markdown above quotes the 2020 Hyundai Nexo at $58,735 / 6.33 kg,
# but the code used 83000 (the EV SUV price) - use the Nexo price.
fcevcost.append(58735/6.33)
print(np.average(np.array(fcevcost))/1e6, 'M$/kg')
print(np.average(np.array(fcevcost))/1.0/1e6, 'M$/GGE')
# ## EVs Lifetime
#
# * Car lifetime: 150,000 km [1]
#
# * Tesla: 8 year or 150,000 mi (Model S) [2]
#
# ## FCEVs Lifetime
#
# * 8-year / 100,000-mile FCEV (Toyota Mirai) [3]
#
# * Fuel Cell lifetime: 150,000 - 200,000 miles [4]
#
#
# ### References:
#
# [1] <NAME>. Effects of battery manufacturing on electric vehicle life-cycle greenhouse gas emissions. ICCT Briefing. 2018. [1](https://theicct.org/sites/default/files/publications/EV-life-cycle-GHG_ICCT-Briefing_09022018_vF.pdf)
#
# [2] Tesla support. Accessed on 12/18/20.
# [2](https://www.tesla.com/support/vehicle-warranty#:~:text=The%20Battery%20and%20Drive%20Unit,capacity%20over%20the%20warranty%20period.)
#
# [3] Toyota Mirai. Accessed on 12/18/20. [3](https://staging.toyota.com/mirai/2020/ownership-experience.html#module-portraits_of_pioneers)
#
# [4] California Fuel Cell Partnership (CAFCP). Frequently Asked Questions.
# [4](https://cafcp.org/sites/default/files/FCEV_factbooklet.pdf)
#
# ## Waste
#
# It seems like the most meaningful contribution to the e-waste is from the battery packs.
#
# * lifetime_consumption = total_milage / range * battery_capacity * to_gallons
#
# * specific_waste = mass / lifetime_consumption
#
# ## EVs waste
#
# * battery pack mass: 464 kg (average between 385 and 544kg) [1]
#
# * range: 250 mi [3]
#
# * battery pack capacity: 75 kWh [3]
#
# * energy equivalency: 1kWh ~ 0.031 gallons [3]
#
#
# ## FCEVs waste (Based on Toyota 2021 Mirai)
#
# * battery pack mass: 44.6 kg (1.24 kWh) [2]
#
# * range: 403 mi [4]
#
# * tank capacity: 5.6 kg [4]
#
# * energy equivalency: 1 kg ~ 1 gallon
#
#
# ### References:
#
# [1] Berjoza and Jurgena. INFLUENCE OF BATTERIES WEIGHT ON ELECTRIC AUTOMOBILE PERFORMANCE. 16th International Scientific Conference Engineering for Rural Development. 2017. [1](http://tf.llu.lv/conference/proceedings2017/Papers/N316.pdf)
#
# [2] Toyota. 2021 Mirai Full Specs. Accessed on 12/18/20. [2](toyota.com/mirai/features/mileage_estimates/3002/3003)
#
# [3] [3](https://theicct.org/sites/default/files/publications/EV_cost_2020_2030_20190401.pdf)
#
# [4] [4](https://h2.live/en/wasserstoffautos/toyota-mirai#:~:text=The%20carbon%20fibre%2Dreinforced%20plastic,a%20range%20of%20500%20km.)
#
# ### More:
#
# Contains some info of the material composition of the batteries.
#
# Slowik, <NAME>. How Technology, Recycling, and Policy can Mitigate Supply Risks to the Long-Term Transition to Zero-Emission Vehicles. The International Council on Clean Transportation. December 2020. [1](http://www.zevalliance.org/wp-content/uploads/2020/12/zev-supply-risks-dec2020.pdf)
#
#
# EV battery-pack waste per unit of lifetime fuel-equivalent consumption.
mass = 464 # [kg] battery pack mass
# lifetime miles / range per charge * pack capacity [kWh] * 0.031 [gal/kWh]
lifetime_consumption = 150e3 / 250 * 75 * 0.031
specific_waste = mass/lifetime_consumption
print(specific_waste*1e3, 'kg/kGal')
# FCEV (Toyota Mirai) equivalent; note the three variables are reused/overwritten.
mass = 44.6 # [kg] battery pack mass
lifetime_consumption = 150e3 / 403 * 5.6 * 1.0
specific_waste = mass/lifetime_consumption
print(specific_waste*1e3, 'kg/kGal')
| fuel-analysis/temoa-model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Alternating text and code
#
# Sphinx-Gallery is capable of transforming Python files into rST files
# with a notebook structure. For this to be used you need to respect some syntax
# rules. This example demonstrates how to alternate text and code blocks and some
# edge cases. It was designed to be compared with the
# :download:`source Python script <plot_parse.py>`.
#
# This is the first text block and directly follows the header docstring above.
#
#
import numpy as np
# +
# You can separate code blocks using either a single line of ``#``'s
# (>=20 columns), ``#%%``, or ``# %%``. For consistency, it is recommend that
# you use only one of the above three 'block splitter' options in your project.
A = 1
import matplotlib.pyplot as plt
# -
# Block splitters allow you alternate between code and text blocks **and**
# separate sequential blocks of code (above) and text (below).
#
#
# A line of ``#``'s also works for separating blocks. The above line of ``#``'s
# separates the text block above from this text block. Notice however, that
# separated text blocks only shows as a new lines between text, in the rendered
# output.
#
#
# +
def dummy():
"""This should not be part of a 'text' block'"""
# %%
# This comment inside a code block will remain in the code block
pass
# this line should not be part of a 'text' block
# -
# ####################################################################
#
# The above syntax makes a line cut in Sphinx. Note the space between the first
# ``#`` and the line of ``#``'s.
#
#
# <div class="alert alert-danger"><h4>Warning</h4><p>The next kind of comments are not supported (notice the line of ``#``'s
# and the ``# %%`` start at the margin instead of being indented like
# above) and become too hard to escape so just don't use code like this::
#
# def dummy2():
# """Function docstring"""
# ####################################
# # This comment
# # %%
# # and this comment inside python indentation
# # breaks the block structure and is not
# # supported
# dummy2</p></div>
#
#
#
"""Free strings are not supported. They will be rendered as a code block"""
# New lines can be included in your text block and the parser
# is capable of retaining this important whitespace to work with Sphinx.
# Everything after a block splitter and starting with ``#`` then one space,
# is interpreted by Sphinx-Gallery to be a rST text block. Keep your text
# block together using ``#`` and a space at the beginning of each line.
#
# ## rST header within text block
#
#
print('one')
# another way to separate code blocks shown above
B = 1
# Last text block.
#
# That's all folks !
#
# .. literalinclude:: plot_parse.py
#
#
#
#
| resources/.ipynb_checkpoints/plot_parse-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # ENV / ATM 415: Climate Laboratory, Spring 2016
#
# ## Assignment 3
#
# Out: Tuesday February 23, 2016
#
# Due: Thursday March 3, 2016 at 10:15 am.
# ____________________________________________
#
# ## About this document
#
# This file is a [Jupyter notebook](http://jupyter-notebook.readthedocs.org/en/latest/notebook.html) (also formerly called IPython notebook).
#
# Each cell contains either a block of Python code, or some formatted text.
#
# To open this document, you should launch your Jupyter notebook server by typing
# ```
# jupyter notebook
# ```
# from your command line (or use the `ipython-notebook` button on the Anaconda launcher).
# ## Basic navigation
#
# To select a particular cell for editing, just double-click on it.
#
# There is a pull-down menu at the top of the window, used for setting the content of each cell. A text cell like this one will say `Markdown`.
# +
# This is an example of a Python code cell.
# Note that I can include text as long as I use the # symbol (Python comment)
# Results of my code will display below the input
# NOTE: this notebook declares a Python 2 kernel, so `print` is a statement;
# under Python 3 this line would need to be written as print(3+5).
print 3+5
# -
# Go ahead and edit the Python code cell above to do something different. To evaluate whatever is in the cell, just press `shift-enter`.
#
# Notice that you are free to jump around and evaluate cells in any order you want. The effect is exactly like typing each cell into a Python console in the order that you evaluate them.
# ## Your assignment
#
# Answer all questions below, using the example code as a guide.
#
# **You will submit your work as a single notebook file**. You can use this file as a template, or create a new, empty notebook. Select `Markdown` for cells that contain your written answers to the questions below.
#
# Save your notebook as
# ```
# [your last name]_Assignment03.ipynb
# ```
#
# (so for example, my submission would be `Rose_Assignment03.ipynb`).
#
# The easiest way to do this is just click on the title text at the top of the window. Currently it says `Assignment03`. When you click on it, you get a prompt for a new notebook name.
#
# Try to make sure your notebook **runs from start to finish without error**. Do this:
#
# - Save your work
# - From the `Kernel` menu, select `Restart` (this will wipe out any variables stored in memory).
# - From the `Cell` menu, select `Run All`. This will run each cell in your notebook in order.
# - Did it reach the end without error, and with the results you expected?
# - Yes: Good.
# - No: Find and fix the errors. (remember that the Python interpreter only knows what has already been defined in previous cells. The order of evaluation matters)
# - Save your work and submit your notebook file by email to <<EMAIL>>
#
# _________________________________
# +
# We usually want to begin every notebook by setting up our tools:
# graphics in the notebook, rather than in separate windows
# %matplotlib inline
# Some standard imports
import numpy as np
import matplotlib.pyplot as plt
# -
# We need the custom climlab package for this assignment
import climlab
# ## Question 1
#
# (Primer Section 3.8, Review question 1)
#
# List 10 questions that a strictly zero-dimensional climate model cannot answer. For at least five of the 10 questions, add your explanation of why.
# Your answer here...
# _______________________
#
# ## Question 2
#
# (Primer Section 3.8, Review question 2)
#
# The similarities between the first two EBMs (those of Budyko and Sellers) are fairly obvious -- what are they? Now list at least a few differences between these very early EBMs.
# Your answer here...
# _________________
#
# ## Question 3
# Using the function `climlab.solar.insolation.daily_insolation()`, calculate the incoming solar radiation (insolation) at three different latitudes:
# - the equator
# - 45ºN
# - the North Pole.
#
# Use present-day orbital parameters.
#
# a) **Make a well-labeled graph** that shows **all three insolation curves on the same plot**. The x
# axis of your graph should be days of the calendar year (beginning January 1), and the y axis should be insolation in W/m2. Include a legend showing which curve corresponds to which latitude.
#
# b) Comment on the very different shapes of these three curves.
# +
# This is a code cell.
# Use the '+' button on the toolbar above to add new cells.
# Use the arrow buttons to reorder cells.
# -
# This is a text cell.
# _____________________________
#
# ## Question 4
#
# **Make the same graph using the orbital parameters of 10,000 years ago** (just after the end of the last ice age). Compare with your graph from Question 1 to answer these questions:
#
# a) Was the insolation at northern high latitudes at summer solstice weaker or stronger 10,000 years ago compared to present conditions?
#
# b) Was the summer season longer or shorter at high northern latitudes? To see this, look at the length of time between polar sunrise and polar sunset.
#
# c) What other differences do you notice?
# _______________________________
#
# ## Question 5
#
# a) Calculate the **annual average insolation** for an array of latitudes ranging from the South Pole to the North Pole.
#
# There is more than one way to do this. You can loop through a list of days of the year, add up the insolation, and divide by the number of days. Or you can use a shortcut like `np.mean()`.
#
# Present your results as a well-labeled graph of annual average insolation as a function of latitude. You may use either present-day, or any other orbital parameters, but make sure you explain clearly what you are using.
# b) Recall that the equilibrium temperature in our zero-dimensional EBM is
#
# $$ T_{eq} = \left( \frac{(1-\alpha) ~ Q}{\tau~ \sigma} \right)^{\frac{1}{4}} $$
#
#
# **Suppose that this model applies independently at every latitude, where $Q$ is the annual average insolation at that latitude** (i.e. ignore exchanges of energy between adjacent latitude bands). Using the annual mean $Q$ you computed in part (a), make a graph of $T_{eq}$ as a function of latitude. Make sure to state clearly any assumptions you make about the parameter values $\alpha, \tau$.
# c) What are typical annual-average surface temperature values at the South Pole, North Pole, and equator on Earth? Make sure to state your sources for these numbers. How do these compare to the equilibrium temperatures you computed in part (b)? Discuss some possible shortcomings of the simple model you used in part (b).
# _________________________________
#
# ## Bonus question (for fun and extra credit)
#
# Repeat question 4 and 5 for a planet with zero eccentricity and 90º obliquity.
#
# Speculate on what the seasonal cycle of temperature might look like at different locations on this planet.
#
# *90º obliquity means the planet’s rotation axis is parallel with the earth-sun plane, as if the planet were lying on its side. In our solar system, the planet Uranus has an obliquity close to 90º, as do many of the newly discovered extra-solar planets.*
| assignments/Assignment03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: thesis-repo
# language: python
# name: thesis-repo
# ---
# ## Ordering features by importance (rxn-physicochemical properties)- GBC
import numpy as np
import pandas as pd
from src.config import raw_data_path, training_dataset_path, testing_dataset_path
import src.data.notebook_utils as utils
# %cd ..
# +
# Load the per-run feature-importance table produced by the GBC experiments,
# plus the training data whose columns define the predictor set.
folder = 'notebooks/results/'
file = 'features.csv'  # NOTE: shadows the builtin name `file`; harmless here
df = pd.read_csv(folder + file, index_col=1)
data = utils.read_data()
columns_by_type = utils.get_columns(data.columns)
# Union of engineered-feature columns and solubility columns.
# NOTE(review): assumes utils.get_columns returns set-like values — confirm.
predictors = columns_by_type['_feat_'] | columns_by_type['solUD']
# -
# if feats_names_by_importance is index
df = df.reset_index().rename({'index':'feats_names_by_importance'}, axis='columns')
df.head()
len(predictors)
def row_to_list(x):
    """Parse a stringified, space-separated array of quoted feature names
    (e.g. "['a' 'bb']") into a plain list of the unquoted names."""
    inner = x[1:-1]          # drop the surrounding brackets
    names = []
    for token in inner.split(' '):
        names.append(token.rstrip()[1:-1])   # strip trailing ws, then quotes
    return names
# +
def isfloat(value):
    """Return True when *value* can be parsed as a float, False otherwise."""
    try:
        float(value)
    except ValueError:
        return False
    return True

def convert_to_float(f):
    """Parse *f* as a float; unparseable tokens fall back to 0."""
    return float(f) if isfloat(f) else 0
# -
def feat_values_to_list(x):
    """Parse a stringified, space-separated array of importance values
    into a list of floats (unparseable tokens become 0)."""
    return [convert_to_float(token.rstrip()) for token in x[1:-1].split(' ')]
weight_per_feature = {}
# Accumulate, per feature, the summed importance across all runs, counting
# only the leading features whose importance exceeds a small threshold.
for index, row in df.iterrows():
    feat_names = row_to_list(row['feats_names_by_importance'])
    feat_values = feat_values_to_list(row['feats_importance_values'])
    for feat_name in feat_names:
        if feat_name not in weight_per_feature:
            weight_per_feature[feat_name] = 0
    i = 0
    # BUGFIX: check the bounds BEFORE indexing. The original evaluated
    # feat_values[i] first, so when every value in a row stayed above the
    # threshold, i reached len(feat_values) and raised IndexError.
    while i < min(len(feat_names), len(feat_values)) and feat_values[i] > 0.000001:
        weight_per_feature[feat_names[i]] += feat_values[i]
        i += 1
# Average the accumulated weights over the number of runs (rows in df).
df_weight_per_feature = pd.DataFrame(list(weight_per_feature.items()),columns = ['feature','weight'])
df_weight_per_feature['weight'] = df_weight_per_feature['weight'].apply(lambda x: x/len(df))
df_weight_per_feature.head()
# Filename typo 'fetures' kept as-is: downstream consumers may read this exact path.
df_weight_per_feature.to_csv(folder + 'fetures_sorted_by_weight.csv', index=None)
| notebooks/0.3-mbto-features_to_weights.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: dl-env
# language: python
# name: dl-env
# ---
# +
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:85% !important; }</style>"))
# %load_ext autoreload
# -
import tensorflow as tf
# tf.test.gpu_device_name() returns '' when no GPU is visible.
gpu_name = tf.test.gpu_device_name()
if gpu_name:
    print('Default GPU Device: {}'.format(gpu_name))
else:
    print("Please install GPU version of TF")
# +
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers,models,utils
import matplotlib.pyplot as plt
# -
def vectorize_sequences(sequences, dimension=10000):
    """Multi-hot encode variable-length index sequences.

    Each row of the returned (len(sequences), dimension) float array has
    1.0 at every index present in the corresponding sequence, 0.0 elsewhere.
    """
    encoded = np.zeros((len(sequences), dimension))
    for row, word_indices in enumerate(sequences):
        # Fancy indexing sets all listed positions of this row at once.
        encoded[row, word_indices] = 1.
    return encoded
# Load the Reuters newswire topic dataset, keeping the 10k most frequent words.
(x_train, y_train), (x_test, y_test) = keras.datasets.reuters.load_data(num_words=10000)
print('x_train shape : %s' % str(x_train.shape))
print('y_train shape : %s' % str(y_train.shape))
print('x_test shape : %s' % str(x_test.shape))
print('y_test shape : %s' % str(y_test.shape))
# Multi-hot encode each newswire into a fixed-length 10000-dim vector.
x_train = vectorize_sequences(x_train)
x_test = vectorize_sequences(x_test)
print('x_train shape : %s' % str(x_train.shape))
print('x_test shape : %s' % str(x_test.shape))
# One-hot encode the 46 topic labels for categorical crossentropy.
y_train = utils.to_categorical(y_train)
y_test = utils.to_categorical(y_test)
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(46, activation='softmax'))  # one unit per topic class
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Hold out the first 1000 samples as a validation set.
x_val = x_train[:1000]
x_train = x_train[1000:]
y_val = y_train[:1000]
y_train = y_train[1000:]
y_train
history = model.fit(x_train, y_train, epochs=20, batch_size=512, validation_data=(x_val, y_val))
# Plot training vs. validation loss per epoch from the fit history.
plt.figure(figsize=(15,7.5))
history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
epochs = range(1, len(loss_values) + 1)
plt.plot(epochs, loss_values, 'bo', label='Training loss')
plt.plot(epochs, val_loss_values, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Plot training vs. validation accuracy per epoch from the fit history.
plt.figure(figsize=(15,7.5))
# BUGFIX: tf.keras records the metric under 'accuracy' (older standalone
# Keras used 'acc'), so indexing 'acc' raises KeyError here. Look up
# whichever key is present to stay compatible with both versions.
acc_key = 'accuracy' if 'accuracy' in history_dict else 'acc'
acc_values = history_dict[acc_key]
val_acc_values = history_dict['val_' + acc_key]
epochs = range(1, len(acc_values) + 1)
plt.plot(epochs, acc_values, 'bo', label='Training acc')
plt.plot(epochs, val_acc_values, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')  # fixed: this axis shows accuracy, not loss
plt.legend()
plt.show()
# +
# Reload and re-encode the data from scratch for the final training run.
(x_train, y_train), (x_test, y_test) = keras.datasets.reuters.load_data(num_words=10000)
x_train = vectorize_sequences(x_train)
x_test = vectorize_sequences(x_test)
y_train = utils.to_categorical(y_train)
y_test = utils.to_categorical(y_test)
# +
# Baseline architecture 64-64-46, trained for 9 epochs (chosen from the
# earlier validation curves) on the full training set.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(46, activation='softmax'))
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# -
model.fit(x_train, y_train, epochs=9, batch_size=512)
model.evaluate(x_test, y_test)
# ### Creating bottleneck i.e reducing number of neurons in the intermediate layer
# Re-create the validation split (the arrays were re-loaded above).
x_val = x_train[:1000]
x_train = x_train[1000:]
y_val = y_train[:1000]
y_train = y_train[1000:]
# +
# Bottleneck variant: a 4-unit middle layer cannot carry enough
# information for 46 output classes — demonstrates an information bottleneck.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(4, activation='relu'))
model.add(layers.Dense(46, activation='softmax'))
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# -
history = model.fit(x_train, y_train, epochs=20, batch_size=128,
                    validation_data=(x_val, y_val))
# ### Using more hidden units
# +
# Wider variant: 128-64-46.
model = models.Sequential()
model.add(layers.Dense(128, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(46, activation='softmax'))
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# -
history = model.fit(x_train, y_train, epochs=10, batch_size=512,
                    validation_data=(x_val, y_val))
# ### Using Single layer network
# +
# Single hidden layer, 128 units.
model = models.Sequential()
model.add(layers.Dense(128, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(46, activation='softmax'))
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# -
history = model.fit(x_train, y_train, epochs=10, batch_size=512,
                    validation_data=(x_val, y_val))
# +
# Single hidden layer, 64 units.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(46, activation='softmax'))
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# -
history = model.fit(x_train, y_train, epochs=10, batch_size=512,
                    validation_data=(x_val, y_val))
| notebooks/dl-chollet/scripts/Reuter News Multiclass Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Deep-Learning-Interview_Part_1
#
# >本文原作者<NAME>,本文来源于知乎专栏。
# 问题集: https://zhuanlan.zhihu.com/p/29936999
# 回答及对应英文页标:https://zhuanlan.zhihu.com/p/29965072
# elviswf 对上述问题找到 中文版对应页码。github 地址:[那些深度学习《面试》你可能需要知道的(中文页标版)](https://github.com/elviswf/DeepLearningBookQA_cn) https://github.com/elviswf/DeepLearningBookQA_cn
#
# ----
#
# *本人以自身学习,加强基础知识为目的,查漏补缺。将对应页面知识,进行复习,学习,总结提炼。---ZJ*
#
# 深度学习中文版 2017 年 9 月 4 日版
#
# #### 1. 列举常见的一些范数及其应用场景,如 L0,L1,L2,L∞,Frobenius 范数
#
# 答:书内页面
# 【英】p39-p40 ;还有p230-p236 有 regularization的应用
# 【中】p34-p35 ;还有 p197-p208 有 regularization 的应用
#
# **2.5 范数**
#
# 有时我们需要衡量一个向量的大小。在机器学习中,我们经常使用被称为 范数(norm)的函数衡量向量大小。形式上,$L^p$ 范数定义如下
#
# $$||x||_p = (\sum_{i}|x_{i}|^p)^{\frac{1}{p}}\tag{2.30} $$
#
# 其中 $p ∈\mathbb{R},p ≥ 1$。
#
# 范数(包括 $L^p$ 范数)是将向量映射到非负值的函数。 直观上来说,向量 $x$ 的范数衡量从原点到点 $x$ 的距离。 更严格地说,范数是满足下列性质的任意函数:
#
# - $f(x) = 0 \Rightarrow x = \mathbf{0}$
# - $f(x + y) \leq f(x) + f(y)$ (三角不等式)
# - $\forall \alpha \in \mathbb{R}$, $f(\alpha x) = \alpha f(x)$
#
# 当$p=2$时,$L^2$范数被称为欧几里得范数。 它表示从原点出发到向量 $x$ 确定的点的欧几里得距离。 $L^2$范数在机器学习中出现地十分频繁,经常简化表示为$||x||$,略去了下标$2$。 平方$L^2$范数也经常用来衡量向量的大小,可以简单地通过点积 $x^Tx$计算。
#
# 平方$L^2$范数在数学和计算上都比$L^2$范数本身更方便。 例如,平方$L^2$范数对$x$中每个元素的导数只取决于对应的元素,而$L^2$范数对每个元素的导数却和整个向量相关。 但是在很多情况下,平方$L^2$范数也可能不受欢迎,因为它在原点附近增长得十分缓慢。 在某些机器学习应用中,区分恰好是零的元素和非零但值很小的元素是很重要的。 在这些情况下,我们转而使用在各个位置斜率相同,同时保持简单的数学形式的函数:$L^1$范数。 $L^1$范数可以简化如下:
#
# $$\lVert x \rVert_1 = \sum_i |x_i|.\tag{2.31}$$
#
#
# 当机器学习问题中零和非零元素之间的差异非常重要时,通常会使用 $L^1$ 范数。 每当$x$中某个元素从 $0$ 增加 $\epsilon$,对应的$L^1$范数也会增加 $\epsilon$。
#
# 有时候我们会统计向量中非零元素的个数来衡量向量的大小。 有些作者将这种函数称为”$L^0$范数”,但是这个术语在数学意义上是不对的。 向量的非零元素的数目不是范数,因为对向量缩放$\alpha$倍不会改变该向量非零元素的数目。 $L^1$范数经常作为表示非零元素数目的替代函数。
#
# 另外一个经常在机器学习中出现的范数是$L^\infty$范数,也被称为\,最大范数。 这个范数表示向量中具有最大幅值的元素的绝对值:
#
# $$ \lVert x \rVert_\infty = \max_i |x_i|.\tag{2.32}$$
#
# 有时候我们可能也希望衡量矩阵的大小。 在深度学习中,最常见的做法是使用 Frobenius 范数,
#
# $$\lVert A \rVert_F= \sqrt{\sum_{i,j} A_{i,j}^2}, $$
#
#
# 其类似于向量的$L^2$范数。
#
# 两个向量的点积可以用范数来表示。 具体地,
#
# $$ x^{\mathrm{T}}y =\lVert{x}\rVert_2\lVert{y}\rVert_2 \cos \theta\tag{2.34}$$
#
# 其中 $\theta$ 表示 $x$ 和 $y$ 之间的夹角。
#
# ---
#
# **第七章 深度学习中的正则化** (chrome 打开 pdf ----225 )
#
#
# #### 2. 简单介绍一下贝叶斯概率与频率派概率,以及在统计中对于真实参数的假设。
#
# 答:p35
#
#
# 尽管我们的确需要一种用以对不确定性进行表示和推理的方法,但是概率论并不能明显地提供我们在人工智能领域需要的所有工具。概率论最初的发展是为了分
# 析事件发生的频率。我们可以很容易地看出概率论,对于像在扑克牌游戏中抽出一手特定的牌这种事件的研究中,是如何使用的。这类事件往往是可以重复的。当我们说一个结果发生的概率为 p,这意味着如果我们反复实验 (例如,抽取一手牌) 无限次,有 p 的比例可能会导致这样的结果。
#
# 这种推理似乎并不立即适用于那些不可重复的命题。如果一个医生诊断了病人,并说该病人患流感的几率为 40%,这意味着非常不同的事情——我们既不能让病人有无穷多的副本,也没有任何理由去相信病人的不同副本在具有不同的潜在条件下表现出相同的症状。在医生诊断病人的例子中,我们用概率来表示一种信任度(degree of belief),其中 1 表示非常肯定病人患有流感,而 0 表示非常肯定病人没有流感。前面那种概率,直接与事件发生的频率相联系,被称为**频率派概率(frequentist probability)**;而后者,涉及到确定性水平,被称为**贝叶斯概率(Bayesian probability)**。
#
# 关于不确定性的常识推理,如果我们已经列出了若干条我们期望它具有的性质,那么满足这些性质的唯一一种方法就是将贝叶斯概率和频率派概率视为等同的。例如,如果我们要在扑克牌游戏中根据玩家手上的牌计算她能够获胜的概率,我们使用和医生情境完全相同的公式,就是我们依据病人的某些症状计算她是否患病的概率。为什么一小组常识性假设蕴含了必须是相同的公理控制两种概率?更多的细节参见 Ramsey (1926)。
#
# 概率可以被看作是用于处理不确定性的逻辑扩展。逻辑提供了一套形式化的规则,可以在给定某些命题是真或假的假设下,判断另外一些命题是真的还是假的。概率论提供了一套形式化的规则,可以在给定一些命题的似然后,计算其他命题为真的似然。
#
# #### 3. 概率密度的万能近似器
#
# 答:p43:3.10 上面那一段
#
# #### 4. 简单介绍一下 sigmoid,relu,softplus,tanh,RBF 及其应用场景
#
# 答:sigmoid 和 softplus 在 p43 页;全部的在 p123-p127
#
# #### 5.Jacobian,Hessian 矩阵及其在深度学习中的重要性
#
# 答:p56-p62
#
# #### 6.KL 散度在信息论中度量的是那个直观量
#
# 答:p46
#
# #### 7. 数值计算中的计算上溢与下溢问题,如 softmax 中的处理方式
#
# 答:p52-p53
#
# #### 8. 与矩阵的特征值相关联的条件数 (病态条件) 指什么,与梯度爆炸与梯度弥散的关系
#
# 答:p53;
#
# #### 9. 在基于梯度的优化问题中,如何判断一个梯度为 0 的零界点为局部极大值/全局极小值还是鞍点,Hessian 矩阵的条件数与梯度下降法的关系
#
# 答:p56-p62
#
# #### 10.KTT 方法与约束优化问题,活跃约束的定义
#
# 答:p60-p61
#
# #### 11. 模型容量,表示容量,有效容量,最优容量概念
#
# 答:p70;p71;p72
#
# #### 12. 正则化中的权重衰减与加入先验知识在某些条件下的等价性
#
# 答:p74 75
#
# #### 13. 高斯分布的广泛应用的缘由
#
# 答:p40
#
# #### 14. 最大似然估计中最小化 KL 散度与最小化分布之间的交叉熵的关系
#
# 答:p83,84,85
#
# #### 15. 在线性回归问题,具有高斯先验权重的 MAP 贝叶斯推断与权重衰减的关系,与正则化的关系
#
# 答: p87
#
# #### 16. 稀疏表示,低维表示,独立表示
#
# 答:p92
#
# #### 17. 列举一些无法基于地图 (梯度?) 的优化来最小化的代价函数及其具有的特点
#
# 答:p97 维度灾难
#
# #### 18. 在深度神经网络中,引入了隐藏层,放弃了训练问题的凸性,其意义何在
#
# 答:p119-122
#
# #### 19. 函数在某个区间的饱和与平滑性对基于梯度的学习的影响
#
# 答:p98
#
# #### 20. 梯度爆炸的一些解决办法 ???
#
# 答:p185 是在这页吗? 后面再细看
#
# #### 21.MLP 的万能近似性质
#
# 答:p123
#
# #### 22. 在前馈网络中,深度与宽度的关系及表示能力的差异 ???
#
# 答:p125
#
# #### 23. 为什么交叉熵损失可以提高具有 sigmoid 和 softmax 输出的模型的性能,而使用均方误差损失则会存在很多问题。分段线性隐藏层代替 sigmoid 的利弊
#
# 答:p140
#
# #### 24. 表示学习的发展的初衷?并介绍其典型例子: 自编码器
#
# 答:p3
#
# 许多人工智能任务都可以通过以下方式解决:先提取一个合适的特征集,然后将这些特征提供给简单的机器学习算法。例如,对于通过声音鉴别说话者的任务来
# 说,一个有用的特征是对其声道大小的估计。这个特征为判断说话者是男性、女性还是儿童提供了有力线索。
#
# 然而,对于许多任务来说,我们很难知道应该提取哪些特征。例如,假设我们想编写一个程序来检测照片中的车。我们知道,汽车有轮子,所以我们可能会想用车轮的存在与否作为特征。不幸的是,我们难以准确地根据像素值来描述车轮看上去像什么。虽然车轮具有简单的几何形状,但它的图像可能会因场景而异,如落在车轮上的阴影、太阳照亮的车轮的金属零件、汽车的挡泥板或者遮挡的车轮一部分的前景物体等等
#
# **解决这个问题的途径之一是使用机器学习来发掘表示本身,而不仅仅把表示映射到输出。这种方法我们称之为表示学习(representation learning)。**学习到的表示往往比手动设计的表示表现得更好。并且它们只需最少的人工干预,就能让AI系统迅速适应新的任务。表示学习算法只需几分钟就可以为简单的任务发现一个很好的特征集,对于复杂任务则需要几小时到几个月。手动为一个复杂的任务设计特征需要耗费大量的人工时间和精力;甚至需要花费整个社群研究人员几十年的时间。
#
# **表示学习算法的典型例子是自编码器(autoencoder)。自编码器由一个编码器(encoder)函数和一个解码器(decoder)函数组合而成。**编码器函数将输入数据转换为一种不同的表示,而解码器函数则将这个新的表示转换到原来的形式。我们期望当输入数据经过编码器和解码器之后尽可能多地保留信息,同时希望新的表示有各种好的特性,这也是自编码器的训练目标。为了实现不同的特性,我们可以设计不同形式的自编码器。
#
#
#
#
# #### 25. 在做正则化过程中,为什么只对权重做正则惩罚,而不对偏置做权重惩罚
#
# 答:p142
#
# #### 26. 在深度学习神经网络中,所有的层中考虑使用相同的权重衰减的利弊
#
# 答:p142
#
# #### 27. 正则化过程中,权重衰减与 Hessian 矩阵中特征值的一些关系,以及与梯度弥散,梯度爆炸的关系
#
# 答:p142-144
#
# #### 28.L1/L2 正则化与高斯先验/对数先验的 MAP 贝叶斯推断的关系
#
# 答:p145,146
#
# #### 29. 什么是欠约束,为什么大多数的正则化可以使欠约束下的欠定问题在迭代过程中收敛
#
# 答:p147 页底 `Chapter 7.3`
#
# #### 30. 为什么考虑在模型训练时对输入 (隐藏单元/权重) 添加方差较小的噪声,与正则化的关系
#
# 答:p149-p150 `Chapter 7.5-7.6`
#
# #### 31. 共享参数的概念及在深度学习中的广泛影响
#
# 答:多任务学习 p151;p156 `Chapter 7.7; 7.9`
#
# #### 32. Dropout 与 Bagging 集成方法的关系,以及 Dropout 带来的意义与其强大的原因
#
# 答:p159-p165 `Chapter 7.12`
#
# #### 33. 批量梯度下降法更新过程中,批量的大小与各种更新的稳定性关系
#
# 答:p170 `Chapter 8.1.3`
#
# #### 34. 如何避免深度学习中的病态,鞍点,梯度爆炸,梯度弥散
#
# 答:p173-p178 `Chapter 8.2.1`
#
# #### 35.SGD 以及学习率的选择方法,带动量的 SGD 对于 Hessian 矩阵病态条件及随机梯度方差的影响
#
# 答:p180;p181-p184 `Chapter 8.3`;
#
# #### 36. 初始化权重过程中,权重大小在各种网络结构中的影响,以及一些初始化的方法;偏置的初始化
#
# 答:初始化权重:p184; `Chapter 8.4`
# 偏置初始化:p186页底 `Chapter 8.4`
#
# #### 37. 自适应学习率算法: AdaGrad,RMSProp,Adam 等算法的做法
#
# 答:AdaGrad:p187;
# RMSProp:p188;
# Adam:p189 `Chapter 8.5.1-3`
#
# #### 38. 二阶近似方法: 牛顿法,共轭梯度,BFGS 等的做法
#
# 答:牛顿法:p190 `Chapter 8.6.1`;
# 共轭梯度: p191-p193; `Chapter 8.6.2`
# BFGS:p193-p194 `Chapter 8.6.3`
#
# #### 39.Hessian 的标准化对于高阶优化算法的意义
#
# 答:p195 `Chapter 8.7.1`
#
# #### 40. 卷积网络中的平移等变性的原因,常见的一些卷积形式
#
# 答:平移等变性:p205页底; `Chapter 9.3`
# 常见的一些卷积形式:p211-p218 `Chapter 9.5`
#
# #### 41.pooling 的做法的意义
#
# 答:p207; p210 `Chapter 9.3-4`
#
# #### 42. 循环神经网络常见的一些依赖循环关系,常见的一些输入输出,以及对应的应用场景
#
# 答:p230-p238 `Chapter 10.2`
#
# #### 43. seq2seq,gru,lstm 等相关的原理
#
# 答:seq2seq:p240-p241; `Chapter 10.4`
# gru:p250; `Chapter 10.10.2`
# lstm:p248 `Chapter 10.10.1`
#
# #### 44. 采样在深度学习中的意义
#
# 答:p286 第一段 `Chapter 12.4.3`
#
# #### 45. 自编码器与线性因子模型,PCA,ICA 等的关系
#
# 答:线性因子模型可以扩展到自编码器和深度概率模型: p304-p305; `Chapter 13.5`
# PCA:p298; `Chapter 13.1`
# ICA:p298 `Chapter 13.2`
#
# #### 46. 自编码器在深度学习中的意义,以及一些常见的变形与应用
#
# 答:意义: p306 `Chapter 14.1`
# 常见变形: p306-p313 `Chapter 14.5`
# 应用: p319 `Chapter 14.9`
#
# #### 47. 受限玻尔兹曼机广泛应用的原因
#
# 答:p400: 想特别了解的人注意这句话: See Mohamed et al. (2012b) for an analysis of reasons for the success of these models. `Chapter 20.2`
#
# #### 48. 稳定分布与马尔可夫链
#
# 答:p362 `Chapter 17.3`
#
# #### 49.Gibbs 采样的原理
#
# 答:p365 `Chapter 17.4`
#
# #### 50. 配分函数通常难以计算的解决方案
#
# 答:p368 `Chapter 17.5.2`
# “遇到难以处理的无向图模型中的配分函数时, 蒙特卡洛方法仍是最主要工具”
#
# #### 51. 几种参数估计的联系与区别: MLE/MAP/贝叶斯
#
# 答:P82/85/87 `Chapter 5.5`
#
# #### 52. 半监督的思想以及在深度学习中的应用
#
# 答:p329-p332 `Chapter 15.3`
#
# #### 53. 举例 CNN 中的 channel 在不同数据源中的含义
#
# 答:p219-220 `Chapter 9.7`
#
# #### 54. 深度学习在 NLP,语音,图像等领域的应用及常用的一些模型
#
# 答:p272-p293 `Chapter 12.1-5`
#
# #### 55.word2vec 与 glove 的比较
#
# 答:How is GloVe different from word2vec?;
#
# GloVe 以及 Word2vec 能称为 deep learning 么?这俩模型的层次其实很浅的;
#
# http://t.cn/RvYslDf
#
# 这个问题没找到答案,我去找了 quora 和知乎上的相关问题以及 quora 一个回答提及的论文。 (若有人在书中找到,请批评指正)
#
# #### 56. 注意力机制在深度学习的某些场景中为何会被大量使用,其几种不同的情形
#
# 答:p288 `Chapter 12.4.5.1`
#
# #### 57.wide&deep 模型中的 wide 和 deep 介绍
#
# 答:https://arxiv.org/pdf/1606.07792.pdf — 此问题答案未在书中找到,为此我去找了原论文,论文图 1 有详细的介绍。 (若有人在书中找到,请批评指正)
#
# #### 58. 核回归与 RBF 网络的关系
#
# 答:p89 `Chapter 5.7.2`
#
# #### 59.LSTM 结构推导,为什么比 RNN 好?
#
# 答:p248 `Chapter 10.10`
#
# #### 60. 过拟合在深度学习中的常见的一些解决方案或结构设计
#
# 答:p143-159; `Chapter 7.1-12`
# 包括:Parameter Norm Penalties(参数范数惩罚); Dataset Augmentation (数据集增强); Early Stopping(提前终止); Parameter Tying and Parameter Sharing (参数绑定与参数共享); Bagging and Other Ensemble Methods(Bagging 和其他集成方法);Dropout. 另外还有 Batch Normalization。
#
# #### 61. 怎么理解贝叶斯模型的有效参数数据会根据数据集的规模自动调整
#
# 答:关于非参数模型:p72 ; `Chapter 5.2`
# 非参数模型不依赖于特定的概率模型,它的参数是无穷维的,数据集的规模的大小影响着模型使用更多或者更少的参数来对其进行建模。(并未在书中找到准确的答案,若有更好的回答,请联系我改正)
#
# 本答案是根据问题在_**Deep Learning**_上找到的答案;有些答案只是自己读书后在书上做的笔记的具体页面,毕竟原 po(http://t.cn/RObdPGk) 说还有另外一本书,所以该答案可能不是特别准确也不完善,答案也是给大家做个参考,若发现答案有问题,请联系我并指正,大家共同进步,谢谢!
#
| Deep Learning Interview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Linear Regression Using Scikit-Learn
# This is the first in a series of notebooks focusing on applying the best practices of the `scikit-learn` API to standard ML tasks.
# The goal of these notebooks is to streamline the `scikit-learn` data science workflow.
# ## Importing the Libraries
# +
import shap
import warnings
import numpy as np
import pandas as pd
import missingno as msno
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_boston
from sklearn.feature_selection import RFE
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split, cross_val_score, KFold
warnings.filterwarnings('ignore')
# -
# ## Importing the Dataset
# Load the Boston housing dataset into a DataFrame, appending the target MEDV.
# NOTE(review): sklearn.datasets.load_boston was deprecated in 1.0 and removed
# in 1.2; pin scikit-learn<1.2 or switch to fetch_openml(name="boston",
# version=1) — confirm against the project environment.
boston = load_boston()
df = pd.DataFrame(data=boston.data, columns=boston.feature_names)
df['MEDV'] = boston.target
df.head()
# ## Data Description and Problem Definition
## Boston Dataset Metadata description
print(boston['DESCR'])
# This is the famous `boston` dataset. It contains features about residential properties that have been sold in the past. The goal is to use the features set to predict the target, `MEDV`, the median value of the residential property. This is a linear regression prediction task, where we try finding a line that best fits our training data and is a suitable predictor of the median value of new residential properties on the market, given their features.
# ## Data Pre-Processing
# Before we get to the training of our model, we have to perform a series of tasks that are important in ensuring that we are building an accurate and valid model. These data pre-processing steps are often a series of sanity checks that we perform to ensure that our data is in the right format and shape before being passed as an input to the Machine Learning model.
# Data Pre-Processing almost always involves the following steps -
# 1. Handling missing values.
# 2. Splitting the data into $X$ (**features**) and $y$ (**target**).
# 3. Feature scaling and standardization.
# 4. Label encoding.
# 5. Splitting the data into **training**, **testing** and **validation** sets.
# 6. Handling Imbalance in the data.
# ### Handling Missing Values
msno.matrix(df);
# Our data does not have any missing values, we do not need to worry about imputing them in.
# ### Splitting our Data into $X$ and $y$
# We separate out our dataset features into a matrix, $X$ and our target into a vector, $y$.
# Features = all columns except the last; target = MEDV (the last column).
X = df.iloc[:, :-1]
y = df.iloc[:, -1]
X.head()
y.head()
# ### Feature Scaling and Standardization
# All of our features are numeric. Let's take a look at their numerical properties.
df.describe()
# Our features have values in varying orders of magnitude. While some are proportions, others are indices and some more are raw values. Since we are building a distance based linear regressor, we must apply standardization to our features.
#
# **Standardization** involves centering the variables so that the predictors have mean 0. This makes it easier to interpret the intercept term as the expected value of $Y_{i}$ when the predictor values are set to their means. Otherwise, the intercept is interpreted as the expected value of $Y_{i}$ when the predictors are set to 0, which may not be a realistic or interpretable.
#
# Another practical reason for scaling in regression is when one variable has a very large scale, In that case, the regression coefficients may be on a very small order of magnitude which can be a little annoying when you're reading computer output, so you may convert the variable to, for example, population size in millions. The convention that you standardize predictions primarily exists so that the units of the regression coefficients are the same.
#
# **Centering/scaling does not affect the statistical inference in regression models - the estimates are adjusted appropriately and the p-values will be the same.**
# +
## This initializes an instance of the `StandardScalar` class
# sc = StandardScaler()
# -
# ### Label Encoding/One Hot Encoding
# Machine learning algorithms cannot operate directly on labeled data. They require all input and output variables to be numeric. This is mostly a constraint of the efficient implementation of machine learning algorithms, since this allows implementations to store values using less disk space.
#
# All categorical data must be converted to numerical forms. If the categorical variable is an output variable, you may also want to convert predictions by the model back into a categorical form in order to present them or use them in some application.
#
# **LabelEncoder** can turn $[dog,cat,dog,mouse,cat]$ into $[1,2,1,3,2]$, but then the imposed ordinality means that the average of dog and mouse is cat.
#
# For categorical variables where no such ordinal relationship exists, **One-Hot-Encoding** must be used and has the advantage that the result is binary rather than ordinal. Everything sits in an orthogonal vector space. The disadvantage is that for high cardinality, the feature space can really blow up quickly and we start fighting with the curse of dimensionality.
#
# To fight the curse of dimensionality, one could employ one-hot-encoding followed by PCA for dimensionality reduction. The combination of one-hot plus PCA can seldom be beat by other encoding schemes, since PCA finds the linear overlap and tends to group similar features into the same feature.
# Since our dataset doesn't have any categorical variables, we do not need to perform any label encoding on this dataset.
# ### Train-Test Split
# 75/25 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X.values, y.values, test_size = 0.25, random_state = 0)
print(X_train.shape, X_test.shape)
print(y_train.shape, y_test.shape)
# ## Fitting the Linear Regression Model
# +
## This initializes an instance of the LinearRegression Class
# lin_reg = LinearRegression()
# -
# Chain standardization and linear regression so scaling parameters are
# learned from the training data only (no test-set leakage).
pipeline = Pipeline([
    ('scale', StandardScaler()),
    ('lin_reg', LinearRegression())
])
y_pred = pipeline.fit(X_train, y_train).predict(X_test)
# The mean squared error
print('Mean squared error: %.3F' % mean_squared_error(y_test, y_pred))
# The coefficient of determination: 1 is perfect prediction
print('Coefficient of determination: %.3F' % r2_score(y_test, y_pred))
# ### Cross Validation
# In Machine Learning, while developing a ML model, we split our dataset into 2 blocks.
# - The first block of the data on which we develop our model is the **Training** set and comprises between $65$-$80\%$ of the entire dataset.
# - The second block is reserved for the purposes of generating metrics for the evaluation of the developed model. This reserved block is called the **Testing** dataset and comprises the remaining $15$ - $35\%$ of the entire dataset.
#
# This method of developing ML models leads to more robust models as we try to find a balance between **bias** and **variance**. However, **k-Fold Cross Validation** goes a step further in the development of robust models.
#
# Visually, k-Fold cross validation operates as follows:
#
# <img src="k-fold-cv.png" width="540" height="540" align="center"/>
#
# The basic concept involves choosing a different "**fold**" as the testing set, iterating through all $k$ possible folds and running the model with that configuration. This is resampling method used to evaluate machine learning models on a limited data sample, called $k$-fold cross validation. When a specific value for $k$ is chosen, it may be used in place of $k$ in the reference to the model, such as k=10 becoming 10-fold cross-validation.
#
# Cross-validation is primarily used in applied machine learning to estimate the skill of a machine learning model on unseen data, using a limited sample in order to estimate how the model is expected to perform in general when used to make predictions on data not used during the training of the model.
# +
lm = LinearRegression()
# create a KFold object with 10 splits
folds = KFold(n_splits = 10, shuffle = True, random_state = 0)
# R^2 of the (un-scaled) linear model on each of the 10 folds.
scores = cross_val_score(lm, X_train, y_train, scoring='r2', cv=folds)
# scores = cross_val_score(lm, X, y, scoring='r2', cv=folds)
[round(i, 3) for i in scores]
# -
# ### Grid Search
# +
# creating a KFold object with 10 splits
folds = KFold(n_splits = 10, shuffle = True, random_state = 0)
# specify range of hyperparameters: keep between 3 and 13 features via RFE
hyper_params = [{'n_features_to_select': list(range(3, 14))}]
# specify model
lm = LinearRegression()
lm.fit(X_train, y_train)  # NOTE(review): redundant — GridSearchCV refits per fold
rfe = RFE(lm)
# set up GridSearchCV(): cross-validated search over the RFE feature count
model_cv = GridSearchCV(estimator = rfe,
                        param_grid = hyper_params,
                        scoring= 'r2',
                        cv = folds,
                        verbose = 1,
                        return_train_score=True)
# fit the model
model_cv.fit(X_train, y_train)
# -
# cv results
cv_results = pd.DataFrame(model_cv.cv_results_)
cv_results
| src/00scikit-linear-regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Regularized Logistic Regression
# In this part of the exercise, you will implement regularized logistic regression to predict whether microchips from a fabrication plant passes quality assurance (QA). During QA, each microchip goes through various tests to ensure it is functioning correctly.<br>
# Suppose you are the product manager of the factory and you have the test results for some microchips on two different tests. From these two tests, you would like to determine whether the microchips should be accepted or rejected. To help you make the decision, you have a dataset of test results on past microchips, from which you can build a logistic regression model.<br>
#
# The file <strong><em>ex2data2.csv</em></strong> contains a training set.<br>
# The structure of the dataset described blow:<br>
# 1. First column = <strong>First test score</strong>
# 2. Second column = <strong>Second test score</strong>
# 3. Third column = <strong>Accepted (1=yes, 0=no)</strong>
#
# <br> <br>
# <strong>
# Our assignment has these sections:
# 1. Visualizing the Data
# 1. Loading dataset
# 2. Ploting scatter
# 1. 2D scatter
# 2. 3D scatter
# 2. Feature Mapping
# 3. Implementation
# 1. Cost Function and Gradient
# 2. Cost and Gradient Function Test
# 3. Learning Parameters Using <em>scipy.optimize.minimize</em>
# 4. Plotting Decision Boundry
# 5. Predicting on Test Data
# 6. Accuracy on Training Data
# 4. Main Loop to Compare Lambda
# 1. Train Using <em>scipy.optimize.minimize</em>
# 2. Predict Using Trained Thetas
# 5. Visualization of Differenet Decision Boundary Using Different Lambdas
# </strong>
#
# In each section full description provided.
# ## 1. Plotting Dataset
# Before starting on any task, it is often useful to understand the data by visualizing it. For this dataset, you can use a scatter plot to visualize the data, since it has only two properties to plot. (Many other problems that you will encounter in real life are multi-dimensional and can’t be plotted on a 2D plot.)
# ### 1.A Loading Dataset
# We just need to import our data in file to a good structrue to work on it. So best option for us in python is <strong>Pandas</strong>.
# +
# import library
import pandas as pd
import numpy as np
# Load the QA dataset: two test scores per microchip plus accept/reject label.
dataset = pd.read_csv('ex2data2.csv',names = ['Test #1','Test #2','Accepted'])
dataset.head()
# -
# splitting to x and y variables for features and target variable
x = dataset.iloc[:,:-1].values
y = dataset.iloc[:,-1].values
print('x[0] ={}, y[0] ={}'.format(x[0],y[0]))
m, n = x.shape
print('#{} Number of training samples, #{} features per sample'.format(m,n))
# ### 1.B Plotting Scatter
# In this step we plot our data in 2D and 3D scatter.
# #### 1.B.a 3D Scatter
# +
# imporing libraries
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import random
# %matplotlib inline
# visualize our data
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(x[:,0], x[:,1], y)
plt.show()
# -
# #### 1.B.b 2D Scatter
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = x, y
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Acceptance State (Training set)')
plt.xlabel('Test #1 Score')
plt.ylabel('Test #2 Score')
plt.legend()
plt.show()
# Above figure shows that <strong>our dataset cannot be separated into positive and negative examples by a straight-line</strong> through the plot. Therefore, a straightforward application of logistic regression will not perform well on this dataset since logistic regression will only be able to find a linear decision boundary.
# ## 2. Feature Mapping
# One way to fit the data better is to <strong>create more features</strong> from each data point. We will map the features into all <strong>polynomial</strong> terms of x<sub>1</sub> and x<sub>2</sub> up to the <strong>sixth power</strong>. Our new x should be like this: <img src='img/map_feature.jpg'><br>
# As a result of this mapping, our vector of two features (the scores on two QA tests) has been <strong>transformed into a 28-dimensional vector</strong>. A logistic regression classifier trained on this <strong>higher-dimension feature</strong> vector will have a more <strong>complex decision boundary</strong> and will appear <strong>nonlinear</strong> when drawn in our 2-dimensional plot.<br><br>
# While the feature mapping allows us to build a <strong>more expressive classifier</strong>, it also <strong>more susceptible to overfitting</strong>. In the next parts of the exercise, you will implement regularized logistic regression to fit the data and also see for yourself how <strong>regularization can help combat the overfitting problem</strong>.<br> <br>
# In this step we use <a href='http://scikit learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html'>preprocessing module</a> of sklearn library.
#
# +
# import library
from sklearn.preprocessing import PolynomialFeatures
# We also add one column of ones to interpret theta 0 (x with power of 0 = 1) by include_bias as True
pf = PolynomialFeatures(degree = 6, include_bias = True)
x_poly = pf.fit_transform(x)
pd.DataFrame(x_poly).head(5)
# -
# ## 3.A Cost Function and Gradient
# Now you will implement code to compute the cost function and gradient for regularized logistic regression. <br><br>
# Recall that the <strong>regularized cost function</strong> in logistic regression is: <img src='img/j_reg.jpg'><br><br>
# Note that you should <strong>not regularize</strong> the parameter θ<sub>0</sub>. In Python, recall that indexing starts from 0, hence, you should not be regularizing the theta(0) parameter (which corresponds to θ<sub>0</sub>) in the code.<br><br>
# The <strong>gradient of the cost function</strong> is a vector where the <strong>j<sub>th</sub></strong> element is defined as follows: <img src='img/gradient_reg.jpg'><br><br> And:<img src='img/gradient_reg_.jpg'><br>
# +
m,n = x_poly.shape
# define theta as zero
theta = np.zeros(n)
# define hyperparameter λ
lambda_ = 1
# reshape (-1,1) because we just have one feature in y column
y = y.reshape(-1,1)
# -
# <strong>h = hypothesis(x,theta)</strong> will compute <strong>sigmoid</strong> function on <strong>θ<sup>T</sup>X</strong> and return a number which <strong>0<=h<=1</strong>.<br>
# You can use <a href='https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.special.expit.html'>this</a> library for calculating sigmoid.
def sigmoid(z):
return 1/(1+np.exp(-z))
def lr_hypothesis(x,theta):
return np.dot(x,theta)
# <strong>compute_cost(theta, x, y, lambda):</strong> computes the cost of using theta as the parameter.
def compute_cost(theta,x,y,lambda_):
theta = theta.reshape(n,1)
infunc1 = -y*(np.log(sigmoid(lr_hypothesis(x,theta)))) - ((1-y)*(np.log(1 - sigmoid(lr_hypothesis(x,theta)))))
infunc2 = (lambda_*np.sum(theta[1:]**2))/(2*m)
j = np.sum(infunc1)/m+ infunc2
return j
# <strong>compute_gradient(theta, x, y,lambda):</strong> computes the gradient of the cost using theta as the parameter.
# gradient[0] correspond to gradient for theta(0)
# gradient[1:] correspond to gradient for theta(j) j>0
def compute_gradient(theta,x,y,lambda_):
gradient = np.zeros(n).reshape(n,)
theta = theta.reshape(n,1)
infunc1 = sigmoid(lr_hypothesis(x,theta))-y
gradient_in = np.dot(x.transpose(),infunc1)/m
gradient[0] = gradient_in[0,0] # theta(0)
gradient[1:] = gradient_in[1:,0]+(lambda_*theta[1:,]/m).reshape(n-1,) # theta(j) ; j>0
gradient = gradient.flatten()
return gradient
# ## 3.B Cost and Gradient Function Test
# Now with <strong>theta = 0 for n values and lambda = 1</strong>, we should see that the <strong>cost = 0.693</strong> and <strong>gradients for five first should be [0.0085, 0.0188, 0.0001,0.0503,0.0115]</strong>.
cost_temp = compute_cost(theta,x_poly,y,lambda_)
gradient_temp = compute_gradient(theta,x_poly,y,lambda_)
print('if theta = 0 and lambda = 1 =======>\n cost = {}\n ,\n gradient(j); j>0 = \n{}'
.format(cost_temp,gradient_temp[0:5]))
# Now with <strong>theta = 1 for n values and lambda = 10</strong>, we should see that the <strong>cost = 3.16</strong> and <strong>gradients for five first should be [0.3460, 0.1614, 0.1948, 0.2269, 0.0922]</strong>.
theta_temp = np.ones(n)
lambda_temp = 10
cost_temp = compute_cost(theta=theta_temp,x=x_poly,y=y,lambda_=lambda_temp)
gradient_temp = compute_gradient(theta=theta_temp,x=x_poly,y=y,lambda_=lambda_temp)
print('if theta = 1 and lambda = 10 =======>\n cost = {}\n \n gradient(j); j>0 = \n{}'
.format(cost_temp,gradient_temp[0:5]))
# ## 3.C Learning Parameters Using scipy.optimize.minimize
# <strong>Scipy</strong>'s <strong>minimize</strong> is an optimization solver that finds <strong>the minimum of an unconstrained<sup>1</sup> function</strong>. For regularized logistic regression, you want to optimize the cost function J(θ) with parameters θ. Concretely, you are going to use <strong>minimize</strong> to find the best parameters θ for the regularized logistic regression cost function, given a fixed dataset (of x and y values). You will pass to <strong>minimize</strong> the following inputs:<br>
# <li> The initial values of the parameters we are trying to optimize.</li>
# <li> A function that, when given the training set and a particular θ, computes the regularized logistic regression cost with respect to θ for the dataset (x, y) ======> <strong>compute_cost</strong></li>
# <li> A function that, when given the training set and a particular θ, computes the regularized logistic regression gradient with respect to θ for the dataset (x, y) ======> <strong>compute_gradient</strong></li>
# <rb>
# <sup>1</sup> Constraints in optimization often refer to constraints on the parameters. for example, constraints that bound the possible values θ can take (e.g., θ ≤ 1). Logistic regression does not have such constraints since θ is allowed to take any real value.<br> <br>
# For doing this step, we have many complex optimization fucntions. You can visit this <a href = 'https://docs.scipy.org/doc/scipy-0.10.0/reference/tutorial/optimize.html'>page</a> for more information. For optimizing this problem, we use this library <strong><a href='https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html'>scipy.optimize.minimize</a></strong> and <a href= 'https://docs.scipy.org/doc/scipy/reference/optimize.minimize-tnc.html#optimize-minimize-tnc'>TNC</a> solver method.
# Now some explanation about below code that do all loops and updates and other calculations only in one line.<br>
# we need two functions, one for cost and the other for gradient that described above. all should have inputs you assgning for <strong>X0</strong> and <strong>args</strong>.<br>
# All of your functions should <strong>return just one variable</strong> which <strong>for cost, optimizer just minmize its return value but for gradient, because the target variable is theta which you specified by X0, optimizer will update theta to minimize cost</strong>.<br>
# And at last, you define the <strong>mathematical model</strong> to do this <strong>optimization</strong> which in our case, it is <strong> Truncated Newton</strong> method.
# ## Evaluating Model
# For evaluating in our main loop to try out different values of lambda, we need to calculate <strong>probabilities and related predictions</strong> and then <strong>compare the predicted values to the real ones to get accuracy</strong>.<br>
# For this job we need some functions that you can see below:<br>
# ### Probability
# For this prediction, we just need to feed the test data as a new x into the sigmoid function. For better usage, <br>
# we implement the <strong>get_propability(x,theta)</strong> function (the name keeps the spelling used in the code).
def get_propability(x,theta):
p = sigmoid(lr_hypothesis(x,theta))
return p
# ### Accuracy on Training Data
# In this step we will implement the predictor function. Previously we could calculate the probability of acceptance for any candidate with respect to the two test scores. Now we need to <strong>convert these probabilities to 0 or 1</strong> values because we only have two classes. To do this we need a <strong>threshold to map</strong> probabilities at or above the threshold to 1 and below it to 0. <strong>Mathematically, the standard threshold for logistic regression is 0.5</strong>. So we have this situation:<strong>
# 1. get_propability(x,theta) >= 0.5 then admission = 1
# 2. get_propability(x,theta) < 0.5 then admission = 0
def predictor(x,theta):
y_pred = np.zeros(m).reshape(m,)
for i in range(0,m):
p = get_propability(x[i],theta)
if (p >= 0.5):
y_pred[i] = 1
else:
y_pred[i] = 0
return y_pred
# Now we will compare our predicted result to the true one with <a href='http://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html'>confusion_matrix</a> of numpy library.
# +
from sklearn.metrics import confusion_matrix
# Function for accuracy
def acc(confusion_matrix):
t = confusion_matrix[0][0] + confusion_matrix[1][1]
f = confusion_matrix[0][1] + confusion_matrix[1][0]
ac = t/(t+f)
return (t,f,ac)
# -
# ## 4. Main Loop to Compare Lambda
# In this step, we also try different values of lambda, check the accuracy on the training set, select the best one, and analyze the performance of our optimization based on these values.<br>
# Suggested values to <strong>try for lambda are [0, 1, 10, 100]</strong>.
# +
# hyperparameters
m,n = x_poly.shape
# define theta as zero
theta = np.zeros(n)
# define hyperparameter λ
lambda_array = [0, 1, 10, 100]
# +
import scipy.optimize as opt
for i in range(0,len(lambda_array)):
# Train
print('======================================== Iteration {} ===================================='.format(i))
optimized = opt.minimize(fun = compute_cost, x0 = theta, args = (x_poly, y,lambda_array[i]),
method = 'TNC', jac = compute_gradient)
new_theta = optimized.x
# Prediction
y_pred_train = predictor(x_poly,new_theta)
cm_train = confusion_matrix(y,y_pred_train)
t_train,f_train,acc_train = acc(cm_train)
print('With lambda = {}, {} correct, {} wrong ==========> accuracy = {}%'
.format(lambda_array[i],t_train,f_train,acc_train*100))
# -
# We can see with <strong>big values of Lambda</strong> , <strong>accuracy goes down</strong>!
# ## 5. Visualization of Different Decision Boundaries Using Different Lambdas
| Week 3 - Logistic Regression/Regularized Logistic Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !jupyter nbconvert --to script lesson3-planet.ipynb
# ## Multi-label prediction with Planet Amazon dataset
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
from fastai.vision import *
import pathlib
# ## Getting the data
# The planet dataset isn't available on the [fastai dataset page](https://course.fast.ai/datasets) due to copyright restrictions. You can download it from Kaggle however. Let's see how to do this by using the [Kaggle API](https://github.com/Kaggle/kaggle-api) as it's going to be pretty useful to you if you want to join a competition or use other Kaggle datasets later on.
#
# First, install the Kaggle API by uncommenting the following line and executing it, or by executing it in your terminal (depending on your platform you may need to modify this slightly to either add `source activate fastai` or similar, or prefix `pip` with a path. Have a look at how `conda install` is called for your platform in the appropriate *Returning to work* section of https://course.fast.ai/. (Depending on your environment, you may also need to append "--user" to the command.)
# +
# # ! {sys.executable} -m pip install kaggle --upgrade
# -
# Then you need to upload your credentials from Kaggle on your instance. Login to kaggle and click on your profile picture on the top left corner, then 'My account'. Scroll down until you find a button named 'Create New API Token' and click on it. This will trigger the download of a file named 'kaggle.json'.
#
# Upload this file to the directory this notebook is running in, by clicking "Upload" on your main Jupyter page, then uncomment and execute the next two commands (or run them in a terminal). For Windows, uncomment the last two commands.
# +
# # ! mkdir -p ~/.kaggle/
# # ! mv kaggle.json ~/.kaggle/
# For Windows, uncomment these two commands
# # ! mkdir %userprofile%\.kaggle
# # ! move kaggle.json %userprofile%\.kaggle
# -
# You're all set to download the data from [planet competition](https://www.kaggle.com/c/planet-understanding-the-amazon-from-space). You **first need to go to its main page and accept its rules**, and run the two cells below (uncomment the shell commands to download and unzip the data). If you get a `403 forbidden` error it means you haven't accepted the competition rules yet (you have to go to the competition page, click on *Rules* tab, and then scroll to the bottom to find the *accept* button).
path = Config.data_path()/'planet'
path.mkdir(parents=True, exist_ok=True)
path
# +
# # ! kaggle competitions download -c planet-understanding-the-amazon-from-space -f train-jpg.tar.7z -p {path}
# # ! kaggle competitions download -c planet-understanding-the-amazon-from-space -f train_v2.csv -p {path}
# # ! unzip -q -n {path}/train_v2.csv.zip -d {path}
# -
# To extract the content of this file, we'll need 7zip, so uncomment the following line if you need to install it (or run `sudo apt install p7zip-full` in your terminal).
# +
# # ! conda install --yes --prefix {sys.prefix} -c haasad eidl7zip
# -
# And now we can unpack the data (uncomment to run - this might take a few minutes to complete).
# +
# ! 7za -bd -y -so x {path}/train-jpg.tar.7z | tar xf - -C {path.as_posix()}
# -
# ## Multiclassification
# Contrary to the pets dataset studied in last lesson, here each picture can have multiple labels. If we take a look at the csv file containing the labels (in 'train_v2.csv' here) we see that each 'image_name' is associated to several tags separated by spaces.
base_path = pathlib.Path('/media/avemuri/DEV/Data/amazon_from_space/')
df = pd.read_csv(base_path/'train_v2.csv')
df.head()
# To put this in a `DataBunch` while using the [data block API](https://docs.fast.ai/data_block.html), we then need to using `ImageList` (and not `ImageDataBunch`). This will make sure the model created has the proper loss function to deal with the multiple classes.
tfms = get_transforms(flip_vert=True, max_lighting=0.1, max_zoom=1.05, max_warp=0.)
# We use parentheses around the data block pipeline below, so that we can use a multiline statement without needing to add '\\'.
np.random.seed(42)
src = (ImageList.from_csv(base_path, 'train_v2.csv', folder='train-jpg', suffix='.jpg')
.split_by_rand_pct(0.2)
.label_from_df(label_delim=' '))
data = (src.transform(tfms, size=128)
.databunch().normalize(imagenet_stats))
print(data.loss_func)
print(type(data.label_list))
print(data.label_list.lists[1])
# `show_batch` still works, and show us the different labels separated by `;`.
data.show_batch(rows=3, figsize=(12,9))
# To create a `Learner` we use the same function as in lesson 1. Our base architecture is resnet50 again, but the metrics are a little bit differeent: we use `accuracy_thresh` instead of `accuracy`. In lesson 1, we determined the predicition for a given class by picking the final activation that was the biggest, but here, each activation can be 0. or 1. `accuracy_thresh` selects the ones that are above a certain threshold (0.5 by default) and compares them to the ground truth.
#
# As for Fbeta, it's the metric that was used by Kaggle on this competition. See [here](https://en.wikipedia.org/wiki/F1_score) for more details.
arch = models.resnet50
acc_02 = partial(accuracy_thresh, thresh=0.2)
f_score = partial(fbeta, thresh=0.2)
learn = cnn_learner(data, arch, metrics=[acc_02, f_score])
# We use the LR Finder to pick a good learning rate.
learn.lr_find()
learn.recorder.plot()
# Then we can fit the head of our network.
from torchsummary import summary
summary(learn.model, (3, 128, 128))
learn.unfreeze()
summary(learn.model, (3, 128, 128))
learn.freeze()
summary(learn.model, (3, 128, 128))
for x, y in learn.data.train_dl.dl:
print(x.shape, y.shape)
break
print(y[0])
lr = 0.01
learn.fit_one_cycle(5, slice(lr))
learn.save('stage-1-rn50')
# ...And fine-tune the whole model:
learn.unfreeze()
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(5, slice(1e-5, lr/5))
learn.save('stage-2-rn50')
# +
data = (src.transform(tfms, size=256)
.databunch().normalize(imagenet_stats))
learn.data = data
data.train_ds[0][0].shape
# -
learn.freeze()
learn.lr_find()
learn.recorder.plot()
lr=1e-2/2
learn.fit_one_cycle(5, slice(lr))
learn.save('stage-1-256-rn50')
learn.unfreeze()
learn.fit_one_cycle(5, slice(1e-5, lr/5))
learn.recorder.plot_losses()
learn.save('stage-2-256-rn50')
# You won't really know how you're going until you submit to Kaggle, since the leaderboard isn't using the same subset as we have for training. But as a guide, 50th place (out of 938 teams) on the private leaderboard was a score of `0.930`.
learn.export()
# ## fin
# (This section will be covered in part 2 - please don't ask about it just yet! :) )
# +
# #! kaggle competitions download -c planet-understanding-the-amazon-from-space -f test-jpg.tar.7z -p {path}
#! 7za -bd -y -so x {path}/test-jpg.tar.7z | tar xf - -C {path}
# #! kaggle competitions download -c planet-understanding-the-amazon-from-space -f test-jpg-additional.tar.7z -p {path}
#! 7za -bd -y -so x {path}/test-jpg-additional.tar.7z | tar xf - -C {path}
# -
test = ImageList.from_folder(path/'test-jpg').add(ImageList.from_folder(path/'test-jpg-additional'))
len(test)
learn = load_learner(path, test=test)
preds, _ = learn.get_preds(ds_type=DatasetType.Test)
thresh = 0.2
labelled_preds = [' '.join([learn.data.classes[i] for i,p in enumerate(pred) if p > thresh]) for pred in preds]
labelled_preds[:5]
fnames = [f.name[:-4] for f in learn.data.test_ds.items]
df = pd.DataFrame({'image_name':fnames, 'tags':labelled_preds}, columns=['image_name', 'tags'])
df.to_csv(path/'submission.csv', index=False)
# ! kaggle competitions submit planet-understanding-the-amazon-from-space -f {path/'submission.csv'} -m "My submission"
# Private Leaderboard score: 0.9296 (around 80th)
| amazon_from_sky/lesson3-planet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # step 1:write a fn that can print out a board .set up your board as a list,where each index 1-9 corresponds with a number on a number pad,so you get a 3 by 3 board representation
# +
lis = {7 :" ",8:" ",9:" ",4:" ",5:" ",6:" ",1:" ",2:" ",3:" "}
def print_board(): # fun to print the board...
print(" ",lis[7]," |"," ",lis[8]," |"," ",lis[9])
print("------------------")
print(" ",lis[4]," |"," ",lis[5]," |"," ",lis[6])
print("------------------")
print(" ",lis[1]," |"," ",lis[2]," |"," ",lis[3])
print_board()
# -
# # WRITE A FN THAT CAN TAKE IN PLAYER INPUT AND ASSIGN THEIR MARKER AS X OR O. THINK: USE WHILE LOOPS TO CONTINUALLY ASK UNTIL YOU GET A CORRECT ANSWER.
def marker(z): # marker() function generates marker 'X' or 'O'
global y
y = 0+z
global player
player = int(input("Enter your position")) # we need to provide position which is an integer
if y %2 != 0: # % operationn done to filter and get alternate 'O' and 'X'
mark = 'O'
else:
mark = 'X'
y+=1
return mark
# # WRITE A FN THAT TAKES IN THE BOARD LIST OBJECT,A MARKER X OR O AND A DESIRED POSITION (NUMBER 1-9) AND ASSIGNS IT TO THE BOard.
def assign(lis): # assign() used to assign marker 'X' or 'O' received from fun marker() and assign it
z=0 # to particular positio using numpad
while True:
mark = marker(z)
z+=1
lis.update({player:mark})
if y == 9:
break
print_board(lis)
# # functoin that takes in a board along with marker and checks to see if someone haswon
# +
def winner(lis): # winner() used to guess who is the winner
global p1 ,p2
p1=p2=0
print_board(lis)
if lis[1] == lis[2] == lis[3] or lis[1] == lis[5] == lis[9] or lis[1] == lis[4] ==lis[7]:
if lis[1] =='X':
p1+=1
else:
p2+=1
if lis[9] == lis[8]==lis[7] or lis[9] == lis[6] ==lis[3]:
if lis[9] =='X':
p1+=1
else:
p2+=1
if lis[5] ==lis[3] ==lis[7] or lis[5] == lis[8] ==lis[2] or lis[5] == lis[6] == lis[4]:
if lis[5] =='X':
p1+=1
else:
p2+=1
winner(lis)
if p1 > p2:
print("Player1 has won!!")
elif p1 ==p2:
print("There is a tie!!!")
else:
print("Player2 has won!!")
# -
# # fn that uses the random module to randomly decide which player goes first.you may want to lookup random.randint() return a string of which player went first.
#
# +
import random # we also use random module to assign randomly for players that who will go first!!
ran =['Player 1' , 'Player 2']
k = random.choice(ran)
print(k)
# -
# # here comes the hard part! use while loops and the fn you;hv made to run the game
# +
import random # this is best part where we integrate all stuff
ran =['Player 1' , 'Player 2']
lis = {7 :" ",8:" ",9:" ",4:" ",5:" ",6:" ",1:" ",2:" ",3:" "}
def print_board(lis): # THE print_board used to print the game board..
print("\n"," ",lis[7]," |"," ",lis[8]," |"," ",lis[9]) # 7 8 9
print("--------------------") # 4 5 6
print(" ",lis[4]," |"," ",lis[5]," |"," ",lis[6]) # 1 2 3
print("--------------------") #
print(" ",lis[1]," |"," ",lis[2]," |"," ",lis[3],"\n") # the above represents(num pad) the board!! so i use dict for this
def marker(z): #to generate marker using this function
global y # y and z are taken becz to keep the count on the i/p max we can 3*3 ele
y = 0+z # only.. so when it reaches we need to exit frm taking i/p
global player # player is nothing but the keypad or numpad position asked to plc
if y %2 != 0: # on the board!.% operator used to print either of the marks
mark = 'O' # also an exceptional case where if any player tries to enter their
player = int(input(str(ran[m])+" TURN!!!.... ")) # mark in place where already a mark exists!!! that this while does...
while lis[player] == 'O'or lis[player] == 'X': #till he fills in the ri8 place they will be looped and not allowed
print("THAT IS OUT OF RULES PLAY AGAIN!!\n") # to move forward!!
player = int(input(str(ran[m])+" TURN!!!.... ")) #
else:
mark = 'X'
player = int(input(str(ran[k])+" TURN!!!.... "))
while lis[player] == 'O'or lis[player] == 'X':
print("THAT IS OUT OF RULES PLAY AGAIN!!\n")
player = int(input(str(ran[k])+" TURN!!!.... "))
y+=1
return mark
def assign(lis): #assign() used to fill marker in respective places!!
z=0 #z keeps track of no of markers put and contniues with y in marker()
while True: #loop of continuous i/p from players
h = False #
mark = marker(z) #
z+=1 #
lis.update({player:mark}) #and update the same in dict
if y>=5: # for sure that if there alreay 5 markers filled then there no chance
winner(lis) # of announcing / guessing any winners logically so the conditio >=5
h = check() # every time i get next marker after after receiving 5 earlier..now i
if y == 9 or h == True: # need to check is there any winner for that using check() .....
if p1 == p2: # after everything is filled up there is winner so obviously its tie!!
print("There is a tie!!! ") #
break #since its assign fun we need to print the board every time we assign
print_board(lis) # so the board is printed!!!
def winner(lis): # now to declare at every moment(with atleat 5 updates on board) we need
global p1 ,p2 #decide and check is there any winner and allot points
p1=p2=0 #p1 always holds the valus for the player who plays first!!
print_board(lis) #p2 always holds the value for the player who plays second!!
if lis[1] == lis[2] == lis[3] or lis[1] == lis[5] == lis[9] or lis[1] == lis[4] ==lis[7]:
if lis[1] =='X': #
p1+=1 #there are 8 ways we can check whos winner
else: #considering 3X3 we came up with 8 conditions and they divided into
p2+=1 # 3 main conditions based on this we allot points
if lis[9] == lis[8]==lis[7] or lis[9] == lis[6] ==lis[3]:
if lis[9] =='X': #
p1+=1 #
elif lis[9] == 'O':
p2+=1
if lis[5] ==lis[3] ==lis[7] or lis[5] == lis[8] ==lis[2] or lis[5] == lis[6] == lis[4]:
if lis[5] =='X':
p1+=1
else:
p2+=1
def check(): # check() used to compare p1 and p2 values and decide winner!!
val = False #val variable is used to when we have to decide the winner in the middle
if(p1 or p2): # of the game!
if p1 > p2: #
print(str(ran[k])+" has won!! Congratulations!!!\n \n "+str(ran[m])+" better luck next : )")
elif p1< p2:
print(str(ran[m])+" has won!! Congratulations!!!\n \n "+str(ran[k])+" better luck next : )")
val = True
return val
while True: # this is loop to have infinte plays.....
print("------W_E_L_C_O_M_E---- T_O----T_I_C --T_A_C---T_O_E---GAME!!!! ")
global k ,m #k m variables hold who'l play first and secomd res..
k = random.randint(0,1) # using of random module to decide who goes first!!
if k == 0: #
m = 1
else:
m = 0
print(ran[k],"plays first!!!")
assign(lis)
#winner(lis)
#########
#check()
print("\n1 . Wanna Play Again!!!")
print("2 . Or wanna quit\n")
lis.update({7 :" ",8:" ",9:" ",4:" ",5:" ",6:" ",1:" ",2:" ",3:" "})# once player has finshed the game we need to give him
if int(input()) == 2: #blank board to play agian!.
print("AM FEELING BAD THAT YOU ARE LEAVING SO SOON!! \n ANYWAYS COME SOON DARLING....")
break
| TIC TAC TOE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.4.6
# language: julia
# name: julia-0.4
# ---
# # CrossfilterCharts.jl
# CrossfilterCharts.jl is a Julia module which harnesses the power of DC.js to automagically generate linked data visualizations.
using RDatasets
iris = dataset("datasets", "iris");
# CrossfilterCharts.jl is extremely easy to use. Here we are pulling a dataset using the RDatasets package in order to supply example datasets contained in `DataFrame`s.
#
# Basic use of CrossfilterCharts only requires importing the package and calling `dc()` on the dataframe to generate a visualization:
using CrossfilterCharts
dc(iris)
# Users have complete access to the DCOut object, and can choose to structure the visualization themselves:
df = dataset("mlmRev", "Exam")
dcout = DCOut(df)
infer_dimensions!(dcout) # infer all dimensions
infer_groups!(dcout) # infer all groups
quick_add!(dcout, :School, piechart)
quick_add!(dcout, :NormExam, barchart)
quick_add!(dcout, :SchGend, rowchart)
quick_add!(dcout, :SchAvg, linechart)
quick_add!(dcout, :VR, piechart)
quick_add!(dcout, :Sex, piechart)
dcout.charts[end].typ[:innerRadius] = "50"
add_bubblechart!(dcout, :School, :NormExam, :SchAvg, :StandLRT)
add_datacountwidget!(dcout)
add_datatablewidget!(dcout)
dcout
# CrossfilterCharts will now not automatically infer charts if there are missing or NaN values.
msleep = dataset("ggplot2","msleep")
dc(msleep)
| docs/CrossfilterCharts.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="a182c10f"
# # Autoencoders and POD on flow past cylinder (FPC) dataset
#
# Example notebook that displays the functionality of the built package for FPC and reproduces the results from the report. Each of the sections can be ran
# independently, but this section has to always be ran.
# + executionInfo={"elapsed": 1934, "status": "ok", "timestamp": 1628505119393, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="67a04531"
# All of the necessary external package imports
import tensorflow as tf
from tensorflow.keras.layers.experimental import preprocessing
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import random
import os
# Built package imports
from ddganAE.utils import calc_pod
from ddganAE.models import AAE, AAE_combined_loss, CAE, SVDAE
from ddganAE.architectures.cae.D2 import *
from ddganAE.architectures.svdae import *
from ddganAE.architectures.discriminators import *
from ddganAE.preprocessing import convert_2d
# + executionInfo={"elapsed": 8, "status": "ok", "timestamp": 1628505119394, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="1636fb27"
# Setting seeds for reproducibility; note that exact reproducibility is very
# hard to achieve when using GPUs as we have no control over CUDA's internal
# nondeterminism, so the results might still vary slightly from the report's
# results if run again
# One shared seed value, applied to every RNG this notebook touches.
seed = 42
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
random.seed(seed)
tf.random.set_seed(seed)
# + executionInfo={"elapsed": 8, "status": "ok", "timestamp": 1628505119395, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="eyV4KZ9IC0r2"
# Set the datafile name, change to wherever the datafile is in your drive or local
# machine
filename = "/content/drive/MyDrive/Colab Notebooks/data/processed/snaphsots_field_Velocity_new_4_2000steps.npy"
# + [markdown] id="fd7622d3"
# ## Proper Orthogonal Decomposition (POD)
#
# First we try POD to benchmark the other models against
# + executionInfo={"elapsed": 550, "status": "ok", "timestamp": 1628505119938, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="3496c580"
# Load the raw snapshot grids from disk.
snapshots_grids = np.load(filename)
# + executionInfo={"elapsed": 282, "status": "ok", "timestamp": 1628505120216, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="d0f74ee6"
# Some data reshaping
# Convert the flat grids into 2D fields of shape (55, 42, 2) — presumably
# (height, width, velocity components) — for 2000 timesteps, then stack all
# 4 subgrids x 2000 steps into 8000 samples. TODO confirm axis meaning.
input_shape = (55, 42, 2)
snapshots = convert_2d(snapshots_grids, input_shape, 2000)
snapshots = np.array(snapshots).reshape(8000, *input_shape)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 118720, "status": "ok", "timestamp": 1628505238934, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="54c62d1f" outputId="7821a2be-e885-4e19-d7c4-11841817702b"
# Normalize data and calculate POD
# Could also try subtracting the mean but this does not give better results
layer = preprocessing.Normalization(axis=None)
layer.adapt(snapshots_grids)
# Swap so the time axis is first for train_test_split, then swap back after
# normalizing (Normalization with axis=None scales by a single global
# mean/variance learned in adapt()).
snapshots_grids = snapshots_grids.swapaxes(0, 2)
x_train, x_val = train_test_split(snapshots_grids, test_size=0.1, random_state=seed)
x_train = layer(x_train).numpy().swapaxes(0, 2)
x_val = layer(x_val).numpy().swapaxes(0, 2)
# Calculate an R matrix
# R holds the nPOD=10 leading basis vectors; coeffs the projected coefficients;
# s the singular values.
coeffs, R, s = calc_pod(x_train, nPOD=10)
# + executionInfo={"elapsed": 10, "status": "ok", "timestamp": 1628505238938, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="FlS1cMkUV1_z"
# Use the R matrix calculated for the train dataset to generate coefficients for the val dataset
# (passing R skips recomputing the basis, so the validation set is projected
# onto the training basis).
coeffs, R, s = calc_pod(x_val, nPOD=10, R=R)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1249, "status": "ok", "timestamp": 1628505240180, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="b4f75b14" outputId="bc30b58e-3f6e-435b-e7c2-f03f400499eb"
# Calculate MSE
# Average the per-column MSE over 4 grids x 200 validation timesteps = 800
# reconstructions (hence the /800).
mean = 0
for j in range(4):
    # Reconstruct grid j from its POD coefficients.
    recon = R @ coeffs[j]
    for i in range(200):
        mean += tf.keras.losses.MSE(recon[:, i], x_val[j, :, i]).numpy()/800
print("POD MSE loss of the normalized dataset's reconstruction: ", mean)
# + executionInfo={"elapsed": 11, "status": "ok", "timestamp": 1628505240181, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="78663ca8"
# Create reconstructed grids
# Shape (4 grids, 4620 nodes, 200 timesteps) — 4620 = 55*42*2 flattened field.
reconstructed = np.zeros((4, 4620, 200))
for i in range(4):
    reconstructed[i, :, :] = R @ coeffs[i]
# Undo normalization
# Inverse of the Normalization layer: multiply by std and add back the mean.
reconstructed = (reconstructed * np.sqrt(layer.variance.numpy()) + layer.mean.numpy())
# + executionInfo={"elapsed": 11, "status": "ok", "timestamp": 1628505240182, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="24a31773"
# Reshape to fit in interpolation (legacy) fortran code
reconstructed = convert_2d(reconstructed, (55, 42, 2), 200)
# Move the channel axis to where the legacy code expects it.
reconstructed = np.array(reconstructed).swapaxes(1, 4)
# + colab={"base_uri": "https://localhost:8080/", "height": 282} executionInfo={"elapsed": 402, "status": "ok", "timestamp": 1628505240573, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="ca51233c" outputId="1510735b-574d-4b49-e87b-bb3f4e502cdb"
# Let's plot a reconstruction to see if it visually corresponds to what we expect
# Left: POD reconstruction at timestep 100; right: the original snapshot.
fig, ax = plt.subplots(1,2)
ax[0].contourf(reconstructed[0, 0, :, :, 100])
ax[1].contourf(snapshots[100, :, :, 0])
# + [markdown] id="M-lnpgexsGGq"
# Execute the cell below to save results
# + id="BVSsIOtesIFT"
# Save the POD reconstruction to disk (comment this line out to skip saving).
np.save("reconstruction_pod_10coeffs.npy", reconstructed)
# + [markdown] id="fad3dc64"
# ## Convolutional Autoencoder
# + id="2b81d4b3"
# This cell does preprocessing, it is the same for the CAE and the AAE
# Let's load in the data, split and reshape for the autoencoders
snapshots_grids = np.load(filename)
# Some data reshaping
input_shape = (55, 42, 2)
snapshots = convert_2d(snapshots_grids, input_shape, 2000)
snapshots = np.array(snapshots).reshape(8000, *input_shape)
# Normalize and split dataset
# Default axis here (unlike the POD cell's axis=None) normalizes per-feature
# along the last axis.
layer = preprocessing.Normalization()
layer.adapt(snapshots)
x_train, x_val = train_test_split(snapshots, test_size=0.1, random_state=seed)
x_train = layer(x_train)
x_val = layer(x_val)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 9, "status": "ok", "timestamp": 1628425614251, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="75fce7d3" outputId="9ef8a89b-1a67-415e-ab54-27d8f1fbfa32"
# The hyperparameters set in this cell and the next correspond to the optimal hyperparameters from hyperparameter
# optimization
initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=seed)
# NOTE(review): `lr` is the legacy alias of `learning_rate` in tf.keras optimizers.
optimizer = tf.keras.optimizers.Adam(lr=0.00005, beta_1=0.98, beta_2=0.9)
# Use this line to create a new model, select any from the list of models provided in the documentation or make
# your own. Separating the architecture from the model itself gives the user the
# freedom to try out any architectures without having to define an entirely new
# model.
# 10 is the latent-space dimension, matching the nPOD=10 used for POD above.
encoder, decoder = build_denser_omata_encoder_decoder(input_shape, 10, initializer, info=True, act='elu', dense_act='relu')
# Use these lines instead of the above to load a previously trained model
# encoder = tf.keras.models.load_model("saved_model_cae/encoder")
# decoder = tf.keras.models.load_model("saved_model_cae/decoder")
cae = CAE(encoder, decoder, optimizer, seed=seed)
cae.compile(input_shape, pi_loss=False)
# + id="5b7df253"
# Tensorboard logs the results, run `tensorboard --logdir logs` in this directory in a terminal with acces to
# Tensorflow. Note that we can extract the MSE for the final report loss directly from tensorboard as the model
# evaluates the validation dataset at every epoch
# Train for 200 epochs with batch size 128.
cae.train(x_train, 200, val_data=x_val, batch_size=128)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 17, "status": "ok", "timestamp": 1628426072043, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="AUX8zd8L_5Mx" outputId="af35e03e-1913-4480-9dc2-3c99c02480a0"
print("CAE MSE loss of the normalized dataset's reconstruction: ", cae.autoencoder.evaluate(x_val, x_val)[0])
# + id="2190a865"
# Run every sample through the autoencoder (encode + decode) to obtain the
# full set of reconstructions.
snapshots = layer(snapshots)
res = cae.predict(snapshots)
# + id="ffc3f34c"
# Undo normalization
res = (res * np.sqrt(layer.variance.numpy()) + layer.mean.numpy())
# + colab={"base_uri": "https://localhost:8080/", "height": 282} executionInfo={"elapsed": 20, "status": "ok", "timestamp": 1628426073603, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="0c6a4799" outputId="8a968c57-b715-4db9-c7b7-598c8fa09dd4"
# Let's plot a reconstruction to see if it visually corresponds to what we expect
# (sample 92, second velocity component; left reconstruction, right original).
fig, ax = plt.subplots(1,2)
ax[0].contourf(res[92,:,:,1])
ax[1].contourf(snapshots[92, :, :, 1])
# + [markdown] id="soOOWctprlQJ"
# Execute the two cells below to save the results and the model
# + id="ff718a9e"
# Reshape to how reconstruction legacy code wants it
reconstruction = res.reshape((4, 2000, 55, 42, 2)).swapaxes(1, 4)
np.save("cae_reconstruction.npy", reconstruction)
# + id="nIxu3hCaDPRy"
# Save the trained encoder/decoder; uncomment the mkdir line first if the
# directory does not exist yet.
# !mkdir -p saved_model
cae.encoder.save('saved_model/encoder')
cae.decoder.save('saved_model/decoder')
# + [markdown] id="6d5b29c6"
# ## Adversarial Autoencoder
# + id="59034082"
# This cell does preprocessing, it is the same for the CAE and the AAE
# Let's load in the data, split and reshape for the autoencoders
# (reloading here keeps this section runnable independently of the CAE one).
snapshots_grids = np.load(filename)
# Some data reshaping
input_shape = (55, 42, 2)
snapshots = convert_2d(snapshots_grids, input_shape, 2000)
snapshots = np.array(snapshots).reshape(8000, *input_shape)
# Normalize and split dataset
layer = preprocessing.Normalization()
layer.adapt(snapshots)
x_train, x_val = train_test_split(snapshots, test_size=0.1, random_state=seed)
x_train = layer(x_train)
x_val = layer(x_val)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 11, "status": "ok", "timestamp": 1628426074970, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="64aa7a0a" outputId="d4ccc961-6490-4121-f558-7f47fee6a819"
# The hyperparameters set in this cell and the next correspond to the optimal hyperparameters from hyperparameter
# optimization
# NOTE(review): seed=None here (unlike the CAE cell) makes the weight init
# non-reproducible — confirm whether that is intentional.
initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=None)
optimizer = tf.keras.optimizers.Adam(lr=0.0005, beta_1=0.8, beta_2=0.9)
# Use this line to create a new model, select any from the list of models provided in the documentation or make
# your own. Separating the architecture from the model itself gives the user the
# freedom to try out any architectures without having to define an entirely new
# model.
encoder, decoder = build_densest_omata_encoder_decoder(input_shape, 10, initializer, act='elu', dense_act='relu', info=True)
# The discriminator judges whether a 10-dim latent code comes from the prior.
discriminator = build_custom_discriminator(10, initializer, info=True)
# Use these lines to load a previously trained model
# encoder = tf.keras.models.load_model("saved_model_aae/encoder")
# decoder = tf.keras.models.load_model("saved_model_aae/decoder")
# discriminator = tf.keras.models.load_model("saved_model_aae/discriminator")
# + [markdown] id="f5092aab"
# ### Separate losses
#
# Adversarial autoencoder with a separate loss for the discriminator, autoencoder, and generator. Each of these three is trained separately with this method.
#
# Execute either the following two cells or the cells under "combined losses".
# + id="df5171ec"
# AAE variant that trains discriminator, autoencoder and generator with
# separate losses.
aae = AAE(encoder, decoder, discriminator, optimizer, seed=seed)
aae.compile(input_shape)
# + id="9309f6f3"
aae.train(x_train, 200, val_data=x_val)
# + [markdown] id="9ec005ee"
# ### Combined losses
#
# Adversarial autoencoder with a combined (and weighted) loss function for the discriminator and autoencoder; the generator is still trained independently. This model tends to perform significantly better.
# + id="265eed6c"
# AAE variant with a combined discriminator+autoencoder loss (see markdown
# above); overwrites the `aae` from the separate-losses cells if both are run.
aae = AAE_combined_loss(encoder, decoder, discriminator, optimizer, seed=seed)
aae.compile(input_shape)
# + id="5d35cf08"
aae.train(x_train, 200, val_data=x_val, batch_size=64)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1131, "status": "ok", "timestamp": 1628427669880, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="31173699" outputId="fb898328-6e94-4510-8787-cbc2c2bac706"
# Calculate a loss value on the validation set
# predict() returns multiple outputs; [0] is the reconstruction.
res_val = aae.adversarial_autoencoder.predict(x_val)[0]
print("AAE MSE loss of the normalized dataset's reconstruction: ", np.mean(tf.keras.losses.MSE(res_val, x_val)))
# + id="b07addbc"
# Reconstruct every sample with the trained AAE.
snapshots = layer(snapshots)
res = aae.adversarial_autoencoder.predict(snapshots)[0]
# + id="1d0be908"
# Undo normalization
res = (res * np.sqrt(layer.variance.numpy()) + layer.mean.numpy())
# + colab={"base_uri": "https://localhost:8080/", "height": 282} executionInfo={"elapsed": 391, "status": "ok", "timestamp": 1628427671298, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="95aa8b5a" outputId="29017a41-feb1-4323-a379-164669df61ad"
# Visual sanity check: reconstruction (left) vs original (right) for sample 92.
fig, ax = plt.subplots(1,2)
ax[0].contourf(res[92,:,:,1])
ax[1].contourf(snapshots[92, :, :, 1])
# + [markdown] id="HJbPU82BucLO"
# Execute the two cells below to save the results and the model
# + id="e62d3409"
# Reshape to fit in interpolation (legacy) fortran code
reconstruction = res.reshape((4, 2000, 55, 42, 2)).swapaxes(1, 4)
# Save the reconstruction (comment this line out to skip saving).
np.save("aae_reconstruction.npy", reconstruction)
# + id="Z5qBvla2DU8l"
# Save the trained sub-models; uncomment the mkdir line first if the
# directory does not exist yet.
# !mkdir -p saved_model
aae.encoder.save('saved_model/encoder')
aae.decoder.save('saved_model/decoder')
aae.discriminator.save('saved_model/discriminator')
# + [markdown] id="e29bd97c"
# ## SVD Autoencoder
# + id="a0bcb869"
# Data preprocessing
# Load grids
snapshots_grids = np.load(filename)
# Data normalization
# Same global-scalar normalization and axis gymnastics as the POD section:
# time axis first for the split, swapped back afterwards.
layer = preprocessing.Normalization(axis=None)
layer.adapt(snapshots_grids)
snapshots_grids = snapshots_grids.swapaxes(0, 2)
x_train, x_val = train_test_split(snapshots_grids, test_size=0.1, random_state=seed)
x_train = layer(x_train).numpy().swapaxes(0, 2)
x_val = layer(x_val).numpy().swapaxes(0, 2)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 15, "status": "ok", "timestamp": 1628427672538, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="9786213d" outputId="a63f27a3-10cc-487c-8bb7-7c708344e1b1"
# NOTE(review): seed=None makes weight init non-reproducible — confirm intended.
initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=None)
optimizer = tf.keras.optimizers.Nadam(lr=0.0005, beta_1=0.98, beta_2=0.99999)
# Use this line to create a new architecture, select any from the list of
# architectures provided in the documentation or make
# your own. Separating the architecture from the model itself gives the user the
# freedom to try out any architectures without having to define an entirely new
# model.
# 30 POD coefficients in, 10-dim latent space.
encoder, decoder = build_vinicius_encoder_decoder(30, 10, initializer, act='elu', dense_act='relu', info=False, reg=0, dropout=0.55, batchnorm=False)
# Uncomment to load a previously trained model instead:
# encoder = tf.keras.models.load_model("saved_model_svdae/encoder")
# decoder = tf.keras.models.load_model("saved_model_svdae/decoder")
# + id="403afee4"
svdae = SVDAE(encoder, decoder, optimizer, seed=seed)
# 30 = number of POD coefficients fed to the autoencoder.
svdae.compile(30, weight_loss=False)
# Only set this when loading in the model
# svdae.R = np.load("R_svdae.npy")
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 347089, "status": "ok", "timestamp": 1628428019621, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="04743d34" outputId="4fdc94cf-7c27-467a-cf98-98fbff15c9c1"
svdae.train(x_train, 200, val_data=x_val, batch_size=64)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 35984, "status": "ok", "timestamp": 1628428055595, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="eb1085d4" outputId="d16a0993-fee6-4424-ea67-b7e43756190d"
# Generate a loss value and create an array of output grids
# output shape: (4 grids, 2 components, 55, 42, 200 timesteps).
output = np.zeros((4, 2, 55, 42, 200))
loss = 0
for i in range(x_val.shape[0]):
    for j in range(x_val.shape[2]):
        original = x_val[i, :, j]
        result = svdae.predict_single(original)
        loss += tf.keras.losses.MSE(original, result)
        # Undo normalization, then reshape the flat field back into 2D grids.
        result = (result * np.sqrt(layer.variance.numpy()) + layer.mean.numpy())
        result = np.expand_dims(result,(0,2))
        input_shape = (55, 42, 2)
        result = convert_2d(result, input_shape, 1)
        output[i, :, :, :, j] = np.moveaxis(np.array(result).reshape(55, 42, 2), 2, 0)
# 800 = 4 grids x 200 validation timesteps.
print("SVD-AE MSE loss of the normalized dataset's reconstruction: ", loss.numpy()/800)
# + colab={"base_uri": "https://localhost:8080/", "height": 282} executionInfo={"elapsed": 39, "status": "ok", "timestamp": 1628428055598, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="a7b16cf8" outputId="8526a92f-323a-4c18-9536-b33594d0dea1"
# Visual check on one validation sample (grid 2, timestep 30):
# reconstruct it, reshape both reconstruction and original to 2D, and plot.
original = x_val[2, :, 30]
result = svdae.predict_single(original)
result = np.expand_dims(result,(0,2))
input_shape = (55, 42, 2)
result = convert_2d(result, input_shape, 1)
original = np.expand_dims(original,(0,2))
original = convert_2d(original, input_shape, 1)
fig, ax = plt.subplots(1,2)
ax[0].contourf(result[0][0, :, :, 0])
ax[1].contourf(original[0][0, :, :, 0])
# + [markdown] id="QvzFPHIRtsLd"
# Execute the two cells below to save the results and the model
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 40381, "status": "ok", "timestamp": 1628429573274, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02102953505130831508"}, "user_tz": -60} id="qjVyZSIW6riA" outputId="4e805701-249c-418c-e350-f097306edb5d"
# Generate a loss value and create an array of output grids
# NOTE(review): this cell duplicates the evaluation loop above; it is kept so
# the save section can be run standalone, but the two should stay in sync.
output = np.zeros((4, 2, 55, 42, 200))
loss = 0
for i in range(x_val.shape[0]):
    for j in range(x_val.shape[2]):
        original = x_val[i, :, j]
        result = svdae.predict_single(original)
        loss += tf.keras.losses.MSE(original, result)
        result = (result * np.sqrt(layer.variance.numpy()) + layer.mean.numpy())
        result = np.expand_dims(result,(0,2))
        input_shape = (55, 42, 2)
        result = convert_2d(result, input_shape, 1)
        output[i, :, :, :, j] = np.moveaxis(np.array(result).reshape(55, 42, 2), 2, 0)
print("SVD-AE MSE loss of the normalized dataset's reconstruction: ", loss.numpy()/800)
# + id="0e6c6e19"
# Save the reconstruction grids (comment this line out to skip saving).
np.save("svdae_reconstruction.npy", output)
# + id="cfd77d87"
# Save the trained model; uncomment the mkdir line first if the directory
# does not exist yet.
# !mkdir -p saved_model
svdae.encoder.save('saved_model_svdae/encoder')
svdae.decoder.save('saved_model_svdae/decoder')
np.save('saved_model_svdae/R.npy', svdae.R) # Also need to save basis functions in this case
| examples/basic_usage_fpc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/waifuai/waifu_gpt2/blob/master/waifu_distilgpt2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="lUpY6ltkP_uk" colab_type="text"
# # Waifu DistilGPT2
# [GitHub repo: waifuai/waifu_gpt2](https://github.com/waifuai/waifu_gpt2)
#
# This is the DistilGPT2 implementation. It is optimized for speed and produces a response sentence. The code here is meant as a demonstration of waifu conversation. Its extremely high inference speed on cheap hardware (~1-2 seconds on CPU, as opposed to the ~1-2 minutes on GPU inference time of the previous waifu_gpt2 notebook) means that it is now cheap to make real-time deep waifu conversations.
# + id="vs9uyn_hOzRa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 666} outputId="5f30d268-5953-4842-db6c-b673a54bac9f"
# !pip install transformers==2.5.1
# + id="FqUN2NemO5MI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 80} outputId="ca8e7966-9947-467e-8381-254018a357e3"
import time
import torch
from transformers import *
# + id="9uMSgr_MPBDo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 164, "referenced_widgets": ["a74d6866c18c4fbaa614b9c7922386cc", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "78626ffec81f4ceca9250591dbb429c4", "6a8c3f6e7d4949aa9d31638170880a7b", "<KEY>", "93458a883dbe445dba2f2d98c1aa4a97", "<KEY>", "<KEY>", "37e0ce0155774824b5c72dfe68ef8b8e", "3fc1b4dea51d498cb34ea9fa859a05da", "<KEY>", "3fec5f1e87774ff7892adf2d3b615d84", "<KEY>", "b7ee6b05309e41349a16607182809ff4", "<KEY>", "69329657ba5247ac9f1229a4a9a768a0", "3450e737c6124d6ba2af1613dfa775cc", "<KEY>", "ad5d583e039540c1bd99b7a718bce4f4", "<KEY>", "e37c30f4d1664c8f8a873e72145eb0ed"]} outputId="5c2e8d27-6fef-40c5-e23c-30abdbe3fc28"
tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
# + id="7tFF_ePBPC7B" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["7a3148515bb241a9885d7eb38b4ede16", "5847e16850944441b8bf3a270cca861a", "3fcb6de3464f4d1694dbc9a5beeded94", "4cec819b8b084458b466c8a193468c44", "ab5205aff71b4136ac19340f20a798dd", "7677609939c442858888053f9725e4af", "4cf362acfa5e40a98996df326e5c6de4", "89e00c41c0b14968aa62f6a77eca47a2"]} outputId="1be86e34-4e62-4531-8d5f-b695c0e5d5c3"
model = AutoModelWithLMHead.from_pretrained("distilgpt2")
# + id="gvfwJ11DPD2c" colab_type="code" colab={}
def predict_next_word(text):
    """Greedily predict the single most likely next token for *text*,
    returned as decoded text (may be a sub-word with leading whitespace)."""
    token_ids = torch.tensor([tokenizer.encode(text)])
    with torch.no_grad():
        logits = model(token_ids)[0]
    # Argmax over the vocabulary at the final sequence position.
    best_id = torch.argmax(logits[0, -1, :]).item()
    return tokenizer.decode([best_id])
def predict_ten_words(text):
    """Append greedily-predicted tokens to *text* (at most 30) and return
    only the newly generated suffix. Generation stops early once the suffix
    contains a '.' or a '"'. (The name is historical -- the cap is 30
    tokens, not ten words.)"""
    prompt_len = len(text)
    generated = text
    for _ in range(30):
        generated += predict_next_word(generated)
        suffix = generated[prompt_len:]
        if "." in suffix or '"' in suffix:
            break
    return generated[prompt_len:]
def clean_result(result):
    """Truncate *result* at the first stop character ('"' or '.').

    The generation loop stops once either character appears, so the text up
    to the earliest one is the actual response.

    Fixes two defects in the original: the '"' truncation was computed and
    then immediately overwritten by the '.' truncation (dead code), and
    str.find returning -1 when the character was absent silently chopped
    the last character off via result[:-1]. If neither stop character is
    present the string is now returned unchanged.
    """
    cut_positions = [pos for pos in (result.find('"'), result.find('.')) if pos != -1]
    return result[:min(cut_positions)] if cut_positions else result
def predict_response(sentence):
    """Generate a chat reply to *sentence* using a fixed few-shot prompt.

    The prompt shows three You/I exchanges, then ends with an open `I said: "`
    so the model continues with the reply; the backslashes are line
    continuations inside one single-line string literal.
    """
    input_sentence = '\
You said: "Senko I missed you so much!" \
I said: "I missed you too Subaru!" \
You said: "Our date was so fun too" \
I said: "Lets get married and be happy!" \
You said: "Yes that would be awesome!" \
I said: "I like hanging out with you because its so much fun!" \
You said: "' + sentence + '" \
I said: "\
'
    predicted_response = predict_ten_words(input_sentence)
    # Strip everything after the first stop character.
    predicted_response = clean_result(predicted_response)
    return predicted_response
# + id="TlP-fQLQPPAn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="86a370eb-d984-451d-80c9-6fe3fd9aecae"
# Time one end-to-end response generation as a latency demo.
a = time.time()
response = predict_response('Thank you!')
print(response)
b = time.time()
print(str(b-a), 'seconds elapsed.')
# + id="W4d5hzazP3Rf" colab_type="code" colab={}
| waifu_gpt2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Nearest Neighbor Methods
# + [markdown] slideshow={"slide_type": "skip"} toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Nearest-Neighbor-Methods" data-toc-modified-id="Nearest-Neighbor-Methods-1"><span class="toc-item-num">1 </span>Nearest Neighbor Methods</a></span><ul class="toc-item"><li><span><a href="#The-Houses-dataset" data-toc-modified-id="The-Houses-dataset-1.1"><span class="toc-item-num">1.1 </span>The Houses dataset</a></span><ul class="toc-item"><li><span><a href="#t-SNE" data-toc-modified-id="t-SNE-1.1.1"><span class="toc-item-num">1.1.1 </span>t-SNE</a></span></li></ul></li><li><span><a href="#The-OIL-dataset" data-toc-modified-id="The-OIL-dataset-1.2"><span class="toc-item-num">1.2 </span>The OIL dataset</a></span><ul class="toc-item"><li><span><a href="#K-NN" data-toc-modified-id="K-NN-1.2.1"><span class="toc-item-num">1.2.1 </span>K-NN</a></span></li><li><span><a href="#Distance-metrics" data-toc-modified-id="Distance-metrics-1.2.2"><span class="toc-item-num">1.2.2 </span>Distance metrics</a></span></li></ul></li></ul></li></ul></div>
# + code_folding=[] slideshow={"slide_type": "skip"}
# Imports
import warnings
# Silence all warnings notebook-wide (sklearn deprecation chatter); note this
# also hides genuinely useful warnings.
warnings.simplefilter(action='ignore')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.neighbors import KNeighborsClassifier, DistanceMetric
from sklearn.manifold import TSNE
from sklearn.metrics import classification_report, f1_score
from sklearn.model_selection import GridSearchCV, cross_val_score
from dataset import Dataset
# + [markdown] slideshow={"slide_type": "slide"}
# ## The Houses dataset
#
# We'll start by re-using the houses dataset. If you remember, the main difficulty here is that the target variable is SalePrice, so let's see if we can use a KNN to predict a binned version of it, using only numerical variables.
# + code_folding=[] slideshow={"slide_type": "slide"}
# Setup the dataset
houses = Dataset('./data/houseprices_prepared.csv.gz')
houses.set_target('SalePrice');
# remove the Id, scale numeric features and remove skewness
houses.drop_columns('Id').scale().ensure_normality()
# Known issues with NA's
houses.replace_na(column='Electrical', value='Unknown')
houses.replace_na(column=houses.names('categorical_na'), value='None');
# Some FE basic stuff to reduce some features by aggregating them
# (sum related square-footage / porch / bathroom columns into one each).
houses.aggregate(['1stFlrSF','2ndFlrSF','BsmtFinSF1','BsmtFinSF2'], 'HouseSF')
houses.aggregate(['OpenPorchSF','3SsnPorch','EnclosedPorch','ScreenPorch',
                  'WoodDeckSF'], 'HousePorch')
houses.aggregate(['FullBath', 'BsmtFullBath', 'HalfBath', 'BsmtHalfBath'],
                 'HouseBaths');
# + [markdown] slideshow={"slide_type": "slide"}
# My _k-NN_ problem will address the modeling using ONLY the numeric features, so I create a new Dataset called `subset` that will only contain numericals.
# + slideshow={"slide_type": "fragment"}
# Keep only the numerical features plus the target in a new Dataset.
subset_df = pd.concat([houses.select('numerical'), houses.target], axis=1)
subset = Dataset.from_dataframe(subset_df).set_target('SalePrice')
subset.describe()
# + [markdown] slideshow={"slide_type": "slide"}
# I need to make my problem suitable for a classification algorithm like _k-NN_.
#
# So, given that my target variable is a continuous number, I must **discretize** it, so I decide to bin it, using the convenient `KBinsDiscretizer` method in Scikit-Learn. I start by binning into 5 different buckets, but I can change that later.
#
# This means that my problem is now a classification problem where the `SalePrice` is now a value between 0 and 4.
# + slideshow={"slide_type": "fragment"}
# Bin SalePrice into 5 ordinal classes using k-means bin edges, then replace
# the continuous target with the binned labels (0..4).
saleprice_enc = KBinsDiscretizer(n_bins=5, encode='ordinal', strategy='kmeans')
saleprice_enc.fit(subset.target.values.reshape(-1, 1))
y_enc = saleprice_enc.transform(subset.target.values.reshape(-1, 1))
subset.target = pd.Series(y_enc.ravel(), name='SalePrice')
# + [markdown] slideshow={"slide_type": "slide"}
# I think I can go for KNN now.
# + slideshow={"slide_type": "fragment"}
# Split, fit a 5-NN classifier, and score on the held-out test split.
X, y = subset.split()
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X.train.values, y.train.values)
#Predict the response for test dataset
y_pred = knn.predict(X.test.values)
# Model Accuracy, how often is the classifier correct?
print("F1 (micro):", f1_score(y.test, y_pred, average='micro'))
# + [markdown] slideshow={"slide_type": "slide"}
# Let's find the best value for K, using cross validation
# -
# Grid-search odd k values with 5-fold CV, optimizing micro-averaged F1.
tuned_parameters = [{'n_neighbors': [3,5,7,9,11,13,15,17,19]}]
clf = GridSearchCV(KNeighborsClassifier(), tuned_parameters,
                   cv=5, scoring='f1_micro')
clf.fit(X.train, y.train)
print("Best parameters set found on development set:")
print(' ->', clf.best_params_)
print("Grid scores on development set:")
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
    print(" -> %0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
# Final report uses the best estimator refit by GridSearchCV.
y_true, y_pred = y.test, clf.predict(X.test)
print("\n",classification_report(y_true, y_pred))
# + [markdown] slideshow={"slide_type": "slide"}
# ### t-SNE
#
# Plot the results using t-SNE. To know more about, refer to this [link](https://distill.pub/2016/misread-tsne/)
# + hide_input=false slideshow={"slide_type": "slide"}
# Project the test features to 2D with t-SNE and color points by the
# predicted class.
colors = ['red','green','blue','purple','orange']
perplexity=25
X_embedded = TSNE(n_components=2, perplexity=perplexity).fit_transform(X.test)
plt.figure(figsize=(10,6))
plt.scatter(X_embedded[:, :1], X_embedded[:, 1:2],
            c=y_pred.reshape(-1,1), cmap=ListedColormap(colors))
plt.title("Perplexity={}".format(perplexity)); plt.show();
# + [markdown] slideshow={"slide_type": "slide"}
# ## The OIL dataset
#
# Available [here](http://inverseprobability.com/3PhaseData.html).
# Configuration labels, originally as dummy variables:
#
# [1 0 0] == Homogeneous configuration
# [0 1 0] == Annular configuration
# [0 0 1] == Stratified configuration
#
# We convert them into a single categorical 3-valued character.
# + slideshow={"slide_type": "slide"}
# Load the one-hot label file; each row has exactly one 1 among columns
# '1','2','3', so idxmax recovers the category as a single column.
oil_dummy_labels = ['1','2','3']
oil_labels = Dataset('./data/DataTrnLbls.txt',
                     header=None,
                     sep=' ',
                     names=oil_dummy_labels)
# Transform the dummies into the actual category label (column name)
oil_labels.features['target'] = oil_labels.features.idxmax(axis=1)
oil_labels.drop_columns(oil_dummy_labels);
# + [markdown] slideshow={"slide_type": "slide"}
# We read the actual values in the training set, setting the variable names as $x_1$, ... to $x_{12}$.
# + slideshow={"slide_type": "fragment"}
# Load the 12 feature columns, naming them x1..x12.
oil_data = Dataset('./data/DataTrn.txt',
                   header=None,
                   sep=' ',
                   names=['x{}'.format(i+1) for i in range(12)])
# + [markdown] slideshow={"slide_type": "slide"}
# Finally, we merge both datasets to form a single one with the target variable properly set as 'Configuration'.
# + slideshow={"slide_type": "fragment"}
# Concatenate features and labels column-wise into one Dataset with 'target'
# as the target variable.
oil = Dataset.from_dataframe(
    pd.concat([oil_data.features, oil_labels.features], axis=1))
oil.set_target('target');
oil.data.head(3)
# + code_folding=[] hide_input=true slideshow={"slide_type": "slide"}
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
# Scatter the 6th vs 7th feature (x6, x7), colored by the target class.
x = oil.features.iloc[:, 5].values.reshape(-1,1)
y = oil.features.iloc[:, 6].values.reshape(-1,1)
plt.figure(figsize=(8,6))
plt.scatter(x, y,
            c=oil.target.values.reshape(-1, 1),
            cmap=cmap_bold)
plt.xlabel('$x6$')
plt.ylabel('$x7$')
plt.title('Target variable for the oil dataset, using only $x6$ and $x7$')
plt.show();
# + [markdown] slideshow={"slide_type": "slide"}
# ### K-NN
# + slideshow={"slide_type": "fragment"}
# Fit 7-NN using only features x6 and x7 (columns 5:7) and score on test.
X, y = oil.split()
X.train = X.train.iloc[:, 5:7]
X.test = X.test.iloc[:, 5:7]
n_neighbors=7
knn = KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X.train.values, y.train.values)
#Predict the response for test dataset
y_pred = knn.predict(X.test.values)
# Model Accuracy, how often is the classifier correct?
print("F1:", f1_score(y.test, y_pred, average='micro'))
# + code_folding=[] hide_input=false
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
# https://scikit-learn.org/stable/auto_examples/neighbors/plot_classification.html
# Mesh bounds: 0.2 margin around the test data in both dimensions.
x_min, x_max = X.test.iloc[:, 0].min() - .2, X.test.iloc[:, 0].max() + .2
y_min, y_max = X.test.iloc[:, 1].min() - .2, X.test.iloc[:, 1].max() + .2
h = (x_max-x_min)/X.test.shape[0] # step size in the mesh
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
# Predict the class of every mesh point to color the decision regions.
Z = knn.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape).astype(int)
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
# Plot the points with no KNN surface
plt.figure(figsize=(16,7))
plt.subplot(1,2,1)
plt.scatter(X.test.iloc[:, 0], X.test.iloc[:, 1],
            c=list(map(int, y.test.values)),
            cmap=cmap_bold, edgecolor='k', s=20)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xlabel("X6")
plt.ylabel("X7")
# Plot also the test points in the surface
plt.subplot(1,2,2)
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.scatter(X.test.iloc[:, 0], X.test.iloc[:, 1],
            c=list(map(int, y.test.values)),
            cmap=cmap_bold, edgecolor='k', s=20)
plt.xlabel("X6")
plt.ylabel("X7")
plt.title("3-Class classification (k = %i)" % n_neighbors)
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# Let's do cross_val_score to see if we can infer what is the best value for $k$.
# + slideshow={"slide_type": "slide"}
f1 = []
min_k = 2
max_k = 21
# 5-fold cross-validation for each candidate k in [2, 20].
for k in range(min_k, max_k):
    knn = KNeighborsClassifier(n_neighbors=k, n_jobs=-1)
    score = cross_val_score(knn, X.train, y.train, scoring='f1_micro', cv=5)
    f1.append(score)
# Mean F1 over the 5 folds, one value per k.
f1_means = np.mean(np.array(f1), axis=1)
# + [markdown] slideshow={"slide_type": "slide"}
# Let's plot the average F1_micro obtained per CV split, for each value of $k$.
# + hide_input=false slideshow={"slide_type": "slide"}
# Common y-axis limits for both subplots.
max_y1 = np.array(f1).max()
max_y2 = f1_means.max()
max_y = max(max_y1, max_y2)
min_y1 = np.array(f1).min()
min_y2 = f1_means.min()
min_y = min(min_y1, min_y2)
delta = 0.01
k_array = np.arange(min_k, max_k)
# Index of the best-performing k in k_array.
pos_max_f1 = np.where(f1_means == f1_means.max())[0][0]
# Plot x-position of the best k; since k_array[i] == min_k + i this
# simplifies to pos_max_f1 (the index on the integer x-axis).
best_k = k_array[pos_max_f1] - min_k
plt.figure(figsize=(15,6))
plt.subplot(1,2,1)
plt.ylim(min_y-delta, max_y+delta)
plt.plot(f1, '-')
# Relabel the integer x-positions with the actual k values.
plt.xticks(np.arange(max_k - min_k), np.arange(min_k, max_k))
plt.title('F1 score for different values of $K$')
plt.xlabel('K')
plt.ylabel('F1-Score')
plt.subplot(1,2,2)
plt.ylim(min_y-delta, max_y+delta)
plt.plot(f1_means, 'o-')
plt.axvline(x=best_k, linestyle='--', color='grey')
plt.xticks(np.arange(max_k - min_k), np.arange(min_k, max_k))
plt.title('F1-micro average for different values of $K$')
plt.xlabel('K')
plt.ylabel('F1-Score')
plt.show();
# + [markdown] slideshow={"slide_type": "slide"}
# ### Distance metrics
# + slideshow={"slide_type": "slide"}
f1 = []
# Candidate distance metrics for fixed k=7.
# NOTE(review): sklearn's 'haversine' metric assumes 2-D (lat, lon) input
# in radians, which these features are not — confirm its scores are meaningful.
metrics = ['euclidean','manhattan','chebyshev','haversine']
for metric in metrics:
    knn = KNeighborsClassifier(n_neighbors=7, n_jobs=-1, metric=metric)
    score = cross_val_score(knn, X.train, y.train, scoring='f1_micro', cv=5)
    f1.append(score)
f1_means = np.mean(np.array(f1), axis=1)
# + hide_input=true
max_y1 = np.array(f1).max()
max_y2 = f1_means.max()
max_y = max(max_y1, max_y2)
min_y1 = np.array(f1).min()
min_y2 = f1_means.min()
min_y = min(min_y1, min_y2)
plt.figure(figsize=(8,5))
# One line per metric across the 5 CV folds.
for i in range(len(metrics)):
    plt.plot(f1[i], 'o-', label=metrics[i], alpha=0.5)
# NOTE(review): title mentions K but this cell varies the metric, not K.
plt.title('F1 score for different values of $K$')
plt.xlabel('CV iteration')
plt.ylabel('F1-Score')
plt.legend(loc='best')
plt.show();
# + [markdown] slideshow={"slide_type": "slide"}
# Put everything together, we get no improvement...
# + slideshow={"slide_type": "slide"}
# Rebuild the split and evaluate the chebyshev metric on the held-out test set.
X, y = oil.split()
X.train = X.train.iloc[:, 5:7]
X.test = X.test.iloc[:, 5:7]
n_neighbors=7
knn = KNeighborsClassifier(n_neighbors=n_neighbors, metric='chebyshev')
knn.fit(X.train.values, y.train.values)
#Predict the response for test dataset
y_pred = knn.predict(X.test.values)
# Model Accuracy, how often is the classifier correct?
print("F1:", f1_score(y.test, y_pred, average='micro'))
# + [markdown] slideshow={"slide_type": "slide"}
# Thanks!
| Nearest Neighbors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python3
# ---
# + [markdown] id="<PASSWORD>"
# # IBM Streams SPL Toolkits tutorial
#
# This tutorial demonstrates how to discover, use, build and launch SPL toolkits in a Python notebook.
#
# In this notebook, you'll see examples of how to:
# 1. [Setup](#setup)
# 2. [Discover Python packages](#discoverpypi)
# 3. [Discover toolkits](#discovertoolkits)
# 4. [Launch SPL main composite](#ghtksample)
# 5. [Work with a microservice of a toolkit](#ghtk)
# 6. [Integration of SPL operators in a Python topology](#splpy)
#
# # Overview
#
# **Introduction**
#
# IBM Streams provides toolkits as reusable assets to get a solution faster.
# The build service contains product "Out of the Box" toolkits and in addition you can use "Open Source" toolkits from GitHub (https://github.com/IBMStreams) and Customer developed toolkits.
# Many product toolkits are open source toolkits. This tutorial shows how to find them and how to check for updates.
#
# You will also learn in this tutorial, how to use SPL operators in the Python API, and how to work with existing SPL application like samples and microservices.
#
# **How it works**
#
# The Python applications created in this notebook are submitted to the IBM Streams service for execution.
#
# 
#
#
# ## Documentation
#
# - [Streams Python development guide](https://ibmstreams.github.io/streamsx.documentation/docs/latest/python/)
# - [Streams Python API](https://streamsxtopology.readthedocs.io/)
#
#
#
# ## <a name="setup"> </a> 1. Setup
#
# ### 1.1 Add credentials for the IBM Streams service
#
# In order to submit a Streams application you need to provide the name of the Streams instance.
#
# 1. From the navigation menu, click **Services > Instances**.
# 2. Update the value of `streams_instance_name` in the cell below according to your Streams instance name.
# + id="4f444ef6-732f-4f40-93a0-19d13f76abfb"
from icpd_core import icpd_util
streams_instance_name = "sample-streams" ## Change this to Streams instance
# Newer CP4D releases take an instance_type argument; older ones raise
# TypeError for it, so fall back to the legacy single-argument signature.
try:
    cfg=icpd_util.get_service_instance_details(name=streams_instance_name, instance_type="streams")
except TypeError:
    cfg=icpd_util.get_service_instance_details(name=streams_instance_name)
# + [markdown] id="33315933-00bf-49b8-9a03-c1333084614d"
# ### 1.2 Import the `streamsx.toolkits` package and verify the package version
# + id="55026542-8cbf-4ac1-9988-f1b5ad53fdff"
import streamsx.toolkits as tkutils
print("INFO: streamsx.toolkits package version: " + tkutils.__version__)
# + [markdown] id="4c7547f7-6d7f-4a88-9bae-62fd8cdcf2f3"
# ## <a name="discoverpypi"> </a> 2. Discover Python packages
# + [markdown] id="17fdbb88-e838-48c1-886d-a319917498b9"
# ### 2.1 Discover installed Python packages
# + id="e8b539c4-d181-44d9-a369-e386cb9ad2cf"
# Installed streamsx Python packages (name -> version).
installed_packages = tkutils.get_installed_packages()
print (installed_packages)
# + [markdown] id="ed2a60cf-4ccb-479b-b61f-942fd18ff2d5"
# ### 2.2 Discover latest version of Python packages on pypi.org
# + id="76317db6-7bd0-4062-927f-03529dd42b7d"
# Latest published versions on PyPI (name -> version).
pypi_packages = tkutils.get_pypi_packages()
print (pypi_packages)
# + [markdown] id="791968d5-4136-41de-986a-dfb4a11b3511"
# ### 2.3 Check for updates of Python packages
# + id="3c03c32c-590a-4ca2-a9c4-f9196b04976c"
from colorama import Fore, Back, Style
# Compare each PyPI version with the locally installed one.
for x in pypi_packages:
    if x in installed_packages:
        # Any version mismatch is reported as an available update.
        if installed_packages[x] != pypi_packages[x]:
            print(Style.BRIGHT + Fore.BLACK + 'NEW VERSION ' + x
                  + ': ' + Back.GREEN + pypi_packages[x] + Back.RESET)
    else:
        # Present on PyPI but not installed locally.
        print(Fore.RED + 'NEW PACKAGE ' + x + ': ' + pypi_packages[x])
# + [markdown] id="0d046249-02e0-4747-aab0-e191bfd2d087"
# ### 2.4 Optional: Upgrade the Python package
#
# Uncomment and change the name `<NAME>` of the package name in the cell below.
# Run the cell below if you want to upgrade to the latest version of package.
#
# + id="5910da20-416d-42e9-97ac-7969f2f563ed"
#import sys
# #!{sys.executable} -m pip install --user --upgrade streamsx.<NAME>
# + [markdown] id="39b7fe19-7579-451d-88df-cfc362409c0c"
# ## <a name="discovertoolkits"> </a> 3. Discover Toolkits
# + [markdown] id="6da7ccba-c64c-4a5c-a3e1-c4b36a1d8793"
# ### 3.1 Retrieve a list of toolkits available on the Streams build service
#
# This contains all product toolkits and custom toolkits that have been uploaded to the build service
# + id="96a550e4-1569-4092-8155-33149927eaaf"
# Toolkits available on the Streams build service (name -> version).
build_service_toolkits = tkutils.get_build_service_toolkits(cfg)
print(build_service_toolkits)
# + [markdown] id="8f070348-bffd-4124-b199-7188570519d0"
# ### 3.2 Retrieve a list of product toolkits on GitHub
# + id="21642b5b-6855-4c78-8644-db23cdf69b78"
# Latest released toolkit versions on github.com/IBMStreams (name -> version).
github_toolkits = tkutils.get_github_toolkits()
print(github_toolkits)
# + [markdown] id="440867e3-7362-4f51-80d9-d62836cf556f"
# ### 3.3 Check for toolkit updates
# + id="78f06b35-1ac0-4305-9db2-d5f428a4f740"
from colorama import Fore, Back, Style

def _version_key(version):
    """Split a dotted version string into a tuple of ints so versions
    compare numerically. A plain string '<' is lexicographic and ranks
    '10.0.0' below '9.0.0', mis-reporting available updates.
    Non-numeric fragments sort before any numeric component.
    """
    key = []
    for part in version.split('.'):
        try:
            key.append(int(part))
        except ValueError:
            key.append(-1)
    return tuple(key)

# Report GitHub toolkits that are newer than (or missing from) the build service.
for x in github_toolkits:
    if x in build_service_toolkits:
        # Numeric component-by-component comparison instead of string '<'.
        if _version_key(build_service_toolkits[x]) < _version_key(github_toolkits[x]):
            print(Style.BRIGHT + Fore.BLACK + 'NEW VERSION ' + x
                  + ': ' + Back.GREEN + github_toolkits[x] + Back.RESET)
    else:
        # Toolkit exists on GitHub but not on the build service.
        print(Fore.RED + 'NEW TOOLKIT ' + x + ': ' + github_toolkits[x])
# + [markdown] id="9ac71d75-92bc-496a-9bfa-3eaaa033a3cf"
# In order to use a newer version of a toolkit, you can download the toolkit from GitHub with the function
# `streamsx.toolkits.download_toolkit()` described here: https://streamsxtoolkits.readthedocs.io/en/latest/
#
# You can also upload a toolkit to the build service: https://streamsxtopology.readthedocs.io/en/stable/streamsx.build.html
# + [markdown] id="e7fba40d-5312-4112-b9ba-5641482e234c"
# ## <a name="ghtksample"> </a> 4. Launch SPL main composite
#
# This sample uses a toolkit from GitHub.
# * Downloads the toolkit
# * Selects a sample application as main composite
# * Builds and launches the application to the Streams instance
#
# ### 4.1 Download the toolkit from GitHub
# + id="b391a37c-9997-4512-a14a-0702344fc123"
# Download the SPL toolkit itself from GitHub; returns its local path.
nlp_tk = tkutils.download_toolkit('com.ibm.streamsx.nlp')
# + [markdown] id="c69423a0-b26c-4e0e-86bb-c70359079541"
# Download samples directory from repository
# + id="73a83ddb-6b33-453b-bf7d-91db4a65ff06"
nlp_samples = tkutils.download_toolkit('samples', repository_name='streamsx.nlp')
# + [markdown] id="0b05f1dd-9e06-4c62-994c-cf8be4499b25"
# List the samples directory
# + id="2842554d-8b88-4496-a04a-e9b154f993cd"
# !ls $nlp_samples
# + [markdown] id="3c5a8db9-ff16-44aa-b51e-1ba05c8a8edd"
# ### 4.2 Select the sample application
# **Hint:** The main composite must be defined as `public composite <composite name>` to be used as a topology.
# + id="bc34edcc-8a17-4df0-b365-f8ed297548d8"
# Path of the sample SPL project inside the downloaded samples directory.
sample_app = nlp_samples + '/LemmatizerSample'
main_composite_name = 'nlp.sample::LemmatizerSample'
import streamsx.spl.op as op
# here it is important, to include also the application, here 'sample_app', into the toolkits list:
r = op.main_composite(kind=main_composite_name, toolkits=[sample_app, nlp_tk])
# 'r' is a tuple containing the resulting Topology, and an Invoke of the main composite
topo = r[0]
# + [markdown] id="adda16c7-85e6-422f-b22d-98645cdae5e1"
# <a name="submit"></a>
# ### 4.3. Submit the application
#
# A running Streams application is called a *job*. This next cell submits the application for execution and prints the resulting job id.
# + id="b20b10c3-de39-4f78-821f-f33ca6df7de2"
from streamsx.topology import context
# Disable SSL certificate verification if necessary
cfg[context.ConfigParams.SSL_VERIFY] = False
# DISTRIBUTED: build and run the topology on the configured Streams instance.
submission_result = context.submit(context.ContextTypes.DISTRIBUTED,
                                   topo,
                                   config=cfg)
# The submission_result object contains information about the running application, or job
if submission_result.job:
    print("Job Id: ", submission_result.job.id , "Job Name: ", submission_result.job.name)
# + [markdown] id="5f816cf7-db64-42b6-8143-37e73deb04a2"
# <a name="status"></a>
# ### 4.4 See job status
# The tools available to monitor the running application depend on the version of Streams and your development environment.
#
# - **If you are using a Cloud Pak for Data 3.5 project:** When you submit the `Topology`, you create a new <i>job run</i>. The job represents the application and the job run represents a single instance of the running application.
#
# 1. Open your project and click on the **Jobs** tab. This will show a list of the project's jobs.
# 1. Under the **Job name** column, find your job based on the `Job Name` [printed when you submitted the job](#submit). This will list all of the job runs for that job.
# 1. Click the **Run name** to open the job run. The run name will be the same as the `Job Name` printed above.
# 1. This will open the Job Details page.
#
# 1. To open the Job Graph, click the **Streams job graph** link.
#
# 1. To download logs, click the **Logs** tab and click **Create snapshot**, then download the snapshot.
#
#
# - **For all other development environments and versions of Streams**, [see this page for more information](http://ibmstreams.github.io/streamsx.documentation/docs/spl/quick-start/qs-4).
# + [markdown] id="64bf80e0-beeb-41df-960e-20b545c01f8f"
# ### 4.5 Cancel the job
#
# The Streams job is running in the Streams service. You can cancel it within the notebook or delete it from **Projects** > **Jobs**.
# + id="56afe3e2-e456-4ae5-9319-fa46b1a08148"
# cancel the job directly using the Job object
submission_result.job.cancel()
# + [markdown] id="0eaef4b5-5766-4960-a32a-194ec445a367"
# ## Summary
#
# We launched an existing SPL application with the Python API.
# + [markdown] id="02868eeb-10f7-408d-8615-e1a964037bc2"
# ## <a name="ghtk"> </a> 5. Work with a microservice of a toolkit
#
# This sample uses a toolkit from GitHub. The microservice is a part of the toolkit.
# * Downloads the toolkit
# * Selects a microservice application as main composite
# * Builds and launches the application to the Streams instance
# * Creates an application that uses the microservice.
# It sends tuples to the microservice and receives the resulting tuples from the microservice.
#
# ### 5.1 Download the toolkit from GitHub
# + id="80b426dd-2952-45ec-b3be-c6846db92237"
nlp_tk = tkutils.download_toolkit('com.ibm.streamsx.nlp')
# + [markdown] id="50e2bd37-6ed8-43ac-bc6c-bee341673fbc"
# ### 5.2 Select the microservice application
#
# The nlp toolkits provides one microservice called `UimaService` that processes UIMA PEAR file.
#
# This microservice subscribes to the following topics.
#
# * `streamsx/nlp/documents` - ingest topic of type String
# * `streamsx/nlp/update/pear` - update pear topic of type String. String contains the filename of the pear file location.
#
# The following topic is published by the microservice:
# * `streamsx/nlp/annotations` - resulting annotations. Each tuple represents a processed document. Output is of type Json. CAS output is transformed to JSON and format depends on PEAR.
# + id="f6191af1-2e85-4ba9-99ca-88de990dcd42"
# Fully qualified name of the microservice main composite inside the toolkit.
nlp_microservice = 'com.ibm.streamsx.nlp.services::UimaService'
import streamsx.spl.op as op
r = op.main_composite(kind=nlp_microservice, toolkits=[nlp_tk])
# r = (Topology, Invoke); keep only the Topology for submission.
topo_nlp_microservice = r[0]
# + [markdown] id="69a4e85c-732a-4b5a-8332-daf3e06b5bfb"
# ### 5.3. Submit the microservice application
#
# A running Streams application is called a *job*. This next cell submits the application for execution and prints the resulting job id.
# + id="8d9d0320-ba2c-4520-b98c-2f56f0a810c6"
from streamsx.topology import context
# Disable SSL certificate verification if necessary
cfg[context.ConfigParams.SSL_VERIFY] = False
# Submit the microservice topology to the Streams instance.
submission_result_microservice = context.submit(context.ContextTypes.DISTRIBUTED,
                                                topo_nlp_microservice,
                                                config=cfg)
# The submission_result object contains information about the running application, or job
if submission_result_microservice.job:
    print("JobId: ", submission_result_microservice.job.id , "Name: ", submission_result_microservice.job.name)
# + [markdown] id="990538c8-03af-452f-b505-6670632f54e3"
# ### 5.4. Create an application to connect to the microservice
#
# This application generates documents to be processed by the "UimaService" and receives the output of the "UimaService".
# + id="e3678bf1-cd2f-4791-8ac5-6af3799baa44"
from streamsx.topology.topology import Topology
from streamsx.topology.schema import CommonSchema
import time
class StringData(object):
    """Callable tuple source yielding ``count`` sample documents as strings.

    Intended for ``topo.source()``: the optional start-up pause gives the
    subscribing side time to come up before documents are published.
    """

    def __init__(self, count, delay=True):
        # Number of documents to emit, and whether to pause before emitting.
        self.count = count
        self.delay = delay

    def __call__(self):
        if self.delay:
            # Wait so downstream subscribers can connect first.
            time.sleep(10)
        doc = ''.join((
            'Text Sample\n',
            'April 4, 2019 Distillery Lunch Seminar UIMA and its Metadata 12:00PM-1:00PM in HAW GN-K35 \n',
            'April 16, 2019 KM & I Department Tea \n',
            'Title: An Eclipse-based TAE Configurator Tool \n',
            '3:00PM-4:30PM in HAW GN-K35 \n',
            'May 11, 2019 UIMA Tutorial \n',
            '9:00AM-5:00PM in HAW GN-K35 \n',
        ))
        # Tag each copy with a sequence number so tuples are distinguishable.
        for seq in range(self.count):
            yield '{} - doc_{}'.format(doc, seq)
# Publish 1000 documents to the service's input topic and subscribe to
# its JSON results topic.
topo = Topology("NLPSample", namespace="sample")
s = topo.source(StringData(1000)).as_string()
s.publish("streamsx/nlp/documents", schema=CommonSchema.String)
ts = topo.subscribe("streamsx/nlp/annotations", schema=CommonSchema.Json)
ts.print()
ts.isolate()
# + [markdown] id="5a0c974b-c6d4-41d3-a12e-e00e6024cfbb"
# ### 5.5. Submit the application
#
# A running Streams application is called a *job*. This next cell submits the application for execution and prints the resulting job id.
# + id="064a752c-a43b-4bb3-a132-556346d0a0f8"
from streamsx.topology import context
# Disable SSL certificate verification if necessary
cfg[context.ConfigParams.SSL_VERIFY] = False
# Submit the publisher/subscriber application alongside the microservice job.
submission_result_nlp_sample = context.submit(context.ContextTypes.DISTRIBUTED,
                                              topo,
                                              config=cfg)
# The submission_result object contains information about the running application, or job
if submission_result_nlp_sample.job:
    print("JobId: ", submission_result_nlp_sample.job.id, "Name: ", submission_result_nlp_sample.job.name)
# + [markdown] id="dd011bc0-688b-4f8f-8926-0bb40776c550"
# ### 5.6 See job status
#
# To view job status and logs, [follow the steps described in section 4.4](#status).
# + [markdown] id="eeb635bc-771b-4c5f-895a-557813ffd8a4"
# ### 5.7 Cancel the jobs
#
# The Streams jobs are running in the Streams service. You can cancel them within the notebook or delete them from **Projects** > **Jobs**.
# + id="51711854-2a09-4acb-90c9-90b66a60963f"
# cancel the job in the IBM Streams service
submission_result_nlp_sample.job.cancel()
# cancel the microservice job in the IBM Streams service
submission_result_microservice.job.cancel()
# + [markdown] id="74f83034-9410-47d7-95fd-4b9ccfd1398b"
# ## Summary
#
# We launched an application with the Python API that works as a microservice. We created an application to connect to this microservice with `publish` and `subscribe`.
# + [markdown] id="d9f2c130-9a04-4885-9b99-1c2ba0fa789b"
# ## <a name="splpy"> </a> 6. Integration of SPL operators
#
#
# Integration of SPL operators:
# https://ibmstreams.github.io/streamsx.topology/doc/pythondoc/streamsx.spl.op.html#
#
#
# ### 6.1 Download the 'com.ibm.streamsx.nlp' toolkit
# + id="38e24590-e6e4-490b-a31f-3d9d94d2f035"
# (Re-)download the toolkit so nlp_tk points at a local copy.
nlp_tk = tkutils.download_toolkit('com.ibm.streamsx.nlp')
# + [markdown] id="ba96aa3d-5300-46f6-9517-c84ed5ebc23a"
# ### 6.2 Create sample application using SPL primitive operators
# + id="84257e22-41ac-4d15-abd6-b9cefe5b5381"
topo_spl = Topology("WrapSPLOperatorsSample", namespace="sample")
# + [markdown] id="ab9ae85a-f418-4c9d-a069-79e440ac84b4"
# In this example we are using a `Beacon` as source operator, which produces an infinite stream of tuples with the text *The cow jumps over the moon*.
#
#
#     stream<rstring document> Beacon = spl.utility::Beacon() {
#         param
#             initDelay: 5.0;
#             period: 0.01;
#         output Beacon: document="The cow jumps over the moon";
#     }
#
# + id="b686b262-18ec-4ff9-ad0b-df6e33a0bdaf"
import streamsx.spl.op as op
# Beacon operator is used to generate tuples with a single attribute of type rstring
pulse = op.Source(topo_spl, kind='spl.utility::Beacon',
                  schema='tuple<rstring document>',
                  params={'initDelay': 5.0, 'period': 0.01})
# output clause for the 'document' attribute of the output schema definition
pulse.document = pulse.output('"The cow jumps over the moon"')
# + [markdown] id="12d1ca46-6df0-400a-8e0a-0d99846f1489"
# Invoke operator (one input stream and one output stream)
#
# In SPL the invocation of the NgramBasic operator would look like below:
#
# stream<map<rstring, uint32> ngramMap> NgramBasic = com.ibm.streamsx.nlp::NgramBasic (Beacon) {
# param
# documentAttribute: 'document';
# size: 3u;
# minSize: 1u;
# output NgramBasic: ngramMap = NgramCount();
# }
# + id="4cfc287c-cd30-4ba2-972d-188cb38e6a46"
from streamsx.spl.types import uint32
from streamsx.spl.toolkit import add_toolkit
# Invoke the SPL NgramBasic operator: one input stream, one output stream.
ngrams = op.Invoke(topo_spl,
                   inputs = [pulse.stream],
                   kind = "com.ibm.streamsx.nlp::NgramBasic",
                   schemas = 'tuple<map<rstring, uint32> ngramMap>',
                   params = {'documentAttribute': 'document'})
# there are different ways to specify operator parameters: use the params argument in the Invoke,
# or use the params attribute of the resulting Invoke object:
# SPL expects unsigned 32-bit literals, hence the uint32() wrappers.
ngrams.params['size'] = uint32(3)
ngrams.params['minSize'] = uint32(1)
# assign the NgramCount() function to the 'ngramMap' attribute of the output stream 0:
ngrams.ngramMap = ngrams.output(ngrams.outputs[0], 'NgramCount()')
# Dump output stream to console log
ngrams.outputs[0].print()
# Register the toolkit with the topology so the operator resolves at build time.
add_toolkit(topo_spl, nlp_tk)
# + [markdown] id="7366167f-aa9b-48eb-8865-aecbf91b3e58"
# ### 6.3 Submit the application
#
# A running Streams application is called a *job*. This next cell submits the application for execution and prints the resulting job id.
# + id="57b112a7-d71b-4f18-93c4-3a3cde684202"
from streamsx.topology import context
# Disable SSL certificate verification if necessary
cfg[context.ConfigParams.SSL_VERIFY] = False
# Submit the SPL-operator topology to the Streams instance.
submission_result_spl_sample = context.submit(context.ContextTypes.DISTRIBUTED,
                                              topo_spl,
                                              config=cfg)
# The submission_result object contains information about the running application, or job
if submission_result_spl_sample.job:
    print("JobId: ", submission_result_spl_sample.job.id, "Name: ", submission_result_spl_sample.job.name)
# + [markdown] id="4183cc8295ad454bbfab510d5ad7d46c"
# ### 6.4 View job status
#
# To view job status and logs, [follow the steps described in section 4.4](#status).
# + [markdown] id="740b99ad-896c-4352-b1dd-cc8de2ad5770"
# ### 6.5 Cancel the job
#
# The Streams job is running in the Streams service. You can cancel it within the notebook or delete it from **Projects** > **Jobs**.
# + id="36322728-1f61-46e3-b757-21ab4facd502"
# cancel the job directly using the Job object
submission_result_spl_sample.job.cancel()
# + [markdown] id="7990719f-78fc-4c2b-946c-9f506c253233"
# ## Summary
#
# In this notebook, you saw how to use SPL operators with the Python Topology API, find available toolkits, and download toolkits from GitHub.
#
#
# Learn more about the [Python API from the documentation](https://streamsxtopology.readthedocs.io/en/stable/index.html/). You can also visit the [Streams community for more resources](https://ibm.biz/streams-articles).
| Streams-SPLToolkitsTutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # [IAPR][iapr]: Lab 2 ‒ Object description
#
#
# **Group ID:** 36
#
# **Author 1 (sciper):** <NAME> (326760)
# **Author 2 (sciper):** <NAME> (325421)
# **Author 3 (sciper):** <NAME> (271174)
#
# **Release date:** 26.03.2021
# **Due date:** 23.04.2021
#
#
# ## Important notes
#
# The lab assignments are designed to teach practical implementation of the topics presented during class well as preparation for the final project, which is a practical project which ties together the topics of the course.
#
# As such, in the lab assignments/final project, unless otherwise specified, you may, if you choose, use external functions from image processing/ML libraries like opencv and sklearn as long as there is sufficient explanation in the lab report. For example, you do not need to implement your own edge detector, etc.
#
# **! Before handling back the notebook !** rerun the notebook from scratch `Kernel` > `Restart & Run All`
#
#
# [iapr]: https://github.com/LTS5/iapr
# ## 0. Extract relevant data
# We first need to extract the `lab-02-data.tar.gz` archive.
# To this end, we use the [tarfile] module from the Python standard library.
#
# [tarfile]: https://docs.python.org/3.6/library/tarfile.html
# +
import tarfile
import os
# Lab data lives next to this notebook in ../data/lab-02-data.tar.gz.
data_base_path = os.path.join(os.pardir, 'data')
data_folder = 'lab-02-data'
data_part1 = os.path.join(data_base_path, data_folder, 'part1')
data_part2 = os.path.join(data_base_path, data_folder, 'part2')
tar_path = os.path.join(data_base_path, data_folder + '.tar.gz')
with tarfile.open(tar_path, mode='r:gz') as tar:
    # NOTE(review): extractall() trusts the archive's member paths; fine
    # for this course archive, unsafe for untrusted tarballs.
    tar.extractall(path=data_base_path)
# -
# ---
# ## Part 1
# In the `lab-02-data/part1` folder, you will find 28x28 grey-scale pictures of handwritten "0" and "1".
# These digits have been extracted from MNIST dataset (http://yann.lecun.com/exdb/mnist/).
#
# Your goal is to extract, from each of those images, a 2-dimensional feature vector (i.e. 2 features) and to plot them all on a 2D graph.
# If you have chosen good features, the vectors of the "0"'s should nicely cluster in one part of the plane and those of the "1"'s in another.
#
# Please try:
# 1. Fourier Descriptors (15pts).
# 1. Implementation (10 pts).
# 2. Showing invariance to rotation, translation and scaling (5 pts).
# 2. Additional method of your choice (5 pts)
#
#
# **Note:** for the Fourier descriptors, the u_k signal has to be constructed by following the contour point after point. Some pre-processing (image binarization, possibly some Mathematical Morphology) might be useful.
# ### 1.1 Data visualization
# +
import skimage.io
import matplotlib.pyplot as plt
# %matplotlib inline
def load(path, digit='0'):
    """Load every .png image of one digit class as a stacked array.

    Args:
        path: root data directory containing one sub-folder per digit.
        digit: name of the digit sub-folder to load ('0' or '1').

    Returns:
        (images, names): stacked image array and the sorted file names.
    """
    folder = os.path.join(path, digit)
    # Keep only .png files, in deterministic (sorted) order.
    names = sorted(nm for nm in os.listdir(folder) if '.png' in nm)
    collection = skimage.io.imread_collection(
        [os.path.join(folder, nm) for nm in names])
    images = skimage.io.concatenate_images(collection)
    return images, names
# Load zeros and ones
zeros_im_raw, zeros_names = load(data_part1, digit = '0')
ones_im_raw, ones_names = load(data_part1, digit = '1')
# Plot images: zeros on the first row, ones on the second.
fig, axes = plt.subplots(2, len(zeros_im_raw), figsize = (12, 3))
for ax, im, nm in zip(axes[0], zeros_im_raw, zeros_names):
    ax.imshow(im, cmap = 'gray')
    ax.axis('off')
    ax.set_title(nm)
for ax, im, nm in zip(axes[1], ones_im_raw, ones_names):
    ax.imshow(im, cmap = 'gray')
    ax.axis('off')
    ax.set_title(nm)
# -
# ### 1.2 Fourier descriptors (15 pts)
# #### "fourier_descriptor" function.
#
# Computes the fourier descriptors according to the lecture's formula.
# It performs the rotation invariance by taking the modulus of the fourier descriptor and the scaling invariance by
# dividing by the energy. We decide to not include the 0th order fourier descriptor in the energy because it contains the DC part of the contour and therefore can bias the result and cannot ensure the translation invariance.
#
# #### inputs
#
# - contour : shape contour defined by successive [x,y] coordinate from the top left corner of the shape
#
#
# #### outputs
#
# - cont_dft : complex fourier descriptors (without any invariance normalization)
# - cont_dft_amp / energy : fourier descriptors with scaling and rotation invariance
# - energy : contour energy
def fourier_descriptor(contour):
    """Compute Fourier descriptors of a closed contour.

    Args:
        contour: array-like of successive contour points, reshapeable to
            (N, 2) [x, y] coordinates (e.g. an OpenCV contour).

    Returns:
        Tuple of
        - cont_dft: complex Fourier descriptors (no invariance applied),
        - normalized amplitudes: |DFT| / energy, giving rotation
          (modulus) and scale (energy division) invariance,
        - energy: L2 norm of the amplitudes excluding the 0th (DC)
          coefficient, which only encodes the contour's translation.

    The original looped in Python over lists and relied on numpy's
    reflected list / scalar division; this version is fully vectorized.
    """
    pts = np.asarray(contour).reshape(-1, 2)
    # Encode each (x, y) point as x + iy so a 1-D FFT follows the contour.
    cont_complex = pts[:, 0] + 1j * pts[:, 1]
    cont_dft = np.fft.fft(cont_complex)
    cont_dft_amp = np.abs(cont_dft)
    # Energy excludes index 0: the DC term holds the centroid (translation).
    energy = np.sqrt(np.sum(cont_dft_amp[1:] ** 2))
    return cont_dft, cont_dft_amp / energy, energy
# #### "find_larger_blob" function.
#
# Keep the largest blob among every other ones found in the image.
#
# #### input
#
# img : original image
#
# #### output
#
# mask : image with the largest blob
def find_larger_blob(img):
    """Return a binary mask keeping only the largest connected blob.

    Args:
        img: binary image whose non-zero pixels form the blobs.

    Returns:
        mask: float array of img's shape, 1 on the largest blob's pixels,
        all zeros if the image contains no blob at all.
    """
    # Label connected components and collect their region statistics.
    # (No need to copy img: labeling does not modify its input.)
    labels = measure.label(img)
    regions = measure.regionprops(labels)
    mask = np.zeros(img.shape)
    if not regions:
        # No component found: the original indexed an unbound variable
        # here (NameError); return an empty mask instead.
        return mask
    largest = max(regions, key=lambda region: region.area)
    mask[labels == largest.label] = 1
    return mask
# #### "outline_image" function.
#
# Performs the fourier description of the object contour starting with an 8-bits image.
# A first binarization is performed to deal with binary images.
# Then, a skeletonization step is done to thin the shape. Because some twos and threes images have closed loops,
# it is necessary to apply a filling hole algorithm to fill these loops.
# Outlines are drawn for the largest binary shape found on the image by subtracting the skeletonized image from a dilated version of it.
#
# #### inputs
#
# imgs : original image
# bin_t : binary threshold
#
# #### outputs
#
# bin_img : thresholded image
# bin_outline : outline of the shape
def outline_image(imgs, bin_t):
    """Binarize an 8-bit digit image and extract a one-pixel shape outline.

    Pipeline: hard threshold -> skeletonize -> fill closed loops ->
    keep the largest blob -> outline = dilation minus shape.

    Args:
        imgs: original grey-scale image.
        bin_t: binarization threshold.

    Returns:
        bin_img: the thresholded (0/255) image.
        bin_outline: binary outline of the shape.
    """
    work = imgs.copy()
    # Hard threshold to a 0/255 binary image.
    work[work > bin_t] = 255
    work[work <= bin_t] = 0
    bin_img = work.copy()
    # Thin the shape down to a one-pixel-wide skeleton.
    thin = skeletonize(work / 255)
    thin = thin.astype(np.uint8)
    # Fill closed loops (e.g. in '0's): flood-fill the background from the
    # corner, invert, then re-binarize so interior holes become foreground.
    cv2.floodFill(thin, None, (0,0), 255)
    thin = cv2.bitwise_not(thin)
    thin[thin > 0] = 1
    # Discard noisy secondary blobs, keeping only the biggest shape.
    thin = find_larger_blob(thin)
    thin = thin.astype(np.uint8)
    # Outline = dilated shape minus the shape itself.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3));
    dilated = cv2.dilate(thin, kernel, borderType = cv2.BORDER_REFLECT);
    bin_outline = dilated - thin
    return bin_img, bin_outline
# #### Preprocessing
#
# Digit images are first binarized to deal with binary images. The threshold has been adapted to obtain well-defined digit shapes.
#
# +
import cv2
import numpy as np
# binarization: pixels above bin_thresh become 255, the rest 0.
bin_thresh = 100
ret, zeros_im = cv2.threshold(zeros_im_raw, bin_thresh, 255, cv2.THRESH_BINARY)
ret, ones_im = cv2.threshold(ones_im_raw, bin_thresh, 255, cv2.THRESH_BINARY)
# Plot images after binarization
plt.figure(figsize = (18,6))
plt.suptitle("images after binarization")
for i in range(len(zeros_im)):
    plt.subplot(4,10,i + 1)
    plt.axis('off')
    plt.imshow(zeros_im[i], cmap = 'gray')
# Ones are placed after the zeros in the subplot grid.
for i in range(len(ones_im)):
    plt.subplot(4,10,i + len(zeros_im) + 1)
    plt.axis('off')
    plt.imshow(ones_im[i], cmap = 'gray')
# -
# #### Applying fourier description on digit contours
#
# Contours are directly extracted from the binary images (successive pixels starting from the top-left pixel). Because many contours can be found, only the largest one is kept on each image. Finally, the Fourier description is performed, under translation, rotation and scaling invariance. Results are presented below.
#
# Fourier descriptors are scaled by their energy and we graph the 2nd fourier descriptor in function of the 1st one. Doing that way, translation invariance is ensured. Results we got are nicely separable (2 classes) without any outliers.
# +
from skimage import measure
import math
from math import e
from skimage.morphology import skeletonize
# get the zeros and ones images
imgs = np.vstack((zeros_im, ones_im))
# initialization
# Indices of the two Fourier descriptors used as 2-D features.
fdx = 1
fdy = 2
fd1_scale = np.zeros((imgs.shape[0],1))
fd2_scale = np.zeros((imgs.shape[0],1))
contours_test = []
plt.figure(figsize = (18,6))
plt.suptitle("Binary images with their respective computed contour in red")
for i in range(imgs.shape[0]):
    img = imgs[i]
    # find shape contours
    contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # sort the contours by their area in descending order
    area = []
    for cont in contours:
        area.append(cv2.contourArea(cont))
    area_descend = np.argsort(-np.array(area))
    # Keep only the largest contour; smaller/noisy ones are discarded.
    max_area = area_descend[0]
    # compute fourier descriptor (scale- and rotation-invariant version)
    _, fd_scale, _ = fourier_descriptor(contours[max_area])
    plt.subplot(4,10,i + 1)
    plt.axis('off')
    plt.imshow(img, cmap = 'gray')
    plt.plot(contours[max_area].reshape(-1,2)[:,0], contours[max_area].reshape(-1,2)[:,1], color = 'red')
    # keep only fourier descriptor fdx and fdy
    fd1_scale[i] = fd_scale[fdx]
    fd2_scale[i] = fd_scale[fdy]
# graph of fourier descriptor 1 and 2 for each image
# First 10 rows are the zeros, the next 10 the ones (np.vstack order above).
plt.figure(figsize = (8,6))
plt.plot(fd1_scale[0:10], fd2_scale[0:10], color = 'r', linestyle = '', marker = '+', label = 'zeros')
plt.plot(fd1_scale[10:20], fd2_scale[10:20], color = 'b', linestyle = '', marker = '+', label = 'ones')
plt.legend(loc = 'upper right')
plt.xlabel('Fourier descriptor n°{}' .format(fdx))
plt.ylabel('Fourier descriptor n°{}' .format(fdy))
plt.title('Classification based on fourier descriptors, divided by energy')
plt.show()
# -
#
# ### Fourier descriptor invariance
#
# Three types of invariance will be shown in the following.
#
# - translation : to be invariant to translation, the 0th fourier descriptor should be discarded because
# it contains the DC part of the contour. Translation is coded in the contour DC part.
#
# - rotation : to be rotated invariant, the modulus of fourier descriptor should be taken instead of the real part.
# Fourier descriptors are complex numbers ; the rotation affects the phase of the contour but the modulus remains
# the same.
#
# - scaling : to be scaling invariant, fourier descriptors should be divided by one of them (for example, divide
# fourier descriptors by the 1st one). This will remove the scaling factor. It is also possible to divide them by the
# contour energy, which is the approach we take here.
#
# +
# Use the binarized "ones" as the test set for the invariance study.
imgs_for_invar = ones_im.copy()
# Transformation parameters: shift by 2 px, scale contours by a factor 2.
translation_factor = 2
scaling_factor = 2
# Affine matrix for a pure horizontal translation of translation_factor px.
affine_trans_matrix = np.float32([[1,0,translation_factor],[0,1,0]])
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3));  # NOTE(review): unused in this cell
# Indices of the Fourier descriptors inspected below.
fd0_label = 0
fd1_label = 1
fd2_label = 2
fd3_label = 3
# Per image we store 4 variants at rows i*4 + j:
#   j = 0 original, j = 1 translated, j = 2 rotated, j = 3 scaled contour.
fd0_arr = np.zeros((imgs_for_invar.shape[0] * 4, 1)).astype(complex)
fd1_arr = np.zeros((imgs_for_invar.shape[0] * 4, 1)).astype(complex)
fd2_arr = np.zeros((imgs_for_invar.shape[0] * 4, 1)).astype(complex)
energy_arr = np.zeros((imgs_for_invar.shape[0] * 4, 1))
for i in range(imgs_for_invar.shape[0]):
    # get an image
    img = imgs_for_invar[i]
    # 90° rotation (transpose + horizontal flip) and 2-px translation.
    # NOTE(review): warpAffine dsize takes (width, height); img.shape is
    # (rows, cols) — equivalent only for square images, confirm inputs.
    img_rot = np.flip(np.transpose(img), axis = 1)
    img_trans = cv2.warpAffine(img, affine_trans_matrix, img.shape)
    # Stack the original image with its translated and rotated versions
    # along a third axis: channel 0 original, 1 translated, 2 rotated.
    imgs = np.dstack((img, img_trans))
    imgs = np.dstack((imgs, img_rot))
    for j in range(imgs.shape[2]):
        # Contours of the original/translated/rotated image.
        contours, hierarchy = cv2.findContours(imgs[:,:,j], cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        # Keep the contour with the largest enclosed area (drops noise).
        area = []
        for cont in contours:
            area.append(cv2.contourArea(cont))
        area_descend = np.argsort(-np.array(area))
        max_area = area_descend[0]
        # Raw (complex) Fourier descriptors plus contour energy.
        fd, _, energy = fourier_descriptor(contours[max_area])
        fd0_arr[i * 4 + j] = fd[fd0_label]
        fd1_arr[i * 4 + j] = fd[fd1_label]
        fd2_arr[i * 4 + j] = fd[fd2_label]
        energy_arr[i * 4 + j] = energy
    # Scaling invariance: scale the contour itself (not the image) to
    # avoid sampling/contour-length artifacts, then recompute descriptors.
    contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # Keep the contour with the largest enclosed area.
    area = []
    for cont in contours:
        area.append(cv2.contourArea(cont))
    area_descend = np.argsort(-np.array(area))
    max_area = area_descend[0]
    # Descriptors of the contour scaled by scaling_factor (slot j = 3).
    fd, _, energy = fourier_descriptor(contours[max_area] * scaling_factor)
    fd0_arr[i * 4 + 3] = fd[fd0_label]
    fd1_arr[i * 4 + 3] = fd[fd1_label]
    fd2_arr[i * 4 + 3] = fd[fd2_label]
    energy_arr[i * 4 + 3] = energy
# -
# #### Translation invariance
#
# Translation invariance is demonstrated on a shifted version of the original binary image (the one resulting from the pre-processing step). The 0th fourier descriptor encodes the translation. Thus, not taking it as a feature ensures translation invariance, as shown in the 2 following plots. On both plots, since fourier descriptors are complex numbers, data points correspond to the real part of each fourier descriptor. Of course, the invariance to translation can also be demonstrated by taking the modulus of the fourier descriptors.
# +
# Side-by-side view of one digit and its 2-pixel translated copy.
plt.figure(figsize = (8,6))
plt.subplot(1,2,1)
plt.imshow(img, cmap = 'gray')
plt.title('Original image exemple')
plt.subplot(1,2,2)
plt.imshow(img_trans, cmap = 'gray')
plt.title('2-pixels translated image exemple')
# Common marker styles for the descriptor scatter plots.
orig_style = dict(color = 'r', linestyle = '', marker = '+', label = 'original')
trans_style = dict(color = 'g', linestyle = '', marker = '+', label = 'translated')
plt.figure(figsize = (18,6))
# Left: descriptor 0 (DC term) vs descriptor 1 — translation moves the points.
plt.subplot(1,2,1)
plt.xlabel('Fourier descriptor n°{}' .format(fd1_label))
plt.ylabel('Fourier descriptor n°{}' .format(fd0_label))
plt.title('Without translation correction')
plt.plot(np.real(fd1_arr[0::4]), np.real(fd0_arr[0::4]), **orig_style)
plt.plot(np.real(fd1_arr[1::4]), np.real(fd0_arr[1::4]), **trans_style)
plt.legend(loc = 'lower right')
# Right: descriptors 1 and 2 only — dropping the DC term restores invariance.
plt.subplot(1,2,2)
plt.xlabel('Fourier descriptor n°{}' .format(fd1_label))
plt.ylabel('Fourier descriptor n°{}' .format(fd2_label))
plt.title('With translation correction')
plt.plot(np.real(fd1_arr[0::4]), np.real(fd2_arr[0::4]), **orig_style)
plt.plot(np.real(fd1_arr[1::4]), np.real(fd2_arr[1::4]), **trans_style)
plt.legend(loc = 'lower right')
plt.show()
# -
# #### Rotation invariance
#
# Rotation invariance is demonstrated on a 90° rotated version of the original binary image (the one resulting from the pre-processing step). Rotation only affects the contour's phase. On the left plot, the real part of the first and second fourier descriptor is taken, whereas the modulus is computed for each of them on the right plot. As you can see, results for rotation are identical to the original when taking the modulus of fourier descriptors. The 0th order fourier descriptor is not graphed; we only take the first and the second one. This is because even if we performed a pure rotation, a hidden translation operation is also performed.
# +
# Side-by-side view of one digit and its 90-degree rotated copy.
plt.figure(figsize = (8,6))
plt.subplot(1,2,1)
plt.imshow(img, cmap = 'gray')
plt.title('Original image exemple')
plt.subplot(1,2,2)
plt.imshow(img_rot, cmap = 'gray')
plt.title('90° rotated image exemple')
# Common marker styles for the descriptor scatter plots.
orig_style = dict(color = 'r', linestyle = '', marker = '+', label = 'original')
rot_style = dict(color = 'g', linestyle = '', marker = '+', label = 'rotated')
plt.figure(figsize = (18,6))
# Left: real parts only — rotation shifts descriptor phases, so points move.
plt.subplot(1,2,1)
plt.xlabel('Fourier descriptor n°{}' .format(fd1_label))
plt.ylabel('Fourier descriptor n°{}' .format(fd2_label))
plt.title('Without rotation correction')
plt.plot(np.real(fd1_arr[0::4]), np.real(fd2_arr[0::4]), **orig_style)
plt.plot(np.real(fd1_arr[2::4]), np.real(fd2_arr[2::4]), **rot_style)
plt.legend(loc = 'lower right')
# Right: moduli — rotation only changes phase, so the moduli coincide.
plt.subplot(1,2,2)
plt.xlabel('Fourier descriptor n°{}' .format(fd1_label))
plt.ylabel('Fourier descriptor n°{}' .format(fd2_label))
plt.title('With rotation correction')
plt.plot(abs(fd1_arr[0::4]), abs(fd2_arr[0::4]), **orig_style)
plt.plot(abs(fd1_arr[2::4]), abs(fd2_arr[2::4]), **rot_style)
plt.legend(loc = 'upper right')
plt.show()
# -
# #### Scaling invariance
#
# The scaling invariance is not performed on an image anymore, as it is the case for translation and rotation invariance. Because of sampling and contour length effects, we take the contour extracted from the original binary image and scale it by a factor 2. Fourier descriptors are then computed on the scaled version of the original contour. On the left plot, the modulus of fourier descriptors of the original and scaled contour are shown. On the right plot, we divided each fourier descriptor by its respective contour energy. This finally shows that dividing by the contour energy gets rid of the scaling factor.
# +
# Common marker styles for the descriptor scatter plots.
orig_style = dict(color = 'r', linestyle = '', marker = '+', label = 'original')
scaled_style = dict(color = 'g', linestyle = '', marker = '+', label = 'scaled')
plt.figure(figsize = (18,6))
# Left: raw moduli — the x2 contour scales every descriptor, so clusters split.
plt.subplot(1,2,1)
plt.xlabel('Fourier descriptor n°{}' .format(fd1_label))
plt.ylabel('Fourier descriptor n°{}' .format(fd2_label))
plt.title('Without scaling correction')
plt.plot(abs(fd1_arr[0::4]), abs(fd2_arr[0::4]), **orig_style)
plt.plot(abs(fd1_arr[3::4]), abs(fd2_arr[3::4]), **scaled_style)
plt.legend(loc = 'upper left')
# Right: moduli divided by contour energy — the scale factor cancels out.
plt.subplot(1,2,2)
plt.xlabel('Fourier descriptor n°{}' .format(fd1_label))
plt.ylabel('Fourier descriptor n°{}' .format(fd2_label))
plt.title('With scaling correction')
plt.plot(abs(fd1_arr[0::4]) / energy_arr[0::4], abs(fd2_arr[0::4]) / energy_arr[0::4], **orig_style)
plt.plot(abs(fd1_arr[3::4]) / energy_arr[3::4], abs(fd2_arr[3::4]) / energy_arr[3::4], **scaled_style)
plt.legend(loc = 'upper right')
plt.show()
# -
# ### 1.3 Additional method (5 pts)
# The additional method chosen here is a blob-based method. The previously computed outline images are filled to be
# transformed into blobs. Some statistical parameters, named features, are computed on such blobs. Based on that
# features, it is possible to nicely separate the "zeros" from the "ones", as shown in the next plot.
#
# Here, the elongation feature and solidity feature are extracted.
#
# - Elongation : it is the ratio between the maximum diameter of the object and the minimum diameter in the
# perpendicular direction.
#
# - Solidity : defines how well the blob can be approximated by a geometric structure.
# It is the ratio between the blob area and the convex area (of the geometric shape that includes the blob).
# +
# Build the raw image stack and per-image feature containers.
imgs = np.vstack((zeros_im_raw, ones_im_raw))
bin_t = 120
feature1 = np.zeros((imgs.shape[0], 1))
feature2 = np.zeros((imgs.shape[0], 1))
plt.figure(figsize = (18,6))
plt.suptitle("Outline and blob images of 0 and 1 digits")
for i, raw in enumerate(imgs):
    # Binarize the digit and extract its outline.
    img, outline = outline_image(raw.copy(), bin_t)
    plt.subplot(4, 10, i + 1)
    plt.axis('off')
    plt.imshow(outline, cmap = 'gray')
    # Turn the outline into a filled blob: flood-fill the background from
    # the top-left corner, invert, and force the foreground to 255.
    blob = outline.copy()
    cv2.floodFill(blob, None, (0,0), 255)
    blob = cv2.bitwise_not(blob)
    blob[blob > 0] = 255
    plt.subplot(4, 10, i + 21)
    plt.axis('off')
    plt.imshow(blob, cmap = 'gray')
    # Keep only the largest connected shape, then measure it.
    img = find_larger_blob(blob)
    region = measure.regionprops(measure.label(img))[0]
    # Elongation (minor/major axis ratio) and solidity of the blob.
    feature1[i] = region.minor_axis_length / region.major_axis_length
    feature2[i] = region.solidity
# Scatter the two features: zeros are the first 10 rows, ones the last 10.
plt.figure()
plt.plot(feature1[0:10], feature2[0:10], color = 'r', linestyle = '', marker = '+', label = 'zeros')
plt.plot(feature1[10:20], feature2[10:20], color = 'b', linestyle = '', marker = '+', label = 'ones')
plt.legend(loc = 'lower left')
plt.xlabel('Elongation')
plt.ylabel('Solidity')
plt.title('Classification based on features')
plt.show()
# -
# ---
# ## Part 2
# The `lab-02-data/part2` folder contains grey-scale pictures of handwritten "2" and "3".
# Extract the same feature (typically 2 Fourier descriptors) as in part 1 also on these images and plot them on the same graph as the features of the "0" and "1".
# Is it possible to discriminate all these 4 digits with a 2-dimensional feature vector?
# ### 2.1 Data visualization
# +
# Load the grey-scale "2" and "3" digit images from part 2.
twos_im_raw, twos_names = load(data_part2, digit = '2')
threes_im_raw, threes_names = load(data_part2, digit = '3')
# One row of twos on top, one row of threes below, titled by filename.
fig, axes = plt.subplots(2, len(twos_im_raw), figsize = (12, 3))
for row, (images, names) in enumerate(((twos_im_raw, twos_names), (threes_im_raw, threes_names))):
    for ax, im, nm in zip(axes[row], images, names):
        ax.imshow(im, cmap = 'gray')
        ax.axis('off')
        ax.set_title(nm)
# -
# ### 2.2 Fourier descriptors - 4 digits (10 pts)
# In this part, we reuse the code of the first part to compute fourier descriptors of "twos" and "threes" digits. Binarization is used as the pre-processing step, with the same threshold.
# +
# Binarize the "2" and "3" digits with the same threshold as part 1.
bin_thresh = 100
ret, twos_im = cv2.threshold(twos_im_raw, bin_thresh, 255, cv2.THRESH_BINARY)
ret, threes_im = cv2.threshold(threes_im_raw, bin_thresh, 255, cv2.THRESH_BINARY)
# Show every binarized digit on a 4x10 grid (twos first, then threes).
plt.figure(figsize = (18,6))
plt.suptitle("images after binarization")
for idx, im in enumerate(twos_im):
    plt.subplot(4, 10, idx + 1)
    plt.axis('off')
    plt.imshow(im, cmap = 'gray')
for idx, im in enumerate(threes_im):
    plt.subplot(4, 10, idx + len(twos_im) + 1)
    plt.axis('off')
    plt.imshow(im, cmap = 'gray')
# -
# For fourier description, translation, rotation and scaling invariance are applied on the first and second fourier descriptors that we use as features. The results show that while it was possible to separate the "zeros" from the "ones", it is harder to separate the "twos" from the "threes" (right plot). We could find 2 classes but there would be outliers in each class. When putting all together (left plot), it comes out that it is also not possible to completely separate them, except for the "zeros" class which is nicely separated from the other ones. The "ones" class could be reasonably separated if we accept to lose 2 "ones" outliers and get one "twos" outlier. These results probably come from the fact that 2 features is not enough to characterize each digit in a unique manner. In order to have them separated, we could think about a 3-feature representation or apply another 2-feature method.
# +
# Stack the binarized twos and threes into one image array.
imgs = np.vstack((twos_im, threes_im))
plt.figure(figsize = (18,6))
plt.suptitle("Binary images with their respective computed contour in red")
# Feature containers for the same two descriptors (fdx, fdy) used in part 1.
fd3 = np.zeros((imgs.shape[0], 1))
fd4 = np.zeros((imgs.shape[0], 1))
for i, img in enumerate(imgs):
    # Contours of the binary shape; keep the one with the largest area.
    contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    areas = [cv2.contourArea(cont) for cont in contours]
    largest = int(np.argmax(areas))
    # Energy-normalized Fourier descriptors of that contour.
    _, fd_scale, _ = fourier_descriptor(contours[largest])
    plt.subplot(4, 10, i + 1)
    plt.axis('off')
    plt.imshow(img, cmap = 'gray')
    pts = contours[largest].reshape(-1, 2)
    plt.plot(pts[:, 0], pts[:, 1], color = 'red')
    fd3[i] = fd_scale[fdx]
    fd4[i] = fd_scale[fdy]
# Left plot: all four digit classes together; right plot: twos vs threes only.
plt.figure(figsize = (18,6))
plt.subplot(1,2,1)
plt.plot(fd1_scale[0:10], fd2_scale[0:10], color = 'r', linestyle = '', marker = '+', label = 'zeros')
plt.plot(fd1_scale[10:20], fd2_scale[10:20], color = 'b', linestyle = '', marker = '+', label = 'ones')
plt.plot(fd3[0:10], fd4[0:10], color = 'g', linestyle = '', marker = '+', label = 'twos')
plt.plot(fd3[10:20], fd4[10:20], color = 'y', linestyle = '', marker = '+', label = 'threes')
plt.legend(loc = 'upper right')
plt.xlabel('Fourier descriptor n°{}'.format(fdx))
plt.ylabel('Fourier descriptor n°{}'.format(fdy))
plt.title('Classification based on fourier descriptors')
plt.subplot(1,2,2)
plt.plot(fd3[0:10], fd4[0:10], color = 'g', linestyle = '', marker = '+', label = 'twos')
plt.plot(fd3[10:20], fd4[10:20], color = 'y', linestyle = '', marker = '+', label = 'threes')
plt.legend(loc = 'upper right')
plt.xlabel('Fourier descriptor n°{}'.format(fdx))
plt.ylabel('Fourier descriptor n°{}'.format(fdy))
plt.title('Classification based on fourier descriptors')
plt.show()
# -
# #### Optional Other method : Same as the additional method in the part 1 (based on blob statistics)
#
# "Twos" and "threes" are here transformed into blob (as "zeros" and "ones" in the first part) and the same statistical parameters are computed. Results are presented below. What we can see here is that, if it was possible to separate the "zeros" from the "ones", then, it is more difficult to separate the "twos" from the "threes" (as for the fourier descriptors, classes will have outliers). When putting all together (left plot), it comes out that it is possible to nicely separte three classes : "zeros", "ones" and a last one with "threes" and "twos" together with 2 "ones" outliers in the "twos"/"threes" class. Of course, it is possible to found 4 classes but the accuracy on "threes" and "twos" is smaller. The same conclusion as above can be drawn here, regarding the number of features which is too low.
# +
# Same blob-based features as part 1, now for the raw "2" and "3" images.
imgs = np.vstack((twos_im_raw, threes_im_raw))
# Binarization threshold passed to outline_image.
bin_t = 120
# feature3 = elongation, feature4 = solidity (one row per image).
feature3 = np.zeros((imgs.shape[0], 1))
feature4 = np.zeros((imgs.shape[0], 1))
plt.figure(figsize = (18,6))
plt.suptitle("Outline and blob images of 2 and 3 digits")
for i in range(imgs.shape[0]):
    img = imgs[i].copy()
    # Binarize the digit and extract its outline.
    img,outline = outline_image(img, bin_t)
    plt.subplot(4,10,i + 1)
    plt.axis('off')
    plt.imshow(outline, cmap = 'gray')
    # Fill the outline into a solid blob: flood-fill the background from
    # the top-left corner, invert, and force foreground pixels to 255.
    blob = outline.copy()
    cv2.floodFill(blob, None, (0,0), 255)
    blob = cv2.bitwise_not(blob)
    blob[blob > 0] = 255
    plt.subplot(4,10,i + 21)
    plt.axis('off')
    plt.imshow(blob, cmap = 'gray')
    # Keep only the largest connected shape (removes detection noise).
    img = find_larger_blob(blob)
    # Label connected components and measure region properties;
    # stats[0] is the single remaining blob.
    labels = measure.label(img)
    stats = measure.regionprops(labels)
    # Elongation (minor/major axis ratio) and solidity of the blob.
    feature3[i] = stats[0].minor_axis_length / stats[0].major_axis_length
    feature4[i] = stats[0].solidity
# Left plot: all four digit classes; right plot: twos vs threes only.
plt.figure(figsize = (18,6))
plt.subplot(1,2,1)
plt.plot(feature1[0:10], feature2[0:10], color = 'r', linestyle = '', marker = '+', label = 'zeros')
plt.plot(feature1[10:20], feature2[10:20], color = 'b', linestyle = '', marker = '+', label = 'ones')
plt.plot(feature3[0:10], feature4[0:10], color = 'g', linestyle = '', marker = '+', label = 'twos')
plt.plot(feature3[10:20], feature4[10:20], color = 'y', linestyle = '', marker = '+', label = 'threes')
plt.legend(loc = 'lower left')
plt.xlabel('Elongation')
plt.ylabel('Solidity')
plt.title('Classification based on features')
plt.subplot(1,2,2)
plt.plot(feature3[0:10], feature4[0:10], color = 'g', linestyle = '', marker = '+', label = 'twos')
plt.plot(feature3[10:20], feature4[10:20], color = 'y', linestyle = '', marker = '+', label = 'threes')
plt.legend(loc = 'lower right')
plt.xlabel('Elongation')
plt.ylabel('Solidity')
plt.title('Classification based on features')
plt.show()
# -
| Jiaan_lab_02_object_description.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (cvxpy)
# language: python
# name: cvxpy
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Seminar 17.
#
# # Linear programming. Simplex method
# (main reference is [this book](https://www.amazon.com/Introduction-Linear-Optimization-Scientific-Computation/dp/1886529191))
# + [markdown] slideshow={"slide_type": "slide"}
# ## Current place in syllabus
#
# - Unconstrained optimization
# - ~~One dimensional mimnimization~~
# - ~~Gradient descent~~
# - ~~Newton method and quasi-Newton methods~~
# - ~~Conjugate gradient method~~
# - Constrained optimization
# - <span style="color:red">Linear programming: simplex method</span>
# - Linear programming: primal barrier method
# - Projected gradient method and Frank-Wolfe method
# - Penalty and barrier function methods
# - Augmented Lagrangian method
# - Sequaential quadratic programming
# - Other topics
# - ~~Least squares problem~~
# - Proximal methods
# - Optimal methods and lower bounds
# - Mirror descent
# - Review of stochastic methods
# + [markdown] slideshow={"slide_type": "slide"}
# ## Problem statement
# Given vectors $c \in \mathbb{R}^n$, $b \in \mathbb{R}^m$ and matrix $A \in \mathbb{R}^{m \times n}$ such that $m < n$ and $\mathrm{rank}(A) = m$
# - Standard form
# \begin{align*}
# &\min_x c^{\top}x \\
# \text{s.t. } & Ax \leq b\\
# & x_i \geq 0, \; i = 1,\dots, n
# \end{align*}
# - Canonical form (below we consider this form)
# \begin{align*}
# &\min_x c^{\top}x \\
# \text{s.t. } & Ax = b\\
# & x_i \geq 0, \; i = 1,\dots, n
# \end{align*}
# + [markdown] slideshow={"slide_type": "slide"}
# ### Reformulation of the forms
# + [markdown] slideshow={"slide_type": "fragment"}
# - $Ax = b \equiv
# \begin{cases}
# Ax \leq b\\
# Ax \geq b
# \end{cases}
# $
# - $Ax \leq b \equiv
# \begin{cases}
# Ax + z = b\\
# z \geq 0
# \end{cases}$
# - free variable $x \equiv
# \begin{cases}
# x = u - v\\
# u \geq 0, \; v \geq 0
# \end{cases}$
# - change sign with multiplication by $-1$
# + [markdown] slideshow={"slide_type": "slide"}
# ## Application
#
# - Producing optimal quantity of product with resources constraints:
# \begin{align*}
# &\max_x c^{\top}x \\
# \text{s.t. } & Ax \leq b\\
# & x_i \geq 0, \; i = 1,\dots, n,
# \end{align*}
# where $x_i$ - quantity of the $i$-th product, $c_i$ is a revenue from the $i$-th product, $b_i$ - available quantity of the $i$-th resource, $a_{ij}$ is a quantity of the $i$-th resource, which is required to produce unit of the $j$-th product.
#
# - Flows in networks: transport problem, max flow problem, minimal cost path in communication network to pass message
#
# - Regression problem in $\ell_1$ and $\ell_{\infty}$ norms can be formulated as linear programming
# + [markdown] slideshow={"slide_type": "slide"}
# ## What solution can be given by optimization method?
#
# - The problem is feasible and $x^*$ is minimizer
# - The problem is unbounded and objective minimum is $-\infty$
# - The problem is infeasible i.e. feasible set is empty
# + [markdown] slideshow={"slide_type": "slide"}
# ## Preliminaries
#
# - Linear programming problem is minimization of the linear function on the multidimensional polytope
# + [markdown] slideshow={"slide_type": "fragment"}
# **Questions:**
#
# - when the problem is feasible?
# - among what points should we search solution?
# - how can we find solution?
# + [markdown] slideshow={"slide_type": "slide"}
# ## Definitions
#
# 1. A point from the feasible set is called *vertex* of the polytope if it does not lie inside the interval between two others points of polytope
# 2. A point $x$ is called *extreme point* of polytope if
# - it lies inside polytope
# - there exists such set $\mathcal{B} \subset \{1, \dots, n \}$, that
# - $|\mathcal{B}| = m$
# - $i \notin \mathcal{B} \Rightarrow x_i = 0$
# - a matrix $B = [a_i]_{i \in \mathcal{B}}$ is non-singular, where $a_i$ is the $i$-th columns of matrix $A$. Matrix $B$ is called *basis matrix*
# + [markdown] slideshow={"slide_type": "fragment"}
# **Theorem**
#
# All extreme points of polytope correspond to vertices of the polytope.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Fundamental theorem of linear programming
#
# 1. If feasible set of the linear programming problem is not empty, then it has at least one extreme point.
# 2. If the linear programming problem has solution, then at least one of them is extreme point.
# 3. If the linear programming problem is bounded and feasible set is not empty, then it has finite solution.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Simplex method
#
# - Simplex method was [proposed](https://www.rand.org/pubs/reports/R366.html) by <NAME> in 1947
# - Method was proposed in the period of active rising of computerization
# - It became famous because of numerous applications in economics and planning of manufacturing
# - It is included in the [list](http://www.uta.edu/faculty/rcli/TopTen/topten.pdf) of the top 10 algorithms of the XX century by SIAM editors version
# + [markdown] slideshow={"slide_type": "slide"}
# ### General scheme
#
# 1. Find arbitrary extreme point of the feasible point
# 2. Move to the other adjacent extreme point such that the objective function decreases
# 3. If there exists another extreme point where objective function is smaller than current value, move to this extreme point
#
# **Q:** how formalize and perform these steps?
# + [markdown] slideshow={"slide_type": "slide"}
# ## Pseudocode of one iteration of simplex method
#
# Given extreme point $x$, corresponding basis matrix $B$ and set of indices $\mathcal{B}$.
#
# 1. Compute *reduced costs* $\overline{c}_j = c_j - c^{\top}_{\mathcal{B}}B^{-1}a_j$ for all $j \not\in \mathcal{B}$.
# - if $\overline{c}_j \geq 0$ for all $j$, then current point is optimal and we can't decrease objective function
# - otherwise <span style="color:red"> select </span> index $j^*$ such that $\overline{c}_{j^*} < 0$
# 2. Compute $u = B^{-1}a_{j^*}$
# - if all entries of $u$ are non-positive, then the peoblem is unbounded, optimal value is $-\infty$
# - if there are positive entries, then compute
#
# $$
# \theta^* = \min_{\{i | u_i > 0\}} \frac{x_{\mathcal{B}(i)}}{u_i}
# $$
#
# 3. <span style="color:red"> Select </span> such index $\ell$ that
#
# $$
# \theta^* = \frac{x_{\mathcal{B}(\ell)}}{u_{\ell}}.
# $$
#
# Compose new basis matrix $\hat{B}$ through replacing column $a_{\mathcal{B}(\ell)}$ with column $a_{j^*}$. New extreme point $\hat{x}$, corresponding to the basis matrix $\hat{B}$, is defined as
#
# $$
# \begin{align*}
# & \hat{x}_{j^*} = \theta^*\\
# & \hat{x}_{\mathcal{B}(k)} = x_{\mathcal{B}(k)} - \theta^*u_k, \text{if } k \neq \ell
# \end{align*}
# $$
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Different implementations of simplex method
#
# - Naive simplex method
# - Revised simplex method
# - <span style="color:red"> Tableau simplex method </span>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Why there exist different implementations and what difference between them?
#
# - The most complicated operation in every iteration is computing $B^{-1}A$
# - Matrices $B$ in iterations $i$ and $i+1$ differ in only one column
# - How compute $B^{-1}A$ efficiently?
# + [markdown] slideshow={"slide_type": "slide"}
# ### Naive implementation
#
# - Solve linear system $By = A$ in every iteration and compute reduced costs with vector $c^{\top}_{\mathcal{B}}y$
# - Complexity - $O(m^3 + mn)$
# - Information from previous iteration is not used
# - If the system $By = A$ can be solved fast, complexity is significantly reduced. It depends on the structure of the original matrix $A$
# + [markdown] slideshow={"slide_type": "slide"}
# ### Revised simplex method
#
# - Update $B$ can be preformed by adding matrix $U = (a_q - a_p)e^{\top}_p$, where $e_p$ is the $p$-th unit vector, $a_j$ is the $j$-th column of matrix $A$
# - Column $a_p$ is replaced with column $a_q$
# - Matrix $U$ has rank-one
# - $(B + U)^{-1}$ with [Sherman-Morrison-Woodbury formula](https://en.wikipedia.org/wiki/Sherman%E2%80%93Morrison_formula) can be computed with $O(m^2)$ flops
# - Update factors $L$ and $U$ in LU decomposition of the matrix $B$ is better way, but derivation of the update formulas is less trivial
#
# - The best total complexity is $O(m^2)$, if reduced costs are computed with pivoting, and the worst total complexity is $O(mn)$, if all reduced costs are computed.
#
# More details see [here](http://www.maths.ed.ac.uk/hall/RealSimplex/25_01_07_talk1.pdf) and [here](http://www.maths.ed.ac.uk/hall/RealSimplex/25_01_07_talk2.pdf)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Tableau simplex method
#
# - More details and examples are below
# - Complexity is proportional to the size of table $O(mn)$
# - Possible issue with stability during performing of elementary operations
# + [markdown] slideshow={"slide_type": "slide"}
# ## Correctness theorem
#
# Assume
# - feasible set is not empty
# - every extreme point is *nondegenerate*.
#
# Then simplex method
# stops after finite number of iterations and
#
# gives one of the following result
#
# - the optimal extreme point $x^*$ is found
# - the problem is unbounded and optimal objective is $-\infty$
#
# **Definition.** Extreme point is called *degenerate*, if more than $n - m$ of its entries are zero
#
# **Q:** what is geometric interpretation of degeneracy?
# + [markdown] slideshow={"slide_type": "slide"}
# ## Problem degeneracy of the extreme point
#
# If one of the extreme point is *degenerate*,
#
# then it can lead to **cycle** of simplex method!
# + [markdown] slideshow={"slide_type": "fragment"}
# ### Solution
#
# - Lexicographic rule of selection $\ell$
# - Bland's rule - selection of the minimal $j^*$ and $\ell$
#
# [<NAME>](https://people.orie.cornell.edu/bland/) is an American mathematician,
#
# one of the author of the [oriented matroids](https://en.wikipedia.org/wiki/Oriented_matroid) theory.
# + [markdown] slideshow={"slide_type": "slide"}
# ## How to find initial extreme point?
#
# - Two-phase simplex method
# - M-method
# + [markdown] slideshow={"slide_type": "slide"}
# ### Two-phase simplex method
#
# To find initial extreme point compose the following auxiliary problem assuming that $b_i \geq 0, \; i =1, \dots,m$. This assumption is easy to satisfy by multiplying corresponding rows of $A$ and elements of $b$ by $-1$.
# \begin{align*}
# & \min_{z, y} y_1 + \ldots + y_m \\
# \text{s.t. } & Az + y = b\\
# & z \geq 0, \; y \geq 0
# \end{align*}
#
# - Initial extreme point for this problem is obvious $z = 0, \; y = b$
# - If the optimal value of the objective function in this problem is not **zero**, then feasible set of the original problem is empty
# - If the optimal value of the objective function is **zero**, then $y^* = 0$ and initial extreme point $x_0 = z^*$.
# + [markdown] slideshow={"slide_type": "slide"}
# #### What is initial base matrix?
#
# **Issue:** slack variable equal to zero can be in the basis corresponding to $x_0$
#
# **Solution:** procedure of driving slack variables out of the basis
# + [markdown] slideshow={"slide_type": "slide"}
# #### Driving slack variables out of the basis
#
# 1. Select slack variable $x_k = 0$ from the basis found after solving first-phase problem
# 2. Check the row of the table corresponding to this variable
# - if in this row all elements corresponding to the columns for original variables are zero, then this row can be eliminated because it is linearly dependent on other rows of the matrix
# - otherwise, use this non-zero element as pivot element and exclude the variable corresponding to the row and include variable corresponding to the column. The process is similar to the one iteration of simplex method, except the requirement of the positiveness of this nonzero element.
# + [markdown] slideshow={"slide_type": "slide"}
# ## All in all
#
# - After driving slack variable out of the basis, the value of new variable is still zero, therefore this procedure does not change the value of objective function
# - The final basis corresponding to the solution of the first phase problem after driven slack variable out of the basis should be used in the second phase
# - The first phase - search of the initial extreme point, the second phase - solving original problem starting from the solution and basis matrix from the first phase
# + [markdown] slideshow={"slide_type": "slide"}
# ### M-method
#
# **Idea:** unite two phases of two-phase simplex method
#
# into the single phase
#
# \begin{align*}
# & \min_{z, y} c^{\top}z + M(y_1 + \ldots + y_m) \\
# \text{s.t. } & Az + y = b\\
# & z \geq 0, \; y \geq 0,
# \end{align*}
#
# where $M$ is arbitrary large positive real number.
#
# Usually it is unknown in advance,
#
# therefore you can use it as free parameter that can be done large enough if necessary
# + [markdown] slideshow={"slide_type": "slide"}
# ### Examples of solving problems with tableau simplex method
#
# Examples are available [here](./examples.pdf)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Example of usage
# + slideshow={"slide_type": "fragment"}
import scipy.optimize as scopt
import numpy as np
n = 1000
m = 10
# Random LP instance: n variables, m inequality constraints A x <= b.
c = 10 * np.random.rand(n)
b = np.random.rand(m)
A = np.random.randn(m, n)
# Positional args are the *inequality* constraints (A_ub, b_ub);
# every variable is bounded below by -1 and unbounded above.
res = scopt.linprog(c, A, b, bounds=[(-1, None) for i in range(n)])
print(res)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Complexity
# + [markdown] slideshow={"slide_type": "fragment"}
# - It was shown that in the worst case the running time of the simplex method depends <span style="color:red;font-weight:bold">exponentially</span> on the problem dimension!
# - However, in practice the running time is proportional to the number of constraints and the simplex method converges much faster
# - Why this is so is still unknown...
# + [markdown] slideshow={"slide_type": "slide"}
# ### Example by Klee and Minty (1972) [[1]](https://en.wikipedia.org/wiki/Klee–Minty_cube)
#
# In the following problem
# \begin{align*}
# & \max_{x \in \mathbb{R}^n} 2^{n-1}x_1 + 2^{n-2}x_2 + \dots + 2x_{n-1} + x_n\\
# \text{s.t. } & x_1 \leq 5\\
# & 4x_1 + x_2 \leq 25\\
# & 8x_1 + 4x_2 + x_3 \leq 125\\
# & \ldots\\
# & 2^n x_1 + 2^{n-1}x_2 + 2^{n-2}x_3 + \ldots + x_n \leq 5^n\\
# & x \geq 0
# \end{align*}
# starting from the point $x_0 = 0$ and following the trajectory of the simplex method, one has to visit $2^n - 1$ vertices.
#
# **Exercise**: solve this problem for $n = 2$ and $n = 3$, and generalize solution for arbitrary $n$.
# + slideshow={"slide_type": "slide"}
def generate_KleeMinty_test_problem(n):
    """Build the n-dimensional Klee-Minty cube as a linear programme.

    Returns (c, A, b, bounds) suitable for scipy.optimize.linprog; the
    objective vector is negated because linprog minimizes while the
    Klee-Minty problem is stated as a maximization.
    """
    # Objective coefficients 2^{n-1}, 2^{n-2}, ..., 2, 1.
    c = np.array([2 ** k for k in reversed(range(n))])
    bounds = [(0, None)] * n
    # Right-hand sides 5, 25, ..., 5^n.
    b = np.array([5 ** k for k in range(1, n + 1)])
    # First-column pattern 1, 4, 8, 16, ..., reused truncated in each column.
    col = np.array([1] + [2 ** (k + 1) for k in range(1, n)])
    A = np.zeros((n, n))
    for j in range(n):
        A[j:, j] = col[:n - j]
    return -c, A, b, bounds
# + slideshow={"slide_type": "slide"}
n = 5
c, A, b, bounds = generate_KleeMinty_test_problem(n)
print(c)
print(A)
print(b)
print(bounds)
# + slideshow={"slide_type": "slide"}
res = scopt.linprog(c, A, b, bounds=bounds)
print(res)
# + slideshow={"slide_type": "slide"}
n_list = range(3, 16)
n_iters = np.zeros(len(n_list))
times = np.zeros(len(n_list))
for i, n in enumerate(n_list):
    c, A, b, bounds = generate_KleeMinty_test_problem(n)
    # maxiter is raised to 2^max(n) because the simplex path can visit up to 2^n - 1 vertices.
    res = scopt.linprog(c, A, b, bounds=bounds, options={"maxiter": 2**max(n_list)})
    # time = %timeit -o scopt.linprog(c, A, b, bounds=bounds, options={"maxiter": 2**max(n_list) + 1})
    n_iters[i] = res.nit
    # NOTE(review): `time.best` relies on the escaped `%timeit -o` magic above
    # being active when run as a notebook; as plain Python this line fails -- confirm.
    times[i] = time.best
# + slideshow={"slide_type": "slide"}
USE_COLAB = False
# %matplotlib inline
import matplotlib.pyplot as plt
if not USE_COLAB:
    plt.rc("text", usetex=True)
plt.figure(figsize=(20,5))
plt.subplot(1, 2, 1)
# Gap between observed iteration counts and the 2^n - 1 worst case.
plt.plot(n_list, n_iters - np.array([2**n - 1 for n in n_list]), label="$K_t - K_{exp}$")
# plt.semilogy(n_list, [2**n - 1 for n in n_list], label="Theory")
plt.xlabel("Dimension, $n$", fontsize=24)
plt.ylabel("Number of iterations, $K$", fontsize=24)
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
plt.legend(fontsize=18)
plt.subplot(1, 2, 2)
plt.semilogy(n_list, times)
plt.xlabel("Dimension, $n$", fontsize=24)
plt.ylabel("Computation time", fontsize=24)
plt.xticks(fontsize=18)
_ = plt.yticks(fontsize=18)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Recap
#
# - Linear programming problem
# - Applications
# - Simplex method and its complexity
| Spring2017-2019/17-LinProgSimplex/Seminar17en.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dicts
import pandas as pd
from collections import defaultdict
# ## Converting data frame to dict
# +
df = pd.DataFrame({
    "Fare": [4, 10, 20],
    "Sex": ["m", "f", "m"],
    "Embarked": ["S", "C", "S"]
})
display(df)
print("Default:")
display(df.to_dict())
print("Records:")
# "records" returns a list with one {column: value} dict per row.
display(df.to_dict("records"))
# -
# ## Iterating
# +
dictionary = {
    "2020-04-01": 6,
    "2020-05-01": 8
}
for key, value in dictionary.items() :
    print(key + ": " + str(value))
# -
# ### Iterating by ascending value
# +
ages = {
    "matt": 35,
    "riker": 2,
    "mason": 5,
    "ava": 4
}
# key=item[1] sorts the (name, age) pairs by age.
for name, age in sorted(ages.items(), key=lambda item: item[1]):
    print("{}: {}".format(name, age))
# -
# ## Deleting an item
d = {"a": 5, "b": 6, "c": 7}
del d["a"]
print("New dict:", d)
# Or you can also return the deleted item:
d = {"a": 5, "b": 6, "c": 7}
item = d.pop("a")
print("New dict:", d)
print("Deleted item:", item)
# ## Number of keys
d = {"a": 5, "b": 6, "c": 7}
len(d)
# ## Default Dict
# Missing keys are created on first access with int() == 0.
word_counts = defaultdict(int)
word_counts["machine"] = 10
print(word_counts)
print("machine:", word_counts["machine"])
print("learning:", word_counts["learning"])
# ## Creating a dict from a list
# ### With no key values
dict.fromkeys(["a", "b", "c"])
# ### With key values
# Note: all keys share the single value object passed here.
dict.fromkeys(["a", "b", "c"], 5)
| Dicts.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # day 12: Stochastic Gradient Descent
#
# # Objectives
#
# * See how Stochastic Gradient Descent can be used on a simple model (1-dim. linear regression)
# * See how SGD can be used on a complex model (MLPClassifier)
# * Understand impact of batch size and learning rate (aka step size)
#
#
# # Outline
# * [Review: Loss and Gradient for 1-dim. Linear Regression](#part1)
# * [Part 1: Stochastic Estimates of the Loss](#part1)
# * [Part 2: Stochastic Estimates of the Gradient](#part2)
# * [Part 3: Stochastic Gradient Descent Algorithm in a few lines of Python](#part3)
# * [Part 4: Using sklearn code to train MLPClassifier with SGD](#part4)
#
# # Takeaways
#
# * Stochastic estimates of loss functions are possible when the function is *additive* over training examples
#
# * Stochastic gradient descent is a simple algorithm that can be implemented in a few lines of Python
# * * Practical issues include selecting step size and batch size $B$
#
# * Selecting batch size trades off two things:
# * * Runtime cost of computing each gradient estimate (scales with $O(B)$, so smaller is better)
# * * Quality of the estimate (larger $B$ leads to less variance)
import numpy as np
import sklearn.neural_network
# +
# import plotting libraries
import matplotlib
import matplotlib.pyplot as plt
# %matplotlib inline
# NOTE(review): the 'seaborn' style name was removed in matplotlib >= 3.6
# (renamed 'seaborn-v0_8') -- confirm the pinned matplotlib version.
plt.style.use('seaborn') # pretty matplotlib plots
import seaborn as sns
sns.set('notebook', font_scale=1.25, style='whitegrid')
# -
# # Create simple dataset: y = 1.234 * x + noise
#
# We will *intentionally* create a toy dataset where we know that a good solution has slope near 1.234.
#
# We'll generate N = 1000 examples.
#
# Naturally, the best slope for the finite dataset we create won't be exactly 1.234 (because of the noise added plus the fact that our dataset size is limited).
def create_dataset(N=1000, slope=1.234, noise_stddev=0.1, random_state=0):
    """Create a toy 1-d regression dataset: y = slope * x + Gaussian noise.

    Args
    ----
    N : int, number of examples
    slope : float, true slope of the noise-free line
    noise_stddev : float, standard deviation of the additive noise
    random_state : int, seed for the pseudo-random number generator

    Returns
    -------
    x_N, y_N : 1D arrays, each of shape (N,)
    """
    rng = np.random.RandomState(int(random_state))
    # Inputs are evenly spaced on [-2, 2].
    x_N = np.linspace(-2, 2, N)
    y_N = slope * x_N + rng.randn(N) * noise_stddev
    return x_N, y_N
# Generate the training set (true slope 1.234, noisy) and scatter-plot it.
xtrain_N, ytrain_N = create_dataset(N=1000, noise_stddev=0.3)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(5,5))
plt.plot(xtrain_N, ytrain_N, 'k.');
plt.xlabel('x');
plt.ylabel('y');
# # Review: Gradient Descent for 1-dim. Linear Regression
# ## Define prediction model
#
# Consider the *simplest* linear regression model. A single weight parameter $w \in \mathbb{R}$ representing the slope of the prediction line. No bias/intercept.
#
# To make predictions, we just compute the weight multiplied by the input feature
# $$
# \hat{y}(x) = w \cdot x
# $$
# ## Define loss function
# We want to minimize the total *squared error* across all N observed data examples (input features $x_n$, output responses $y_n$)
#
# Given a full dataset of $N$ examples, we can compute the loss as:
# \begin{align}
# \min_{w \in \mathbb{R}} ~~ &\ell(w)
# \\
# \text{calc_loss}(w) = \ell(w) &= \frac{1}{N} \sum_{n=1}^N \frac{1}{2} (y_n - w x_n)^2
# \end{align}
# Given a random minibatch of $B$ examples, we can *estimate* the loss as:
# \begin{align}
# \text{estimate_loss}(w) = &= \frac{1}{B} \sum_{n=1}^B \frac{1}{2} (y_n - w x_n)^2
# \end{align}
# ### Exercise 1A: Complete the code below
#
# You should make it match the math expression above.
def calc_loss(w, xbatch_B, ybatch_B):
    ''' Compute loss for slope-only least-squares linear regression

    Estimates the loss on a minibatch of (x, y) values as
    0.5 * mean over the batch of the squared residual (y - w * x).

    Args
    ----
    w : float
        Value of slope parameter
    xbatch_B : 1D array, size (B,)
        x values of the minibatch
    ybatch_B : 1D array, size (B,)
        y values of the minibatch

    Returns
    -------
    loss : float
        Mean of squared error loss at provided w value
    '''
    residuals_B = ybatch_B - w * xbatch_B
    return 0.5 * np.mean(np.square(residuals_B))
# ### Define the gradient function
#
# Given a full dataset of $N$ examples, we can compute the gradient as:
#
# \begin{align}
# \text{calc_grad}(w) = \ell'(w) &= \frac{1}{N} \frac{\partial}{\partial w} [ \sum_{n=1}^N \frac{1}{2} (y_n - w x_n)^2]
# \\
# &= \frac{1}{N} \sum_{n=1}^N (y_n - w x_n) (-x_n)
# \\
# &= \frac{1}{N} \sum_{n=1}^N (w x_n - y_n) (x_n)
# \\
# &= w \left( \frac{1}{N} \sum_{n=1}^N x_n^2 \right) - \frac{1}{N} \sum_{n=1}^N y_n x_n
# \end{align}
#
# Given a random minibatch of $B$ examples, we can *estimate* the gradient using:
#
# \begin{align}
# \text{estimate_grad}(w) &= \frac{1}{B} \frac{\partial}{\partial w} [ \sum_{n=1}^B \frac{1}{2} (y_n - w x_n)^2]
# \\
# &= w \left( \frac{1}{B} \sum_{n=1}^B x_n^2 \right) - \frac{1}{B} \sum_{n=1}^B y_n x_n
# \end{align}
# Below, we've implemented the gradient calculation in code for you
def calc_grad(w, xbatch_B, ybatch_B):
    ''' Compute gradient for slope-only least-squares linear regression

    Estimates d(loss)/dw on a minibatch via
    g = w * mean(x^2) - mean(x * y).

    Args
    ----
    w : float
        Value of slope parameter
    xbatch_B : 1D array, size (B,)
        x values of the minibatch
    ybatch_B : 1D array, size (B,)
        y values of the minibatch

    Returns
    -------
    g : float
        Value of derivative of loss function at provided w value
    '''
    mean_xx = np.mean(np.square(xbatch_B))
    mean_xy = np.mean(xbatch_B * ybatch_B)
    return w * mean_xx - mean_xy
# # Part 1: Stochastic estimates of the loss
# ### Plot whole-dataset loss evaluated at each w from -3 to 8
#
# We should see a "bowl" shape with one *global* minima, because our optimization problem is "convex"
G = 101
w_grid = np.linspace(-3, 8, G) # create array of G evenly spaced values between -3 and 8
print(w_grid)
# +
# Whole-dataset (deterministic) loss at each candidate slope w.
loss_grid = np.zeros(G)
for gg in range(G):
    loss_grid[gg] = calc_loss(w_grid[gg], xtrain_N, ytrain_N)
plt.plot(w_grid, loss_grid, 'b.-');
plt.xlabel('w');
plt.ylabel('loss(w)');
plt.ylim([0, 35]);
# -
# ### Discussion 1a: Visually, at what value of $w$ does the loss function have a minima? Is it near where you would expect (hint: look above for the "true" slope value used to generate the data)
# ### Sampling a minibatch of size B
#
# We have provided some starter code that samples a minibatch of B examples from a training set of N examples
#
# Uses a provided `random_state` pseudo-random number generator, which defaults to numpy's if not specified.
def draw_minibatch(xtrain_N, ytrain_N, batch_size=100, random_state=np.random):
    ''' Sample a minibatch of desired size from provided training set

    Rows are chosen uniformly at random *without* replacement using the
    provided pseudo-random number generator (numpy's global one by default).

    Returns
    -------
    xbatch_B : 1D array, size (B,)
        x values of minibatch
    ybatch_B : 1D array, size (B,)
        y values of minibatch
    '''
    n_total = ytrain_N.size
    chosen_ids = random_state.choice(np.arange(n_total), size=batch_size, replace=False)
    # Copy so later edits to the minibatch cannot touch the training arrays.
    xbatch_B = xtrain_N[chosen_ids].copy()
    ybatch_B = ytrain_N[chosen_ids].copy()
    return xbatch_B, ybatch_B
# ### Show several minibatches of size 1
draw_minibatch(xtrain_N, ytrain_N, 1)
draw_minibatch(xtrain_N, ytrain_N, 1)
draw_minibatch(xtrain_N, ytrain_N, 1)
# ### Show example minibatch of size 500
#
# +
xbatch_B, ybatch_B = draw_minibatch(xtrain_N, ytrain_N, 500)
print("Showing first 5 entries of x array of shape %s" % str(xbatch_B.shape))
print(xbatch_B[:5])
print("Showing first 5 entries of y array of shape %s" % str(ybatch_B.shape))
print(ybatch_B[:5])
# -
# ### Exercise 1b: Would you expect the loss on the minibatch to be better at $w=1.0$ or at $w = -1.0$?
# TODO write answer here to guess, then use code below to figure it out.
# Show the loss at this random minibatch when $w = 1.0$
calc_loss(1.0, xbatch_B, ybatch_B)
# Show the loss at this random minibatch when w = -1.0
calc_loss(-1.0, xbatch_B, ybatch_B)
# ### Exercise 1c: Can you draw a random minibatch of size 25 and display *all* of it?
# +
xbatch_25, ybatch_25 = draw_minibatch(xtrain_N, ytrain_N, 25)
print("Showing x array")
print(xbatch_25)
print("Showing y array")
print(ybatch_25)
# -
# ### Exercise 1d: What is the loss at your minibatch at $w=1.0$ and $w = 1.5$?
calc_loss(1.0, xbatch_25, ybatch_25)
# NOTE(review): the heading above asks about w = 1.5 but this cell evaluates
# w = -1.0 -- confirm which was intended.
calc_loss(-1.0, xbatch_25, ybatch_25)
# ## Plot: Compute the *stochastic estimate* of loss as a function of $w$, using batch size 50
#
# We'll make 3 lines, for 3 separate *trials* of this procedure, so we can see how much each trial's curve might vary
# Each trial re-seeds the RNG, so trials are reproducible but mutually distinct.
for trial in range(3):
    random_state = np.random.RandomState(trial) # set seed based on the trial id
    stoch_loss_grid = np.zeros(G)
    for gg in range(G):
        xbatch_B, ybatch_B = draw_minibatch(xtrain_N, ytrain_N, batch_size=50, random_state=random_state)
        stoch_loss_grid[gg] = calc_loss(w_grid[gg], xbatch_B, ybatch_B)
    plt.plot(w_grid, stoch_loss_grid, '.-', label='Trial %d' % trial);
plt.xlabel('w');
plt.ylabel('loss(w)');
plt.ylim([0, 35]);
# ## Exercise 1e: Compute stochastic estimate of loss as function of w, for batch_size = 5
# +
for trial in range(3):
    random_state = np.random.RandomState(trial) # set seed based on the trial id
    stoch_loss_grid = np.zeros(G)
    for gg in range(G):
        xbatch_B, ybatch_B = draw_minibatch(xtrain_N, ytrain_N, batch_size=5, random_state=random_state)
        stoch_loss_grid[gg] = calc_loss(w_grid[gg], xbatch_B, ybatch_B)
    plt.plot(w_grid, stoch_loss_grid, '.-', label='Trial %d' % trial);
plt.xlabel('w');
plt.ylabel('loss(w)');
# -
# ## Exercise 1f: Compute stochastic estimate of loss as function of w, for batch_size = 1
# Repeat the above for batch_size = 1
# +
for trial in range(3):
    random_state = np.random.RandomState(trial) # set seed based on the trial id
    stoch_loss_grid = np.zeros(G)
    for gg in range(G):
        xbatch_B, ybatch_B = draw_minibatch(xtrain_N, ytrain_N, batch_size=1, random_state=random_state)
        stoch_loss_grid[gg] = calc_loss(w_grid[gg], xbatch_B, ybatch_B)
    plt.plot(w_grid, stoch_loss_grid, '.-', label='Trial %d' % trial);
plt.xlabel('w');
plt.ylabel('loss(w)');
# -
# ## Discussion 1f: What can you say about the *variance* of the stochastic estimates
#
# How does variance change as a function of batch size ?
# ## Part 2: Stochastic estimates of the gradient
# ## Sanity check: plot whole dataset gradient evaluated at each w from -3 to 8
# +
# Whole-dataset gradient: linear in w, crossing zero at the minimizer.
grad_grid = np.zeros(G)
for gg in range(G):
    grad_grid[gg] = calc_grad(w_grid[gg], xtrain_N, ytrain_N)
plt.plot(w_grid, grad_grid, 'b.-');
plt.xlabel('w');
plt.ylabel('grad(w)');
plt.ylim([-7, 9]);
# -
# ## Plot: Compute the *stochastic estimate* of grad as a function of $w$, using batch size 50
# +
for trial in range(3):
    random_state = np.random.RandomState(trial) # set seed based on the trial id
    stoch_grad_grid = np.zeros(G)
    for gg in range(G):
        xbatch_B, ybatch_B = draw_minibatch(xtrain_N, ytrain_N, batch_size=50, random_state=random_state)
        stoch_grad_grid[gg] = calc_grad(w_grid[gg], xbatch_B, ybatch_B)
    plt.plot(w_grid, stoch_grad_grid, '.-', label='Trial %d' % trial);
plt.xlabel('w');
plt.ylabel('grad(w)');
plt.ylim([-7, 9]);
# -
# ## Exercise 2a: Repeat the above plot at batch_size = 5
#
# +
for trial in range(3):
    random_state = np.random.RandomState(trial) # set seed based on the trial id
    stoch_grad_grid = np.zeros(G)
    for gg in range(G):
        xbatch_B, ybatch_B = draw_minibatch(xtrain_N, ytrain_N, batch_size=5, random_state=random_state)
        stoch_grad_grid[gg] = calc_grad(w_grid[gg], xbatch_B, ybatch_B)
    plt.plot(w_grid, stoch_grad_grid, '.-', label='Trial %d' % trial);
plt.xlabel('w');
plt.ylabel('grad(w)');
plt.ylim([-7, 9]);
# -
# ## Exercise 2c: Repeat the above plot at batch_size = 1
#
# +
for trial in range(3):
    random_state = np.random.RandomState(trial) # set seed based on the trial id
    stoch_grad_grid = np.zeros(G)
    for gg in range(G):
        xbatch_B, ybatch_B = draw_minibatch(xtrain_N, ytrain_N, batch_size=1, random_state=random_state)
        stoch_grad_grid[gg] = calc_grad(w_grid[gg], xbatch_B, ybatch_B)
    plt.plot(w_grid, stoch_grad_grid, '.-', label='Trial %d' % trial);
plt.xlabel('w');
plt.ylabel('grad(w)');
plt.ylim([-7, 9]);
# -
# ## Discussion 2d: What happens to the variance of the grad estimate as batch_size increases?
#
# ## Discussion 2d: What happens to the cost of computing the estimate as batch_size increases?
#
# ## Part 3: Stochastic Gradient Descent (GD) as an algorithm in Python
#
# ### Define minimize_via_sgd algorithm
#
# Can you understand what each step of this algorithm does?
def minimize_via_sgd(xtrain_N, ytrain_N, init_w=0.0, batch_size=10, step_size=0.001, max_iters=100, random_state=0):
    ''' Minimize the slope-only least-squares loss with minibatch SGD.

    One "iteration" keeps drawing random minibatches (one gradient step per
    batch) until roughly N examples have been seen.

    Args
    ----
    xtrain_N : numpy array, shape (N,)
    ytrain_N : numpy array, shape (N,)
    init_w : float
        Starting value of the slope parameter
    batch_size : int
        Number of examples per stochastic gradient estimate
    step_size : float
        Learning rate alpha used in the update w <- w - alpha * grad
    max_iters : positive int
    random_state : int or RandomState
        Seed (or generator) controlling the minibatch draws

    Return
    ----
    wopt : float
        Slope value after the final update
    info_dict : dict
        Debugging traces recorded after every step:
        * trace_loss_list : list of loss values
        * trace_w_list : list of w values
        * trace_grad_list : list of gradient values
    '''
    n_total = int(ytrain_N.size)
    if isinstance(random_state, int):
        random_state = np.random.RandomState(random_state)
    w = 1.0 * init_w
    # Per-step traces, appended after each parameter update (for debugging).
    trace_loss_list = []
    trace_w_list = []
    trace_grad_list = []
    step_id = 0
    for iter_id in range(max_iters):
        n_seen_this_iter = 0
        while n_seen_this_iter < n_total:
            xbatch_B, ybatch_B = draw_minibatch(
                xtrain_N, ytrain_N, batch_size, random_state=random_state)
            n_seen_this_iter += batch_size
            loss = calc_loss(w, xbatch_B, ybatch_B)
            grad = calc_grad(w, xbatch_B, ybatch_B)
            # The parameter update: a small step downhill along the gradient estimate.
            w = w - step_size * grad
            step_id += 1
            print(" iter %5d/%d done | step %5d | w % 13.5f | loss % 13.4f | grad % 13.4f" % (
                iter_id, max_iters, step_id, w, loss, grad))
            trace_loss_list.append(loss)
            trace_w_list.append(w)
            trace_grad_list.append(grad)
    info_dict = dict(
        trace_loss_list=trace_loss_list,
        trace_w_list=trace_w_list,
        trace_grad_list=trace_grad_list)
    return w, info_dict
# ### Discussion 2a: Which line of the above function does the *parameter update* happen?
#
# Remember, in math, the parameter update of gradient descent is this:
# $$
# w \gets w - \alpha \nabla_w \ell(w)
# $$
#
# where $\alpha > 0$ is the step size.
#
# In words, this math says *move* the parameter $w$ from its current value a *small step* in the "downhill" direction (indicated by gradient).
# TODO write down here which line above *you* think it is, then discuss with your group
# ### Try it! Run SGD with step_size = 0.01 and batch_size = 200
#
# Running the cell below will have the following effects:
#
# 1) one line will be printed for every iteration, indicating the current w value and its associated loss
#
# 2) the "optimal" value of w will be stored in the variable named `wopt` returned by this function
#
# 3) a dictionary of information useful for debugging will be stored in the `info_dict` returned by this function
# Run SGD and keep the per-step traces for the diagnostic plots below.
wopt, info_dict = minimize_via_sgd(xtrain_N, ytrain_N, step_size=0.01, batch_size=200);
# +
fig, axes = plt.subplots(nrows=1, ncols=3, sharex=True, sharey=False, figsize=(12,3))
axes[0].plot(info_dict['trace_loss_list'], '.-');
axes[0].set_title('loss');
axes[1].plot(info_dict['trace_grad_list'], '.-');
axes[1].set_title('grad');
axes[2].plot(info_dict['trace_w_list'], '.-');
axes[2].set_title('w');
# -
# ### Discussion 2b: Does it appear from the *parameter* values in trace above that the SGD procedure converged?
# ### Discussion 2c: Does it appear from the *loss* values in trace above that the SGD procedure converged?
# ### Discussion 2d: Does it appear from the *grad* values in trace above that the SGD procedure converged?
# ## Try with smaller batch_size = 10
wopt, info_dict = minimize_via_sgd(xtrain_N, ytrain_N, step_size=0.01, batch_size=10);
# +
fig, axes = plt.subplots(nrows=1, ncols=3, sharex=True, sharey=False, figsize=(12,3))
axes[0].plot(info_dict['trace_loss_list'], '.-');
axes[0].set_title('loss');
axes[1].plot(info_dict['trace_grad_list'], '.-');
axes[1].set_title('grad');
axes[2].plot(info_dict['trace_w_list'], '.-');
axes[2].set_title('w');
# -
# ### Try with even smaller batch_size of 1
# +
# TODO write code here
# -
# ### Discussion 3b: What happens here with this smaller batch size? Is it converging?
# # Part 5: SGD for MLPClassifier
#
#
# Let's revisit the XOR dataset from our previous lab, and try SGD for it.
def make_xor_dataset(n_per_blob=50, stddev=0.4, random_state=0):
    """Build a 2-d XOR-style dataset of four Gaussian blobs.

    Diagonal blobs (centers (-1,-1) and (+1,+1)) get label 1, the
    anti-diagonal blobs get label 0, and rows are returned shuffled.
    """
    rng = np.random.RandomState(random_state)
    cov_22 = np.square(stddev) * np.eye(2)
    # Draw the four blobs in a fixed order so results are reproducible.
    centers = [[-1, -1], [-1, +1], [+1, -1], [+1, +1]]
    x_00, x_01, x_10, x_11 = [
        rng.multivariate_normal(mu, cov_22, size=n_per_blob) for mu in centers]
    N = 4 * n_per_blob
    # Stack so the first half of rows is the positive (diagonal) class.
    x_N2 = np.vstack([x_00, x_11, x_01, x_10])
    assert x_N2.shape == (N, 2)
    y_N = np.hstack([np.ones(N // 2), np.zeros(N // 2)]).astype(np.int32)
    assert y_N.shape == (N,)
    # Shuffle the order
    perm_ids = rng.permutation(N)
    return x_N2[perm_ids].copy(), y_N[perm_ids].copy()
# Build the XOR training set and scatter-plot the two classes.
x_tr_N2, y_tr_N = make_xor_dataset(n_per_blob=50)
# +
plt.figure(figsize=(4,4))
plt.plot(x_tr_N2[y_tr_N==0,0], x_tr_N2[y_tr_N==0,1], 'rx', label='y=0', mew=2);
plt.plot(x_tr_N2[y_tr_N==1,0], x_tr_N2[y_tr_N==1,1], 'b+', label='y=1', mew=2);
plt.xlabel('x_1');
plt.ylabel('x_2');
plt.legend(bbox_to_anchor=(1.0, 0.5));
# -
# # Setup: Create Utility function for visualizing classifier predictions
#
# You do NOT need to understand the details of this function. We'll just use it as is.
def plot_pretty_probabilities_for_clf(
        clf,
        ax=None,
        x1_grid=(-2.1, 2.1, 50), x2_grid=(-2.1, 2.1, 50),
        x1_ticks=[-1, 0, 1], x2_ticks=[-1, 0, 1],
        do_show_colorbar=False,
        c_ticks=np.asarray([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]),
        c_num_levels=100,
        ):
    ''' Display predicted probabilities from classifier as color contour plot
    Args
    ----
    clf : sklearn object that implements classifier API
    ax : matplotlib axis handle, or None
        If provided, will use axis handle as primary axis to plot on.
        If None, will use the current handle, or make new one as necessary.
    x1_grid : tuple-like or array-like
        If tuple of length 3, interpreted as args to np.linspace
        Otherwise, cast to array and assumed to be a 1d grid of x1 values
    x2_grid : tuple-like or array-like
        If tuple of length 3, interpreted as args to np.linspace
        Otherwise, cast to array and assumed to be a 1d grid of x2 values
    '''
    # NOTE(review): the list/array default arguments are mutable; they are
    # only read here, but callers must not modify them in place.
    # Activate the current axis, if necessary
    if ax is None:
        cur_ax = plt.gca()
    else:
        cur_ax = ax
        plt.sca(cur_ax)
    # Create dense grid of x1 and x2 values
    # useful for visualizing probabilities
    if isinstance(x1_grid, tuple) and len(x1_grid) == 3:
        x1_grid = np.linspace(x1_grid[0], x1_grid[1], x1_grid[2])
    if isinstance(x2_grid, tuple) and len(x2_grid) == 3:
        x2_grid = np.linspace(x2_grid[0], x2_grid[1], x2_grid[2])
    x1_grid = np.asarray(x1_grid).flatten()
    x2_grid = np.asarray(x2_grid).flatten()
    # Contour levels span the full probability range [0, 1].
    c_levels = np.linspace(0.0, 1.0, c_num_levels)
    # Get regular grid of G x H points, where each point is an (x1, x2) location
    G = x1_grid.size
    H = x2_grid.size
    x1_GH, x2_GH = np.meshgrid(x1_grid, x2_grid)
    # Combine the x1 and x2 values into one array
    # Flattened into M = G x H rows
    # Each row of x_M2 is a 2D vector [x_m1, x_m2]
    x_M2 = np.hstack([x1_GH.flatten()[:,np.newaxis], x2_GH.flatten()[:,np.newaxis]])
    # Predict proba for each point in the flattened grid
    yproba1_M = clf.predict_proba(x_M2)[:,1]
    # Reshape the M probas into the GxH 2D field
    yproba1_GH = np.reshape(yproba1_M, x1_GH.shape)
    # Contour plot
    cmap = plt.cm.RdYlBu
    my_contourf_h = plt.contourf(
        x1_GH, x2_GH, yproba1_GH,
        levels=c_levels, vmin=0, vmax=1.0,
        cmap=cmap, alpha=0.5)
    # Edit the ticks observed
    if x1_ticks is not None:
        plt.xticks(x1_ticks, x1_ticks);
    if x2_ticks is not None:
        plt.yticks(x2_ticks, x2_ticks);
    if do_show_colorbar:
        # Place the colorbar in its own axes to the right, then restore focus.
        left, bottom, width, height = plt.gca().get_position().bounds
        cax = plt.gcf().add_axes([left + 1.1*width, bottom, 0.03, height])
        plt.colorbar(my_contourf_h, orientation='vertical', cax=cax, ticks=c_ticks);
        plt.sca(cur_ax);
# ## Try an MLP with 2 hidden units, batch size of 25
#
# This time, we'll use SGD as built in solver
# MLP with 2 hidden units trained by minibatch SGD (see markdown above).
mlp_2hidden_run5 = sklearn.neural_network.MLPClassifier(
    hidden_layer_sizes=[2],
    activation='relu',
    solver='sgd',
    learning_rate_init=0.1,
    random_state=5,
    batch_size=25,
    )
# +
# Fit the model to training data
mlp_2hidden_run5.fit(x_tr_N2, y_tr_N)
# -
# ### Visualize the results using our utility function
#
# +
plt.figure(figsize=(4,4))
plot_pretty_probabilities_for_clf(mlp_2hidden_run5, do_show_colorbar=True, ax=plt.gca());
plt.plot(x_tr_N2[y_tr_N==0,0], x_tr_N2[y_tr_N==0,1], 'rx', label='y=0', mew=2);
plt.plot(x_tr_N2[y_tr_N==1,0], x_tr_N2[y_tr_N==1,1], 'b+', label='y=1', mew=2);
# +
### Visualize the trace of the loss
plt.plot(mlp_2hidden_run5.loss_curve_, 'k.-')
# -
# # Try several trials with batch_size of 25
# +
# Each trial only changes random_state (init + minibatch order).
for trial in [1, 2, 3, 4, 5]:
    mlp_2hidden_batchsize25 = sklearn.neural_network.MLPClassifier(
        hidden_layer_sizes=[2],
        activation='relu',
        solver='sgd',
        learning_rate_init=0.05,
        random_state=trial,
        batch_size=25,
        )
    # Fit the model to training data
    mlp_2hidden_batchsize25.fit(x_tr_N2, y_tr_N)
    # Visualize the trace of the loss
    plt.plot(mlp_2hidden_batchsize25.loss_curve_, '.-', label='trial %d' % trial)
plt.legend();
# -
# ### Now try several trials with a batch_size of 1
# +
for trial in [1, 2, 3, 4, 5]:
    mlp_2hidden_batchsize1 = sklearn.neural_network.MLPClassifier(
        hidden_layer_sizes=[2],
        activation='relu',
        solver='sgd',
        learning_rate_init=0.05,
        random_state=trial,
        batch_size=1,
        )
    # Fit the model to training data
    mlp_2hidden_batchsize1.fit(x_tr_N2, y_tr_N)
    # Visualize the trace of the loss
    plt.plot(mlp_2hidden_batchsize1.loss_curve_, '.-', label='trial %d' % trial)
plt.legend();
# -
# ### Discussion 4a: What is happening here?
| labs/day12_StochasticGradientDescent.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# #!pip install numpy scipy matplotlib ipython scikit-learn mglearn sympy pandas pillow
# -
import numpy as np
import matplotlib.pyplot as plt
from scipy import sparse
import pandas as pd
from IPython.display import display
eye = np.eye(4)
print(eye)
# CSR format stores only the nonzero entries of the identity matrix.
sparse_mtx = sparse.csr_matrix(eye)
print(sparse_mtx)
x = np.linspace(-10,10,100)
y = np.sin(x)
plt.plot(x,y,marker='x')
plt.show()
data = {'Name': ["John", "Anna", "Peter", "Linda"], 'Location': ["Nairobi", "Napoli", "London", "Buenos Aires"], 'Age':[51, 21, 34, 45]}
data_pandas = pd.DataFrame(data)
display(data_pandas)
# NOTE(review): the imports below repeat the ones at the top of this notebook.
import numpy as np
import matplotlib.pyplot as plt
from scipy import sparse
import pandas as pd
from IPython.display import display
import mglearn
import sklearn
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
x, y = mglearn.datasets.make_wave(n_samples=100)
# Dense grid of inputs for drawing smooth prediction curves.
line = np.linspace(-3,3,1000,endpoint=False).reshape(-1,1)
reg = DecisionTreeRegressor(min_samples_split=3).fit(x,y)
plt.plot(line, reg.predict(line), label="decision tree")
regline = LinearRegression().fit(x,y)
# Fixed the label typo ("Linear Rgression") and added plt.legend(),
# without which the labels set above were never displayed.
plt.plot(line, regline.predict(line), label="Linear Regression")
plt.legend()
plt.show()
| doc/Programs/JupyterFiles/Examples/Lecture Examples/Morten Lecture Data Examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Seed List Cleanup
#
# Prepare a clean list of seeds (candidates for pseudo-crawls)
# - add columns required to get page locations and metrics from Common Crawl
# - remove duplicated seeds
# - normalize URLs
# +
import pandas as pd
df = pd.read_csv('candidate_websites_for_crawling.csv')
df.head()
# -
# select mandatory columns and assign simple and SQL-compatible column names
# NOTE(review): positions [0,1,2,6] assume the sheet layout
# (id, title, link, ..., language) -- confirm against the CSV header.
df = df.iloc[:, [0,1,2,6]]
df.columns = ['id', 'title', 'link', 'language']
df.head()
df.shape
# +
# normalize URLs and look for obsolete path prefixes
from urllib.parse import urlparse
def normalize_url(url):
    """Normalize a seed URL for crawling.

    Fixes one known-bad entry, collapses duplicate slashes, turns an empty
    path into '/', and strips a trailing file name so the URL ends at its
    parent directory. Returns '<scheme>://<netloc><path>'.
    """
    # Hand-patch the one seed recorded without scheme/host.
    if url == 'reddit.com/r/singapore':
        url = 'https://www.reddit.com/r/singapore/'
    u = urlparse(url)
    path = u.path
    path = path.replace('//', '/')
    # normalize empty path (root path)
    if path == '':
        path = '/'
    # remove trailing file name, keeping the parent directory:
    # '/a/b/page.html' -> '/a/b/'. (The previous `split('/')[:-2]` slice also
    # dropped the last directory level and the trailing slash.)
    if path[-1] != '/' and '.' in path.split('/')[-1]:
        path = path.rsplit('/', 1)[0] + '/'
    return '%s://%s%s' % (u.scheme, u.netloc, path)
def get_path_prefix(url):
    """Return just the path component of *url* (e.g. '/es/' for a language-prefixed site)."""
    return urlparse(url).path
# Normalize every seed URL and tabulate the surviving path prefixes.
df['link'] = df['link'].apply(normalize_url)
df['url_path_prefix'] = df['link'].apply(get_path_prefix)
df['url_path_prefix'].value_counts().to_frame()
# -
# Some path prefixes seem to be mandatory
# - language selectors: `/es/`, `/spanish/`
# - location selectors: `/r/singapore/` (reddit.com)
#
# Others only point to the homepage and would limit the recall to just this page:
# - `/search/label/inicio`, `/pagina/bienvenidos-al-comite-de-sanidad-vegetal-cosave`
#
# For now: we keep only prefixes up to 16 characters. However, clean curated URL prefixes might improve the data set in future runs.
# +
def normalize_path_prefix(url):
    """Replace URL paths longer than 16 characters with the root path '/'.

    Short prefixes (language/location selectors such as '/es/') are kept;
    longer ones usually point at a single page and would hurt crawl recall.
    """
    parts = urlparse(url)
    path = parts.path if len(parts.path) <= 16 else '/'
    return '%s://%s%s' % (parts.scheme, parts.netloc, path)
# Truncate long path prefixes, then re-tabulate what remains.
df['link'] = df['link'].apply(normalize_path_prefix)
df['url_path_prefix'] = df['link'].apply(get_path_prefix)
df['url_path_prefix'].value_counts().to_frame()
# +
# add columns required to get the counts from Common Crawl
import surt
import tldextract
def get_host(url):
    """Return the lower-cased host name of *url*, without any leading dots."""
    netloc = urlparse(url).netloc
    return netloc.lower().lstrip('.')
def get_surtkey(url):
    """Return the SURT (Sort-friendly URI Reordering Transform) key for *url*."""
    return surt.surt(url)
def get_registered_domain(host):
    """Return the registered (pay-level) domain of *host*, e.g. 'example.co.uk'."""
    return tldextract.extract(host).registered_domain
# Derived columns used to join against Common Crawl's index tables.
df['url_host_name'] = df['link'].apply(get_host)
df['url_host_registered_domain'] = df['url_host_name'].apply(get_registered_domain)
df['url_surtkey'] = df['link'].apply(get_surtkey)
df.head()
# -
# look for duplicates
df[df.duplicated(subset=['url_surtkey'], keep=False)]
# deduplicate
df.drop_duplicates(subset=['url_surtkey'], inplace=True)
df.shape
# export the clean seed list
df.to_csv('seeds.csv', index=False)
df.to_parquet('seeds.gz.parquet', compression='gzip', index=False)
| cc_pseudo_crawl/sourcing_sheet_seeds/cleanup-seeds.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Solution Notebook
# ## Problem: Implement Count sort.
#
# * [Constraints](#Constraints)
# * [Test Cases](#Test-Cases)
# * [Algorithm](#Algorithm)
# * [Code](#Code)
# * [Pythonic-Code](#Pythonic-Code)
# * [Unit Test](#Unit-Test)
# ## Constraints
#
# * Is a naive solution sufficient (ie not in-place)?
# * Yes
# * Are duplicates allowed?
# * Yes
# * Can we assume the input is valid?
# * No
# * Can we assume this fits memory?
# * Yes
# ## Test Cases
#
# * None -> Exception
# * Empty input -> []
# * One element -> [element]
# * Two or more elements
# ## Algorithm
#
# Wechat's animation:
# 
#
#
# Complexity:
# * Time: O(n + k) average, best, worst
# * Space: O(n + k)
#
#
# See [Countsort on wikipedia](https://zh.wikipedia.org/wiki/%E8%AE%A1%E6%95%B0%E6%8E%92%E5%BA%8F):
#
#
# See: [deep understand Countsort](https://mp.weixin.qq.com/s?__biz=MzUyNjQxNjYyMg==&mid=2247484043&idx=1&sn=3743a5e3f79fba00ca794704e0c5a1ba&chksm=fa0e6d0acd79e41c1fbbbc976b1ec390cc9345252984914abd662454675aa7dcaf49c3e125e1&scene=21#wechat_redirect)
# ## Code
# +
from __future__ import division
class CountSort(object):

    def sort(self, data):
        """Sort *data* with counting sort.

        O(n + k) time and space, where k is the value range. Raises
        TypeError for None; inputs of length <= 1 are returned as-is.
        """
        if data is None:
            raise TypeError('data cannot be None')
        if len(data) <= 1:
            return data
        lo, hi = min(data), max(data)
        # One counter slot per possible value in [lo, hi].
        counts = [0] * (hi - lo + 1)
        for value in data:
            counts[value - lo] += 1
        # Emit each value as many times as it was counted, in order.
        result = []
        for offset, count in enumerate(counts):
            result.extend([lo + offset] * count)
        return result
# -
# ## Unit Test
#
#
# +
# %%writefile test_count_sort.py
from nose.tools import assert_equal, assert_raises
class TestCountSort(object):

    def test_count_sort(self):
        """Exercise CountSort on None, empty, singleton, and mixed inputs."""
        sorter = CountSort()
        print('None input')
        assert_raises(TypeError, sorter.sort, None)
        print('Empty input')
        assert_equal(sorter.sort([]), [])
        print('One element')
        assert_equal(sorter.sort([5]), [5])
        print('Two or more elements, have negative')
        negatives = [5, 1, 7, 2, 6, -3, 5, 7, -1]
        assert_equal(sorter.sort(negatives), sorted(negatives))
        print('Success negative data: test_count_sort\n')
        print('Two or more elements')
        positives = [5, 1, 7, 2, 6, 3, 5, 7, 10]
        assert_equal(sorter.sort(positives), sorted(positives))
        print('Success: test_count_sort\n')
def main():
    # Instantiate the test case and run it directly (no test runner needed).
    test = TestCountSort()
    test.test_count_sort()
if __name__ == '__main__':
    main()
# -
# %run -i test_count_sort.py
| sorting_searching/counting_sort/counting_sort_solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Reading data
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Recorded sonar ranges; the "standing" capture can be swapped in below.
csv_file='data/sonar_data.csv'
# csv_file='data/sonar_data_standing.csv'
# Column layout of the dump (seq/stamp/frame_id suggest a ROS message export
# — TODO confirm); only the 10 sonar channels s0..s9 are loaded.
col_names = ["time", "seq", "stamp", "frame_id", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9"]
df = pd.read_csv(csv_file, names=col_names, header=0, usecols=range(4, 14))
df = df.applymap(lambda x: np.nan if x == -1 else x) # Set -1 to NaN
df
# -
df.describe()
df["s0"]
# ### Some other calculation not necessary
# +
# dfm = pd.DataFrame() # Max delta values
# SONAR_COUNT = 10
# for i in range(SONAR_COUNT):
# dfm.loc[0, f"maxdy{i}"] = max(dy)
# dfm
# -
# ### Filtering data
# +
# # %matplotlib notebook
# %matplotlib inline
dfd = pd.DataFrame()   # per-sample deltas
dff = pd.DataFrame()   # outlier-filtered data
dfff = pd.DataFrame()  # additionally smoothed data
RANGE_THRESH = 10      # max plausible jump between consecutive samples
SONAR_COUNT = 10
for i in range(SONAR_COUNT):
    sname = f"s{i}"
    y = list(df[sname])
    # Consecutive differences; 0 for the first sample. (Bug fix: the
    # original `y[n] - y[n-1]` at n == 0 wrapped around to y[-1], the
    # last element, producing a spurious first delta.)
    dy = [0] + [y[n] - y[n-1] for n in range(1, len(y))]
    # Drop samples whose jump from the previous sample exceeds the threshold.
    yf = [j if dj < RANGE_THRESH else float('nan') for j, dj in zip(y, dy)]
    # Two-tap moving average; the first sample is kept as-is (same
    # wrap-around fix as above).
    yff = [yf[0]] + [0.5 * yf[n] + 0.5 * yf[n-1] for n in range(1, len(yf))]
    dfd.loc[:, sname] = dy
    dff.loc[:, sname] = yf
    dfff.loc[:, sname] = yff
print(dff)
print(dfff)
# sy = list(df[sname].rolling(window=2).mean())
# dsy = [sy[n]-sy[n-1] for n in range(len(sy))]
# if True:
# a = 0
# b = min(a + 200, df.s0.count())
# x = x[a:b]
# y = y[a:b]
# dy = dy[a:b]
# sy = sy[a:b]
# dsy = dsy[a:b]
# y = [j if dj < 3 else float('nan') for j, dj in zip(y, dy)]
# -
# ### Plotting data
# +
# # %matplotlib notebook
# %matplotlib inline
dfm = pd.DataFrame()  # placeholder for per-sensor summary values (currently unused)
SONAR_COUNT = 10
# One pass per sensor: plot raw vs. filtered data, then the filtered trace again.
for i in range(SONAR_COUNT):
    sname = f"s{i}"
    sonar = df[sname]
#     x=range(len(sonar))
#     y=list(sonar)
#     dy = [y[n]-y[n-1] for n in range(len(y))]
#     sy = list(df[sname].rolling(window=2).mean())
#     sy = y
#     dsy = [sy[n]-sy[n-1] for n in range(len(sy))]
    if True:
        # Window of 500 samples starting at index 1750 (clamped to data length).
        a = 1750
        b = min(a + 500, df.s0.count())
#     y[50:100] = [np.nan, ] * 50
#     y = np.array(y)
#     dy = np.array(dy)
#     y[dy > 7] = np.nan
#     y = [j if dj < 3 else float('nan') for j, dj in zip(y, dy)]
    plt.figure(figsize=(15, 40))
    plt.subplot(SONAR_COUNT * 2, 1, 2 * i + 1)
    plt.title(f"Data {sname}")
    plt.xlim(a, b)
#     plt.plot(dfff.index[a:b], dfff[sname][a:b], "b")
    plt.plot(df.index[a:b], df[sname][a:b], "k--")
    plt.plot(dff.index[a:b], dff[sname][a:b], "g")
#     plt.scatter(df.index[a:b], df[sname][a:b])
#     plt.scatter(dff.index[a:b], dff[sname][a:b], color="g")
    plt.subplot(SONAR_COUNT * 2, 1, 2 * i + 2)
    plt.title(f"Delta {sname}")
    plt.xlim(a, b)
#     plt.plot(dfd.index[a:b], dfd[sname][a:b])
    plt.plot(dff.index[a:b], dff[sname][a:b], color="b")
    plt.tight_layout(pad=0.8)
    plt.show()
#     a = input()
dfm
| Python/jupyter/sonar_outliers_reaserch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# fix python path if working locally (no-op when running in the hosted env)
from utils import fix_pythonpath_if_working_locally
fix_pythonpath_if_working_locally()
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from darts import TimeSeries
from darts.models import NBEATSModel
from darts.dataprocessing.transformers import Scaler, MissingValuesFiller
from darts.metrics import mape, r2_score
# -
def display_forecast(pred_series, ts_transformed, forecast_type, start_date=None):
    """Plot historical forecasts against the actual (transformed) series.

    Args:
        pred_series: TimeSeries of historical forecasts.
        ts_transformed: the actual (scaled) TimeSeries to compare against.
        forecast_type: label fragment describing the horizon, e.g. '7 day'.
        start_date: if given, drop everything before this timestamp first.
    """
    plt.figure(figsize=(8, 5))
    # Explicit None check (the original relied on truthiness of a Timestamp).
    if start_date is not None:
        ts_transformed = ts_transformed.drop_before(start_date)
    ts_transformed.univariate_component(0).plot(label='actual')
    pred_series.plot(label=('historic ' + forecast_type + ' forecasts'))
    plt.title('R2: {}'.format(r2_score(ts_transformed.univariate_component(0), pred_series)))
    plt.legend();
# Load the hourly energy dataset and strip timezone info from timestamps.
df = pd.read_csv('energy_dataset.csv', delimiter=",")
df['time'] = pd.to_datetime(df['time'], utc=True)
df['time']= df.time.dt.tz_localize(None)
# Aggregate to daily means by grouping on the date part of the timestamp string.
df_day_avg = df.groupby(df['time'].astype(str).str.split(" ").str[0]).mean().reset_index()
# Fill missing values, then scale the target series to [0, 1].
filler = MissingValuesFiller()
scaler = Scaler()
series = scaler.fit_transform(filler.transform(
    TimeSeries.from_dataframe(df_day_avg, 'time', ['generation hydro run-of-river and poundage'])
))
series.plot()
# Train on data up to 2017-09-01; validate on everything after.
train, val = series.split_after(pd.Timestamp('20170901'))
# ### General model
# Generic N-BEATS architecture: 10 stacks of 1 generic block each.
model_nbeats = NBEATSModel(
    input_chunk_length=30,
    output_chunk_length=7,
    generic_architecture=True,
    num_stacks=10,
    num_blocks=1,
    num_layers=4,
    layer_widths=512,
    n_epochs=100,
    nr_epochs_val_period=1,
    batch_size=800,
    model_name='nbeats_run'
)
model_nbeats.fit(train, val_series=val, verbose=True)
# Backtest: 7-day-ahead forecasts every 5 days, without retraining.
pred_series = model_nbeats.historical_forecasts(
    series,
    start=pd.Timestamp('20170901'),
    forecast_horizon=7,
    stride=5,
    retrain=False,
    verbose=True
)
display_forecast(pred_series, series['0'], '7 day', start_date=pd.Timestamp('20170901'))
# ### Interpretable model
# Interpretable N-BEATS: fixed trend + seasonality stacks instead of generic ones.
model_nbeats = NBEATSModel(
    input_chunk_length=30,
    output_chunk_length=7,
    generic_architecture=False,
    num_blocks=3,
    num_layers=4,
    layer_widths=512,
    n_epochs=100,
    nr_epochs_val_period=1,
    batch_size=800,
    model_name='nbeats_interpretable_run'
)
model_nbeats.fit(series=train, val_series=val, verbose=True)
pred_series = model_nbeats.historical_forecasts(
    series,
    start=pd.Timestamp('20170901'),
    forecast_horizon=7,
    stride=5,
    retrain=False,
    verbose=True
)
display_forecast(pred_series, series['0'], '7 day', start_date=pd.Timestamp('20170901'))
| examples/08-NBEATS-examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# lime_explain.py
# sudo -E /opt/tljh/user/bin/pip3 install lime
# conda activate jupyterlab-debugger38
# -
from lime.lime_image import *
# import lime
import pandas as pd
import yaml #pyyaml
import os
import datetime
import dill
import cv2 #opencv-python
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import sys
#sys.path.append('/home/ubuntu/covid-cxr/src/')
sys.path.insert(0, os.path.abspath('../'))
from visualization.visualize import visualize_explanation
from predict import predict_instance, predict_and_explain
from data.preprocess import remove_text
def setup_lime():
    '''
    Load relevant information and create a LIME Explainer
    :return: dict containing important information and objects for explanation experiments
    '''
    # Load relevant constants from project config file.
    # NOTE(review): config path is hard-coded to the original training host.
    # Fix: the original passed an anonymous open(...) whose handle was never
    # closed; use a context manager instead.
    with open("/home/ubuntu/covid-cxr/config.yml", 'r') as cfg_file:
        cfg = yaml.full_load(cfg_file)
    lime_dict = {}
    lime_dict['NUM_SAMPLES'] = cfg['LIME']['NUM_SAMPLES']
    lime_dict['NUM_FEATURES'] = cfg['LIME']['NUM_FEATURES']
    lime_dict['IMG_PATH'] = cfg['PATHS']['IMAGES']
    lime_dict['RAW_DATA_PATH'] = cfg['PATHS']['RAW_DATA']
    lime_dict['IMG_DIM'] = cfg['DATA']['IMG_DIM']
    lime_dict['PRED_THRESHOLD'] = cfg['PREDICTION']['THRESHOLD']
    lime_dict['CLASSES'] = cfg['DATA']['CLASSES']
    lime_dict['CLASS_MODE'] = cfg['TRAIN']['CLASS_MODE']
    lime_dict['COVID_ONLY'] = cfg['LIME']['COVID_ONLY']
    KERNEL_WIDTH = cfg['LIME']['KERNEL_WIDTH']
    FEATURE_SELECTION = cfg['LIME']['FEATURE_SELECTION']
    # Load train and test sets
    lime_dict['TRAIN_SET'] = pd.read_csv(cfg['PATHS']['TRAIN_SET'])
    lime_dict['TEST_SET'] = pd.read_csv(cfg['PATHS']['TEST_SET'])
    # Create ImageDataGenerator for test set (batch_size=1, fixed order so
    # indices line up with TEST_SET rows)
    test_img_gen = ImageDataGenerator(preprocessing_function=remove_text,
                                      samplewise_std_normalization=True, samplewise_center=True)
    test_generator = test_img_gen.flow_from_dataframe(
        dataframe=lime_dict['TEST_SET'], directory=cfg['PATHS']['RAW_DATA'],
        x_col="filename", y_col='label_str', target_size=tuple(cfg['DATA']['IMG_DIM']), batch_size=1,
        class_mode='categorical', validate_filenames=False, shuffle=False)
    lime_dict['TEST_GENERATOR'] = test_generator
    # Define the LIME explainer
    lime_dict['EXPLAINER'] = LimeImageExplainer(kernel_width=KERNEL_WIDTH, feature_selection=FEATURE_SELECTION,
                                                verbose=True)
    # Serialize the explainer (fix: close the file handle deterministically)
    with open(cfg['PATHS']['LIME_EXPLAINER'], 'wb') as explainer_file:
        dill.dump(lime_dict['EXPLAINER'], explainer_file)
    # Load trained model's weights
    lime_dict['MODEL'] = load_model(cfg['PATHS']['MODEL_TO_LOAD'], compile=False)
    return lime_dict
# +
def explain_xray(lime_dict, idx, save_exp=True):
    '''
    Make a prediction and provide a LIME explanation
    :param lime_dict: dict containing important information and objects for explanation experiments
    :param idx: index of image in test set to explain
    :param save_exp: Boolean indicating whether to save the explanation visualization
    '''
    # Get idx'th preprocessed image by stepping the (shuffle=False) generator
    lime_dict['TEST_GENERATOR'].reset()
    for i in range(idx + 1):
        x, y = lime_dict['TEST_GENERATOR'].next()
    x = np.squeeze(x, axis=0)  # drop the batch dimension (batch_size == 1)
    # (Removed a stray no-op expression statement `x` that the original had here.)
    # Get the corresponding original image (no preprocessing)
    orig_img = cv2.imread(lime_dict['RAW_DATA_PATH'] + lime_dict['TEST_SET']['filename'][idx])
    new_dim = tuple(lime_dict['IMG_DIM'])
    orig_img = cv2.resize(orig_img, new_dim, interpolation=cv2.INTER_NEAREST)  # Resize image
    # Make a prediction for this image and retrieve a LIME explanation for the prediction
    start_time = datetime.datetime.now()
    explanation, probs = predict_and_explain(x.astype('double'), lime_dict['MODEL'], lime_dict['EXPLAINER'],
                                             lime_dict['NUM_FEATURES'], lime_dict['NUM_SAMPLES'])
    print("Explanation time = " + str((datetime.datetime.now() - start_time).total_seconds()) + " seconds")
    # Get image filename and label
    img_filename = lime_dict['TEST_SET']['filename'][idx]
    label = lime_dict['TEST_SET']['label'][idx]
    # Rearrange prediction probability vector to reflect original ordering of classes in project config
    probs = [probs[0][lime_dict['CLASSES'].index(c)] for c in lime_dict['TEST_GENERATOR'].class_indices]
    # Visualize the LIME explanation and optionally save it to disk
    file_path = lime_dict['IMG_PATH'] if save_exp else None
    # Restrict the explanation to the COVID-19 class if configured, else show the top class
    if lime_dict['COVID_ONLY']:
        label_to_see = lime_dict['TEST_GENERATOR'].class_indices['COVID-19']
    else:
        label_to_see = 'top'
    _ = visualize_explanation(orig_img, explanation, img_filename, label, probs, lime_dict['CLASSES'],
                              label_to_see=label_to_see, dir_path=file_path)
    return
# +
if __name__ == '__main__':
    # One-time setup: config, data generators, explainer, trained model.
    lime_dict = setup_lime()
    i = 3  # Select i'th image in test set
    explain_xray(lime_dict, i, save_exp=True)  # Generate explanation for image
# -
| src/interpretability/lime_explain.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Laden von benötigten Softwarebibliotheken und Festlegung von Grundeinstellungen
# +
# Import the required libraries
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# %matplotlib inline
import numpy as np
import pandas as pd
import pandas_profiling
from matplotlib import pyplot as plt
import matplotlib as mpl
import seaborn as sns
import time
# Corporate colors used throughout the plots
fh_teal = '#179c7d'
fh_orange = '#f29400'
fh_blue = '#1f82c0'
fh_red = '#e2001a'
fh_lightgreen = '#b1c800'
fh_beige = '#feefd6'
fh_grey = '#e1e3e3'
# Global settings for font sizes, colors etc. in plots
fh_palette = [fh_teal, fh_orange, fh_blue, fh_red, fh_lightgreen, fh_beige, fh_grey]
sns.set_style("darkgrid", {"axes.facecolor": ".9"})
sns.set_palette(fh_palette)
params = {'legend.fontsize': 22,
          'figure.figsize': (20, 14),
          'axes.labelsize': 22,
          'axes.titlesize': 26,
          'xtick.labelsize': 20,
          'ytick.labelsize': 20}
mpl.rcParams.update(params)
# + [markdown] heading_collapsed=true
# # Initiale Exploration der Daten
# + hidden=true
# Define the names of the individual features (columns)
index = ['engine#','tCycles']
settings = ['setting1','setting2','setting3']
sensors = ['sensor' + str(i) for i in range(1,24)]
col_names = index + settings + sensors
# + hidden=true
# Load the training data
df_train = pd.read_csv('./CMAPSSData/train_FD001.txt',sep=' ', names=col_names)
# -
# ---
# ---
# ---
# # Aufgabe 1
# Nicht alle Sensoren liefern sinnvolle Informationen. Welche Sensoren sind das und sollten von der weiteren Analyse ausgeschlossen werden?
# Hier können die Ergebnisse von Aufgabe 1.1 genommen werden. Die Daten und Sensoren können in den nächsten beiden Zeellen noch einmal betrachtet werden.
# Ein Blick auf die Trainingsdaten. Jede Zeile ist eine Beobachtung, jede Spalte ein Merkmal.
# Der Wert NaN (Abkürzung für: not a number) bedeutet, dass an dieser Stelle der Wert fehlt.
df_train
# Funktion, mit der in einem Schaubild der Sensorverlauf von mehreren Engines
# dargestellt werden kann
def plot_sensor(sensor_name):
    """Overlay the given sensor's trace for every 20th engine in df_train."""
    plt.figure(figsize=(13, 5))
    for engine_id in df_train['engine#'].unique():
        if engine_id % 20 != 0:
            continue
        engine_rows = df_train[df_train['engine#'] == engine_id]
        plt.plot(np.arange(0, engine_rows.shape[0]), sensor_name, data=engine_rows)
    # Reversed x-axis: cycles count down towards end of life.
    plt.xlim(250, 0)
    plt.xticks(np.arange(0, 275, 25))
    plt.ylabel(sensor_name)
    plt.xlabel('Remaining Useful Life')
    plt.show()
# In der nächsten Zelle kann noch einmal der Verlauf von Sensoren dargestellt werden. Hierfür für <font color='darkred'>engine#</font> durch den Namen eines anderen Merkmals ersetzen (Spalten in der Tabelle oberhalb). Die <font color='darkred'>' '</font> müssen stehen bleiben.
#
# Create a plot (replace 'engine#' with any other feature name to inspect it)
plot_sensor('engine#')
# # Ende Aufgabe 1
# ---
# ---
# ---
# In der nächsten Zelle werden alle Merkmale, die keine Informationen oder Mehrwert liefern gelöscht. Dabei werden alle Merkmale (Spalten) gelöscht, die in der zweiten und dritten Zeile in der Zelle unterhalb stehen.
# Drop features (columns) that carry no information
drop_cols = ['setting3', 'sensor1', 'sensor5', 'sensor6', 'sensor10',
             'sensor16', 'sensor18', 'sensor19', 'sensor22', 'sensor23']
df_train.drop(drop_cols,axis=1, inplace=True)
# # Feature engineering / Merkmale konstruieren
# ### Erstellung der Restlaufdauer (Remaining Useful Lifetime, RUL)
# +
# Definition der remaining useful lifetime (RUL)
def add_remaining_useful_life(df):
    """Append a 'RUL' column: cycles remaining until each engine's last cycle.

    Bug fix: the original body operated on the global ``df_train`` instead of
    the ``df`` argument; it now uses the passed-in frame throughout.

    :param df: frame with 'engine#' and 'tCycles' columns
    :return: copy of *df* with an added integer 'RUL' column
    """
    # Maximum observed cycle per engine
    grouped_by_unit = df.groupby(by="engine#")
    max_cycle = grouped_by_unit["tCycles"].max()
    # Attach each engine's max cycle to every one of its rows
    result_frame = df.merge(max_cycle.to_frame(name='max_cycle'), left_on='engine#', right_index=True)
    # RUL = cycles remaining before the engine's final observed cycle
    remaining_useful_life = result_frame["max_cycle"] - result_frame["tCycles"]
    result_frame["RUL"] = remaining_useful_life
    # Drop the helper column, no longer needed
    result_frame = result_frame.drop("max_cycle", axis=1)
    return result_frame
df_train = add_remaining_useful_life(df_train)
# -
# Show the training data with the new "RUL" feature
# (the original comment said "Testdatenmatrix", but this displays df_train)
df_train
# + hidden=true
# Load the test data and drop the same uninformative columns
df_test = pd.read_csv("./CMAPSSData/test_FD001.txt",sep=" ", names=col_names)
df_test.drop(drop_cols,axis=1, inplace=True)
df_test
# + hidden=true
def evaluate(y_true, y_hat, label='test'):
    """Print RMSE and R^2 of predictions *y_hat* against *y_true*.

    Relies on mean_squared_error / r2_score imported at notebook level.
    """
    mse = mean_squared_error(y_true, y_hat)
    rmse = np.sqrt(mse)
    variance = r2_score(y_true, y_hat)
    # f-string instead of str.format; output is byte-identical
    print(f'{label} set RMSE:{rmse}, R2:{variance}')
# -
# ### Erstelln von Trainings- und Testdatensatz
# Die Testdaten und die Trainingsdaten werden jeweils in zwei Teile Aufgeteilt. Dabei ist X_train bzw. X_test immer der Datensatz, den der Algorithmus als Input bekommt und y_train bzw. y_test ist das Label, welches vom Algorithmus vorausgesagt werden soll.
# + hidden=true
# Create training and test datasets: X_* are model inputs, y_* the RUL labels
X_train = df_train.copy()
y_train = X_train.pop('RUL')
# NOTE(review): this first assignment is immediately overwritten below
X_test = df_test.copy()
# For the test set, only each engine's last observed cycle is evaluated
X_test = df_test.groupby('engine#').last().reset_index()
y_test = pd.read_csv(('./CMAPSSData/RUL_FD001.txt'), sep='\s+', header=None, names=['RUL'])
# + [markdown] hidden=true
# # Modellierung
# + [markdown] hidden=true
# #### Importiere benötigte Bibliotheken und Funktionen für die Modellierung
# + hidden=true
import tensorflow.keras as tfk
from sklearn import linear_model
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.preprocessing import MinMaxScaler
# -
# #### Normalisieren der Daten
# Die Normalisierung der Daten wird für einige Algorithmen vorausgesetzt.
# Min-max scale all features; fit on the training set only to avoid leakage
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# + [markdown] heading_collapsed=true hidden=true
# #### Definition der Loss Function
# Mit Hilfe dieser Funktion werden im Folgenden die Algorithmen optimiert und können anhand der Größe des Fehlers verglichen werden
# + hidden=true
def root_mean_squared_error(y_true, y_pred):
    """RMSE loss built from Keras backend ops."""
    squared_error = tfk.backend.square(y_pred - y_true)
    return tfk.backend.sqrt(tfk.backend.mean(squared_error))
# -
# ## Zum Start ein einfaches Modell / ein einfacher Algorithmus: Lineare Regression
# Define the model / algorithm
model_linear = LinearRegression()
# Train the model / algorithm
model_linear.fit(X_train_scaled, y_train)
# +
# Model Evaluation: compare train vs. test RMSE/R2
y_hat_train = model_linear.predict(X_train_scaled)
evaluate(y_train, y_hat_train, 'train')
y_hat_test = model_linear.predict(X_test_scaled)
evaluate(y_test, y_hat_test)
# -
# ---
# ---
# ---
# # Aufgabe 2
# Der Fehler für das Trainingsset ist "train set RMSE", der Fehler des Testsets ist "test set RMSE". Es fällt auf, dass der Testfehler kleiner ist als der Trainingsfehler. Im Allgemeinen sollte dies genau umgekehrt sein.
# <br>
# <br>
# Versuchen Sie folgende Fragen zu beantworten:
# <br>
# Warum ist der Trainingsfehler normalerweise kleiner als der Testfehler?
# # Ende Aufgabe 2
# ---
# ---
# ---
# # Genaue Betrachtung des RUL
# ---
# ---
# ---
# # Aufgabe 3
# Genaue Betrachtung der Restnutzungsdauer (RUL), die wie vorhin selbst erstellt haben.
# <br>
# Was fällt bei dieser auf?
# <br>
# Worin könnten Probleme liegen (Trainingsdaten vs. Testdaten)?
# +
# Two histograms: distribution of the maximum RUL in training vs. test data
df_max_rul = df_train[['engine#', 'RUL']].groupby('engine#').max().reset_index()
fig = plt.figure(tight_layout=True)
ax = fig.add_subplot(211)
df_max_rul['RUL'].hist(ax=ax, bins=range(25, 350, 20))
plt.ylabel('Häufigkeit')
plt.title('Trainingsdaten')
ax2 = fig.add_subplot(212)
y_test['RUL'].hist(ax=ax2, bins=range(25, 350, 20))
plt.xlabel('RUL')
plt.ylabel('Häufigkeit')
plt.title('Testdaten')
plt.show()
# +
# Plot the RUL and one exemplary sensor for a single engine (engine 20)
fig, ax1 = plt.subplots(1,1, figsize=(13,5))
unit20_RUL = np.array(df_train.loc[df_train['engine#']==20, 'RUL'])
unit20_sensor12 = np.array(df_train.loc[df_train['engine#']==20, 'sensor12'])
signal = ax1.plot(unit20_RUL, unit20_sensor12, color=fh_blue)
# Reversed x-axis: RUL counts down towards failure
plt.xlim(250, 0)
plt.xticks(np.arange(0, 275, 25))
ax1.set_ylabel('Sensor 12', labelpad=20)
ax1.set_xlabel('RUL', labelpad=20)
# Second y-axis for the RUL line itself
ax2 = ax1.twinx()
rul_line = ax2.plot(unit20_RUL, unit20_RUL, 'k', linewidth=4)
ax2.set_ylabel('RUL', labelpad=20)
ax2.set_ylim(0, 250)
ax2.set_yticks(
    np.linspace(ax2.get_ybound()[0], ax2.get_ybound()[1], 6))
ax1.set_yticks(
    np.linspace(ax1.get_ybound()[0], ax1.get_ybound()[1], 6))
lines = signal+rul_line
labels = ['sensor12', 'RUL']
ax1.legend(lines, labels, loc=0)
plt.show()
# -
# # Ende Aufgabe 3
# ---
# ---
# ---
# # Erstellung einer besseren / optimierten Restlaufdauer
# Compute the optimized RUL: clip at 125 cycles, since sensors show no
# degradation signal earlier than that (see plot above)
y_train_optimized = y_train.clip(upper=125)
# +
# Plot the RUL, the optimized (clipped) RUL, and one exemplary sensor for one engine
fig, ax1 = plt.subplots(1,1, figsize=(13,5))
signal = ax1.plot(unit20_RUL, unit20_sensor12, color=fh_blue)
rul = df_train.loc[df_train['engine#']==20, 'RUL']  # NOTE(review): unused below
# Clipped copy of the RUL for the dashed comparison line
unit20_RUL_optimized = unit20_RUL.copy()
unit20_RUL_optimized[unit20_RUL_optimized >= 125] = 125
plt.xlim(250, 0)
plt.xticks(np.arange(0, 275, 25))
ax1.set_ylabel('Sensor 12', labelpad=20)
ax1.set_xlabel('engine', labelpad=20)
ax2 = ax1.twinx()
rul_line = ax2.plot(unit20_RUL, unit20_RUL, 'k', linewidth=4)
rul_line2 = ax2.plot(unit20_RUL, unit20_RUL_optimized, '--', linewidth=4, color=fh_teal)
ax2.set_ylim(0, 250)
ax2.set_yticks(
    np.linspace(ax2.get_ybound()[0], ax2.get_ybound()[1], 6))
ax1.set_yticks(
    np.linspace(ax1.get_ybound()[0], ax1.get_ybound()[1], 6))
lines = signal+rul_line+rul_line2
labels = ['Sensor 12', 'RUL', 'Optimierte RUL']
ax1.legend(lines, labels, loc=0)
# -
# #### Lineare Regression mit optimiertem RUL
# +
# Model Definition
lm = LinearRegression()
# Model Training — on the clipped RUL target
lm.fit(X_train_scaled, y_train_optimized)
# Model Evaluation
y_hat_train = lm.predict(X_train_scaled)
evaluate(y_train_optimized, y_hat_train, 'train')
y_hat_test = lm.predict(X_test_scaled)
evaluate(y_test, y_hat_test)
# -
# # Weitere Modelle
# + [markdown] heading_collapsed=true
# ## Model Definition
# + [markdown] hidden=true
# ### Definition eines Random Forest
# + hidden=true
# NOTE(review): the target (clipped RUL) is numeric; RandomForestRegressor
# would be the conventional choice. The classifier only works here because
# the clipped RUL values are integers — confirm this is intentional.
model_forest = RandomForestClassifier(n_estimators=10)
# + [markdown] heading_collapsed=true hidden=true
# ### Definition eines Neural Network (Input Layer und Output Layer)
# + hidden=true
# Minimal neural network: a single dense output neuron on 18 input features
inputs = tfk.Input(shape=(18,))
outputs = tfk.layers.Dense(1, activation=tfk.activations.relu)(inputs)
model_nn = tfk.Model(inputs=inputs, outputs=outputs)
model_nn.compile(optimizer='rmsprop',loss = root_mean_squared_error)
# + [markdown] hidden=true
# ### Definition eines Tiefen Neuronalen Netzes (Deep Neural Network; Input Layer, 3 Hidden Layer, Output Layer)
# + hidden=true
# Deep network: stacked dense layers with ReLU activations and a small L1
# activity regularization on every layer
inputs = tfk.Input(shape=(18,))
x = tfk.layers.Dense(18,
                     activation=tfk.activations.relu,
                     kernel_initializer='glorot_normal',
                     activity_regularizer=tfk.regularizers.l1(0.00002))(inputs)
x = tfk.layers.Dense(70,
                     activation=tfk.activations.relu,
                     kernel_initializer='glorot_normal',
                     activity_regularizer=tfk.regularizers.l1(0.00002))(x)
x = tfk.layers.Dense(60,
                     activation=tfk.activations.relu,
                     kernel_initializer='glorot_normal',
                     activity_regularizer=tfk.regularizers.l1(0.00002))(x)
x = tfk.layers.Dense(50,
                     activation=tfk.activations.relu,
                     kernel_initializer='glorot_normal',
                     activity_regularizer=tfk.regularizers.l1(0.00002))(x)
x = tfk.layers.Dense(40,
                     activation=tfk.activations.relu,
                     kernel_initializer='glorot_normal',
                     activity_regularizer=tfk.regularizers.l1(0.00002))(x)
x = tfk.layers.Dense(10,
                     activation=tfk.activations.relu,
                     kernel_initializer='glorot_normal',
                     activity_regularizer=tfk.regularizers.l1(0.00002))(x)
outputs = tfk.layers.Dense(1,
                           activation=tfk.activations.relu,
                           activity_regularizer=tfk.regularizers.l1(0.00002))(x)
model_deep_nn = tfk.Model(inputs=inputs, outputs=outputs)
model_deep_nn.compile(optimizer=tfk.optimizers.RMSprop(learning_rate=0.01),
                      loss = root_mean_squared_error,
                      metrics=[root_mean_squared_error])
# + [markdown] hidden=true
# #### Definition einer Support Vector Maschine (SVM)
# + hidden=true
# Support vector regression with a linear kernel
supVecMac = svm.SVR(kernel='linear')
# + [markdown] heading_collapsed=true
# # Model Training
# + [markdown] hidden=true
# ### Training des Random Forest
# + hidden=true
# Train the random forest and report its training RMSE
model_forest.fit(X_train_scaled, y_train_optimized)
y_pred = model_forest.predict(X_train_scaled)
print('Trainingsfehler (RMSE):', np.sqrt(metrics.mean_squared_error(y_train_optimized, y_pred)))
# + [markdown] heading_collapsed=true hidden=true
# ### Training des Neural Network
# + hidden=true
model_nn.fit(X_train_scaled, y_train_optimized, batch_size=10, epochs=10)
# + [markdown] hidden=true
# ### Training des Deep Neural Networks
# + hidden=true
model_deep_nn.fit(X_train_scaled, y_train_optimized, batch_size=10, epochs=10)
# + [markdown] heading_collapsed=true hidden=true
# ### Training der Support Vector Machine
# + hidden=true
supVecMac.fit(X_train_scaled, y_train_optimized)
y_pred = supVecMac.predict(X_train_scaled)
print('Trainingsfehler (RMSE):', np.sqrt(metrics.mean_squared_error(y_train_optimized, y_pred)))
# + [markdown] heading_collapsed=true
# # Model Evaluation & vergleich aller Modelle
# +
# Re-fit the linear baseline on the optimized RUL so all models share the target
model_linear = linear_model.LinearRegression()
model_linear.fit(X_train_scaled, y_train_optimized)
# Evaluate every model on the held-out test set with RMSE
models = [model_linear, model_forest, model_nn, model_deep_nn, supVecMac]
metric = metrics.mean_squared_error
metric_per_model = []
for model in models:
    prediction = model.predict(X_test_scaled)
    cost = metric(y_test, prediction)
    cost = np.sqrt(cost)  # MSE -> RMSE
    metric_per_model.append(cost)
# + hidden=true
# Bar chart of the test RMSE per model, values annotated above each bar
x = np.arange(len(metric_per_model))
model_names = ['Lineare \n Regression', 'Random Forest', 'Neural \n Network', 'Deep Neural \n Network', 'Support \n Vector Machine']
fig, ax = plt.subplots()
bar = ax.bar(x, metric_per_model, .5)
plt.xticks(x,[model for model in model_names])
for rect in bar:
    height = rect.get_height()
    ax.text(rect.get_x() + rect.get_width()/2., .2 + height,
            '%8.2f' % height,
            ha='center', va='bottom',
            fontsize=20)
plt.title('Testfehler verschiedener Algorithmen')
plt.ylabel('Testfehler (RMSE)');
# + [markdown] hidden=true
# # Hyperparameter-Optimierung (Hyperparameter tuning)
# -
# ---
# ---
# ---
# # Aufgabe 4
# Welches sind die optimalen Hyperparameter für einen Random Forest?
# <br>
# Versuche verschiedene Hyperparameter aus. Dabei können in der nächsten Zelle die folgenden Hyperparameter angepasst werden:
# <br>
# <b>n_estimator:</b> Anzahl der Bäume (ganze Zahl zwischen 2 und 100. Je höher die Zahl, desto länger dauert die Berechnung)
# <br>
# <b>max_depth:</b> Maximale Tiefe eines Baumes (ganze Zahl zwischen 2 und 15 oder None, falls keine maximale Tiefe vorgegeben werden soll)
# <br>
# <b>max_features:</b> Maximale Anzahl der Merkmale, die für das Erstellen eines Baumes zufällig ausgewählt werden (ganze Zahl zwischen 2 und 15 oder “auto”, “sqrt”, “log2”. Der Standardwert ist sqrt(n_features), die Quadratwurzel der Anzahl der Merkmale)
# <br>
# Es gibt noch viele weitere...
# Define the random forest with the hyper-parameters to experiment with
model_forest = RandomForestClassifier(n_estimators=10, max_depth=None, max_features='sqrt')
# Train the random forest and report its training RMSE
model_forest.fit(X_train_scaled, y_train_optimized)
y_pred = model_forest.predict(X_train_scaled)
print('Trainingsfehler (RMSE):', np.sqrt(metrics.mean_squared_error(y_train_optimized, y_pred)))
# # Ende Aufgabe 4
# ---
# ---
# ---
# # Zufällige Suche (Random Search) der optimalen Hyperparamter für einen Random Forest
# +
# Erstellen der Random Search mit Angabe, welche Hyperparameter mit welchen werten getestet werden sollen
def Random_Search_CV_RFR(X_train, y_train):
    """Randomized hyper-parameter search for a RandomForestRegressor.

    Runs a 20-iteration, 5-fold RandomizedSearchCV over the grid below.

    :param X_train: scaled training features
    :param y_train: training target (RUL)
    :return: (best_score_, best_params_) of the search
    """
    # Only the names actually used are imported (the original also imported
    # GridSearchCV and ShuffleSplit, which were never used).
    from sklearn.model_selection import RandomizedSearchCV
    from sklearn.ensemble import RandomForestRegressor
    estimator = RandomForestRegressor()
    param_grid = {
        "n_estimators": [10, 25, 50, 100, 250, 500],
        "max_features": ["sqrt", "log2"],
        "min_samples_split": [2, 4, 8, 16, 32],
        "bootstrap": [True, False]
    }
    random_params = RandomizedSearchCV(estimator, param_grid, n_iter=20, n_jobs=-1, cv=5)
    random_params.fit(X_train, y_train)
    return random_params.best_score_, random_params.best_params_
def RFR(X_train, X_test, y_train, y_test, best_params):
    """Fit a RandomForestRegressor with *best_params* and predict on X_test.

    Returns the (y_test, predictions) pair.
    """
    from sklearn.ensemble import RandomForestRegressor
    model = RandomForestRegressor(n_jobs=-1).set_params(**best_params)
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    return y_test, predictions
# -
# Run the randomized search for the random forest
best_score, best_params = Random_Search_CV_RFR(X_train_scaled, y_train_optimized)
y_test , y_predict = RFR(X_train_scaled, X_test_scaled, y_train_optimized, y_test, best_params)
print("Best params:",best_params)
# ### Der Random Forest mit den optimalen Hyperparametern
# Der Random Forest mit den optimalen Hyperparametern wird nun noch einmal mit allen Daten trainiert
model_forest2 = RandomForestClassifier(bootstrap=True, max_features='log2', n_estimators=100)
model_forest2.fit(X_train_scaled, y_train_optimized)
y_pred = model_forest2.predict(X_train_scaled)
# Test error of the random forest with the tuned hyper-parameters
y_pred = model_forest2.predict(X_test_scaled)
print('Testfehler (RMSE):', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
# # Vergleich aller Modelle
# +
models2 = [model_linear, model_forest, model_forest2, model_nn, model_deep_nn, supVecMac]
metric = metrics.mean_squared_error
metric_per_model2 = []
for model in models2:
prediction = model.predict(X_test_scaled)
cost = metric(y_test, prediction)
cost = np.sqrt(cost)
metric_per_model2.append(cost)
# +
x = np.arange(len(metric_per_model2))
model_names2 = ['Lineare \n Regression', 'Random Forest', 'Optimierter \n Random Forest', 'Neural \n Network', 'Deep Neural \n Network', 'Support \n Vector Machine']
fig, ax = plt.subplots()
bar = ax.bar(x, metric_per_model2, .5)
plt.xticks(x,[model for model in model_names2])
for rect in bar:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., .2 + height,
'%8.2f' % height,
ha='center', va='bottom',
fontsize=20)
plt.title('Testfehler verschiedener Algorithmen')
plt.ylabel('Testfehler (RMSE)');
| NASA Turbofan Engines - Data Analysis, Preparation & Modelling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Extract Datasets and Establish Benchmark
#
# **Learning Objectives**
# - Divide into Train, Evaluation and Test datasets
# - Understand why we need each
# - Pull data out of BigQuery and into CSV
# - Establish Rules Based Benchmark
#
# ## Introduction
# In the previous notebook we demonstrated how to do ML in BigQuery. However BQML is limited to linear models.
#
# For advanced ML we need to pull the data out of BigQuery and load it into a ML Framework, in our case TensorFlow.
#
# While TensorFlow [can read from BigQuery directly](https://www.tensorflow.org/api_docs/python/tf/contrib/cloud/BigQueryReader), the performance is slow. The best practice is to first stage the BigQuery files as .csv files, and then read the .csv files into TensorFlow.
#
# The .csv files can reside on local disk if we're training locally, but if we're training in the cloud we'll need to move the .csv files to the cloud, in our case Google Cloud Storage.
# ### Set up environment variables and load necessary libraries
PROJECT = "qwiklabs-gcp-636667ae83e902b6" # Replace with your PROJECT
REGION = "us-central1" # Choose an available region for Cloud MLE
import os
# Exported so the shell magics (!, bq) in later cells can read them
os.environ["PROJECT"] = PROJECT
os.environ["REGION"] = REGION
# !pip freeze | grep google-cloud-bigquery==1.6.1 || pip install google-cloud-bigquery==1.6.1
# ## Review
#
# In the [a_sample_explore_clean](a_sample_explore_clean.ipynb) notebook we came up with the following query to extract a repeatable and clean sample:
# <pre>
# #standardSQL
# SELECT
# (tolls_amount + fare_amount) AS fare_amount, -- label
# pickup_datetime,
# pickup_longitude,
# pickup_latitude,
# dropoff_longitude,
# dropoff_latitude
# FROM
# `nyc-tlc.yellow.trips`
# WHERE
# -- Clean Data
# trip_distance > 0
# AND passenger_count > 0
# AND fare_amount >= 2.5
# AND pickup_longitude > -78
# AND pickup_longitude < -70
# AND dropoff_longitude > -78
# AND dropoff_longitude < -70
# AND pickup_latitude > 37
# AND pickup_latitude < 45
# AND dropoff_latitude > 37
# AND dropoff_latitude < 45
# -- repeatable 1/5000th sample
# AND MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))),5000) = 1
# </pre>
#
# We will use the same query **with one change**. Instead of using `pickup_datetime` as is, we will extract `dayofweek` and `hourofday` from it. This is to give us some categorical features in our dataset so we can illustrate how to deal with them when we get to feature engineering. The new query will be:
#
# <pre>
# SELECT
# (tolls_amount + fare_amount) AS fare_amount, -- label
# EXTRACT(DAYOFWEEK from pickup_datetime) AS dayofweek,
# EXTRACT(HOUR from pickup_datetime) AS hourofday,
# pickup_longitude,
# pickup_latitude,
# dropoff_longitude,
# dropoff_latitude
# -- rest same as before
# </pre>
# ## Split into train, evaluation, and test sets
#
# For ML modeling we need not just one, but three datasets.
#
# **Train:** This is what our model learns on
#
# **Evaluation (aka Validation):** We shouldn't evaluate our model on the same data we trained on because then we couldn't know whether it was memorizing the input data or whether it was generalizing. Therefore we evaluate on the evaluation dataset, aka validation dataset.
#
# **Test:** We use our evaluation dataset to tune our hyperparameters (we'll cover hyperparameter tuning in a future lesson). We need to know that our chosen set of hyperparameters will work well for data we haven't seen before because in production, that will be the case. For this reason, we create a third dataset that we never use during the model development process. We only evaluate on this once our model development is finished. Data scientists don't always create a test dataset (aka holdout dataset), but to be thorough you should.
#
# We can divide our existing 1/5000th sample three ways 70%/15%/15% (or whatever split we like) with some modulo math demonstrated below.
#
# Because we are using a hash function these results are deterministic, we'll get the same exact split every time the query is run (assuming the underlying data hasn't changed)
# #### **Exercise 1**
#
# The `create_query` function below returns a query string that we will pass to BigQuery to collect our data. It takes as arguments the phase (`TRAIN`, `VALID`, or `TEST`) and the sample_size (relating to the fraction of the data we wish to sample). Complete the code below so that when the phase is set as `VALID` or `TEST` a new 15% split of the data will be created.
def create_query(phase, sample_size):
    """Return the BigQuery SQL that extracts one split of the taxi data.

    The split is repeatable because rows are bucketed on
    FARM_FINGERPRINT(pickup_datetime) modulo a constant, so the same rows
    land in the same split every run (as long as the table is unchanged).

    Args:
        phase: "TRAIN" (70% of the sample), "VALID" (15%) or "TEST" (15%).
        sample_size: String form of N; roughly 1/N of the table is sampled
            (textually substituted for the EVERY_N placeholder below).

    Returns:
        The complete SQL query string for the requested split.

    Raises:
        ValueError: If `phase` is not one of the three known splits.
            (Previously an unknown phase fell through and raised
            UnboundLocalError on `subsample`.)
    """
    basequery = """
SELECT
    (tolls_amount + fare_amount) AS fare_amount,
    EXTRACT(DAYOFWEEK from pickup_datetime) AS dayofweek,
    EXTRACT(HOUR from pickup_datetime) AS hourofday,
    pickup_longitude AS pickuplon,
    pickup_latitude AS pickuplat,
    dropoff_longitude AS dropofflon,
    dropoff_latitude AS dropofflat
FROM
    `nyc-tlc.yellow.trips`
WHERE
    trip_distance > 0
    AND fare_amount >= 2.5
    AND pickup_longitude > -78
    AND pickup_longitude < -70
    AND dropoff_longitude > -78
    AND dropoff_longitude < -70
    AND pickup_latitude > 37
    AND pickup_latitude < 45
    AND dropoff_latitude > 37
    AND dropoff_latitude < 45
    AND passenger_count > 0
    AND MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), EVERY_N) = 1
"""
    # Each phase takes a disjoint range of hash buckets: 0-69 / 70-84 / 85-99.
    if phase == "TRAIN":
        subsample = """
    AND MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), EVERY_N * 100) >= (EVERY_N * 0)
    AND MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), EVERY_N * 100) < (EVERY_N * 70)
    """
    elif phase == "VALID":
        subsample = """
    AND MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), EVERY_N * 100) >= (EVERY_N * 70)
    AND MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), EVERY_N * 100) < (EVERY_N * 85)
    """
    elif phase == "TEST":
        subsample = """
    AND MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), EVERY_N * 100) >= (EVERY_N * 85)
    AND MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), EVERY_N * 100) < (EVERY_N * 100)
    """
    else:
        raise ValueError("phase must be TRAIN, VALID or TEST, got {!r}".format(phase))
    query = basequery + subsample
    return query.replace("EVERY_N", sample_size)
# ## Write to CSV
# Now let's execute a query for train/valid/test and write the results to disk in csv format. We use Pandas's `.to_csv()` method to do so.
# #### **Exercise 2**
#
# The `for` loop below will generate the TRAIN/VALID/TEST sampled subsets of our dataset. Complete the code in the cell below to 1) create the BigQuery `query_string` using the `create_query` function you completed above, taking our original 1/5000th of the dataset and 2) load the BigQuery results of that `query_string` to a DataFrame labeled `df`.
#
# The remaining lines of code write that DataFrame to a csv file with the appropriate naming.
from google.cloud import bigquery
bq = bigquery.Client(project=PROJECT)
# sample_size is a STRING because create_query() does a textual replace of
# the EVERY_N placeholder in the SQL, not numeric substitution.
sample_size = "5000"
for phase in ["TRAIN", "VALID", "TEST"]:
    # 1. Create query string
    query_string = create_query(phase, sample_size)
    # 2. Load results into DataFrame
    df = bq.query(query_string).to_dataframe()
    # 3. Write DataFrame to CSV (no index column, so the file is pure data)
    df.to_csv("taxi-{}.csv".format(phase.lower()), index_label = False, index = False)
    print("Wrote {} lines to {}".format(len(df), "taxi-{}.csv".format(phase.lower())))
# Note that even with a 1/5000th sample we have a good amount of data for ML. 150K training examples and 30K validation.
# <h3> Verify that datasets exist </h3>
# !ls -l *.csv
# ### Preview one of the files
# !head taxi-train.csv
# Looks good! We now have our ML datasets and are ready to train ML models, validate them and test them.
# ## Establish rules-based benchmark
#
# Before we start building complex ML models, it is a good idea to come up with a simple rules based model and use that as a benchmark. After all, there's no point using ML if it can't beat the traditional rules based approach!
#
# Our rule is going to be to divide the mean fare_amount by the mean estimated distance to come up with a rate and use that to predict.
#
# Recall we can't use the actual `trip_distance` because we won't have that available at prediction time (depends on the route taken), however we do know the users pick up and drop off location so we can use euclidean distance between those coordinates.
# #### **Exercise 3**
#
# In the code below, we create a rules-based benchmark and measure the Root Mean Squared Error against the label. The function `euclidean_distance` takes as input a Pandas dataframe and should measure the straight line distance between the pickup location and the dropoff location. Complete the code so that the function returns Euclidean distance between the pickup and dropoff location.
#
# The `compute_rmse` function takes the actual (label) value and the predicted value and computes the Root Mean Squared Error between the two. Complete the code below for the `compute_rmse` function.
# +
import pandas as pd
import math
import numpy as np
def euclidean_distance(df):
    """Straight-line (Euclidean) distance between pickup and dropoff.

    Args:
        df: DataFrame with 'pickuplat', 'pickuplon', 'dropofflat' and
            'dropofflon' columns.

    Returns:
        pandas Series with one distance per row (in coordinate-degree
        units -- fine as a relative distance proxy for the benchmark).
    """
    # Vectorized column arithmetic instead of the original per-row
    # apply(): one C-level pass over the columns rather than one Python
    # function call per row. Values are identical.
    return ((df['pickuplat'] - df['dropofflat']) ** 2 +
            (df['pickuplon'] - df['dropofflon']) ** 2) ** 0.5
# just for fun
def manhattan_distance(df):
    """Manhattan (L1) distance between pickup and dropoff coordinates.

    Vectorized equivalent of the original row-wise |dlat| + |dlon|
    computation; returns a pandas Series with one distance per row.
    """
    return ((df['pickuplat'] - df['dropofflat']).abs() +
            (df['pickuplon'] - df['dropofflon']).abs())
def compute_rmse(actual, predicted):
    """Root Mean Squared Error between two equal-length sequences."""
    residuals = np.array(actual) - np.array(predicted)
    return math.sqrt(np.mean(residuals ** 2))
def print_rmse(df, rate, name):
    """Print the benchmark RMSEs (Euclidean / Manhattan / flag-drop rules).

    Args:
        df: DataFrame with a 'fare_amount' label column and the pickup/
            dropoff coordinate columns used by the distance helpers.
        rate: fare-per-Euclidean-distance rate fitted on the training set.
        name: Dataset label ("Train"/"Valid") for the printed lines.

    Relies on the module-level `man_rate` and `flag_drop_rate` computed
    below. Bug fix: the format() arguments were swapped, producing e.g.
    "7.7 Euclidean distance RMSE = Train"; the name now comes first.
    """
    print("{} Euclidean distance RMSE = {}".format(name, compute_rmse(df["fare_amount"], rate * euclidean_distance(df))))
    print("{} Manhattan distance RMSE = {}".format(name, compute_rmse(df["fare_amount"], man_rate * manhattan_distance(df))))
    print("{} Flag drop distance RMSE = {}".format(name, compute_rmse(df["fare_amount"], (df['fare_amount'].min() + flag_drop_rate * manhattan_distance(df)))))
df_train = pd.read_csv("taxi-train.csv")
df_valid = pd.read_csv("taxi-valid.csv")
# Rule-of-thumb rates, fitted on TRAIN only: mean fare divided by mean distance.
rate = df_train["fare_amount"].mean() / euclidean_distance(df_train).mean()
man_rate = df_train["fare_amount"].mean() / manhattan_distance(df_train).mean()
# Flag-drop variant: subtract the minimum fare (the "flag drop") before rating.
flag_drop_rate = ((df_train["fare_amount"].mean() - df_train["fare_amount"].min()) / manhattan_distance(df_train).mean())
print_rmse(df_train, rate, "Train")
print_rmse(df_valid, rate, "Valid")
# -
# Minimum observed fare -- the "flag drop" baseline used by the rule above.
df_train['fare_amount'].min()
# The simple distance-based rule gives us an RMSE of <b>$7.70</b> on the validation dataset. We have to beat this, of course, but you will find that simple rules of thumb like this can be surprisingly difficult to beat.
#
# You don't want to set a goal on the test dataset because you'll want to tweak your hyperparameters and model architecture to get the best validation error. Then, you can evaluate ONCE on the test data.
# ## Challenge exercise
#
# Let's say that you want to predict whether a Stackoverflow question will be acceptably answered. Using this [public dataset of questions](https://bigquery.cloud.google.com/table/bigquery-public-data:stackoverflow.posts_questions), create a machine learning dataset that you can use for classification.
# <p>
# What is a reasonable benchmark for this problem?
# What features might be useful?
# <p>
# If you got the above easily, try this harder problem: you want to predict whether a question will be acceptably answered within 2 days. How would you create the dataset?
# <p>
# Hint (highlight to see):
# <p style='color:white' linkstyle='color:white'>
# You will need to do a SQL join with the table of [answers]( https://bigquery.cloud.google.com/table/bigquery-public-data:stackoverflow.posts_answers) to determine whether the answer was within 2 days.
# </p>
# Copyright 2019 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| courses/machine_learning/deepdive/01_bigquery/labs/c_extract_and_benchmark.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Visualising high dimensional spaces using Bayesian GPLVM
# --
# *<NAME>, June 2016*
#
# - Sample from a periodic kernel
# - Visualise the space using Bayesian GPLVM
import matplotlib
from matplotlib import pyplot as plt
matplotlib.rcParams['figure.figsize'] = (12,6)
matplotlib.style.use('ggplot')
# %matplotlib inline
import GPflow
import numpy as np
import tensorflow as tf
np.random.seed(0)
Q = 2 # latent dimension
D = 5 # true dimension
N = 10 # points per cluster (two clusters are built below)
lengthscale = 1.5
variance = 1.3
rng = np.random.RandomState(1)
# Two Gaussian clusters in the D-dimensional space: a tight one at the
# origin and a looser one centred at (2, ..., 2).
X_data1 = rng.multivariate_normal(np.zeros(D), 0.1*np.eye(D), N)
X_data2 = rng.multivariate_normal(2*np.ones(D), 1*np.eye(D), N)
X_data = np.vstack([X_data1,X_data2])
# idx holds a cluster label (1 or 2) per point; used only to colour plots.
idx = np.vstack([np.ones((N,1)), 2*np.ones((N,1))])
kernel = GPflow.kernels.RBF(Q, variance=variance, lengthscales=lengthscale)
# +
# Sample D correlated output columns from the GP prior: with K = L L^T
# (Cholesky), Y = L z gives each column of Y ~ N(0, K).
K = kernel.compute_K(X_data, X_data)
# Small diagonal jitter (0.01) keeps the Cholesky factorisation stable.
L = np.linalg.cholesky(K + np.eye(K.shape[0])*0.01)
Y = L.dot(np.random.randn(L.shape[0],D))
# -
Y.shape
# First two input coordinates, coloured by cluster label.
plt.scatter(X_data[:,0], X_data[:,1], 100, idx, lw=2, cmap=plt.cm.viridis)
# Linear baseline: project Y down to Q dimensions with PCA.
from sklearn.decomposition import PCA
pca = PCA(n_components=Q)
XPCA = pca.fit_transform(Y)
print XPCA.shape
plt.scatter(XPCA[:,0], XPCA[:,1], 100, idx, lw=2, cmap=plt.cm.viridis)
from GPflow import gplvm
# +
# Random starting positions for the latent points (2N points in Q dims).
XInitGPLVM = rng.multivariate_normal(np.zeros(Q), np.eye(Q), 2*N)
np.set_printoptions(3)
print XInitGPLVM
# NOTE(review): missing parentheses -- this binds the function `plt.figure`
# to `fig` instead of creating a new figure (later cells use plt.figure()).
fig=plt.figure
plt.scatter(XInitGPLVM[:,0], XInitGPLVM[:,1], 100, idx, lw=2, cmap=plt.cm.viridis)
plt.title('initialisation')
# You could implement a standard GPLVM, and show that it recovers PCA when the kernel is linear
m = GPflow.gplvm.GPLVM(Y=Y, Q=Q, kern=GPflow.kernels.Linear(2), XInit=XInitGPLVM)
m.optimize()
print m.X.value
# -
np.set_printoptions(precision=2)
# GPLVM latents are identified only up to rotation/reflection; swap and
# flip the axes so they can be compared against the PCA solution.
X=np.vstack([m.X.value[:,1], -m.X.value[:,0]]).T
# Min-max normalise both embeddings onto [0, 1] before comparing.
XT= (X - X.min(0)) / (X.max(0) - X.min(0))
XPCAT= (XPCA - XPCA.min(0)) / (XPCA.max(0) - XPCA.min(0))
fig=plt.figure()
plt.scatter(XT[:,0], XT[:,1], 100, idx, lw=2, cmap=plt.cm.viridis)
plt.title('GPLVM')
fig=plt.figure()
plt.scatter(XPCAT[:,0], XPCAT[:,1], 100, idx, lw=2, cmap=plt.cm.viridis)
plt.title('PCA')
print np.allclose(XT,XPCAT,atol=0.2)
print np.allclose(m.X.value, XPCA)
print np.allclose(m.X.value[:,[1,0]], XPCA)
# +
# You could set the variance of the BGPLVM to zero and show that it's the same as the GPLVM
# BGPLVM with variance to 0 is same as GPLVM
N = 10 # number of data points
D = 1 # latent dimensions
M = 5 # inducing points
R = 2 # data dimension
rng = np.random.RandomState(1)
Y = rng.randn(N,R)
Z = rng.rand(M,D)
# Shared starting point for both models so the comparison below is fair.
XInit = rng.rand(N, D)
m = GPflow.gplvm.BayesianGPLVM(X_mean = XInit,
X_var=np.zeros((N,D)), Y=Y, kern=GPflow.kernels.RBF(D), Z=Z) # use 0 variance
# Fix kernel and latent variance so only the latent means are optimised.
m.kern.fixed = True
m.X_var.fixed = True
m.optimize()
# Plain (point-estimate) GPLVM with the same kernel and initialisation.
mGPLVM = GPflow.gplvm.GPLVM(Y=Y, Q=D, kern=GPflow.kernels.RBF(D), XInit=XInit)
mGPLVM.kern.fixed = True
mGPLVM.optimize()
# -
np.set_printoptions(3)
print XInit
print m.X_mean.value
print mGPLVM.X.value
# With zero latent variance the Bayesian GPLVM should match the plain GPLVM.
print np.allclose(m.X_mean.value, mGPLVM.X.value)
# +
# Bayesian GPLVM model
from GPflow import gplvm
# Inducing points: either a 7x7 grid (49 points) or 10 random draws.
fGrid = False
if(fGrid):
    Z1, Z2 = np.mgrid[X_data.min():X_data.max():7j, X_data.min():X_data.max():7j]
    Z = np.vstack([Z1.ravel(), Z2.ravel()]).T
else:
    Z = rng.multivariate_normal(np.zeros(Q), np.eye(Q), 10)
plt.scatter(Z[:,0], Z[:,1], 100, lw=2, cmap=plt.cm.viridis)
print Z.shape
# just optimize X: keep the (true) kernel hyperparameters fixed
kernel.variance.fixed = True
kernel.lengthscales.fixed = True
# m = GPflow.gplvm.BayesianGPLVM(X_mean = np.zeros((2*N,Q)),
# X_var=np.ones((2*N,Q)), Y=Y, kern=kernel, Z=Z)
m = GPflow.gplvm.BayesianGPLVM(X_mean = XPCA.copy(),
X_var=2*np.ones((2*N,Q)), Y=Y, kern=kernel, Z=Z)
m.likelihood.variance = 0.01
m.likelihood.fixed = True
m.X_var = 0.01
m.optimize(max_iters = 500, display=1)
# -
Xinf = m.X_mean.value
plt.scatter(Xinf[:,0], Xinf[:,1], 100, idx, lw=2, cmap=plt.cm.viridis)
# NOTE(review): `Xinit` is never defined in this notebook (only `XInit` and
# `XInitGPLVM` exist, and this model was initialised from XPCA.copy()), so
# this line and the allclose() below likely raise NameError; `XPCA` was
# probably intended.
plt.scatter(Xinit[:,0], Xinit[:,1], 100, idx, lw=2, cmap=plt.cm.viridis)
plt.scatter(X_data[:,0], X_data[:,1], 100, idx, lw=2, cmap=plt.cm.viridis)
# Has GPLVM done anything or is it stuck in initialisation?
np.allclose(Xinf,Xinit)
print m.X_var
| notebooks/GPLVM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.0
# language: julia
# name: julia-1.4
# ---
# # Numerical linear algebra
versioninfo()
# ## Introduction
#
# * Topics in numerical algebra:
# - BLAS
# - solve linear equations $\mathbf{A} \mathbf{x} = \mathbf{b}$
# - regression computations $\mathbf{X}^T \mathbf{X} \beta = \mathbf{X}^T \mathbf{y}$
# - eigen-problems $\mathbf{A} \mathbf{x} = \lambda \mathbf{x}$
# - generalized eigen-problems $\mathbf{A} \mathbf{x} = \lambda \mathbf{B} \mathbf{x}$
# - singular value decompositions $\mathbf{A} = \mathbf{U} \Sigma \mathbf{V}^T$
# - iterative methods for numerical linear algebra
#
# * Except for the iterative methods, most of these numerical linear algebra tasks are implemented in the BLAS and LAPACK libraries. They form the **building blocks** of most statistical computing tasks (optimization, MCMC).
#
# * Our major **goal** (or learning objectives) is to
# 1. know the complexity (flop count) of each task
# 2. be familiar with the BLAS and LAPACK functions (what they do)
# 3. do **not** re-invent wheels by implementing these dense linear algebra subroutines by yourself
# 4. understand the need for iterative methods
# 5. apply appropriate numerical algebra tools to various statistical problems
#
# * All high-level languages (R, Matlab, Julia) call BLAS and LAPACK for numerical linear algebra.
# - Julia offers more flexibility by exposing interfaces to many BLAS/LAPACK subroutines directly. See [documentation](https://docs.julialang.org/en/v1/stdlib/LinearAlgebra/#BLAS-functions-1).
# ## BLAS
#
# * BLAS stands for _basic linear algebra subprograms_.
#
# * See [netlib](http://www.netlib.org/blas/) for a complete list of standardized BLAS functions.
#
# * There are many implementations of BLAS.
# - [Netlib](http://www.netlib.org/blas/) provides a reference implementation.
#     - Matlab uses Intel's [MKL](https://software.intel.com/en-us/node/520724) (math kernel libraries). **The MKL implementation is the gold standard on the market.** It is not open source but the compiled library is free for Linux and MacOS.
# - Julia uses [OpenBLAS](https://github.com/xianyi/OpenBLAS). **OpenBLAS is the best open source implementation**.
#
# * There are 3 levels of BLAS functions.
# - [Level 1](http://www.netlib.org/blas/#_level_1): vector-vector operation
# - [Level 2](http://www.netlib.org/blas/#_level_2): matrix-vector operation
# - [Level 3](http://www.netlib.org/blas/#_level_3): matrix-matrix operation
#
# | Level | Example Operation | Name | Dimension | Flops |
# |-------|----------------------------------------|-------------|-------------------------------------------|-------|
# | 1 | $\alpha \gets \mathbf{x}^T \mathbf{y}$ | dot product | $\mathbf{x}, \mathbf{y} \in \mathbb{R}^n$ | $2n$ |
# | 1 | $\mathbf{y} \gets \mathbf{y} + \alpha \mathbf{x}$ | axpy | $\alpha \in \mathbb{R}$, $\mathbf{x}, \mathbf{y} \in \mathbb{R}^n$ | $2n$ |
# | 2 | $\mathbf{y} \gets \mathbf{y} + \mathbf{A} \mathbf{x}$ | gaxpy | $\mathbf{A} \in \mathbb{R}^{m \times n}$, $\mathbf{x} \in \mathbb{R}^n$, $\mathbf{y} \in \mathbb{R}^m$ | $2mn$ |
# | 2 | $\mathbf{A} \gets \mathbf{A} + \mathbf{y} \mathbf{x}^T$ | rank one update | $\mathbf{A} \in \mathbb{R}^{m \times n}$, $\mathbf{x} \in \mathbb{R}^n$, $\mathbf{y} \in \mathbb{R}^m$ | $2mn$ |
# | 3 | $\mathbf{C} \gets \mathbf{C} + \mathbf{A} \mathbf{B}$ | matrix multiplication | $\mathbf{A} \in \mathbb{R}^{m \times p}$, $\mathbf{B} \in \mathbb{R}^{p \times n}$, $\mathbf{C} \in \mathbb{R}^{m \times n}$ | $2mnp$ |
#
# * Typical BLAS functions support single precision (S), double precision (D), complex (C), and double complex (Z).
# ## Examples
#
# > **The form of a mathematical expression and the way the expression should be evaluated in actual practice may be quite different.**
#
# Some operations _appear_ as level-3 but indeed are level-2.
#
# **Example 1**. A common operation in statistics is column scaling or row scaling
# $$
# \begin{eqnarray*}
# \mathbf{A} &=& \mathbf{A} \mathbf{D} \quad \text{(column scaling)} \\
# \mathbf{A} &=& \mathbf{D} \mathbf{A} \quad \text{(row scaling)},
# \end{eqnarray*}
# $$
# where $\mathbf{D}$ is diagonal. For example, in generalized linear models (GLMs), the Fisher information matrix takes the form
# $$
# \mathbf{X}^T \mathbf{W} \mathbf{X},
# $$
# where $\mathbf{W}$ is a diagonal matrix with observation weights on diagonal.
#
# Column and row scalings are essentially level-2 operations!
# +
# Column scaling benchmark: full-matrix multiply vs the Diagonal-dispatch
# method vs the in-place rmul! variant.
using BenchmarkTools, LinearAlgebra, Random
Random.seed!(123) # seed
n = 2000
A = rand(n, n) # n-by-n matrix
d = rand(n)   # n vector
D = Diagonal(d) # diagonal matrix with d as diagonal
# -
Dfull = convert(Matrix, D) # convert to full matrix
# this is calling BLAS routine for matrix multiplication: O(n^3) flops
# this is SLOW!
@benchmark $A * $Dfull
# dispatch to special method for diagonal matrix multiplication.
# columnwise scaling: O(n^2) flops
@benchmark $A * $D
# in-place: avoid allocate space for result
# rmul!: compute matrix-matrix product AB, overwriting A, and return the result.
@benchmark rmul!($A, $D)
# **Note:** In R or Matlab, `diag(d)` will create a full matrix. Be cautious using `diag` function: do we really need a full diagonal matrix?
# +
using RCall
R"""
d <- runif(5)
diag(d)
"""
# +
using MATLAB
mat"""
d = rand(5, 1)
diag(d)
"""
# -
# **Example 2**. Inner product between two matrices $\mathbf{A}, \mathbf{B} \in \mathbb{R}^{m \times n}$ is often written as
# $$
# \text{trace}(\mathbf{A}^T \mathbf{B}), \text{trace}(\mathbf{B} \mathbf{A}^T), \text{trace}(\mathbf{A} \mathbf{B}^T), \text{ or } \text{trace}(\mathbf{B}^T \mathbf{A}).
# $$
# They appear as level-3 operation (matrix multiplication with $O(m^2n)$ or $O(mn^2)$ flops).
# +
Random.seed!(123)
n = 2000
A, B = randn(n, n), randn(n, n)
# slow way to evaluate this thing
@benchmark tr(transpose($A) * $B)
# -
# But $\text{trace}(\mathbf{A}^T \mathbf{B}) = <\text{vec}(\mathbf{A}), \text{vec}(\mathbf{B})>$. The latter is level-2 operation with $O(mn)$ flops.
@benchmark dot($A, $B)
# **Example 3**. Similarly $\text{diag}(\mathbf{A}^T \mathbf{B})$ can be calculated in $O(mn)$ flops.
# slow way to evaluate this thing: O(n^3)
@benchmark diag(transpose($A) * $B)
# smarter: O(n^2)
@benchmark Diagonal(vec(sum($A .* $B, dims=1)))
# To get rid of allocation of intermediate array at all, we can just write a double loop or use `dot` function.
# +
using LoopVectorization
# Fused, SIMD-vectorized (@avx) computation of diag(A'B) without forming
# the full product: d[j] = sum_i A[i,j] * B[i,j], a single O(mn) pass with
# no intermediate array allocation.
function diag_matmul!(d, A, B)
    m, n = size(A)
    @assert size(B) == (m, n) "A and B should have same size"
    fill!(d, 0)
    @avx for j in 1:n, i in 1:m
        d[j] += A[i, j] * B[i, j]
    end
    # Equivalent (but slower) column-wise dot-product formulation:
    # for j in 1:n
    #     @views d[j] = dot(A[:, j], B[:, j])
    # end
    # Wrap the accumulator vector as a Diagonal matrix (the return value).
    Diagonal(d)
end
d = zeros(eltype(A), size(A, 2))
@benchmark diag_matmul!($d, $A, $B)
# -
# ## Memory hierarchy and level-3 fraction
#
# > **Key to high performance is effective use of memory hierarchy. True on all architectures.**
#
# * Flop count is not the sole determinant of algorithm efficiency. Another important factor is data movement through the memory hierarchy.
#
# <img src="./macpro_inside.png" width="400" align="center">
#
# <img src="./cpu_die.png" width="400" align="center">
#
# <img src="./hei.png" width="400" align="center">
#
# * Numbers everyone should know
#
# | Operation | Time |
# |-------------------------------------|----------------|
# | L1 cache reference | 0.5 ns |
# | L2 cache reference | 7 ns |
# | Main memory reference | 100 ns |
# | Read 1 MB sequentially from memory | 250,000 ns |
# | Read 1 MB sequentially from SSD | 1,000,000 ns |
# | Read 1 MB sequentially from disk | 20,000,000 ns |
#
#
# <!-- | Operation | Time | -->
# <!-- |-------------------------------------|----------------| -->
# <!-- | L1 cache reference | 0.5 ns | -->
# <!-- | Branch mispredict | 5 ns | -->
# <!-- | L2 cache reference | 7 ns | -->
# <!-- | Mutex lock/unlock | 100 ns | -->
# <!-- | Main memory reference | 100 ns | -->
# <!-- | Compress 1K bytes with Zippy | 10,000 ns | -->
# <!-- | Send 2K bytes over 1 Gbps network | 20,000 ns | -->
# <!-- | Read 1 MB sequentially from memory | 250,000 ns | -->
# <!-- | Round trip within same datacenter | 500,000 ns | -->
# <!-- | Disk seek | 10,000,000 ns | -->
# <!-- | Read 1 MB sequentially from network | 10,000,000 ns | -->
# <!-- | Read 1 MB sequentially from disk | 30,000,000 ns | -->
# <!-- | Send packet CA->Netherlands->CA | 150,000,000 ns | -->
#
# Source: <https://gist.github.com/jboner/2841832>
#
# * For example, Xeon X5650 CPU has a theoretical throughput of 128 DP GFLOPS but a max memory bandwidth of 32GB/s.
#
# * Can we keep CPU cores busy with enough deliveries of matrix data and ship the results to memory fast enough to avoid backlog?
# Answer: use **high-level BLAS** as much as possible.
#
# | BLAS | Dimension | Mem. Refs. | Flops | Ratio |
# |--------------------------------|------------------------------------------------------------|------------|--------|-------|
# | Level 1: $\mathbf{y} \gets \mathbf{y} + \alpha \mathbf{x}$ | $\mathbf{x}, \mathbf{y} \in \mathbb{R}^n$ | $3n$ | $2n$ | 3:2 |
# | Level 2: $\mathbf{y} \gets \mathbf{y} + \mathbf{A} \mathbf{x}$ | $\mathbf{x}, \mathbf{y} \in \mathbb{R}^n$, $\mathbf{A} \in \mathbb{R}^{n \times n}$ | $n^2$ | $2n^2$ | 1:2 |
# | Level 3: $\mathbf{C} \gets \mathbf{C} + \mathbf{A} \mathbf{B}$ | $\mathbf{A}, \mathbf{B}, \mathbf{C} \in\mathbb{R}^{n \times n}$ | $4n^2$ | $2n^3$ | 2:n |
#
# * Higher level BLAS (3 or 2) make more effective use of arithmetic logic units (ALU) by keeping them busy. **Surface-to-volume** effect.
# See [Dongarra slides](https://www.samsi.info/wp-content/uploads/2017/02/SAMSI-0217_Dongarra.pdf).
#
# <img src="./blas_throughput.png" width="500" align="center"/>
#
# * A distinction between LAPACK and LINPACK (older version of R uses LINPACK) is that LAPACK makes use of higher level BLAS as much as possible (usually by smart partitioning) to increase the so-called **level-3 fraction**.
#
# * To appreciate the efforts in an optimized BLAS implementation such as OpenBLAS (evolved from GotoBLAS), see the [Quora question](https://www.quora.com/What-algorithm-does-BLAS-use-for-matrix-multiplication-Of-all-the-considerations-e-g-cache-popular-instruction-sets-Big-O-etc-which-one-turned-out-to-be-the-primary-bottleneck), especially the [video](https://youtu.be/JzNpKDW07rw). Bottomline is
#
# > **Get familiar with (good implementations of) BLAS/LAPACK and use them as much as possible.**
# ## Effect of data layout
#
# * Data layout in memory affects algorithmic efficiency too. It is much faster to move chunks of data in memory than retrieving/writing scattered data.
#
# * Storage mode: **column-major** (Fortran, Matlab, R, Julia) vs **row-major** (C/C++).
#
# * **Cache line** is the minimum amount of cache which can be loaded and stored to memory.
# - x86 CPUs: 64 bytes
# - ARM CPUs: 32 bytes
#
# <img src="https://patterns.eecs.berkeley.edu/wordpress/wp-content/uploads/2013/04/dense02.png" width="500" align="center"/>
#
# * Accessing column-major stored matrix by rows ($ij$ looping) causes lots of **cache misses**.
#
# * Take matrix multiplication as an example
# $$
# \mathbf{C} \gets \mathbf{C} + \mathbf{A} \mathbf{B}, \quad \mathbf{A} \in \mathbb{R}^{m \times p}, \mathbf{B} \in \mathbb{R}^{p \times n}, \mathbf{C} \in \mathbb{R}^{m \times n}.
# $$
# Assume the storage is column-major, such as in Julia. There are 6 variants of the algorithms according to the order in the triple loops.
# - `jki` or `kji` looping:
# ```julia
# # inner most loop
# for i = 1:m
# C[i, j] = C[i, j] + A[i, k] * B[k, j]
# end
# ```
# - `ikj` or `kij` looping:
# ```julia
# # inner most loop
# for j = 1:n
# C[i, j] = C[i, j] + A[i, k] * B[k, j]
# end
# ```
# - `ijk` or `jik` looping:
# ```julia
# # inner most loop
# for k = 1:p
# C[i, j] = C[i, j] + A[i, k] * B[k, j]
# end
# ```
# * We pay attention to the innermost loop, where the vector calculation occurs. The associated **stride** when accessing the three matrices in memory (assuming column-major storage) is
#
# | Variant | A Stride | B Stride | C Stride |
# |----------------|----------|----------|----------|
# | $jki$ or $kji$ | Unit | 0 | Unit |
# | $ikj$ or $kij$ | 0 | Non-Unit | Non-Unit |
# | $ijk$ or $jik$ | Non-Unit | Unit | 0 |
# Apparently the variants $jki$ or $kji$ are preferred.
# +
"""
matmul_by_loop!(A, B, C, order)
Overwrite `C` by `A * B`. `order` indicates the looping order for triple loop.
"""
function matmul_by_loop!(A::Matrix, B::Matrix, C::Matrix, order::String)
m = size(A, 1)
p = size(A, 2)
n = size(B, 2)
fill!(C, 0)
if order == "jki"
@inbounds for j = 1:n, k = 1:p, i = 1:m
C[i, j] += A[i, k] * B[k, j]
end
end
if order == "kji"
@inbounds for k = 1:p, j = 1:n, i = 1:m
C[i, j] += A[i, k] * B[k, j]
end
end
if order == "ikj"
@inbounds for i = 1:m, k = 1:p, j = 1:n
C[i, j] += A[i, k] * B[k, j]
end
end
if order == "kij"
@inbounds for k = 1:p, i = 1:m, j = 1:n
C[i, j] += A[i, k] * B[k, j]
end
end
if order == "ijk"
@inbounds for i = 1:m, j = 1:n, k = 1:p
C[i, j] += A[i, k] * B[k, j]
end
end
if order == "jik"
@inbounds for j = 1:n, i = 1:m, k = 1:p
C[i, j] += A[i, k] * B[k, j]
end
end
end
using Random
Random.seed!(123)  # fixed seed for reproducible benchmark inputs
# Benchmark problem: A is m×p, B is p×n, so C = A*B is m×n.
m, p, n = 2000, 100, 2000
A = rand(m, p)
B = rand(p, n)
C = zeros(m, n);
# -
# * $jki$ and $kji$ looping:
# +
using BenchmarkTools
@benchmark matmul_by_loop!($A, $B, $C, "jki")
# -
@benchmark matmul_by_loop!($A, $B, $C, "kji")
# * $ikj$ and $kij$ looping:
@benchmark matmul_by_loop!($A, $B, $C, "ikj")
@benchmark matmul_by_loop!($A, $B, $C, "kij")
# * $ijk$ and $jik$ looping:
@benchmark matmul_by_loop!($A, $B, $C, "ijk")
# Fixed copy-paste bug: this cell previously benchmarked "ijk" twice and
# never measured the "jik" ordering announced in the heading above.
@benchmark matmul_by_loop!($A, $B, $C, "jik")
# * Julia wraps BLAS library for matrix multiplication. We see BLAS library wins hands down (multi-threading, Strassen algorithm, higher level-3 fraction by block outer product).
@benchmark mul!($C, $A, $B)
# direct call of BLAS wrapper function
@benchmark LinearAlgebra.BLAS.gemm!('N', 'N', 1.0, $A, $B, 0.0, $C)
# **Exercise:** Annotate the loop in `matmul_by_loop!` by `@avx` and benchmark again.
# ## BLAS in R
#
# * **Tip for R user**. Standard R distribution from CRAN uses a very out-dated BLAS/LAPACK library.
# +
using RCall
R"""
library(dplyr)
library(bench)
bench::mark($A %*% $B) %>%
print(width = Inf)
""";
# -
# * Re-build R from source using OpenBLAS or MKL will immediately boost linear algebra performance in R. Google `build R using MKL` to get started. Similarly we can build Julia using MKL.
#
# * Matlab uses MKL. Usually it's very hard to beat Matlab in terms of linear algebra.
# +
using MATLAB
mat"""
f = @() $A * $B;
timeit(f)
"""
# -
# ## Avoid memory allocation: some examples
#
# 1. Transposing matrix is an expensive memory operation.
# - In R, the command
# ```R
# t(A) %*% x
# ```
# will first transpose `A` then perform matrix multiplication, causing unnecessary memory allocation
# - Julia is smart to avoid transposing matrix if possible.
# +
using Random, LinearAlgebra, BenchmarkTools
Random.seed!(123)
n = 1000
A = rand(n, n)
x = rand(n);
# -
typeof(transpose(A))
fieldnames(typeof(transpose(A)))
# same data in tranpose(A) and original matrix A
pointer(transpose(A).parent), pointer(A)
# dispatch to BLAS
# does *not* actually transpose the matrix
@benchmark transpose($A) * $x
# pre-allocate result
out = zeros(size(A, 2))
@benchmark mul!($out, transpose($A), $x)
# or call BLAS wrapper directly
@benchmark LinearAlgebra.BLAS.gemv!('T', 1.0, $A, $x, 0.0, $out)
# 2. [Broadcasting](https://docs.julialang.org/en/v1/base/arrays/#Broadcast-and-vectorization-1) in Julia achieves vectorized code without creating intermediate arrays.
#
# Suppose we want to calculate the element-wise maximum of the absolute values of two large arrays. In R or Matlab, the command
# ```r
# max(abs(X), abs(Y))
# ```
# will create two intermediate arrays and then one result array.
# +
using RCall
Random.seed!(123)
X, Y = rand(1000, 1000), rand(1000, 1000)
R"""
library(dplyr)
library(bench)
bench::mark(max(abs($X), abs($Y))) %>%
print(width = Inf)
""";
# -
# In Julia, dot operations are fused so no intermediate arrays are created.
# no intermediate arrays created, only result array created
@benchmark max.(abs.($X), abs.($Y))
# Pre-allocating result array gets rid of memory allocation at all.
# no memory allocation at all!
Z = zeros(size(X)) # zero matrix of same size as X
@benchmark $Z .= max.(abs.($X), abs.($Y)) # .= (vs =) is important!
# 3. [View](https://docs.julialang.org/en/v1/base/arrays/#Views-(SubArrays-and-other-view-types)-1) avoids creating extra copy of matrix data.
# +
Random.seed!(123)
A = randn(1000, 1000)
# sum entries in a sub-matrix
@benchmark sum($A[1:2:500, 1:2:500])
# -
# view avoids creating a separate sub-matrix
@benchmark sum(@view $A[1:2:500, 1:2:500])
# The [`@views`](https://docs.julialang.org/en/v1/base/arrays/#Base.@views) macro, which can be useful in [some operations](https://discourse.julialang.org/t/why-is-a-manual-in-place-addition-so-much-faster-than-and-on-range-indexed-arrays/3302).
@benchmark @views sum($A[1:2:500, 1:2:500])
| slides/08-numalgintro/numalgintro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
from sklearn import linear_model
import numpy as np
import matplotlib.pyplot as plt
# car prices exercise dataset (categorical 'Car Model' plus numeric columns)
url="https://github.com/codebasics/py/raw/master/ML/5_one_hot_encoding/Exercise/carprices.csv"
df= pd.read_csv(url)
df
# one-hot encode the categorical 'Car Model' column
dummies=pd.get_dummies(df['Car Model'])
dummies
merged= pd.concat([df,dummies],axis='columns')
merged
# drop the raw text column and one dummy ('Audi A5') to avoid the dummy-variable trap
final = merged.drop(['Car Model','Audi A5'], axis='columns')
final
from sklearn.linear_model import LinearRegression
model = LinearRegression()
final.columns
# features = everything except the target column 'Sell Price($)'
X = final.drop(['Sell Price($)'], axis='columns')
X
Y= final['Sell Price($)']
Y
model.fit(X,Y)
# raw lists below must follow X's column order
# (presumably [Mileage, Age(yrs), BMW X5, Mercedez Benz] -- TODO confirm against final.columns;
# both dummies zero encodes the dropped 'Audi A5' category)
model.predict([[35000, 3,1,0]])
model.predict([[59000, 5, 0, 1]])
# model accuracy
model.score(X,Y)
# predicting the price of 4 yr old mercedes benz with 45000 mileage
model.predict([[45000,4,0,1]])
# predicting the price of 7 yr old BMWX with 86000 mileage
model.predict([[86000, 7, 1, 0]])
model.predict(X)
df.dtypes
# train_test_split with the car model csv
df.info()
from sklearn.model_selection import train_test_split
df
# second model: numeric features only, car model ignored
x=df[['Mileage','Age(yrs)']]
y=df['Sell Price($)']
from sklearn.model_selection import train_test_split
# hold out 20% of rows for testing; fixed random_state for reproducibility
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.2,random_state=10)
x_train
len(x_test)
from sklearn.linear_model import LinearRegression
clf = LinearRegression()
clf.fit(x_train, y_train)
clf.predict(x_test)
y_test
# R^2 on the held-out split
clf.score(x_test,y_test)
| Car Model LinearRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Getting API working
# importing API
import quandl
import pandas as pd
import os
from dotenv import load_dotenv
# %load_ext dotenv
# %dotenv
# x = %env
# the API key
# NOTE(review): `x` comes from the `# x = %env` IPython magic above, which jupytext
# renders as a comment -- this cell only works inside a notebook session; verify.
quandl.ApiConfig.api_key = x['QUANDL_API']
# merged dataset
# NOTE(review): absolute local path -- this will only resolve on the author's machine
final = pd.read_csv('C:/Users/Julio/Projects/PT17_cityspire-b-ds/notebooks/datasets/datasets_to_merge/updated/final.csv')
# The list of the cities that we are dealing with in final (there's 397 cities we're working with)
# "City State" strings used to match against Zillow's region descriptions
map_to_cities = final['City'] + ' ' + final['State']
map_to_cities
# + jupyter={"outputs_hidden": true}
map_to_cities.tolist()
# -
# can see a long text in a column (useful for description in regions dataset)
pd.set_option('display.max_colwidth', None)
# data about the regions, their ids, and types (there's zip code, county etc.)
regions_data = quandl.get_table("ZILLOW/REGIONS", region_type='city', paginate=True)
regions_data
# split the semicolon/comma-delimited region description into columns;
# keep only the first two pieces (city name and state abbreviation)
regions_cities = regions_data['region'].str.replace(';', ',').str.split(',',expand=True).drop([2,3], axis=1)
# regions_data['region'] = regions_data['region'].str.replace(';', ',')
# 'New' = "City State" key, built the same way as map_to_cities so the two can be matched
regions_cities['New'] = regions_cities[0] + regions_cities[1]
regions_cities
regions_data
regions_df = regions_data.join(regions_cities)
regions_df
# once confirmed that everything lines up right, drop region column
regions_df = regions_df.drop(['region'],axis=1)
# + jupyter={"outputs_hidden": true}
# how come I can just use the int but when putting '0' in a string, it doesn't change it?
# (rename uses the integer column labels produced by str.split(expand=True))
regions_df = regions_df.rename(columns = {0:"City",1:"State"})
regions_df
# + jupyter={"outputs_hidden": true}
# keep only the Zillow regions whose "City State" key matches one of our cities
our_regions = regions_df[regions_df['New'].isin(map_to_cities)]
our_regions
# -
region_id_our_cities = our_regions['region_id'].tolist()
len(region_id_our_cities)
missing_cities = regions_df['New'].tolist()
# +
# collect our cities that have no matching Zillow region
lst = []
for i in map_to_cities:
    if i not in missing_cities:
        lst.append(i)
lst
# -
# the three cities are missing their region_ID or don't have one. Need to resolve this
regions_df[regions_df['City'] == 'Lancaster']
regions_df[regions_df['City'] == 'Watertown']
regions_df[regions_df['City'] == 'Ithaca']
# + jupyter={"outputs_hidden": true}
region_id_our_cities
# +
# One thing to note: the 1,2,3,4,5+ bedrooms data means condo AND single family
# -
# data split up respectively so that API doesn't get overwhelmed:
# each Zillow indicator is fetched in three region-id batches, then stitched together.

def _fetch_indicator(indicator_id):
    """Fetch one Zillow indicator for all of our region ids, in three batches.

    Batching keeps each ZILLOW/DATA request small enough that the quandl
    endpoint doesn't choke on a ~400-element region_id filter.

    Args:
        indicator_id: Zillow indicator code, e.g. 'ZSFH', 'ZCON', 'Z1BR'.

    Returns:
        One DataFrame with the rows of all three batches concatenated
        (fresh integer index).
    """
    batches = (
        region_id_our_cities[:201],
        region_id_our_cities[201:301],
        region_id_our_cities[301:],
    )
    frames = [
        quandl.get_table('ZILLOW/DATA', indicator_id=indicator_id,
                         region_id=batch, paginate=True)
        for batch in batches
    ]
    # pd.concat replaces DataFrame.append, which was deprecated and removed in pandas 2.0
    return pd.concat(frames, ignore_index=True)

# one DataFrame per indicator; nunique() sanity-checks how many of our
# regions actually have data for that indicator
df_zsfh = _fetch_indicator('ZSFH')
df_zsfh['region_id'].nunique()
df_zcon = _fetch_indicator('ZCON')
df_zcon['region_id'].nunique()
df_z1br = _fetch_indicator('Z1BR')
df_z1br['region_id'].nunique()
df_z2br = _fetch_indicator('Z2BR')
df_z2br['region_id'].nunique()
df_z3br = _fetch_indicator('Z3BR')
df_z3br['region_id'].nunique()
df_z4br = _fetch_indicator('Z4BR')
df_z4br['region_id'].nunique()
df_z5br = _fetch_indicator('Z5BR')
df_z5br['region_id'].nunique()
# +
# find the region ids we requested that never came back from the API
lst = []
x = df_zsfh['region_id'].unique().tolist()
for i in region_id_our_cities:
    if i not in x:
        lst.append(i)
lst
# missing from the merged dataset
# NOTE(review): comparing region_id against string literals -- presumably the API
# returns region_id as str; verify, otherwise these lookups silently match nothing
regions_df[regions_df['region_id'] == '39864']
regions_df[regions_df['region_id'] == '37385']
# looks like they don't even have data for indicator ZSFH nor ZALL
# + jupyter={"outputs_hidden": true}
# https://www.zillow.com/research/why-zillow-home-value-index-better-17742/
# a better understanding of what ZHVI (Zillow Home Value Index)
indicators_data = quandl.get_table("ZILLOW/INDICATORS")
indicators_data
# -
# ## Exploration and Merging
# + jupyter={"outputs_hidden": true}
# datasets
df_zsfh
df_zcon
df_z1br
df_z2br
df_z3br
df_z4br
df_z5br
# +

def _snapshot(indicator_df, value_name, date='2021-02-28'):
    """Take one indicator's latest monthly snapshot and attach city names.

    Filters the indicator DataFrame to a single date, right-joins onto
    `our_regions` (so every one of our cities appears, with NaN where the
    indicator has no data), drops the join bookkeeping columns, and renames
    the generic 'value' column to a descriptive name.

    Args:
        indicator_df: long-format Zillow indicator DataFrame (df_zsfh etc.).
        value_name: column name for this indicator's value in the output.
        date: snapshot date string matching the 'date' column.

    Returns:
        DataFrame with region_id, City, State and the renamed value column.
    """
    snap = indicator_df[indicator_df['date'] == date]
    return (pd.merge(snap, our_regions, how='right')
              .drop(['region_type', 'New', 'indicator_id', 'date'], axis=1)
              .rename(columns={'value': value_name}))

# one snapshot frame per indicator (previously seven copy-pasted cells)
zsfh = _snapshot(df_zsfh, 'SingleFamilyHousingAvgValue')
zcon = _snapshot(df_zcon, 'CondoAvgValue')
z1br = _snapshot(df_z1br, '1-BedroomAvgValue')
z2br = _snapshot(df_z2br, '2-BedroomAvgValue')
z3br = _snapshot(df_z3br, '3-BedroomAvgValue')
z4br = _snapshot(df_z4br, '4-BedroomAvgValue')
z5br = _snapshot(df_z5br, '5+-BedroomAvgValue')
# -
# merge all indicators into one row per city (joins on the shared
# region_id/City/State columns)
df = zsfh
for part in (zcon, z1br, z2br, z3br, z4br, z5br):
    df = pd.merge(df, part)
value_cols = ['SingleFamilyHousingAvgValue', 'CondoAvgValue', '1-BedroomAvgValue',
              '2-BedroomAvgValue', '3-BedroomAvgValue', '4-BedroomAvgValue',
              '5+-BedroomAvgValue']
# put value columns first, keep City/State, drop the no-longer-needed region_id
df = df.reindex(columns=value_cols + ['region_id', 'City', 'State']).drop('region_id', axis=1)
# filling in na's with mean
df[value_cols] = df[value_cols].fillna(value=df[value_cols].mean())
df.notnull().values.all()
# 1_bedroom column had a bunch of zero so round whole df
df = df.round(decimals=1)
df.to_csv('housing_data_prices_avg.csv')
df
| notebooks/model/housing/exploring_housing_data_api.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Evaluation and grading
#
# ## Summary and grading policy
#
# You will have four cumulative late days for assignments, that can be apportioned as you see fit between the problem set one and problem set two. Late days cannot be used for the final project or final presentation. If you run over and have not submitted the pset by the following Monday, I ask that you not attend class, so as not to see the answers. There will be a deduction of 10 percentage points for each late day after you have used your allowed late days.
#
# Grades may be curved if there are no students receiving A's on the non-curved grading scale.
# + tags=["hide_input"]
## import modules
import pandas as pd
from IPython.display import display, HTML
# Build the grading-summary table shown in the rendered page: one row per
# graded component with its weight (percent of final grade) and deadline(s).
grade_summary = pd.DataFrame({'Assignments':
["Datacamp modules",
"Two problem sets",
"Final project",
"Team player/participation"],
'Percentage':
[5,
50,
35,
10],
'Deadlines':
["Throughout",
"Pset one: Thurs. 04-15; \
Pset two: Thurs. 05-13 and Tues. 05-18",
"Presentation: Tuesday 06-01; Short report: Sunday 06-06",
"Throughout"]})
# render without the integer index (cell tag hides the code in the built page)
HTML(grade_summary.to_html(index=False))
# -
# ## Details
#
# ### Datacamp modules to support programming topics (5% of grade)
#
# The DataCamp modules are mainly to support your work on the problem sets and final project by giving you additional practice prior to our in-class activities. As a result, they will be graded on a "complete" and "incomplete" basis, regardless of how many points you received on the assignment itself. This means that you shouldn't get stuck partway through, since you can always ask to be shown the answer with a points deduction. Conversely, if the concepts are review, these should be very quick to complete, but if you'd prefer to skip, you can talk to me and I will reapportion the 5\% to your second problem set.
#
# ### Two problem sets (25% of grade, each; 50% total)
#
# DataCamp provides a very smooth introduction to programming, in the sense that they provide you with example code that gets you ~80\% of the way to the solution, with you needing to apply that code to reach the other ~20\%.
#
# In contrast, the two problem sets will assess your ability to apply the concepts to data that is substantially messier, and problems that are substantially more difficult, than the ones in the DataCamp modules. More details on the problem sets will be provided the week before each is released, but roughly, the workflow will be:
#
# - Accessing the problem set and data via GitHub and/or jhub
# - Working to produce the following outputs for each problem set:
# - A raw .ipynb (or .Rmd file) with the code for the pset
# - A compiled pdf that displays that code, as well as the answers to the written questions. These written questions will involve using some Latex syntax for equations and formatting.
# - When applicable (e.g., part of the pset is run in script form), a supporting .py or .R file
#
# The problem sets will be graded on both accuracy and programming style. For instance, by our second problem set, you will have learned to write functions. The problem set will be designed to test those concepts and if you revert, for instance, to writing repeated code that could be replaced with a function, points will be deducted even if that inefficient code arrives at the correct answer.
#
# **Collaboration**: problem set one will be 80\% group based and 20\% individual (each person will submit a single problem set reflecting the work of the group for the first 80\% and your own work for the individual problem). Problem set two will be fully individual. You should ask all questions on Slack in public channels and I may ask you to delete a post if it shows too much of a solution.
#
# ### Final project (35% of grade)
#
# - I will randomly assign groups of 3-4 a few meetings into the quarter. I've found that this is more effective than letting you choose your own groups for teaching you how to collaborate on technical projects with people who you may not always see eye to eye with!
# - I'll be releasing instructions on Canvas for two preliminary project milestones
# - Each group will work on a project that integrates the concepts we are covering into an applied data science project. The end product will be a github repository that contains:
#
# - The raw source data you used for the project. This will include portions of the data I provide to all groups and external data sources you bring to the project.
# - A pre-analysis plan where you pre-commit to certain analyses with the data: this will be especially important for the other group to be able to replicate your work.
# - A README for the repository that, for each file, describes in detail:
# - Inputs to the file: e.g., raw data; a file containing credentials needed to access an API.
# - What the file does: describe major transformations.
# - Output: if the file produces any outputs (e.g., a cleaned dataset; a figure or graph).
# - A set of code files that transform that data into a form usable to answer the question you have posed in your descriptive research proposal.
# - Final output:
# 1. A 10-minute presentation, written in Beamer (presented in class on **Tuesday June 1st**).
# 2. A report written in the computer science-style conference proceedings (10 pages; will share a template; due **Sunday June 6th**).
#
#
# ### Team player/participation (10% of grade)
#
# We will be using a `#problemset` channel in our class Slack for you to ask questions about roadblocks you encounter with the DataCamp activities or on the problem sets. While I will be answering questions within a window of 24 hours, I encourage you to help each other and answer each other's questions. Your participation grade will reflect whether you've helped your classmates on the forum and your participation in class.
#
| mini_book/_build/jupyter_execute/docs/eval_grades_py.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Spyder)
# language: python3
# name: python3
# ---
# ## Summarizing recommended sample sizes for classification
#
# #### <NAME>
# #### 7/2020
#
# #### Output: plot that shows a recommended sample size for a range of effect sizes
#
# ##### recommended sample size:
# minimum amount of data per category to detect a real difference in the data. Based on the smallest sample size where a classifier does significantly better than baseline. To make sure this isn't just a blip, require that the next 5 sample sizes are also significantly better than baseline (otherwise, recommend a larger sample size).
#
# ##### effect sizes:
# really the distance between the two categories' underlying multivariate distributions. Calculated as the euclidean distance between the distributions' means, divided by their standard deviations.
# +
## clean up
# Best-effort reset of the interactive namespace when running inside IPython;
# outside a notebook (plain python), the import fails and we deliberately
# swallow the error so the script still runs.
try:
    from IPython import get_ipython
    get_ipython().magic('clear')
    get_ipython().magic('reset -f')
except:
    pass
# +
## import libraries
# !pip install -U scikit-learn
# !pip install opencv-python
# data wrangling:
import numpy as np
import pandas as pd
import itertools
from copy import deepcopy as dc
from scipy.spatial import distance
# math:
from sympy import Eq, var, solve
import statistics
import math
from scipy import stats
# plotting:
import matplotlib.pyplot as plt
import seaborn as sns
# SVM:
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
import cv2
# +
## General Setup
# data
nVox = 100 # number of voxels (or other dimensions)
sd = 1 # standard deviation of the distributions (keeping this the same for all distributions)
nIters = 100 # how many times you sample data of each size
nCats = 2 # how many categories you're classifying
# training-testing split for classification analyses:
trainProp = .8 # 80% of data in each category used for training
testProp = .2 # 20% used for testing
# possible sample sizes
minN = 10
maxN = 100
nRange = range(minN, maxN+1)
# set up variance-covariance matrix for the multi-variate normal distributions:
covMat = np.identity(nVox)*sd**2 # orthogonal dimensions; main diagonal = s.d.^2 (variance)
# plot it to check -- main diagonal should = sd^2, all other values should = 0
plt.matshow(covMat);
plt.colorbar()
plt.show()
# seed a random number generator:
# (fixed seed so the sampled patterns -- and hence the recommendations -- are reproducible)
np.random.seed(444)
# -
# #### Set up the possible effect sizes
#
# ###### general method:
# ...fix the multivariate mean for category 1 to 1.
# ...set up a range of possible multivariate means for category 2 (from 1.1 to 3)
# ...for each of category 2's possible mean values, calculate the distance between the category 1 and 2 distributions, and treat that as the "effect size" of the difference between them.
#
# N.B.: Ideally, you'd specify a range of possible distances between the categories and solve for category 2's vector for each distance. But I couldn't find a straightforward way to do this, so instead just sampling a lot of possible distributions for category 2.
#
# ###### calculating distance between distributions:
# To estimate the size of this difference, calculate the euclidean distance between the means and then scale that by the standard deviation of the distributions (intuitively: a larger standard deviation = closer distributions if you don't change the means)
# +
## Set up possible effect sizes
# multivariate means for each category
popMean1 = 1 # fixed at 1
cat1Means = np.full((nVox), popMean1) # vector of means for this category length = # voxels (dimensions)
# range of possible means for category 2
# (0.1 steps from just above popMean1 up to and including 3.0; each value
# yields one effect size in the main simulation loop below)
popMean2Range = np.arange(popMean1+.1, 3+.1, .1)
# function to calculate the distance between the categories' multivariate distributions
def calcDist(vec1, vec2, sd_val=None):
    """Return the scaled distance between two category mean vectors.

    Parameters
    ----------
    vec1, vec2 : array-like
        Multivariate mean vectors of the two category distributions.
    sd_val : float, optional
        Standard deviation of the (shared, isotropic) distributions.
        Defaults to the module-level ``sd``, so the original call form
        ``calcDist(vec1, vec2)`` behaves exactly as before.

    Returns
    -------
    float
        Euclidean distance between the means divided by the variance
        (``sd**2``).  NOTE(review): the markdown above says "scale by the
        standard deviation", but the code divides by the variance; the two
        coincide here only because sd == 1 -- confirm intent.
    """
    # numerator: euclidean distance between mean vectors:
    d = distance.euclidean(vec1, vec2)
    # denominator: variance of the distributions (same for both).
    # `sigma` (not `var`) avoids shadowing sympy's `var`, imported at file top.
    sigma = sd if sd_val is None else sd_val
    return d / sigma**2
# -
# #### Set up the procedure for selecting a reasonable suggestion for the minimum amount of data
#
# ##### general procedure:
# ...compare classifier accuracy to baseline at every sample size (independent-samples t-test)
#
# ...figure out which sample sizes are significantly > baseline
#
# ...figure out the smallest significant sample size that's not just a blip -- some number of samples
# above it also have to be significant
#
# ...if there are no cases like this, default to suggesting the highest considered sample size
# +
## Set up procedure for selecting a "good" minimum amount of data
# significance threshold for calling a sample size "better than baseline":
maxP = 0.001
# how many consecutive larger sample sizes must also be significant,
# so the recommendation isn't just a one-off blip:
runLength = 5
# helper function
def findSuggestion(intactDict, scrambledDict, maxP, runLength):
    """Pick the smallest sample size whose classifier reliably beats baseline.

    Parameters
    ----------
    intactDict : dict
        Maps sample size -> list of classification accuracies on real data.
    scrambledDict : dict
        Same structure, for the scrambled (baseline) data.
    maxP : float
        p-value threshold for the independent-samples t-test.
    runLength : int
        A candidate is accepted only if it and the next `runLength`
        sample sizes are all significant.

    Returns
    -------
    The recommended sample size, or the largest size considered when no
    candidate passes the run criterion.
    """
    sampleSizes = list(intactDict.keys())
    # flag every sample size whose intact accuracy beats baseline:
    significant = []
    for size in sampleSizes:
        _, pValue = stats.ttest_ind(intactDict[size], scrambledDict[size])
        if pValue < maxP:
            significant.append(size)
    # scan significant sizes from smallest to largest; accept the first one
    # whose whole run of following sizes is also significant:
    for candidate in sorted(significant):
        run = range(candidate, candidate + runLength + 1)
        if all(size in significant for size in run):
            return candidate
    # nothing passed the run criterion -> default to the largest size considered
    return max(sampleSizes)
# +
## loop through the possible effect sizes
# on each loop, use a different distribution for category 2
# set up a dictionary to store the suggested sample size for each effect size
suggestionDict = {}
# display progress:
from IPython.display import clear_output
clear_output(wait=True)
mCount = 1  # for progress message
for m in popMean2Range:
    print('\n\ncomparison ' + str(mCount) + '/' + str(len(popMean2Range)) + '...')
    # get the multivariate means vector for category 2
    cat2Means = np.full((nVox), m)
    # loop through the sample sizes:
    accuracyDict = {}
    accuracyDictScram = {}
    for n in nRange:
        print('Estimating classification accuracy with ' + str(n) + ' samples / category')
        currAccuracy = []       # classification accuracies for this sample size
        currAccuracyScram = []  # baseline (scrambled) accuracies for this sample size
        # loop through the iterations for this sample size
        for i in range(nIters):
            # sample from both categories:
            cat1Patterns = np.random.multivariate_normal(cat1Means, covMat, n) # category 1: sample size x voxels
            cat2Patterns = np.random.multivariate_normal(cat2Means, covMat, n) # category 2: sample size x voxels
            # put the categories together:
            allCatsPatterns = np.concatenate((cat1Patterns, cat2Patterns)) # sample size * 2 categories x voxels
            # set up the labels for the classifier (n copies of each category label):
            labels = [c for c, s in itertools.product(range(nCats), range(n))]
            # make sure the data are in the expected size:
            assert len(labels) == allCatsPatterns.shape[0], 'mismatch between labels length and patterns size'
            assert len(labels) == n*nCats, 'unexpected label length (should be sample size x categories)'
            assert allCatsPatterns.shape[1] == nVox, 'unexpected patterns size (should be sample size*categories x voxels)'
            # partition the data into train / test subsets:
            (trainPatterns, testPatterns, trainLabels, testLabels) = train_test_split(allCatsPatterns, labels, test_size = testProp, random_state=42)
            # train the classifier:
            model = LinearSVC(dual=False) # setting dual = false helps the algorithm converge faster
            model.fit(trainPatterns, trainLabels)
            # test the classifier:
            predictions = model.predict(testPatterns) # predicted labels for the held-out data
            # calculate accuracy (% of testing data that was correctly classified)
            comparisonVec = [int(predictions[e] == testLabels[e]) for e in range(len(predictions))]
            currAccuracy.append(sum(comparisonVec)/len(comparisonVec))
            # --------------------------------------------------------------------
            # Scrambled baseline
            scramPatterns = dc(allCatsPatterns)
            np.random.shuffle(scramPatterns) # shuffle the rows, keeping the labels intact
            (trainScramPatterns, testScramPatterns, trainScramLabels, testScramLabels) = train_test_split(scramPatterns, labels, test_size = testProp)
            modelScram = LinearSVC(dual=False) # set up the SVM
            modelScram.fit(trainScramPatterns, trainScramLabels) # train the SVM
            # BUG FIX: test with the scrambled model (previously `model.predict`,
            # which evaluated the *intact* classifier on the scrambled test data)
            predictionsScram = modelScram.predict(testScramPatterns) # test the SVM
            comparisonVecScram = [int(predictionsScram[e] == testScramLabels[e]) for e in range(len(predictionsScram))]
            currAccuracyScram.append(sum(comparisonVecScram)/len(comparisonVecScram))
        # after looping through the iterations for this sample size, add the array to a dictionary
        accuracyDict[n] = currAccuracy
        accuracyDictScram[n] = currAccuracyScram
        # (fixed assert messages: 'isn''t' concatenated adjacent literals into "isnt")
        assert len(currAccuracy) == nIters, "accuracy isn't being stored with expected length."
        assert len(currAccuracyScram) == nIters, "scrambled accuracy isn't being stored with expected length."
    # after looping through all the sample sizes, result = a dictionary with classification
    # accuracies across iterations for every sample size, for these 2 distributions.
    # figure out the distance between these distributions:
    currDist = round(calcDist(cat1Means, cat2Means), ndigits = 3)
    # figure out & record the recommended sample size for these distributions:
    suggestionDict[currDist] = findSuggestion(accuracyDict, accuracyDictScram, maxP, runLength)
    mCount = mCount + 1 # for progress message
# outcome: dictionary with suggested sample size (values) for each distance between distributions (keys)
print('\n\nDONE with simulations!!')
# +
## plot the results
# convert dict -> dataframe
# (keys = distance between distributions, values = recommended sample size)
suggestionDF = pd.DataFrame(list(suggestionDict.items()), columns = ['distance', 'suggestedN'])
# plot it:
fig = plt.figure(figsize=(25,10))
sns.set(rc={"lines.linewidth": 3, "ytick.labelsize": 18.0, "xtick.labelsize": 18.0})
sns.set_style("ticks")
ax = sns.lineplot(x = 'distance', y = 'suggestedN',
data = suggestionDF,
markers = True,
color = "k")
# make it pretty:
sns.despine()
# NOTE(review): label says "Euclidean dist / sd1*sd2" but calcDist divides by the
# variance (sd**2); identical here because sd == 1 -- confirm which is intended
ax.set_xlabel('Distance between Categories (Euclidean dist / sd1*sd2)',fontsize=25);
ax.set_ylabel('Suggested amount of data per category',fontsize=25);
# save the plot to pwd:
figName = 'SummaryDataGuidelines.png'
fig.savefig(figName, bbox_inches='tight')
| Simulating-MinimumData/.ipynb_checkpoints/SummarizingSuggestions-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import json
data_json = '[{"name":"KUKA Towards Depot 90°-2","start_pose":{"position":{"y":-0.5904946954673478,"x":6.386504322191972,"z":0},"orientation":{"y":0,"x":0,"z":-0.7142179050751947,"w":0.6999234130031659}},"test_measurement":{"position":{"x":1.35981965,"y":4.550434,"z":0.475029916},"rotation":{"x":0.46835850446649785,"y":0.06052739552682744,"z":178.00806229979216},"time":1580409190461}}, {"name":"KUKA Towards Depot 90°-3","start_pose":{"position":{"y":-0.014605581450524596,"x":6.412817545707348,"z":0},"orientation":{"y":0,"x":0,"z":-0.7131689960136938,"w":0.7009921419850725}},"test_measurement":{"position":{"x":1.37904572,"y":4.54490042,"z":0.475152075},"rotation":{"x":0.4606574661773085,"y":0.016858159184107747,"z":177.71090808976157},"time":1580409224666}}, {"name":"KUKA Towards Depot 90°-4","start_pose":{"position":{"y":-0.5675867813066082,"x":6.387264148153445,"z":0},"orientation":{"y":0,"x":0,"z":-0.7172991275878251,"w":0.6967653561721802}},"test_measurement":{"position":{"x":1.37464106,"y":4.54426861,"z":0.47514835},"rotation":{"x":0.4483927409446399,"y":0.042143804638552586,"z":177.85611791151607},"time":1580409255360}}, {"name":"KUKA Towards Depot 90°-5","start_pose":{"position":{"y":-0.5628726451445587,"x":6.389237113394578,"z":0},"orientation":{"y":0,"x":0,"z":-0.7235188128649677,"w":0.6903046627616446}},"test_measurement":{"position":{"x":1.36009407,"y":4.54369164,"z":0.47524184},"rotation":{"x":0.4328545941913898,"y":0.030626121341194214,"z":177.77322673574878},"time":1580409320162}}, {"name":"KUKA Towards Depot 90°-6","start_pose":{"position":{"y":-0.4949599951853983,"x":6.344046021242352,"z":0},"orientation":{"y":0,"x":0,"z":-0.720949752449484,"w":0.6929873407523602}},"test_measurement":{"position":{"x":1.358537,"y":4.55432034,"z":0.475241572},"rotation":{"x":0.45580680456698297,"y":0.03097281680758517,"z":177.68588227664264},"time":1580409375565}}, {"name":"KUKA Towards Depot 
90°-7","start_pose":{"position":{"y":-0.5721592611531663,"x":6.389888607159058,"z":0},"orientation":{"y":0,"x":0,"z":-0.7212070151167328,"w":0.6927195979228628}},"test_measurement":{"position":{"x":1.36363125,"y":4.55193663,"z":0.475120157},"rotation":{"x":0.46653857921780684,"y":0.010874810084524103,"z":177.27906254929835},"time":1580409442367}}, {"name":"KUKA Towards Depot 90°-8","start_pose":{"position":{"y":-0.5467275211949677,"x":6.370180508813469,"z":0},"orientation":{"y":0,"x":0,"z":-0.7167208581219194,"w":0.6973601734634546}},"test_measurement":{"position":{"x":1.356997,"y":4.552256,"z":0.475110769},"rotation":{"x":0.5049252028486305,"y":0.0560602717167606,"z":177.73019654397768},"time":1580409498170}}, {"name":"KUKA Towards Depot 90°-9","start_pose":{"position":{"y":-0.5688259564111965,"x":6.38886799290817,"z":0},"orientation":{"y":0,"x":0,"z":-0.7213934235747815,"w":0.6925254713171608}},"test_measurement":{"position":{"x":1.35681117,"y":4.57303572,"z":0.4751747},"rotation":{"x":0.5004950247590839,"y":0.04680330766350372,"z":177.61633729051357},"time":1580409553974}}, {"name":"KUKA Towards Depot 90°-10","start_pose":{"position":{"y":2.1547647551257687,"x":6.506023700571905,"z":0},"orientation":{"y":0,"x":0,"z":-0.7127854323796808,"w":0.7013821550248562}},"test_measurement":{"position":{"x":1.35769653,"y":4.553309,"z":0.4751895},"rotation":{"x":0.4832005055329356,"y":0.0726157097798393,"z":177.90727602839945},"time":1580409620179}}, {"name":"KUKA Towards Depot 90°-11","start_pose":{"position":{"y":2.1547647551257687,"x":6.506023700571905,"z":0},"orientation":{"y":0,"x":0,"z":-0.7127854323796808,"w":0.7013821550248562}},"test_measurement":{"position":{"x":1.35764408,"y":4.553703,"z":0.475097328},"rotation":{"x":0.46237504531015516,"y":0.08793366297363583,"z":178.0367490943062},"time":1580409641675}}, {"name":"KUKA Towards Depot 
90°-12","start_pose":{"position":{"y":-0.6205154338998624,"x":6.333185857954587,"z":0},"orientation":{"y":0,"x":0,"z":-0.7210215939843798,"w":0.69291259261773}},"test_measurement":{"position":{"x":1.35842013,"y":4.55775261,"z":0.474987268},"rotation":{"x":0.45084259106170593,"y":0.043698799922747805,"z":177.94758780488087},"time":1580409700378}}, {"name":"KUKA Towards Depot 90°-13","start_pose":{"position":{"y":-0.6205154338998624,"x":6.333185857954587,"z":0},"orientation":{"y":0,"x":0,"z":-0.7210215939843798,"w":0.69291259261773}},"test_measurement":{"position":{"x":1.35831249,"y":4.55734062,"z":0.474966556},"rotation":{"x":0.4482528287883604,"y":0.03737664609289442,"z":177.93476070961958},"time":1580409710578}}, {"name":"KUKA Towards Depot 90°-14","start_pose":{"position":{"y":-0.5637301216358668,"x":6.3846671396947725,"z":0},"orientation":{"y":0,"x":0,"z":-0.7123329120925687,"w":0.7018417359702406}},"test_measurement":{"position":{"x":1.35588419,"y":4.551616,"z":0.4749562},"rotation":{"x":0.4801888189957831,"y":0.057864815699605894,"z":177.85199247725737},"time":1580409777381}}, {"name":"KUKA Towards Depot 90°-15","start_pose":{"position":{"y":-0.6222804553527072,"x":6.346927276675518,"z":0},"orientation":{"y":0,"x":0,"z":-0.7201084903356946,"w":0.6938614862827068}},"test_measurement":{"position":{"x":1.36496365,"y":4.57771254,"z":0.4749693},"rotation":{"x":0.45748521683380783,"y":0.04997576378030936,"z":176.89993240884928},"time":1580409821984}}, {"name":"KUKA Towards Depot 90°-16","start_pose":{"position":{"y":-0.5721149250150794,"x":6.37670944681134,"z":0},"orientation":{"y":0,"x":0,"z":-0.6972000603402925,"w":0.7168766113226827}},"test_measurement":{"position":{"x":1.35840452,"y":4.5448966,"z":0.475048244},"rotation":{"x":0.5205872535022928,"y":-0.037483254406863474,"z":176.95568041285492},"time":1580409884588}}, {"name":"KUKA Towards Depot 
90°-17","start_pose":{"position":{"y":1.8677258577392488,"x":6.474304341902348,"z":0},"orientation":{"y":0,"x":0,"z":-0.7131742684792033,"w":0.7009867778918183}},"test_measurement":{"position":{"x":1.35245192,"y":4.55712652,"z":0.475090772},"rotation":{"x":0.45153521493222754,"y":0.04661692487446303,"z":177.61861857369638},"time":1580409947093}}, {"name":"KUKA Towards Depot 90°-18","start_pose":{"position":{"y":-0.5758358072883698,"x":6.388054025809992,"z":0},"orientation":{"y":0,"x":0,"z":-0.7151855693915007,"w":0.6989346187835848}},"test_measurement":{"position":{"x":1.36486971,"y":4.58125162,"z":0.47494483},"rotation":{"x":0.4598248767896545,"y":0.07028662194297157,"z":177.38315463224936},"time":1580409985690}}, {"name":"KUKA Towards Depot 90°-19","start_pose":{"position":{"y":2.6789731953512357,"x":6.513438979265583,"z":0},"orientation":{"y":0,"x":0,"z":-0.7118666620844967,"w":0.7023146413201686}},"test_measurement":{"position":{"x":1.36958027,"y":4.546706,"z":0.4748954},"rotation":{"x":0.47721741990256306,"y":0.008173218471259482,"z":177.49865313111425},"time":1580410123616}}, {"name":"KUKA Towards Depot 90°-20","start_pose":{"position":{"y":2.98496784455736,"x":6.499466227341441,"z":0},"orientation":{"y":0,"x":0,"z":-0.7091634072383601,"w":0.705044155946335}},"test_measurement":{"position":{"x":1.35701966,"y":4.55098629,"z":0.475085527},"rotation":{"x":0.4916317337330816,"y":0.03363304875727952,"z":177.4920824891327},"time":1580410232001}}, {"name":"KUKA Towards Depot 90°-21","start_pose":{"position":{"y":-0.5719914813624838,"x":6.385066265256816,"z":0},"orientation":{"y":0,"x":0,"z":-0.684282121919015,"w":0.7292173733681956}},"test_measurement":{"position":{"x":1.35030782,"y":4.552039,"z":0.474936873},"rotation":{"x":0.4925264885143533,"y":0.03251878410248194,"z":176.78306787215035},"time":1580410853127}}, {"name":"KUKA Towards Depot 
90°-22","start_pose":{"position":{"y":-0.5926681018297302,"x":6.373007006192079,"z":0},"orientation":{"y":0,"x":0,"z":-0.683605090134248,"w":0.7298520951141722}},"test_measurement":{"position":{"x":1.34949613,"y":4.551984,"z":0.4750338},"rotation":{"x":0.464424582308835,"y":0.05187212178065976,"z":177.66000952006664},"time":1580411768466}}, {"name":"KUKA Towards Depot 90°-23","start_pose":{"position":{"y":-0.5715236062628867,"x":6.381745081532661,"z":0},"orientation":{"y":0,"x":0,"z":-0.7145230223773834,"w":0.6996119284951403}},"test_measurement":{"position":{"x":1.36539435,"y":4.546565,"z":0.475120336},"rotation":{"x":0.5027816973880102,"y":0.020013514815088562,"z":177.26991009581053},"time":1580411871771}}]'
manual_json = '{"name":"KUKA Towards Depot 90°","manual_measurement":{"position":{"x":1.35702121,"y":4.55015,"z":0.474951327},"rotation":{"x":0.502097131006275,"y":0.08358308593638894,"z":178.86385768203806},"time":1580409112053},"goal_pose":{"position":{"x":6.387662663950927,"y":-0.6078421716668322,"z":0.1016},"orientation":{"x":0,"y":0,"z":-0.7177715670049098,"w":0.696278663754187}}}'
# Parse the raw JSON payloads: `data` is a list of repeated test runs,
# `manual` holds the single hand-measured ground-truth record.
data = json.loads(data_json)
manual = json.loads(manual_json)
# Aliases used by the analysis cells below.
test_measurements = data
manual_measurement = manual['manual_measurement']
# -
# Collect the reached x / y positions and z-rotations across all test runs.
x = [m['test_measurement']['position']['x'] for m in test_measurements]
y = [m['test_measurement']['position']['y'] for m in test_measurements]
rot = [m['test_measurement']['rotation']['z'] for m in test_measurements]
# +
import math

# Signed rotation error per run: |reached| - |ground truth|.
# NOTE(review): taking the absolute value of each angle before subtracting
# presumably sidesteps the +/-180 deg wrap-around near the ~178 deg
# nominal heading — confirm against the coordinate convention.
manual_rot = manual_measurement['rotation']['z']
rot_errors = [
    math.fabs(m['test_measurement']['rotation']['z']) - math.fabs(manual_rot)
    for m in test_measurements
]
# -
# Ground-truth position from the manual measurement; express every run's
# reached position as an offset (xx, yy) from it.
gt_x = manual_measurement['position']['x']
gt_y = manual_measurement['position']['y']
xx = [px - gt_x for px in x]
yy = [py - gt_y for py in y]
def mscatter(x, y, ax=None, m=None, **kw):
    """Scatter plot that accepts one marker style per point.

    Falls back to the current axes when *ax* is falsy. When *m* supplies
    exactly one marker per point, each marker is converted to a transformed
    Path and pushed onto the PathCollection, so every point can carry its
    own (possibly rotated) marker. Returns the PathCollection.
    """
    import matplotlib.markers as mmarkers

    if not ax:
        ax = plt.gca()
    sc = ax.scatter(x, y, **kw)
    if m is not None and len(m) == len(x):
        paths = []
        for marker in m:
            style = marker if isinstance(marker, mmarkers.MarkerStyle) else mmarkers.MarkerStyle(marker)
            paths.append(style.get_path().transformed(style.get_transform()))
        sc.set_paths(paths)
    return sc
# +
import matplotlib.markers as mmarkers

# One '2' (tri-up) marker per run, rotated by that run's rotation error so
# the marker orientation visualises the heading error.
# NOTE(review): mutating the private _transform attribute is a workaround —
# MarkerStyle exposes no public API for rotating a marker here.
markers = []
for rot in rot_errors:
    style = mmarkers.MarkerStyle(marker='2')
    style._transform = style.get_transform().rotate_deg(rot)
    markers.append(style)
# +
import matplotlib.pyplot as plt

# Scatter the per-run position offsets; marker orientation encodes rotation error.
fig, ax = plt.subplots(figsize=(10, 10))
scatter = mscatter(xx, yy, c='#0000ff', s=200, m=markers)  # no ax given -> draws on current axes
plt.title('OptiTrack Measurements at Pose 2 for Odometry-Based Method [m]')

# Red reference marker at the ground-truth origin.
ax.plot(0, 0, "or", markersize=30, marker='2')
ax.legend()
ax.grid(True)

# Fixed limits so figures from different runs stay comparable.
ax.set_xlabel('x', fontsize=15)
ax.set_ylabel('y', fontsize=15)
ax.set_ylim(bottom=-0.04, top=0.1)
ax.set_xlim(left=-0.15, right=0.15)

plt.show()
fig.savefig('Pose2-scatter_odom.pdf')
# -
def mscatter(axes, x, y, ax=None, m=None, **kw):
    """Per-point-marker scatter drawn on an explicit *axes*.

    Redefinition of mscatter for the scatter+histogram figure below: the
    target axes is now the first positional argument. The *ax* keyword is
    unused but kept so the signature stays backward-compatible.
    """
    import matplotlib.markers as mmarkers

    if not axes:
        axes = plt.gca()
    sc = axes.scatter(x, y, **kw)
    if m is not None and len(m) == len(x):
        paths = []
        for marker in m:
            style = marker if isinstance(marker, mmarkers.MarkerStyle) else mmarkers.MarkerStyle(marker)
            paths.append(style.get_path().transformed(style.get_transform()))
        sc.set_paths(paths)
    return sc
# +
import matplotlib.markers as mmarkers

# Rebuild the rotated per-run markers (same construction as in the
# scatter-only figure): one tri-up marker per run, rotated by its error.
markers = []
for rot in rot_errors:
    style = mmarkers.MarkerStyle(marker='2')
    style._transform = style.get_transform().rotate_deg(rot)
    markers.append(style)
# +
import numpy as np

# Scatter plot with marginal histograms, laid out manually in figure coords.
fig = plt.figure(figsize=(8, 8))
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
spacing = 0.005
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom + height + spacing, width, 0.2]
rect_histy = [left + width + spacing, bottom, 0.2, height]

# Main scatter axes plus the two marginal axes; the marginals hide the
# tick labels they share with the scatter axes.
ax_scatter = plt.axes(rect_scatter)
ax_scatter.tick_params(direction='in', top=True, right=True)
ax_histx = plt.axes(rect_histx)
ax_histx.tick_params(direction='in', labelbottom=False)
ax_histy = plt.axes(rect_histy)
ax_histy.tick_params(direction='in', labelleft=False)

# Per-run offsets with rotated markers, plus the red ground-truth marker.
scatter = mscatter(ax_scatter, xx, yy, c='#0000ff', s=200, m=markers)
ax_scatter.plot(0, 0, "or", markersize=30, marker='2')
ax_scatter.grid(True)
ax_scatter.set_xlabel('x', fontsize=15)
ax_scatter.set_ylabel('y', fontsize=15)
ax_scatter.set_ylim(bottom=-0.04, top=0.095)
ax_scatter.set_xlim(left=-0.14, right=0.14)

# Histogram bins spanning exactly the scatter limits.
binwidthx = 0.01
binwidthy = 0.005
bins_x = np.arange(-0.14, 0.14 + binwidthx, binwidthx)
bins_y = np.arange(-0.04, 0.095 + binwidthy, binwidthy)
ax_histx.hist(xx, bins=bins_x)
ax_histy.hist(yy, bins=bins_y, orientation='horizontal')

# Align the marginals with the scatter axes and fix the count axes.
ax_histx.set_xlim(ax_scatter.get_xlim())
ax_histx.set_ylim(bottom=0, top=15)
ax_histy.set_ylim(ax_scatter.get_ylim())
ax_histy.set_xlim(left=0, right=20)
fig.savefig('Pose2-scatterHist_odom.pdf')
| stats/Plot_Odom.Backwards-Method-Straight-KuKa-[30012020].ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Start each lab by giving an overview of what students can expect to learn. Avoid using `/edit/` links in your notebook, as they will resolve to a fixed path on the remote machine.
#
# **EXAMPLE**
#
# Let's execute the cell below to display information about the GPUs running on the server by running the pgaccelinfo command, which ships with the PGI compiler that we will be using. To do this, execute the cell block below by giving it focus (clicking on it with your mouse), and hitting Ctrl-Enter, or pressing the play button in the toolbar above. If all goes well, you should see some output returned below the grey cell.
# !pgaccelinfo
# # Heading1
#
# ### Heading3
# Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus quis sodales neque, ac tincidunt justo. Cras facilisis eget magna non mollis. Donec rutrum vulputate dolor non efficitur. Maecenas porttitor imperdiet accumsan. Etiam ac massa metus. Cras malesuada felis tellus, at pretium velit ullamcorper et. Sed feugiat egestas mi ut vehicula. Nam vel interdum felis. Nam ullamcorper velit ut blandit facilisis. Integer cursus erat ut felis viverra maximus. Phasellus leo lectus, dapibus ut faucibus non, dignissim porttitor neque. Fusce sit amet maximus justo. Vivamus nec tempus nisi. Donec interdum eros et augue mollis, vitae semper arcu semper.
#
# - **Lorem** ipsum dolor sit amet
# - **Lorem** ipsum dolor sit amet
# - **Lorem** ipsum dolor sit amet
#
# <img src="images/Nsight Diagram.png" width="80%" height="80%">
# *EXAMPLE image: ipsum dolor sit amet*
#
# #### Heading4
# Proin aliquet, odio placerat euismod accumsan, risus est ultrices nisi, quis porttitor libero sapien ac nunc. Donec fringilla eros at dui sagittis, in tempus magna tincidunt. Praesent sem lacus, dapibus id leo ut, laoreet tempus nisi. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Ut vehicula est non metus aliquam, faucibus blandit risus tempus. Aenean ultrices turpis nec pretium finibus. Duis ac nisl dui. Nulla bibendum tincidunt ante, ornare convallis sem maximus sit amet. Vivamus vitae velit sed felis feugiat gravida. Suspendisse sagittis non elit consequat auctor. Nunc vulputate posuere neque, vel vulputate diam viverra id. Maecenas faucibus mi arcu, non interdum sem condimentum nec. Curabitur dictum lacus vel felis commodo, et semper urna congue. Donec at ex eros. Nam euismod posuere aliquam. Quisque et faucibus elit.
#
#
# **EXAMPLE CODE**
#
# ```cpp
# nvtxRangePushA("init");
# initialize(A, Anew, m, n);
# nvtxRangePop();
#
# printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m);
#
# double st = omp_get_wtime();
# int iter = 0;
#
# nvtxRangePushA("while");
# while ( error > tol && iter < iter_max )
# {
# nvtxRangePushA("calc");
# error = calcNext(A, Anew, m, n);
# nvtxRangePop();
#
# nvtxRangePushA("swap");
# swap(A, Anew, m, n);
# nvtxRangePop();
#
# if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error);
#
# iter++;
# }
# nvtxRangePop();
# ```
#
# **EXAMPLE LINK**
#
# Detailed NVTX documentation can be found under the __[CUDA Profiler user guide](https://docs.nvidia.com/cuda/profiler-users-guide/index.html#nvtx)__.
# -----
#
# # <div style="text-align: center ;border:3px; border-style:solid; border-color:#FF0000 ; padding: 1em">[NEXT](appName_fortran_lab1.ipynb)</div>
#
# -----
# # Links and Resources
#
# Include all the necessary links and resources here including links to download a tool, links for further reading, any slack channel or mailing list you would like them to join.
#
# **EXAMPLE**
#
# Don't forget to check out additional [OpenACC Resources](https://www.openacc.org/resources) and join our [OpenACC Slack Channel](https://www.openacc.org/community#slack) to share your experience and get more help from the community.
#
# ---
#
# ## Licensing
#
# This material is released by NVIDIA Corporation under the Creative Commons Attribution 4.0 International (CC BY 4.0).
| misc/jupyter_lab_template/appName/English/Fortran/jupyter_notebook/appName_fortran_lab1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import psycopg2
import pandas as pd
from dotenv import load_dotenv
import numpy as np

# Pull DB credentials from the local .env file into the environment.
load_dotenv()
# %matplotlib notebook

# Test out RDS connection: connect and list the public tables.
# FIX: the password argument was a redacted placeholder (`<PASSWORD>("<PASSWORD>")`),
# which is not even valid Python; read it from the environment like the
# other credentials.
conn = psycopg2.connect(host=os.getenv("POSTGRES_HOST"),
                        dbname=os.getenv("POSTGRES_NAME"),
                        user=os.getenv("POSTGRES_USER"),
                        port=os.getenv("POSTGRES_PORT"),
                        password=os.getenv("POSTGRES_PASSWORD"))
curs = conn.cursor()
curs.execute("""SELECT table_name FROM information_schema.tables WHERE table_schema='public'""")
for row in curs.fetchall():
    print(row[0])
# -
# The goal is to calculate (or really, set up a database to calculate) stop-to-stop durations for all bus routes. The idea is to compute this average stop-to-stop time under various filters (time of day, day of week).
#
# To do so, the idea here is to take every trip from `bus_trips`, grab the corresponding stops from that trip, then see how they can be aggregated.
#
# The `stop_types` to make note of here are 0 and 5 for 'stop' and 4 and 6 for 'drive thru'
#
# Given that our database could reflect some changes in stops, I plan to look at the data we do have rather than the prescribed routes.
#
# For now, let's start with something I know well, the 35!
# These are the columns we have access to.
curs = conn.cursor()
curs.execute("""SELECT column_name FROM information_schema.columns WHERE table_name = 'bus_all_stops'""")
cols = [row[0] for row in curs.fetchall()]
for col in cols:
    print(col)
# These are the columns we have access to.
curs = conn.cursor()
curs.execute("""SELECT column_name FROM information_schema.columns WHERE table_name = 'bus_trips'""")
cols = [row[0] for row in curs.fetchall()]
for col in cols:
    print(col)
# +
# Fetch every trip on line 35, ordered by trip event id; build one dict
# per row keyed by the column names fetched above.
query = """SELECT * FROM public.bus_trips WHERE line_id = 35 ORDER BY event_no_trip"""
curs.execute(query)
trips = [dict(zip(cols, row)) for row in curs.fetchall()]
print(len(trips))
# -
# cool, 14,560 trips
# +
# Comma-separated trip ids for the IN (...) clause below.
event_no_trips = ','.join([str(trip['event_no_trip']) for trip in trips])
# NOTE(review): interpolating ids into SQL is acceptable here only because
# they come from our own bus_trips query; prefer parameterized queries.
query = (f"SELECT event_no_trip, pattern_direction, line_id, stop_id, stop_type, act_arr_time, act_dep_time "
         f"FROM bus_all_stops "
         f"WHERE event_no_trip in ({event_no_trips}) "
         f"AND stop_type in (0, 5, 4, 6) ORDER BY event_no_trip, act_arr_time")
df = pd.read_sql(query, conn)
# Rows are ordered by trip then arrival time, so shifting by one pairs each
# stop with its predecessor, giving the stop-to-stop elapsed time.
df['prev_stop_act_arr_time'] = df['act_arr_time'].shift(1)
df['cur_stop_act_arr_time'] = df['act_arr_time']
df['elapsed_time'] = (df['cur_stop_act_arr_time'] - df['prev_stop_act_arr_time'])
# Nullable Int64 so trip-boundary rows can hold NaN below.
df['elapsed_time_seconds'] = df['elapsed_time'].apply(lambda x: x.seconds).astype('Int64')
df['prev_stop_id'] = df['stop_id'].shift(1).astype('Int64')
df['cur_stop_id'] = df['stop_id']
# The first stop of each trip has no predecessor within the same trip:
# blank out the shifted columns wherever the trip id changes between rows.
df.loc[df[df['event_no_trip'] != df['event_no_trip'].shift(1)].index,
       ['elapsed_time', 'prev_stop_id', 'prev_stop_act_arr_time', 'elapsed_time_seconds']] = np.nan
df
# -
# Stop-to-stop slice: rows arriving at stop 2186 directly after stop 2213.
# FIX: take an explicit copy — the boolean-mask slice may be a view, so the
# astype assignment below raised SettingWithCopyWarning and could silently
# fail to modify B.
B = df[(df['prev_stop_id'] == 2213) & (df['cur_stop_id'] == 2186)].copy()
B
# Convert from nullable Int64 to plain int now that NaNs are filtered out.
B['elapsed_time_seconds'] = B['elapsed_time_seconds'].astype('int')
# Distribution of travel times under 100 s for this stop pair.
B[B['elapsed_time_seconds'] < 100].hist('elapsed_time_seconds', bins=10)
# +
# Same stop-to-stop computation as above, but across ALL lines.
query = (f"SELECT event_no_trip, pattern_direction, line_id, stop_id, stop_type, act_arr_time "
         f"FROM bus_all_stops "
         f"WHERE stop_type in (0, 5, 4, 6) ORDER BY event_no_trip, act_arr_time")
df = pd.read_sql(query, conn)
# Elapsed seconds from the previous stop's arrival within the ordered frame.
df['prev_stop_act_arr_time'] = df['act_arr_time'].shift(1)
df['elapsed_time_seconds'] = (df['act_arr_time'] - df['prev_stop_act_arr_time']).apply(lambda x: x.seconds)
df['prev_stop_id'] = df['stop_id'].shift(1)
# Invalidate the shifted columns at each trip boundary (no predecessor).
df.loc[df[df['event_no_trip'] != df['event_no_trip'].shift(1)].index,
       ['prev_stop_id', 'prev_stop_act_arr_time', 'elapsed_time_seconds']] = np.nan
df.head()
# -
df.info()
df.describe()
# +
# Drop rows without a predecessor (first stop of each trip), then shrink
# the column dtypes to compact unsigned integers before exporting.
df.dropna(inplace=True)
data_types = {
    'line_id': 'uint16',
    'event_no_trip': 'uint32',
    'stop_id': 'uint16',
    'stop_type': 'uint8',
    'elapsed_time_seconds': 'uint16',
    'prev_stop_id': 'uint16',
}
df = df.astype(data_types)
# Reorder columns into a stable export layout.
df = df[['event_no_trip', 'pattern_direction', 'line_id', 'stop_type', 'stop_id', 'act_arr_time',
         'prev_stop_id', 'prev_stop_act_arr_time', 'elapsed_time_seconds']]
# -
df.info()
df.describe()
# Export the full stop-to-stop dataset for offline analysis (CSV and parquet).
df.to_csv('/users/jbeyer/desktop/stop_to_stop.csv', index=False, chunksize=10000)
df.shape
# Sanity-check the travel-time distribution of one stop pair before export.
df[(df['stop_id'] == 8015) & (df['prev_stop_id'] == 7968)].hist('elapsed_time_seconds', bins=50)
df.to_parquet('/users/jbeyer/desktop/stop_to_stop.parquet.gzip', compression='gzip')
# These are the columns we have access to.
curs = conn.cursor()
curs.execute("""SELECT column_name FROM information_schema.columns WHERE table_name = 'tm_route_stops'""")
cols = [row[0] for row in curs.fetchall()]
for col in cols:
    print(col)
# +
query = """SELECT rte, dir, stop_seq, stop_id, stop_name FROM public.tm_route_stops ORDER BY rte, dir, stop_seq"""
df = pd.read_sql(query, conn)
df.to_parquet('../data/interim/TOAD/route_stops.parquet.gzip', compression='gzip')
# -
df[(df['rte'] == 35) & (df['dir'] == 0)]
pwd
| notebooks/4.0-jab-stop-segment-timing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tl_env
# language: python
# name: tl-env
# ---
# +
import pandas as pd
import numpy as np
import os
# Show full frames in notebook output (no row/column truncation).
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
# pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
# -
# Root of the table-linker T2Dv2 candidate-generation working directory.
HOME_DIR = '/Users/summ7t/dev/novartis/table-linker/t2dv2-candidates-april-28/dev'
# ### Generate lof-graph-embedding-score for any table
#
# Required datasets
# - candidate file
# - candidate feature file
# - graph_embedding_complex.tsv (generated and stored during candidate generation)
#
# Script used `lof-script.sh`
#
# ```
# filename=$1
# tsv_postfix=_graph_embedding_complex
#
# tl smallest-qnode-number train-candidates/candidates-$filename.csv \
# / align-page-rank \
# / string-similarity -i --method symmetric_monge_elkan:tokenizer=word -o monge_elkan \
# / string-similarity -i --method jaccard:tokenizer=word -c kg_descriptions context -o des_cont_jaccard \
# / normalize-scores -c des_cont_jaccard \
# / vote-by-classifier --prob-threshold 0.995 --model weighted_lr.pkl \
# > model-voted/$filename.csv
#
# tl score-using-embedding model-voted/$filename.csv \
# --column-vector-strategy centroid-of-lof \
# --lof-strategy ems-mv \
# -o graph-embedding-score \
# --embedding-file train-graph-embeddings/$filename$tsv_postfix.tsv \
# --embedding-url http://ckg07:9200/wikidatadwd-augmented/ \
# > lof-score/$filename.csv
# ```
#
# cmd: `bash {HOME_DIR}/lof-script.sh {fid}`
#
# output: lof-score/$filename.csv contains `is_lof` and `graph-embedding-score` (centroid-of-lof)
# !mkdir -p $HOME_DIR/model-voted
# !mkdir -p $HOME_DIR/lof-score
# !mkdir -p $HOME_DIR/merged-lof-score
# !mkdir -p $HOME_DIR/final-features
# +
# list all files in candidates dir
# Collect every non-empty candidate CSV plus the table id embedded in its name.
file_names = []
file_ids = []
for (dirpath, dirnames, filenames) in os.walk(f'{HOME_DIR}/dev-candidates/'):
    for fn in filenames:
        # FIX: `"csv" not in fn` also matched any name merely containing
        # "csv"; match the file extension explicitly.
        if not fn.endswith('.csv'):
            continue
        # FIX: `dirpath + fn` drops the path separator for sub-directories
        # (os.walk yields them without a trailing slash); join properly.
        abs_fn = os.path.join(dirpath, fn)
        assert os.path.isfile(abs_fn)
        if os.path.getsize(abs_fn) == 0:
            # Skip empty candidate files (failed generation runs).
            continue
        file_names.append(abs_fn)
        # "candidates-<table id>.csv" -> "<table id>"
        file_ids.append(fn.split('.csv')[0].split('candidates-')[1])
len(file_names), file_ids[:3]
# -
# Run the external `tl` pipeline (lof-script.sh) once per table, then
# verify that both expected output files were produced.
for idx, fid in enumerate(file_ids):
    print(f"Generating score for {idx}th file: {fid}...")
    os.system(f'bash {HOME_DIR}/lof-script.sh {fid}')
    assert os.path.isfile(f'{HOME_DIR}/model-voted/{fid}.csv'), f"Something wrong with model-voted result: {idx}th file: {fid}"
    assert os.path.isfile(f'{HOME_DIR}/lof-score/{fid}.csv'), f"Something wrong with lof-score result: {idx}th file: {fid}"
# check model-voted and lof-score files
fid = '14380604_4_3329235705746762392'
model_voted_df = pd.read_csv(f'{HOME_DIR}/model-voted/{fid}.csv')
# Candidates the classifier voted for (per the 0.995 probability threshold
# in lof-script.sh above).
model_voted_df[model_voted_df['vote_by_classifier'] > 0]
fid = '14380604_4_3329235705746762392'
score_df = pd.read_csv(f'{HOME_DIR}/lof-score/{fid}.csv')
# Top candidates by the centroid-of-lof embedding score.
score_df.sort_values(by=['graph-embedding-score'], ascending=False).head(10)
# merge lof candidate (graph-embedding-score) with candidate feature file
# merge lof candidate (graph-embedding-score) with candidate feature file
for idx, fid in enumerate(file_ids):
    print(f"Merging embedding score for {idx}th file: {fid}...")
    features_df = pd.read_csv(f'{HOME_DIR}/dev-features/{fid}.csv')
    lof_score_df = pd.read_csv(f'{HOME_DIR}/lof-score/{fid}.csv')
    # Rename so the LOF-based score is distinguishable from the score
    # already present in the feature file.
    lof_score_df.rename(columns={'graph-embedding-score': 'lof-graph-embedding-score'}, inplace=True)
    trimmed_lof_score_df = lof_score_df[['column', 'row', 'kg_id', 'method', 'lof-graph-embedding-score', 'is_lof']]
    # Join on the candidate key; `on=` is equivalent to the former
    # identical left_on/right_on pair.
    final_df = pd.merge(features_df, trimmed_lof_score_df, on=['column', 'row', 'kg_id', 'method'])
    final_df.drop_duplicates(inplace=True)
    # The merge must neither drop nor duplicate feature rows.
    assert len(final_df) == len(features_df), f"{len(features_df)}, {len(final_df)}"
    final_df.to_csv(f"{HOME_DIR}/merged-lof-score/{fid}.csv", index=False)
    assert os.path.isfile(f'{HOME_DIR}/merged-lof-score/{fid}.csv'), f"Something wrong with merged score result: {idx}th file: {fid}"
# check merged train feature files
fid = '14380604_4_3329235705746762392'
merged_score_df = pd.read_csv(f'{HOME_DIR}/merged-lof-score/{fid}.csv')
# merged_score_df.sort_values(by=['lof-graph-embedding-score'], ascending=False).head(10)
# Rows flagged by the LOF step (is_lof == 1) — presumably the
# high-confidence seed candidates; confirm against the tl
# score-using-embedding documentation.
merged_score_df[merged_score_df['is_lof'] == 1]
# Generate lof-reciprocal-rank
# For every table: derive a reciprocal-rank feature from the LOF embedding
# score, then compute class/property tf-idf scores seeded by `is_lof`.
for idx, fid in enumerate(file_ids):
    print(f"generating final feature for {idx}th file: {fid}")
    class_count_f = f'{HOME_DIR}/dev-class-count/{fid}_class_count.tsv'
    property_count_f = f'{HOME_DIR}/dev-prop-count/{fid}_prop_count.tsv'
    merged_lof_f = f'{HOME_DIR}/merged-lof-score/{fid}.csv'
    final_features_f = f'{HOME_DIR}/final-features/{fid}.csv'
    # Multi-stage `tl` pipeline. The backslash-newlines are consumed by
    # Python's string escaping, so the shell receives one long command.
    script = f"""
tl generate-reciprocal-rank {merged_lof_f} \
-c lof-graph-embedding-score \
-o lof-reciprocal-rank \
/ compute-tf-idf \
--feature-file {class_count_f} \
--feature-name class_count \
--singleton-column is_lof \
-o lof_class_count_tf_idf_score \
/ compute-tf-idf \
--feature-file {property_count_f} \
--feature-name property_count \
--singleton-column is_lof \
-o lof_property_count_tf_idf_score \
> {final_features_f}
"""
    os.system(script)
    assert os.path.isfile(final_features_f), f"Something wrong with final feature result: {idx}th file: {fid}"
# +
# / compute-tf-idf \
# --feature-file {class_count_f} \
# --feature-name class_count \
# --singleton-column singleton \
# -o cos_class_count_tf_idf_score \
# / compute-tf-idf \
# --feature-file {property_count_f} \
# --feature-name property_count \
# --singleton-column singleton \
# -o cos_property_count_tf_idf_score \
# -
# check final feature files
fid = '28086084_0_3127660530989916727'
final_feature_df = pd.read_csv(f'{HOME_DIR}/final-features/{fid}.csv')
# tf-idf scores of the candidates known to be correct (evaluation_label == 1).
final_feature_df[final_feature_df['evaluation_label'] == 1].loc[:, [
    'property_count_tf_idf_score', 'lof_property_count_tf_idf_score',
    'class_count_tf_idf_score', 'lof_class_count_tf_idf_score'
]]
# Number of (column, row) cells in this table.
len(final_feature_df.groupby(['column', 'row']))
# Seed counts: LOF vs singleton seeds, and how many of each are correct.
len(final_feature_df[final_feature_df['is_lof'] == 1]), \
len(final_feature_df[(final_feature_df['is_lof'] == 1) & (final_feature_df['evaluation_label'] == 1)]), \
len(final_feature_df[final_feature_df['singleton'] == 1]), \
len(final_feature_df[(final_feature_df['singleton'] == 1) & (final_feature_df['evaluation_label'] == 1)])
# min-max scaling on tfidf score
# Rescale each of the four tf-idf feature columns to [0, 1] within this
# table; the columns are independent, so one loop covers all of them.
for tfidf_col in ['class_count_tf_idf_score', 'property_count_tf_idf_score',
                  'lof_class_count_tf_idf_score', 'lof_property_count_tf_idf_score']:
    col_min = final_feature_df[tfidf_col].min()
    col_max = final_feature_df[tfidf_col].max()
    final_feature_df[tfidf_col] = (final_feature_df[tfidf_col] - col_min) / (col_max - col_min)
# final_feature_df
# +
# final_feature_df[final_feature_df['evaluation_label'] == 1].loc[:, [
# 'kg_id', 'method', 'class_count_tf_idf_score', 'property_count_tf_idf_score', 'lof_class_count_tf_idf_score', 'lof_property_count_tf_idf_score'
# ]]
# -
final_feature_df[final_feature_df['evaluation_label'] == 1].loc[:, ['kg_id', 'method', 'class_count_tf_idf_score', 'property_count_tf_idf_score', 'lof_class_count_tf_idf_score', 'lof_property_count_tf_idf_score']]
final_feature_df.sort_values(by=['class_count_tf_idf_score'], ascending=False).head(10)
final_feature_df.sort_values(by=['lof_class_count_tf_idf_score'], ascending=False).head(10)
# +
# final_feature_df[final_feature_df['evaluation_label'] == 1].loc[:, [
# 'cos_property_count_tf_idf_score', 'lof_property_count_tf_idf_score',
# 'cos_class_count_tf_idf_score', 'lof_class_count_tf_idf_score'
# ]]
# +
# final_feature_df[final_feature_df['evaluation_label'] == 1].loc[:, [
# 'cos_class_count_tf_idf_score_tf', 'cos_class_count_tf_idf_score_idf',
# 'lof_class_count_tf_idf_score_tf', 'lof_class_count_tf_idf_score_idf'
# ]]
# +
# final_feature_df[final_feature_df['evaluation_label'] == 1].loc[:, [
# 'cos_property_count_tf_idf_score_tf', 'cos_property_count_tf_idf_score_idf',
# 'lof_property_count_tf_idf_score_tf', 'lof_property_count_tf_idf_score_idf'
# ]]
# -
# ### Evaluation of lof-graph-embedding-score
# - baseline: graph-embedding-score (centroid-of-singleton)
# use top 1/5 accuracy
def embedding_eval(eval_file):
    """Compare the two graph-embedding rankings by top-1/top-5 accuracy.

    For every (column, row) candidate group, rank candidates by the
    centroid-of-singleton score ('graph-embedding-score') and by the
    centroid-of-lof score ('lof-graph-embedding-score'), counting how often
    a correct candidate (evaluation_label == 1) is at rank 1 / in the top 5.

    Args:
        eval_file: DataFrame with 'column', 'row', 'evaluation_label' and
            both embedding-score columns.

    Returns:
        dict with top-1/top-5 accuracies for both rankings plus the number
        of candidate groups ('all_count').
    """
    assert "graph-embedding-score" in eval_file
    assert "lof-graph-embedding-score" in eval_file
    cos_top1_count = 0
    cos_top5_count = 0
    lof_top1_count = 0
    lof_top5_count = 0
    all_count = 0
    for ((col, row), group) in eval_file.groupby(['column', 'row']):
        all_count += 1
        # sort by centroid-of-singleton embedding score
        eval_labels = group.sort_values(by=['graph-embedding-score'], ascending=False)['evaluation_label']
        if eval_labels.iloc[0] == 1:
            cos_top1_count += 1
        if 1 in eval_labels.iloc[:5].values:
            cos_top5_count += 1
        # sort by centroid-of-lof embedding score
        eval_labels = group.sort_values(by=['lof-graph-embedding-score'], ascending=False)['evaluation_label']
        if eval_labels.iloc[0] == 1:
            lof_top1_count += 1
        if 1 in eval_labels.iloc[:5].values:
            lof_top5_count += 1
    # FIX: an empty frame has no groups; report zero accuracy over zero
    # groups instead of raising ZeroDivisionError.
    if all_count == 0:
        return {
            'cos_top1_accuracy': 0.0,
            'cos_top5_accuracy': 0.0,
            'lof_top1_accuracy': 0.0,
            'lof_top5_accuracy': 0.0,
            'all_count': 0
        }
    return {
        'cos_top1_accuracy': cos_top1_count / all_count,
        'cos_top5_accuracy': cos_top5_count / all_count,
        'lof_top1_accuracy': lof_top1_count / all_count,
        'lof_top5_accuracy': lof_top5_count / all_count,
        'all_count': all_count
    }
# Score every table's merged feature file with both rankings.
res_top_accuracy = {}
for fid in file_ids:
    final_df = pd.read_csv(f"{HOME_DIR}/merged-lof-score/{fid}.csv")
    res_top_accuracy[fid] = embedding_eval(final_df)
res_top_accuracy
# One row per table, accuracy metrics as columns.
top_accuracy_df = pd.DataFrame(res_top_accuracy)
top_accuracy_df = top_accuracy_df.transpose()
# Count tables where the LOF ranking is WORSE than the singleton baseline
# at top-1 and at top-5, plus the total number of tables.
len(top_accuracy_df[top_accuracy_df['lof_top1_accuracy'] < top_accuracy_df['cos_top1_accuracy']]), \
len(top_accuracy_df[top_accuracy_df['lof_top5_accuracy'] < top_accuracy_df['cos_top5_accuracy']]), \
len(top_accuracy_df)
# +
# visualize embedding-score difference
def highlight_greaterthan_1(x):
    """Row styler for DataFrame.style.apply: paint all 5 cells of a row
    yellow when LOF top-1 accuracy is below the cos baseline, else white."""
    worse = x.lof_top1_accuracy < x.cos_top1_accuracy
    color = 'yellow' if worse else 'white'
    return ['background-color: ' + color] * 5
top_accuracy_df.style.apply(highlight_greaterthan_1, axis=1)
# -
| fuzzy-augmented-match-centroid-pipeline/lof-feature-generation-dev.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <a href="http://cocl.us/DA0101EN_NotbookLink_Top">
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/Images/TopAd.png" width="750" align="center">
# </a>
# </div>
#
# <a href="https://www.bigdatauniversity.com"><img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/Images/CCLog.png" width=300, align="center"></a>
#
# <h1 align=center><font size = 5>Data Analysis with Python</font></h1>
# <h1>Introduction</h1>
# <h3>Welcome!</h3>
#
# <p>
# In this section, you will learn how to approach data acquisition in various ways, and obtain necessary insights from a dataset. By the end of this lab, you will successfully load the data into Jupyter Notebook, and gain some fundamental insights via Pandas Library.
# </p>
# <h2>Table of Contents</h2>
#
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <ol>
# <li><a href="#data_acquisition">Data Acquisition</a>
# <li><a href="#basic_insight">Basic Insight of Dataset</a></li>
# </ol>
#
# Estimated Time Needed: <strong>10 min</strong>
# </div>
# <hr>
# <h1 id="data_acquisition">Data Acquisition</h1>
# <p>
# There are various formats for a dataset, .csv, .json, .xlsx etc. The dataset can be stored in different places, on your local machine or sometimes online.<br>
# In this section, you will learn how to load a dataset into our Jupyter Notebook.<br>
# In our case, the Automobile Dataset is an online source, and it is in CSV (comma separated value) format. Let's use this dataset as an example to practice data reading.
# <ul>
# <li>data source: <a href="https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data" target="_blank">https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data</a></li>
# <li>data type: csv</li>
# </ul>
# The Pandas Library is a useful tool that enables us to read various datasets into a data frame; our Jupyter notebook platforms have a built-in <b>Pandas Library</b> so that all we need to do is import Pandas without installing.
# </p>
# import pandas library
import pandas as pd
# <h2>Read Data</h2>
# <p>
# We use <code>pandas.read_csv()</code> function to read the csv file. In the bracket, we put the file path along with a quotation mark, so that pandas will read the file into a data frame from that address. The file path can be either an URL or your local file address.<br>
# Because the data does not include headers, we can add an argument <code>headers = None</code> inside the <code>read_csv()</code> method, so that pandas will not automatically set the first row as a header.<br>
# You can also assign the dataset to any variable you create.
# </p>
# This dataset was hosted on IBM Cloud object click <a href="https://cocl.us/cognitive_class_DA0101EN_objectstorage">HERE</a> for free storage.
# +
# Import pandas library
import pandas as pd
# Read the online CSV file by the URL provided above, and assign it to
# variable "df"; header=None because the raw file has no header row.
other_path = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/auto.csv"
df = pd.read_csv(other_path, header=None)
# -
# After reading the dataset, we can use the <code>dataframe.head(n)</code> method to check the top n rows of the dataframe; where n is an integer. Contrary to <code>dataframe.head(n)</code>, <code>dataframe.tail(n)</code> will show you the bottom n rows of the dataframe.
#
# show the first 5 rows using dataframe.head() method
print("The first 5 rows of the dataframe")
df.head(5)
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #1: </h1>
# <b>check the bottom 10 rows of data frame "df".</b>
# </div>
# Write your code below and press Shift+Enter to execute
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #1 Answer: </h1>
# <b>Run the code below for the solution!</b>
# </div>
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# print("The last 10 rows of the dataframe\n")
# df.tail(10)
#
# -->
# <h3>Add Headers</h3>
# <p>
# Take a look at our dataset; pandas automatically set the header by an integer from 0.
# </p>
# <p>
# To better describe our data we can introduce a header, this information is available at: <a href="https://archive.ics.uci.edu/ml/datasets/Automobile" target="_blank">https://archive.ics.uci.edu/ml/datasets/Automobile</a>
# </p>
# <p>
# Thus, we have to add headers manually.
# </p>
# <p>
# Firstly, we create a list "headers" that include all column names in order.
# Then, we use <code>dataframe.columns = headers</code> to replace the headers by the list we created.
# </p>
# create headers list (column names from the UCI Automobile data dictionary)
headers = ["symboling","normalized-losses","make","fuel-type","aspiration", "num-of-doors","body-style",
        "drive-wheels","engine-location","wheel-base", "length","width","height","curb-weight","engine-type",
        "num-of-cylinders", "engine-size","fuel-system","bore","stroke","compression-ratio","horsepower",
        "peak-rpm","city-mpg","highway-mpg","price"]
print("headers\n", headers)
# We replace headers and recheck our data frame
df.columns = headers
df.head(10)
# we can drop missing values along the column "price" as follows
# NOTE(review): the result of dropna is not assigned, so `df` itself is left
# unchanged here; this line only displays what the cleaned frame would be.
df.dropna(subset=["price"], axis=0)
# Now, we have successfully read the raw dataset and added the correct headers into the data frame.
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #2: </h1>
# <b>Find the name of the columns of the dataframe</b>
# </div>
# Write your code below and press Shift+Enter to execute
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# print(df.columns)
#
# -->
# <h2>Save Dataset</h2>
# <p>
# Correspondingly, Pandas enables us to save the dataset to csv by using the <code>dataframe.to_csv()</code> method, you can add the file path and name along with quotation marks in the brackets.
# </p>
# <p>
# For example, if you would save the dataframe <b>df</b> as <b>automobile.csv</b> to your local machine, you may use the syntax below:
# </p>
# + active=""
# df.to_csv("automobile.csv", index=False)
# -
# We can also read and save other file formats, we can use similar functions to **`pd.read_csv()`** and **`df.to_csv()`** for other data formats, the functions are listed in the following table:
#
# <h2>Read/Save Other Data Formats</h2>
#
#
#
# | Data Formate | Read | Save |
# | ------------- |:--------------:| ----------------:|
# | csv | `pd.read_csv()` |`df.to_csv()` |
# | json | `pd.read_json()` |`df.to_json()` |
# | excel | `pd.read_excel()`|`df.to_excel()` |
# | hdf | `pd.read_hdf()` |`df.to_hdf()` |
# | sql | `pd.read_sql()` |`df.to_sql()` |
# | ... | ... | ... |
# <h1 id="basic_insight">Basic Insight of Dataset</h1>
# <p>
# After reading data into Pandas dataframe, it is time for us to explore the dataset.<br>
# There are several ways to obtain essential insights of the data to help us better understand our dataset.
# </p>
# <h2>Data Types</h2>
# <p>
# Data has a variety of types.<br>
# The main types stored in Pandas dataframes are <b>object</b>, <b>float</b>, <b>int</b>, <b>bool</b> and <b>datetime64</b>. In order to better learn about each attribute, it is always good for us to know the data type of each column. In Pandas:
# </p>
df.dtypes
# returns a Series with the data type of each column.
# check the data type of data frame "df" by .dtypes
print(df.dtypes)
# <p>
# As a result, as shown above, it is clear to see that the data type of "symboling" and "curb-weight" are <code>int64</code>, "normalized-losses" is <code>object</code>, and "wheel-base" is <code>float64</code>, etc.
# </p>
# <p>
# These data types can be changed; we will learn how to accomplish this in a later module.
# </p>
# <h2>Describe</h2>
# If we would like to get a statistical summary of each column, such as count, column mean value, column standard deviation, etc. We use the describe method:
# + active=""
# dataframe.describe()
# -
# This method will provide various summary statistics, excluding <code>NaN</code> (Not a Number) values.
df.describe()
# <p>
# This shows the statistical summary of all numeric-typed (int, float) columns.<br>
# For example, the attribute "symboling" has 205 counts, the mean value of this column is 0.83, the standard deviation is 1.25, the minimum value is -2, 25th percentile is 0, 50th percentile is 1, 75th percentile is 2, and the maximum value is 3.
# <br>
# However, what if we would also like to check all the columns including those that are of type object.
# <br><br>
#
# You can add an argument <code>include = "all"</code> inside the bracket. Let's try it again.
# </p>
# describe all the columns in "df"
df.describe(include = "all")
# <p>
# Now, it provides the statistical summary of all the columns, including object-typed attributes.<br>
# We can now see how many unique values, which is the top value and the frequency of top value in the object-typed columns.<br>
# Some values in the table above show as "NaN", this is because those numbers are not available regarding a particular column type.<br>
# </p>
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #3: </h1>
#
# <p>
# You can select the columns of a data frame by indicating the name of each column, for example, you can select the three columns as follows:
# </p>
# <p>
# <code>dataframe[['column 1', 'column 2', 'column 3']]</code>
# </p>
# <p>
# Where "column" is the name of the column, you can apply the method ".describe()" to get the statistics of those columns as follows:
# </p>
# <p>
# <code>dataframe[['column 1', 'column 2', 'column 3']].describe()</code>
# </p>
#
# Apply the method to ".describe()" to the columns 'length' and 'compression-ratio'.
# </div>
# Write your code below and press Shift+Enter to execute
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# df[['length', 'compression-ratio']].describe()
#
# -->
#
# <h2>Info</h2>
# Another method you can use to check your dataset is:
# + active=""
# dataframe.info
# -
# It provides a concise summary of your DataFrame.
# look at the info of "df"
# NOTE(review): without parentheses this displays the bound method (whose repr
# shows a preview of the frame); `df.info()` is likely what was intended to
# print the concise column/dtype/memory summary — confirm against the course.
df.info
# <p>
# Here we are able to see the information of our dataframe, with the top 30 rows and the bottom 30 rows.
# <br><br>
# And, it also shows us the whole data frame has 205 rows and 26 columns in total.
# </p>
# <h1>Excellent! You have just completed the Introduction Notebook!</h1>
# <div class="alert alert-block alert-info" style="margin-top: 20px">
#
# <p><a href="https://cocl.us/DA0101EN_NotbookLink_Top_bottom"><img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/Images/BottomAd.png" width="750" align="center"></a></p>
# </div>
#
# <h3>About the Authors:</h3>
#
# This notebook was written by <a href="https://www.linkedin.com/in/mahdi-noorian-58219234/" target="_blank"><NAME> PhD</a>, <a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank"><NAME></a>, <NAME>, <NAME>, <NAME>, Parizad, <NAME> and <a href="https://www.linkedin.com/in/fiorellawever/" target="_blank"><NAME></a> and <a href=" https://www.linkedin.com/in/yi-leng-yao-84451275/ " target="_blank" ><NAME></a>.
#
# <p><a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank"><NAME></a> is a Data Scientist at IBM, and holds a PhD in Electrical Engineering. His research focused on using Machine Learning, Signal Processing, and Computer Vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.</p>
# <hr>
# <p>Copyright © 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the <a href="https://cognitiveclass.ai/mit-license/">MIT License</a>.</p>
| Data Analysis with python/DA0101EN-Review-Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 3.0 Data Preparation
# select and cleanse data
#
from collections import Counter
import numpy as np
import pandas as pd
import os as os
from scipy import stats
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns;sns.set()
sns.set(style="whitegrid")
# for regression we take dataset from http://bit.ly/dsdata
# NOTE(review): hard-coded local Windows path — must be adjusted per machine.
path='C:\\Users\\ajaohri\\Desktop\\data'
os.chdir(path)
os.listdir()
# +
# %%time
# %time
df=pd.read_csv('C:\\Users\\ajaohri\\Desktop\\ODSP\\data\\BigDiamonds.csv')
# -
print("The name of variables in the input dataset are ",df.columns)
print("The type of variables in the input dataset are ",df.dtypes)
print("The first five rows of dataset are")
df.head()
print('The input dataset df','has',df.shape[0],"rows and",df.shape[1],'columns')
df = df.drop("Unnamed: 0", axis=1)
df = df.drop("measurements", axis=1)
# ## 3.1 Verify Data Quality
# - Completeness - how complete is the data
# - Consistance - taking note of invalid values, missing values and outliers
# - Conformity - how is it in data type, size and format
# - Accuracy - how accuarate to actual conditions is it
# - Integrity - is data same across all data sources
# - Timeliness - how often data is refreshed with regard to data science purposes
# ### 3.1.1 Select Data
# We can select data using .iloc and also using conditions using query function
df.iloc[:3,:7]
df.iloc[0:3,2:7]
df.query('carat>0.3 and price <306')
# ## 3.2 Clean Data
# ### 3.2.1 Missing Values
# #### 3.2.1.1 Count of Missing Vaues
df.apply(lambda x: sum(x.isnull().values), axis = 0)
# ### 3.2.2 Missing Value Treatment
# We can do the following with missing values
# ##### 3.2.2.1 Drop missing values
# ##### 3.2.2.2 Fill missing values with test statistic
# ##### 3.2.2.3 impute missing value with a machine learning algorithm
#
# #### 3.2.2.1 dropping missing values
# +
df2=df.dropna()
# -
# #note %%timeit helps to time code to determine which part of code needs to be optimized. We also introduce a loading Bar function to check progress of long running jobs
# %timeit df2.apply(lambda x: sum(x.isnull().values), axis = 0)
import sys as sys
#to see progress of a code operation being executed you can also use the function loading bar
def loadingBar(count, total, size):
    """Render an in-place text progress bar on stdout.

    Writes "\rCCC/TTT [=== ...]" where the bar is 10*size characters wide and
    the filled portion reflects count/total. The leading carriage return
    rewrites the same terminal line on each call.

    Args:
        count: number of completed steps.
        total: total number of steps.
        size: width multiplier for the bar (bar width = 10 * size chars).
    """
    percent = float(count) / float(total) * 100
    filled = int(percent / 10)  # tenths completed, 0..10
    sys.stdout.write("\r" + str(int(count)).rjust(3, '0') + "/" + str(int(total)).rjust(3, '0')
                     + ' [' + '=' * filled * size + ' ' * (10 - filled) * size + ']')
    # Flush explicitly: stdout is line-buffered (or block-buffered when piped),
    # and "\r" emits no newline, so without a flush the bar may never appear
    # until the loop finishes.
    sys.stdout.flush()
for x in range(0,3):
df.apply(lambda x: sum(x.isnull().values), axis = 0)
loadingBar(x,2,1)
del df2
# #### 3.3.2.1.1 How to deal with missing values that are not NaN. Example Replace values of zero with NaN in the column named column_nam
#
# +
# Example- Replace values of zero with NaN in the column named column_name
#df[‘column_name’].replace(0, np.nan, inplace= True)
# -
# #### 3.2.2.2 Fill missing values with test statistic
# +
#Replace missing values with meanif needed
mean_value=df['price'].mean()
df['price']=df['price'].fillna(mean_value)
# -
df.apply(lambda x: sum(x.isnull().values), axis = 0) # For columns
#price no longer has missing values
# +
#Replace missing values with median if needed
median_value=df['x'].median()
df['x']=df['x'].fillna(median_value)
# -
df.apply(lambda x: sum(x.isnull().values), axis = 0) # For columns
# #### 3.2.2.3.1 For imputing all missing values
# +
# Replace every NaN in columns 'y' and 'z' with that column's mean
# (for mode, specify strategy='most_frequent').
# sklearn.preprocessing.Imputer was removed in scikit-learn 0.22;
# sklearn.impute.SimpleImputer (available since 0.20) is the supported
# replacement and computes the same column-wise statistics.
from sklearn.impute import SimpleImputer
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
imp.fit(df[['y','z']])
df[['y','z']]= imp.transform(df[['y','z']])
# -
#assert that there are no missing values.
#When it encounters an assert statement, Python evaluates the accompanying expression, which is hopefully true.
#If the expression is false, Python raises an AssertionError exception.
assert pd.notnull(df).all().all()
df.apply(lambda x: sum(x.isnull().values), axis = 0) # For columns
# #### 3.2.2.3.2 In addition this has the following way to impute missing value using back fill or forward fill
# filling missing value with test statistic
#for back fill
df.fillna(method='bfill')
#for forward-fill
df.fillna(method='ffill')
#one can also specify an axis to propagate (1 is for rows and 0 is for columns)
df.fillna(method='bfill', axis=1).head()
# #### 3.2.2.3.3 predict missing value with a machine learning algorithm
# Example- Split data into sets with missing values and without missing values, name the missing set X_text and the one without missing values X_train and take y (variable or feature where there is missing values) off the second set, naming it y_train.
# Use one of classification methods to predict y_pred.Add it to X_test as your y_test column. Then combine sets together
# or use fancyimpute
# !pip install fancyimpute
# # RESTARTING
# ### Taking a fresh dataset for demonstrating missing value imputation, since previous dataset has all missing values replaced
df2=pd.read_csv('C:\\Users\\ajaohri\\Desktop\\ODSP\\data\\BigDiamonds.csv')
df2.columns
df2 = df2.drop("Unnamed: 0", axis=1)
df2 = df2.drop("measurements", axis=1)
df2.apply(lambda x: sum(x.isnull().values), axis = 0) # For columns
df2=df2.dropna()
colormap = plt.cm.RdBu
plt.figure(figsize=(32,10))
plt.title('Pearson Correlation of Features', y=1.05, size=15)
sns.heatmap(df2.corr(),linewidths=0.1,vmax=1.0,
square=True, cmap=colormap, linecolor='white', annot=True)
# Note that the categorical features have been neglected in the
# correlation matrix.
# we can see price and x, y, carat are highly correlated, but carat has no null values
#
df2.apply(lambda x: sum(x.isnull().values), axis = 0) # For columns
(df2.price).corr(df2.carat)
# %whos DataFrame
# + active=""
# #del categorical_data
# #del continuous_data
# del data
# del x_test
# del x_train
# #del df
# + active=""
# import fancyimpute
# dir(fancyimpute)
# -
from fancyimpute import KNN
# + active=""
# # X is the complete data matrix
# # X_incomplete has the same values as X except a subset have been replace with NaN
# df_numeric = df2.select_dtypes(include=[np.float]).as_matrix()
#
# # I now run fancyimpute KNN,
# # it returns a np.array which I store as a pandas dataframe
# # Use 3 nearest rows which have a feature to fill in each row's missing features
# df_filled = pd.DataFrame(KNN(3).fit_transform(df_numeric))
#
# #fancy impute removes column names.
# train_cols = list(continuous_data)
# # Use 5 nearest rows which have a feature to fill in each row's
# # missing features
# train = pd.DataFrame(KNN(k=5).fit_transform(continuous_data))
# train.columns = train_cols
# -
# ### 3.2.3 Outlier TREATMENT
# In statistics, an outlier is an observation point that is distant from other observations.
df=df2.copy()
del df2
df.info()
# %whos DataFrame
# ## Looking at variables as continuous (numeric) , categorical (discrete) and string variables
# - #### Note some numeric variables can be categoric. (eg 3,4,8 cylinder cars)
# - #### We use values_count.unique to find number of unique values_count for categoric and describe for numeric variables as above in continuous and categorical variales
# - #### Categorical values can be made discrete using get_dummy method where each level of categorical variable can be made 1/0 binary variable
# - #### Some categoric data can be string
# ### You can also modify the lists of variables below manually
#https://docs.scipy.org/doc/numpy/reference/arrays.scalars.html
Image("C:\\Users\\ajaohri\\Desktop\\ODSP\\img\\dtype-hierarchy.png")
df=pd.get_dummies(df, drop_first=True,sparse=True) #Sparse = True takes care of memory error
df.head()
# ## 3.5 Manual Cleaning, Construct and Preprocessing Data
# ### Using Re to clean data below
# Here re.sub replaces the $ and , patterns with nothing ("") from each value of the list .
# In python str converts an object to string
# . In the next step, int converts the object to numeric values (integer)
# +
#MATCHING
import re
names=["Ajay","<NAME>","<NAME> ", " Jayesh"]
for name in names:
print (re.search(r'(jay)',name))
for name in names:
print (bool(re.search(r'(jay)',name)))
# -
numlist=["$10000","$20,000","30,000",40000,"50000 "]
# Strip currency symbols and thousands separators with re.sub, then coerce
# each cleaned entry to an integer (int() also tolerates stray whitespace).
for idx, raw in enumerate(numlist):
    cleaned = re.sub(r"([$,])", "", str(raw))
    numlist[idx] = int(cleaned)
numlist
# ## 3.6 Construct Data
# Example of creating features based on conditions
df['size'] = ['Big' if x >= 4 else 'Small' for x in df['carat']]
pd.value_counts(df['size'])
df.loc[df.price <= 400, 'cost'] = 'Cheap'
df.loc[df.price > 400, 'cost'] = 'Expensive'
pd.value_counts(df['cost'])
# +
#Using if else
# -
if len('ajayohri')>5:
size2='Big'
else:
size2='Small'
size2
# +
#using for loops
for a in range(0,10):
print(a)
# +
##using functions
def funx(x):
    """Evaluate the quadratic x**2 + 31*x + 21 at the given x."""
    return x * x + 31 * x + 21
funx(10)
# -
# ### 3.7 Integrate Data
# Merged Data
# + active=""
# #Merge dataset to get a final flattened dataframe from example datasets df1 and df2 with common variable
# ##result = pd.merge(df1,df2[['variable1', 'variable2', 'variable3']],on='common_column_to_merge_on')
# -
# ### 3.8 Format Data
# Reformatted data
df.columns
a=list(df.columns)
a
##### list to tuple
x=tuple(a,)
x
x=23.5
##### to int
b=int(x)
b
###### to string
c=str(b)
c
#### convert dataframe to numpy array
df.values
# +
from datetime import datetime
datetime_object = datetime.strptime('Jun 1 2005 1:33PM', '%b %d %Y %I:%M%p')
datetime_object
# -
# ### 3.9 Scaling the data
# Feature scaling through standardization (or Z-score normalization) can be an important preprocessing step for many machine learning algorithms. Standardization involves rescaling the features such that they have the properties of a standard normal distribution with a mean of zero and a standard deviation of one.
#
# While many algorithms (such as SVM, K-nearest neighbors, and logistic regression) require features to be normalized, intuitively we can think of Principle Component Analysis (PCA) as being a prime example of when normalization is important. In PCA we are interested in the components that maximize the variance. If one component (e.g. human height) varies less than another (e.g. weight) because of their respective scales (meters vs. kilos), PCA might determine that the direction of maximal variance more closely corresponds with the ‘weight’ axis, if those features are not scaled. As a change in height of one meter can be considered much more important than the change in weight of one kilogram, this is clearly incorrect.
# #### 3.9.1 Log transform
# - normalization by log transformation
#
# %whos DataFrame
df.apply(lambda x: sum(x.isnull().values), axis = 0)
df=df.dropna()
df.describe()
df.dtypes
del df['size']
del df['cost']
df.values
np.log(df.iloc[:, :]).head()
df3=np.log(df.iloc[:, :])
df3.price.describe()
df.price.describe()
# #### 3.9.2 Z Score transform
# - normalization by z score
from scipy import stats
import numpy as np
z = np.abs(stats.zscore(df))
#print(z)
threshold = 3
#print(np.where(z > 3))
#
print(z[:][1])
np.where(z > 3)
z[2][1]
z[2][2]
df2_o =df[(z < 3).all(axis=1)]
df.iloc[2,1]
df2_o.iloc[2,1]
df2_o.head()
df2_o.price.describe()
df.price.describe()
df3.price.describe()
# %whos DataFrame
# #### 3.9.3 Scaling
#
features=list(df.columns)
features
y='price'
features.remove(y)
features
from sklearn.preprocessing import StandardScaler
features
# Separating out the features
x = df.loc[:, features].values
# Separating out the target
y = df.loc[:,['price']].values
# Standardizing the features
x
y
x = StandardScaler().fit_transform(x)
# # The curse of dimensionality
#
# For an estimator to be effective, you need the distance between neighboring points to be less than some value $d$, which depends on the problem. In one dimension, this requires on average $n \sim 1/d$ points. In the context of the above $k$-NN example, if the data is described by just one feature with values ranging from 0 to 1 and with $n$ training observations, then new data will be no further away than $1/n$. Therefore, the nearest neighbor decision rule will be efficient as soon as $1/n$ is small compared to the scale of between-class feature variations.
#
# If the number of features is $p$, you now require $n \sim 1/d^p$ points. Let's say that we require 10 points in one dimension: now $10^p$ points are required in $p$ dimensions to pave the $[0, 1]$ space. As $p$ becomes large, the number of training points required for a good estimator grows exponentially.
#
# For example, if each point is just a single number (8 bytes), then an effective $k$-NN estimator in a paltry $p \sim 20$ dimensions would require more training data than the current estimated size of the entire internet (±1000 Exabytes or so).
#
# This is called the curse of dimensionality and is a core problem that machine learning addresses
# # 3.4 DIMENSIONALITY REDUCTION -SVD
# Dimensionality reduction using truncated SVD (aka LSA). The Singular-Value Decomposition (SVD), is a matrix decomposition method for reducing a matrix to its constituent parts in order to make certain subsequent matrix calculations simpler.
#
# This transformer performs linear dimensionality reduction by means of truncated singular value decomposition (SVD). Contrary to PCA, this estimator does not center the data before computing the singular value decomposition. This means it can work with scipy.sparse matrices efficiently.
# TruncatedSVD is very similar to PCA, but differs in that it works on sample matrices directly instead of their covariance matrices. When the columnwise (per-feature) means of are subtracted from the feature values, truncated SVD on the resulting matrix is equivalent to PCA
mean_vec = np.mean(x, axis=0)
cov_mat = (x - mean_vec).T.dot((x - mean_vec)) / (x.shape[0]-1)
print('Covariance matrix \n%s' %cov_mat)
print('NumPy covariance matrix: \n%s' %np.cov(x.T))
# np.linalg.eig Performs eigendecomposition on covariance matrix. Compute the eigenvalues and right eigenvectors of a square array.Eigendecomposition of a matrix is a type of decomposition that involves decomposing a square matrix into a set of eigenvectors and eigenvalues.
#
# A vector is an eigenvector of a matrix if it satisfies the following equation. Av = lambdav
# This is called the eigenvalue equation, where A is the parent square matrix that we are decomposing, v is the eigenvector of the matrix, and lambda is the lowercase Greek letter and represents the eigenvalue scalar
cov_mat = np.cov(x.T)
eig_vals, eig_vecs = np.linalg.eig(cov_mat)
print('Eigenvectors \n%s' %eig_vecs)
print('\nEigenvalues \n%s' %eig_vals)
# +
# Make a list of (eigenvalue, eigenvector) tuples
eig_pairs = [(np.abs(eig_vals[i]), eig_vecs[:,i]) for i in range(len(eig_vals))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eig_pairs.sort()
eig_pairs.reverse()
# Visually confirm that the list is correctly sorted by decreasing eigenvalues
print('Eigenvalues in descending order:')
for i in eig_pairs:
print(i[0])
# +
from numpy import array
from numpy.linalg import pinv
A=df.values
print(A)
# calculate pseudoinverse
B = pinv(A)
print(B)
# + active=""
# U : ndarray
# Unitary matrix having left singular vectors as columns. Of shape (M, M) or (M, K), depending on full_matrices.
#
# s : ndarray
# The singular values, sorted in non-increasing order. Of shape (K,), with K = min(M, N).
#
# Vh : ndarray
# Unitary matrix having right singular vectors as rows. Of shape (N, N) or (K, N) depending on full_matrices.
# -
# Pseudoinverse via SVD
from scipy import linalg
from numpy import array
#from numpy.linalg import svd
from numpy import zeros
from numpy import diag
# calculate svd
U, s, Vh=linalg.svd(A, full_matrices=False)
U.shape, s.shape, Vh.shape
Sigma = diag(s)
# reconstruct matrix
B = U.dot(Sigma.dot(Vh))
print(B)
from numpy import array
from numpy.linalg import pinv
# define matrix
A = df.values
print(A)
# calculate pseudoinverse
B = pinv(A)
print(B)
# +
from numpy import array
from sklearn.decomposition import TruncatedSVD
# define array
A =df.values
print(A)
# svd
svd = TruncatedSVD(n_components=2)
svd.fit(A)
result = svd.transform(A)
print(result)
# -
# # 3.5 DIMENSIONALITY REDUCTION -PCA
# PCA is used to decompose a multivariate dataset in a set of successive orthogonal components that explain a maximum amount of the variance. In scikit-learn, PCA (Principal Component Analysis) is implemented as a transformer object that learns components in its fit method, and can be used on new data to project it on these components.
from sklearn import preprocessing
data_scaled = pd.DataFrame(preprocessing.scale(df),columns = df.columns)
from sklearn.decomposition import PCA
pca = PCA(n_components=10)
principalComponents = pca.fit_transform(df)
print(principalComponents)
# PCA
pca = PCA()
pca.fit_transform(data_scaled)
### PCA components relative with features:
print(pd.DataFrame(pca.components_,columns=data_scaled.columns))
# ### Cumulative Variance Explained by Dimensions
np.cumsum(pca.explained_variance_ratio_)
# +
principalComponents = pca.fit_transform(data_scaled)
print(principalComponents)
# -
plt.scatter(principalComponents[:, 0], principalComponents[:, 1],
c=df.price, edgecolor='none', alpha=0.5,
cmap=plt.cm.get_cmap('Blues', 10))
plt.xlabel('component 1')
plt.ylabel('component 2')
plt.colorbar();
print (round(0.5*len(np.cumsum(pca.explained_variance_ratio_)),0)," features explain ",round(100*np.cumsum(pca.explained_variance_ratio_)[13],3), " % variance")
#Explained variance
pca = PCA().fit(df)
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance')
plt.show()
plt.semilogy(pca.explained_variance_ratio_, '--o')
# +
sns.heatmap(np.log(pca.inverse_transform(np.eye(df2_o.shape[1]))), cmap="PiYG")
# -
# # 3.6 DIMENSIONALITY REDUCTION -t-SNE
#
# t-distributed Stochastic Neighbor Embedding.
#
# t-SNE is a tool to visualize high-dimensional data. It converts similarities between data points to joint probabilities and tries to minimize the Kullback-Leibler divergence between the joint probabilities of the low-dimensional embedding and the high-dimensional data. t-SNE has a cost function that is not convex, i.e. with different initializations we can get different results.
#
# It is highly recommended to use another dimensionality reduction method (e.g. PCA for dense data or TruncatedSVD for sparse data) to reduce the number of dimensions to a reasonable amount (e.g. 50) if the number of features is very high.
#
# The disadvantages to using t-SNE are roughly:
#
# t-SNE is computationally expensive, and can take several hours on million-sample datasets where PCA will finish in seconds or minutes
# The Barnes-Hut t-SNE method is limited to two or three dimensional embeddings.
# The algorithm is stochastic and multiple restarts with different seeds can yield different embeddings. However, it is perfectly legitimate to pick the embedding with the least error.
# Global structure is not explicitly preserved. This is problem is mitigated by initializing points with PCA (using init=’pca’).
# %whos DataFrame
# Quick sanity checks of the raw vs. scaled frames before running t-SNE.
df[features].head()
data_scaled[features].head()
df['price'].head()
data_scaled['price'].head()
data_scaled.values
# Taking a smaller dataset for TSNE since it takes a long time
df2=pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/ggplot2/diamonds.csv')
df2.info()
# Drop the CSV's exported index column, then one-hot encode the
# categorical columns (drop_first avoids the dummy-variable trap).
df2 = df2.drop("Unnamed: 0", axis=1)
df2=pd.get_dummies(df2,drop_first=True)
from sklearn import preprocessing
# Standardize every column to zero mean / unit variance; t-SNE is
# distance-based, so unscaled features would dominate the embedding.
data_scaled = pd.DataFrame(preprocessing.scale(df2),columns = df2.columns)
# %whos
import numpy as np
from sklearn.manifold import TSNE
X = data_scaled.values
# 2-D embedding with PCA initialization for more stable global structure.
# NOTE(review): n_iter=250 is the minimum scikit-learn accepts; embeddings
# are typically still unconverged at that point — confirm it is intended.
X1=TSNE(n_components=2, init='pca',n_iter=250)
X_embedded =X1.fit_transform(X)
X_embedded.shape
X_embedded
# Split the embedding into its two coordinate vectors for inspection.
x1=X_embedded[:,0]
y1=X_embedded[:,1]
x1
y1
df2.columns
# +
# Re-run t-SNE on a small random subsample (32 rows) so it finishes quickly.
n_sne = 32
rndperm = np.random.permutation(df2.shape[0])
tsne_results = X1.fit_transform(df2.loc[rndperm[:n_sne],df2.columns].values)
# -
tsne_results
# Attach the two embedding coordinates to the sampled rows for plotting.
df_tsne = df2.loc[rndperm[:n_sne],:].copy()
df_tsne['x-tsne'] = tsne_results[:,0]
df_tsne['y-tsne'] = tsne_results[:,1]
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn
seaborn.set(rc={'axes.facecolor':'gray', 'figure.facecolor':'white'})
fig, ax = plt.subplots()
plt.gcf().set_size_inches(15 ,12)
ax = sns.scatterplot(x='x-tsne', y='y-tsne', data=df_tsne)
# -
# Same scatter rendered with ggplot's grammar-of-graphics API.
# NOTE(review): the `ggplot` package is unmaintained and breaks on modern
# pandas — consider `plotnine` as a drop-in replacement.
from ggplot import *
chart = ggplot( df_tsne, aes(x='x-tsne', y='y-tsne') ) \
        + geom_point(size=70,alpha=0.1) \
        + ggtitle("tSNE dimensions ")
chart
| Data Preparation Dimensionality Reduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="gf2TE3_LDgze"
# %pylab inline
# %config InlineBackend.figure_format = 'retina'
from ipywidgets import interact
# + [markdown] id="2nSnPyunDgzh"
# Turn in an image (e.g., screenshot) or PDF copy of any code that is part of your answer. Make sure all images and PDF pages are properly rotated. Make sure that all pages are clearly visible.
#
# Tips: Use the document scanner function on your smart phone to take better page "scans" using your camera. Make sure your screen is not shifted toward warmer colours (some devices filter blue light at night) giving it a dim and orange appearance.
# + [markdown] id="l-O2BNiVDgzi"
# # Q1
#
# ## A
# Derive a method for computing the determinant of a matrix $A\in \mathbb{R}^{n\times n}$ using Gaussian elimination with partial pivoting, which is given by $PA = LU$ for a permutation matrix $P$. **Hint: use the fact that a permutation matrix is an orthogonal matrix.**
#
# ## B
# Assume that the LU decomposition has already been computed. Show that the method for computing the determinant requires $n$ floating point operations.
# + [markdown] id="4z9wHtW8Dgzj"
# # Q2
# Let $b + \delta b$ be a perturbation of the vector $b\neq 0$ and let $x$ and $\delta x$ be such that $Ax = b$ and $A(x + \delta x) = b + \delta b$, where $A$ is a given nonsingular matrix. Show that
# $$
# \frac{\Vert \delta x \Vert }{\Vert x \Vert } \leq \kappa(A) \frac{\Vert \delta b \Vert }{\Vert b \Vert }.
# $$
#
# + [markdown] id="cWralAJxDgzj"
# # Q3
# The book states without proof that if $\Vert \delta A \Vert < \frac{1}{\Vert A^{-1}\Vert}$ then the bound on the relative error can be written as
# $$
# \frac{\Vert x - \hat{x} \Vert}{\Vert x \Vert} \leq \frac{\kappa(A)}{1 - \kappa(A)\frac{\Vert \delta A\Vert}{\Vert A\Vert}}
# \left(\frac{\Vert \delta b \Vert}{\Vert b\Vert} + \frac{\Vert\delta A \Vert}{\Vert A\Vert} \right).
# $$
#
# In what follows, let $A$, $B$, and $\delta A$ be real $n\times n$ matrices.
#
# We saw an incomplete proof in lecture that assumed $A + \delta A$ is nonsingular. In the following, you will show that if $\Vert \delta A \Vert < \frac{1}{\Vert A^{-1}\Vert}$ then $A + \delta A$ is nonsingular.
#
# ## A
# Show that if the spectral radius $\rho(A) < 1$ then the matrix $A - I$ is nonsingular. **Hint: use the definition of the spectral radius and the characteristic equation, $\det(A-\lambda I) = 0$, for the eigenvalues of $A$.**
#
# ## B
# Show that if $A$ is nonsingular and $\Vert A - B\Vert < \frac{1}{\Vert A^{-1} \Vert}$ then $B$ is nonsingular. **Hint: use $B = A[I - A^{-1}(A - B)]$ and part A**
#
# ## C
# Show that if $A$ is nonsingular and $\Vert \delta A\Vert < \frac{1}{\Vert A^{-1} \Vert}$, then $A + \delta A$ is nonsingular.
# + [markdown] id="eiEBnovRDgzk"
# # Q4
# Tridiagonal matrices appear often. Due to their simple structure, it is possible to significantly speed up the computation of the LU decomposition.
#
# ## A
# Implement Gaussian elimination for computing $A = LU$ of the form
# \begin{equation}
# \begin{bmatrix}
# a_1 & c_1 & & & & \\
# b_2 & a_2 & c_2& & & \\
# & b_3& a_3& c_3& & \\
# & &\ddots & \ddots & \ddots & \\
# & & & \ddots & \ddots& c_{n-1}\\
# & & & &b_n & a_n
# \end{bmatrix} =
# \begin{bmatrix}
# 1 & & & & & \\
# l_2 & 1& & & & \\
# & l_3 & 1& & & \\
# & & \ddots & \ddots & & \\
# & & & \ddots& \ddots& \\
# & & & & l_n & 1
# \end{bmatrix}
# \begin{bmatrix}
# u_1& c_1& & & & \\
# & u_2& c_2 & & & \\
# & & u_3& c_3 & & \\
# & & & \ddots& \ddots& \\
# & & & & \ddots& c_{n-1}\\
# & & & & & u_n
# \end{bmatrix}
# \end{equation}
# Your function should take three input arguments: the vectors `a`, `b`, and `c` containing the diagonals of the matrix $A$. It should return two vectors `l` and `u` containing the elements from the above $LU$ decomposition (ie the vectors $l$ and $u$ have entries $l_i$ and $u_i$).
#
# ## B
# Use your function from part A to compute the LU decomposition of
# $$
# A =
# \begin{bmatrix}
# 1 & -\frac{1}{2} & & & & \\
# -\frac{2}{2} & 2& -\frac{2}{2}& & & \\
# & -\frac{3}{2}& 3& -\frac{3}{2}& & \\
# & & \ddots & \ddots& \ddots& \\
# & & & \ddots& \ddots& -\frac{10-1}{2} \\
# & & & & -\frac{10}{2}& 10
# \end{bmatrix}.
# $$
# Include a print out of the elements of `l` and `u` rounded to three decimal places.
#
#
# ## C
# Determine the total number of floating point operations (i.e., the combined number of additions, subtractions, multiplications, and divisions) required for computing the LU decomposition of an $n\times n$ matrix with your method.
#
# + [markdown] id="L5DnXTtbDgzl"
#
# + id="g_nn8K5UDgzl"
# + id="ttcBAgMhDgzm"
| Homework 7 Problems.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="YkxT5pP2zt7q" colab_type="text"
# # "The 4 Covid personality types"
#
# - toc: true
# - badges: true
# - comments: true
# - categories: [Misc]
# + [markdown] id="WYl3fKjZz4_E" colab_type="text"
# This should be funnier, but honestly it's just kinda depressing.
# + id="TcS5isdMzsK4" colab_type="code" colab={}
#hide
from IPython.display import Image as IPImage
def url_image(url):
    """Render the image found at *url* inline in the notebook output."""
    img = IPImage(url=url)
    display(img)
def local_image(fn):
    """Render the local image file *fn* inline in the notebook output."""
    img = IPImage(filename=fn)
    display(img)
# + id="uR6UZsG_zr2Q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 619} outputId="1d1b1949-3f83-4154-fcb5-82e55bc621b3"
#hide_input
local_image("covid4.jpg")
| _notebooks/2020-09-18-the-4-covid-personality-types.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import torch.nn as nn
from torch_geometric.datasets import GeometricShapes
dataset = GeometricShapes(root='data/GeometricShapes')
print(dataset)
# +
import torch
from torch_geometric.transforms import SamplePoints
torch.manual_seed(42)
nr_points=1024
dataset.transform = SamplePoints(num=nr_points)
data = dataset[39]
print(data)
# -
class GetGraph(nn.Module):
    """Build a dense similarity graph from a batched point cloud.

    For points of shape (batch, N, D) the forward pass returns a
    (batch, N, N) adjacency matrix with
    A[b, i, j] = exp(-||p_i - p_j||^2), i.e. a Gaussian kernel applied
    to pairwise squared Euclidean distances.
    """

    def __init__(self):
        super(GetGraph, self).__init__()

    def forward(self, point_cloud):
        # Squared norm of every point: (batch, N, 1).
        sq_norms = torch.sum(point_cloud * point_cloud, dim=2, keepdim=True)
        # Pairwise inner products <p_i, p_j>: (batch, N, N).
        inner = torch.matmul(point_cloud, point_cloud.permute(0, 2, 1))
        # ||p_i - p_j||^2 = ||p_i||^2 - 2<p_i, p_j> + ||p_j||^2
        sq_dists = sq_norms - 2 * inner + sq_norms.permute(0, 2, 1)
        return torch.exp(-sq_dists)
class GetLaplacian(nn.Module):
    """Compute the graph Laplacian of a batch of dense adjacency matrices.

    With ``normalize=True`` (default) returns the symmetric normalized
    Laplacian ``L = I - D^{-1/2} A D^{-1/2}``; otherwise the combinatorial
    Laplacian ``L = D - A``. Input and output are (batch, N, N) tensors.
    """

    def __init__(self, normalize=True):
        super(GetLaplacian, self).__init__()
        self.normalize = normalize

    def diag(self, mat):
        # Input is batch x vertices; returns a batch of diagonal matrices.
        # torch.diag_embed does the per-batch torch.diag stacking in one call.
        return torch.diag_embed(mat)

    def forward(self, adj_matrix):
        if self.normalize:
            # Row degrees of each graph in the batch: (batch, N).
            D = torch.sum(adj_matrix, dim=2)
            eye = self.diag(torch.ones_like(D))  # batched identity
            # D^{-1/2} as a batch of diagonal matrices.
            D = self.diag(1 / torch.sqrt(D))
            L = eye - torch.matmul(torch.matmul(D, adj_matrix), D)
        else:
            # dim=1 gives column sums; these equal the row degrees only for
            # a symmetric adjacency (which GetGraph produces).
            D = torch.sum(adj_matrix, dim=1)
            # Fix: the original called torch.matrix_diag, which is a
            # TensorFlow API and does not exist in PyTorch (AttributeError
            # at runtime); torch.diag_embed is the PyTorch equivalent.
            L = self.diag(D) - adj_matrix
        return L
class GetFilter(nn.Module):
    """Chebyshev spectral graph-convolution layer.

    Approximates a spectral filter with a K-term Chebyshev polynomial
    expansion in the graph Laplacian L, then applies a learned linear map
    (K * Fin -> Fout) followed by a ReLU.
    """

    def __init__(self, Fin, K, Fout):
        super(GetFilter, self).__init__()
        self.Fin = Fin    # input feature channels per vertex
        self.Fout = Fout  # output feature channels per vertex
        self.K = K        # Chebyshev polynomial order (filter support size)
        # One weight matrix shared by the K concatenated polynomial terms.
        self.W = nn.Parameter(torch.Tensor(self.K * self.Fin, self.Fout))
        nn.init.normal_(self.W, mean=0, std=0.2)
        self.B = nn.Parameter(torch.Tensor(self.Fout))
        nn.init.normal_(self.B, mean=0, std=0.2)
        self.relu = nn.ReLU()

    # def reset_parameters(self):

    def forward(self, x, L):
        """Filter vertex features.

        Args:
            x: (N, M, Fin) batch of vertex features (N graphs, M vertices).
            L: (N, M, M) batch of graph Laplacians.

        Returns:
            (N, M, Fout) filtered features.
        """
        N, M, Fin = list(x.size())
        K = self.K
        # Chebyshev recurrence: T_0 = x, T_1 = L x, T_k = 2 L T_{k-1} - T_{k-2}.
        # Terms are stacked along a new leading dimension of size K.
        x0 = x.clone()
        x = x0.unsqueeze(0)
        # x = x.expand(-1,-1,-1,1)
        def concat(x, x_):
            # Append one more polynomial term along the leading axis.
            x_ = x_.unsqueeze(0)
            # x_ = x.expand(1,-1,-1)
            return torch.cat((x, x_), dim=0)
        if K > 1:
            x1 = torch.matmul(L, x0)
            x = concat(x, x1)
        for k in range(2, K):
            x2 = 2 * torch.matmul(L, x1) - x0
            x = concat(x, x2)
            x0, x1 = x1, x2
        # (K, N, M, Fin) -> (N*M, Fin*K): fold the K polynomial terms into
        # the feature dimension before the dense projection.
        x = x.permute(1, 2, 3, 0)
        x = x.reshape(N * M, Fin * K)
        x = torch.matmul(x, self.W)
        x = torch.add(x, self.B)
        x = self.relu(x)
        return x.reshape(N, M, self.Fout)
class RGCNN_Seg(nn.Module):
    """Regularized GCNN for point-cloud segmentation.

    Stacks GetFilter layers whose Laplacian is rebuilt from the input
    point cloud by GetGraph + GetLaplacian on every forward pass.

    Args:
        vertice: number of vertices (points) per cloud.
        F: list of output channel counts, one per graph-conv layer.
        K: list of Chebyshev orders, one per graph-conv layer.
        M: list of fully-connected layer sizes (only printed here; no FC
           layers are actually built in this implementation).
        regularization, dropout, batch_size, eval_frequency, dir_name:
            stored as attributes; not used inside forward().
    """

    def __init__(self, vertice, F, K, M, regularization=0, dropout=0, batch_size=100, eval_frequency=200,
                 dir_name=''):
        # Verify the consistency w.r.t. the number of layers.
        assert len(F) == len(K)
        super(RGCNN_Seg, self).__init__()
        # Keep the useful Laplacians only. May be zero.
        self.vertice = vertice
        # Print information about NN architecture.
        Ngconv = len(F)
        Nfc = len(M)
        print('NN architecture')
        print(' input: M_0 = {}'.format(vertice))
        for i in range(Ngconv):
            print(' layer {0}: gconv{0}'.format(i + 1))
            print(' representation: M_{0} * F_{1}= {2} * {3} = {4}'.format(
                i, i + 1, vertice, F[i], vertice * F[i]))
            # First layer consumes a single input channel.
            F_last = F[i - 1] if i > 0 else 1
            print(' weights: F_{0} * F_{1} * K_{1} = {2} * {3} * {4} = {5}'.format(
                i, i + 1, F_last, F[i], K[i], F_last * F[i] * K[i]))
            print(' biases: F_{} = {}'.format(i + 1, F[i]))
        for i in range(Nfc):
            name = 'fc{}'.format(i + 1)
            print(' layer {}: {}'.format(Ngconv + i + 1, name))
            print(' representation: M_{} = {}'.format(Ngconv + i + 1, M[i]))
            M_last = M[i - 1] if i > 0 else vertice if Ngconv == 0 else vertice * F[-1]
            print(' weights: M_{} * M_{} = {} * {} = {}'.format(
                Ngconv + i, Ngconv + i + 1, M_last, M[i], M_last * M[i]))
            print(' biases: M_{} = {}'.format(Ngconv + i + 1, M[i]))
        # Operations
        self.getGraph = GetGraph()
        self.getLaplacian = GetLaplacian(normalize=True)
        # Store attributes and bind operations.
        self.F, self.K, self.M = F, K, M
        self.regularization, self.dropout = regularization, dropout
        self.batch_size, self.eval_frequency = batch_size, eval_frequency
        self.dir_name = dir_name
        self.regularizer = []
        # Build the graph-conv stack; registered as attributes gcn0..gcnN-1.
        # NOTE(review): the first layer is hard-wired to Fin=6 input
        # channels — confirm this matches the feature layout fed to forward.
        for i in range(len(F)):
            if i == 0:
                layer = GetFilter(Fin=6, K=K[i], Fout=F[i])
            else:
                layer = GetFilter(Fin=F[i - 1], K=K[i], Fout=F[i])
            setattr(self, 'gcn%d' % i, layer)

    def forward(self, x, cat):
        # Rebuild the graph Laplacian from the current point cloud.
        # NOTE(review): despite the name, L here is the Laplacian of the
        # similarity graph, not the raw adjacency.
        L = self.getGraph(x)
        L = self.getLaplacian(L)
        # Disabled one-hot category concatenation (cat is currently unused):
        # cat = torch.unsqueeze(cat,1)
        # cat = torch.zeros(self.batch_size, self.class_size).scatter_(1, cat, 1)
        # cat = torch.unsqueeze(cat,1)
        # cat = cat.expand(-1,self.vertice,-1).double()
        # x = torch.cat((x,cat),dim=2)
        for i in range(len(self.F)):
            x = getattr(self, 'gcn%d' % i)(x, L)
            # Intermediate activations kept for a regularization term.
            # NOTE(review): this list grows across forward passes and is
            # never cleared — potential memory leak during training.
            self.regularizer.append(x)
        return x
# +
# def train():
# train_data, train_label, train_cat = genData('train')
# val_data, val_label, val_cat = genData('val')
# test_data, test_label, test_cat = genData('test')
# params = dict()
# params['dir_name'] = 'model'
# params['num_epochs'] = 50
# params['batch_size'] = 1
# params['eval_frequency'] = 30
# # Building blocks.
# params['filter'] = 'chebyshev5'
# params['brelu'] = 'b1relu'
# params['pool'] = 'apool1'
# # Number of classes.
# # C = y.max() + 1
# # assert C == np.unique(y) .size
# # Architecture.
# params['F'] = [128, 512, 1024, 512, 128, 50] # Number of graph convolutional filters.
# params['K'] = [6, 5, 3, 1, 1, 1] # Polynomial orders.
# params['M'] = [384, 16, 1] # Output dimensionality of fully connected layers.
# # Optimization.
# params['regularization'] = 1e-9
# params['dropout'] = 1
# params['learning_rate'] = 1e-3
# params['decay_rate'] = 0.95
# params['momentum'] = 0
# params['decay_steps'] = train_data.shape[0] / params['batch_size']
# model = seg_model.rgcnn(2048, **params)
# accuracy, loss, t_step = model.fit(train_data, train_cat, train_label, val_data, val_cat, val_label,
# is_continue=False)
# -
| PC-NBV/RGCNN_notebooks/Atempt_RGCNN_with_classes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp utils
# -
# # utils
#hide
from nbdev.showdoc import *
#export
import dgl
from dgl import DGLGraph
import pickle as pkl
import sys
import scipy.sparse as sp
import networkx as nx
import torch
import numpy as np
from sklearn.metrics import roc_auc_score, average_precision_score
# +
from torch.nn.functional import binary_cross_entropy, binary_cross_entropy_with_logits
from fastprogress.fastprogress import master_bar, progress_bar
import warnings
warnings.filterwarnings('ignore')
from exp.model import GAE, GAEN
# -
#export
def load_data(dataset):
    """Load the planetoid-format node features for *dataset*.

    Reads the pickled ``x``, ``tx``, ``allx`` and ``graph`` objects from
    ``data/ind.<dataset>.<name>`` plus the test-index file, stitches the
    train/test feature blocks together and re-orders the test rows back
    into their original positions.

    Returns:
        scipy.sparse.lil_matrix of node features.
    """
    # load the data: x, tx, allx, graph
    names = ['x', 'tx', 'allx', 'graph']
    objects = []
    for name in names:
        # Context manager so each handle is closed even on error
        # (the original left every file open).
        with open("data/ind.{}.{}".format(dataset, name), 'rb') as f:
            if sys.version_info > (3, 0):
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))
    x, tx, allx, graph = tuple(objects)
    test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset))
    test_idx_range = np.sort(test_idx_reorder)

    if dataset == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph):
        # find isolated nodes and add them as zero-vecs into the right position.
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range-min(test_idx_range), :] = tx
        tx = tx_extended

    features = sp.vstack((allx, tx)).tolil()
    # Undo the shuffled ordering of the test rows.
    features[test_idx_reorder, :] = features[test_idx_range, :]
    return features
#export
def parse_index_file(filename):
    """Read one integer index per line from *filename*.

    Blank lines are skipped. Returns the indices as a list of ints.
    """
    # `with` closes the file deterministically (the original leaked the
    # handle by iterating a bare open()).
    with open(filename) as f:
        return [int(line.strip()) for line in f if line.strip()]
# Load the Cora feature matrix and densify it for use as DGL node data.
features = load_data('cora')
features = np.array(features.todense(), dtype=np.float32)
#export
def load_graph(dataset):
    """Load only the pickled ``graph`` adjacency dict for *dataset*.

    Reads the same four planetoid pickle files as ``load_data`` but
    returns just the graph object (node -> neighbor-list mapping).
    """
    names = ['x', 'tx', 'allx', 'graph']
    objects = []
    for name in names:
        # Close each file deterministically (the original leaked handles).
        with open("data/ind.{}.{}".format(dataset, name), 'rb') as f:
            if sys.version_info > (3, 0):
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))
    x, tx, allx, graph = tuple(objects)
    return graph
# Build a DGLGraph from the raw Cora adjacency dict and attach features.
g = DGLGraph(load_graph('cora'))
g.ndata['h'] = features
num_nodes = len(g.nodes())
num_edges = len(g.edges()[0]);num_edges
# Hold out 10% of edges for test and 5% for validation, chosen at random.
num_test = int(np.floor(num_edges / 10.))
num_val = int(np.floor(num_edges / 20.))
all_edge_idx = list(range(num_edges))
np.random.shuffle(all_edge_idx)
val_edge_idx = all_edge_idx[:num_val]
test_edge_idx = all_edge_idx[num_val:(num_val + num_test)]
# (num_edges, 2) array of (src, dst) pairs.
all_edges = g.edges()
all_edges = torch.stack(all_edges, dim=1).numpy()
val_edges = all_edges[val_edge_idx]
test_edges = all_edges[test_edge_idx]
# Remove the held-out edges from the training graph in place.
g.remove_edges(val_edge_idx+test_edge_idx)
#export
def ismember(a, b, tol=5):
    """Return True if row *a* occurs among the rows of *b*.

    Rows compare equal when their difference rounds to zero at *tol*
    decimal places. Returns a numpy bool scalar.
    """
    diffs = np.round(np.asarray(a) - np.asarray(b)[:, None], tol)
    return np.any(np.all(diffs == 0, axis=-1))
ismember([2019,973], all_edges)
#export
def make_test_val_edges(g):
    """Split the edges of DGLGraph *g* into train/val/test link-prediction sets.

    Removes 10% (test) + 5% (val) of edges from *g* IN PLACE, then draws
    an equal number of "false" (non-existent) edges for each split by
    rejection sampling. Returns
    (val_edges, val_edges_false, test_edges, test_edges_false).
    """
    num_nodes = len(g.nodes())
    num_edges = len(g.edges()[0])
    num_test = int(np.floor(num_edges / 10.))
    num_val = int(np.floor(num_edges / 20.))
    # Random permutation of edge ids; first slices become val/test.
    all_edge_idx = list(range(num_edges))
    np.random.shuffle(all_edge_idx)
    val_edge_idx = all_edge_idx[:num_val]
    test_edge_idx = all_edge_idx[num_val:(num_val + num_test)]
    # (num_edges, 2) array of (src, dst) pairs.
    all_edges = g.edges()
    all_edges = torch.stack(all_edges, dim=1).numpy()
    val_edges = all_edges[val_edge_idx]
    test_edges = all_edges[test_edge_idx]
    # Mutates the caller's graph: held-out edges are removed for training.
    g.remove_edges(val_edge_idx+test_edge_idx)
    # Rejection-sample negative test edges: random pairs that are not
    # self-loops, not real edges (either direction), and not duplicates.
    test_edges_false = []
    while len(test_edges_false) < len(test_edges):
        idx_i = np.random.randint(0, num_nodes)
        idx_j = np.random.randint(0, num_nodes)
        if idx_i == idx_j:
            continue
        if ismember([idx_i, idx_j], all_edges) or ismember([idx_j, idx_i], all_edges):
            continue
        if test_edges_false:
            if ismember([idx_j, idx_i], np.array(test_edges_false)):
                continue
            if ismember([idx_i, idx_j], np.array(test_edges_false)):
                continue
        test_edges_false.append([idx_i, idx_j])
    # Same sampling for validation negatives; also checked against the
    # test negatives so the val/test false sets cannot overlap.
    val_edges_false = []
    while len(val_edges_false) < len(val_edges):
        idx_i = np.random.randint(0, num_nodes)
        idx_j = np.random.randint(0, num_nodes)
        if idx_i == idx_j:
            continue
        if ismember([idx_i, idx_j], all_edges) or ismember([idx_j, idx_i], all_edges):
            continue
        if val_edges_false:
            if ismember([idx_j, idx_i], np.array(test_edges_false)):
                continue
            if ismember([idx_i, idx_j], np.array(test_edges_false)):
                continue
            if ismember([idx_j, idx_i], np.array(val_edges_false)):
                continue
            if ismember([idx_i, idx_j], np.array(val_edges_false)):
                continue
        val_edges_false.append([idx_i, idx_j])
    # Edges still present in g are the training positives.
    train_edges = torch.stack(g.edges(), dim=1).numpy()
    # ismember returns a numpy bool scalar, so ~ is a valid logical NOT here.
    assert ~ismember(test_edges_false, all_edges)
    assert ~ismember(val_edges_false, all_edges)
    assert ~ismember(val_edges, train_edges)
    assert ~ismember(test_edges, train_edges)
    assert ~ismember(val_edges, test_edges)
    return val_edges, val_edges_false, test_edges, test_edges_false
# Inline duplicate of the negative-sampling logic in make_test_val_edges:
# draw random non-edges until there are as many false edges as held-out
# positive edges, for the test and validation splits respectively.
test_edges_false = []
while len(test_edges_false) < len(test_edges):
    # if len(test_edges_false)%200==0:
    # print(len(test_edges_false))
    idx_i = np.random.randint(0, num_nodes)
    idx_j = np.random.randint(0, num_nodes)
    if idx_i == idx_j:
        continue
    # Reject pairs that are real edges (in either direction).
    if ismember([idx_i, idx_j], all_edges) or ismember([idx_j, idx_i], all_edges):
        continue
    if test_edges_false:
        if ismember([idx_j, idx_i], np.array(test_edges_false)):
            continue
        if ismember([idx_i, idx_j], np.array(test_edges_false)):
            continue
    test_edges_false.append([idx_i, idx_j])
val_edges_false = []
while len(val_edges_false) < len(val_edges):
    # if len(val_edges_false)%200==0:
    # print(len(val_edges_false))
    idx_i = np.random.randint(0, num_nodes)
    idx_j = np.random.randint(0, num_nodes)
    if idx_i == idx_j:
        continue
    if ismember([idx_i, idx_j], all_edges) or ismember([idx_j, idx_i], all_edges):
        continue
    if val_edges_false:
        # Also checked against the test negatives so the val/test false
        # sets cannot overlap.
        if ismember([idx_j, idx_i], np.array(test_edges_false)):
            continue
        if ismember([idx_i, idx_j], np.array(test_edges_false)):
            continue
        if ismember([idx_j, idx_i], np.array(val_edges_false)):
            continue
        if ismember([idx_i, idx_j], np.array(val_edges_false)):
            continue
    val_edges_false.append([idx_i, idx_j])
# Edges still present in g are the training positives.
train_edges = torch.stack(g.edges(), dim=1).numpy()
# ismember returns a numpy bool scalar, so ~ is a valid logical NOT here.
assert ~ismember(test_edges_false, all_edges)
assert ~ismember(val_edges_false, all_edges)
assert ~ismember(val_edges, train_edges)
assert ~ismember(test_edges, train_edges)
assert ~ismember(val_edges, test_edges)
in_feats = features.shape[1];in_feats
# Symmetric-normalization factor D^{-1/2} per node, stored on the graph.
degs = g.in_degrees().float()
norm = torch.pow(degs, -0.5)
norm[torch.isinf(norm)] = 0  # isolated nodes (degree 0) would give inf
g.ndata['norm'] = norm.unsqueeze(1)
adj = g.adjacency_matrix().to_dense()
# Up-weight positive (edge) entries in the reconstruction loss, since
# non-edges vastly outnumber edges in the dense adjacency.
pos_weight = torch.Tensor([float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()])
adj_label = adj.view(-1)
weight_mask = adj_label == 1
weight_tensor = torch.ones(weight_mask.size(0))
weight_tensor[weight_mask] = pos_weight
#export
def get_acc(adj_rec, adj_label):
    """Fraction of adjacency entries where the thresholded (>0.5)
    reconstruction agrees with the 0/1 labels."""
    truth = adj_label.view(-1).long()
    preds = (adj_rec > 0.5).view(-1).long()
    n_correct = (preds == truth).sum().float()
    return (n_correct / truth.size(0)).item()
#export
def get_scores(edges_pos, edges_neg, adj_rec):
    """Link-prediction quality of a reconstructed adjacency.

    Scores each positive and negative edge by its reconstructed value
    adj_rec[i, j] and returns (roc_auc, average_precision).
    """
    scores_pos = [adj_rec[e[0], e[1]] for e in edges_pos]
    scores_neg = [adj_rec[e[0], e[1]] for e in edges_neg]

    preds_all = np.hstack([scores_pos, scores_neg])
    labels_all = np.hstack([np.ones(len(scores_pos)), np.zeros(len(scores_neg))])
    return roc_auc_score(labels_all, preds_all), average_precision_score(labels_all, preds_all)
# NOTE(review): the GAEN model is immediately overwritten by GAE below —
# confirm which of the two is actually meant to be trained.
model = GAEN(in_feats, [32, 16], dropout=0.)
model = GAE(in_feats, [32,16], dropout=0.)
# NOTE(review): 'cuda: 1' contains a space; recent PyTorch versions reject
# that device string ('cuda:1' is the canonical form) — confirm against
# the torch version in use.
device = torch.device('cuda: 1')
model = model.to(device)
g = g.to(device)
adj = adj.to(device)
weight_tensor = weight_tensor.to(device)
# Weighted BCE: weight_tensor up-weights the (rare) positive entries.
# NOTE(review): binary_cross_entropy expects probabilities in [0, 1];
# despite the variable name `adj_logits`, the model output must already
# be sigmoid-activated — confirm against GAE's forward.
loss_function=binary_cross_entropy
losses = []
model.train()
optim = torch.optim.Adam(model.parameters(), lr=1e-2)
# Full-batch training: each epoch reconstructs the whole adjacency once.
for e in progress_bar(range(200)):
    adj_logits = model(g)
    optim.zero_grad()
    loss = loss_function(adj_logits.view(-1), adj.view(-1), weight=weight_tensor)
    loss.backward()
    optim.step()
    # Link-prediction quality on the held-out validation edges.
    val_roc, val_ap = get_scores(val_edges, val_edges_false, adj_logits)
    losses.append(loss.item())
    print((f'Epoch:{e:2d} loss: {loss.item():.5f} | acc: {get_acc(adj_logits, adj):.5f} | '
           f'val_roc: {val_roc} | val_ap: {val_ap}'))
# Final held-out evaluation: (ROC-AUC, average precision) on the test split.
get_scores(test_edges, test_edges_false, adj_logits)
import matplotlib.pyplot as plt
# %matplotlib inline
# Training-loss curve over the 200 epochs.
plt.plot(losses)
| 01_utils.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Spark SQL Analytics with the Iguazio Data Science Platform
#
# - [Overview](#overview)
# - [Set Up](#setup)
# - [Initiate a Spark Session and Configure Spark](#initiate-a-spark-session-and-configure-spark)
# - [Modify the Spark Configuration (Optional)](#modify-the-spark-cfg)
# - [Load Data into a Spark DataFrame](#load-data)
# - [Load Data from Amazon S3](#load-data-from-amazon-s3)
# - [Load Data from an External Table](#load-data-from-external-table)
# - [Load Data from a-Semi-Structured File](#load-data-from-semi-structured-file)
# - [Load Data from an Unstructured File](#load-data-from-unstructured-file)
# - [Overwrite the Table Schema](#overwrite-table-schema)
# - [Use Spark SQL](#spark-sql)
# - [Spark SQL on an Object](#spark-sql-on-object)
# - [Spark SQL on a Table](#spark-sql-on-table)
# - [Spark SQL on Platform NoSQL Data](#spark-sql-on-platform-nosql-data)
# - [Spark SQL Join](#spark-sql-join)
# - [Spark SQL on a Parquet File](#spark-sql-on-parquet)
# - [Spark SQL on a Partitioned Table](#spark-sql-on-partitioned-table)
# - [Perform Conditional Data Updates](#conditional-update)
# - [Cleanup](#Cleanup)
# <a id="overview"></a>
# ## Overview
#
# Spark SQL is an Apache Spark module for working with structured data.
# It lets you query structured data inside Spark programs by using either SQL or a familiar DataFrame API.
# DataFrames and SQL provide a common way to access a variety of data sources.
#
# In this notebook, you'll learn how to use Spark SQL and DataFrames to access objects, tables, and unstructured data that persist in the data containers of the [Iguazio Data Science Platform](https://www.iguazio.com/) (**the platform**).
#
# The platform's Spark drivers implement the data-source API and support predicate push down: the queries are passed to the platform's data store, which returns only the relevant data.
# This allows accelerated, high-speed access from Spark to data stored in the platform.
#
# For more details, read the [Spark SQL and DataFrames documentation](https://spark.apache.org/docs/2.3.1/sql-programming-guide.html) and the overview in platform's [Spark APIs Reference](https://www.iguazio.com/docs/reference/latest-release/api-reference/spark-apis/overview/).
# <a id="setup"></a>
# ## Set Up
#
# Before preparing the data, you need to define some environment variables that will be used in the following steps of this tutorial:
# +
import os
## Iguazio Data Science Platform Variables
# Directory for stocks
# %env DIR1 = examples
## AWS Credentials and Bucket Variables
# TODO: Replace the <...> placeholders with your specific data.
# %env AWS_USER = '<your_aws_user>'
# %env AWS_PASSWORD = '<<PASSWORD>>'
# %env BUCKET = '<your_s3_bucket_name_here>'
# %env ACCESSKEY = '<your_accesskey>'
# %env SECRETKEY = '<your_secretkey>'
# -
# <a id="initiate-a-spark-session-and-configure-spark"></a>
# ## Initiate a Spark Session and Configure Spark
#
# Begin by initiating a new Spark session and checking the default Spark configuration for this session:
# +
# %%time
from pyspark.conf import SparkConf
from pyspark.sql import SparkSession
from pyspark.sql.functions import concat, col

# Initiate a new Spark Session
spark = SparkSession.builder.appName("Spark Session with Default Configurations").getOrCreate()

# Retrieve and view all the default Spark configurations:
# conf = spark.sparkContext._conf.getAll()
# print(conf)
# NOTE(review): `_conf` is a private SparkContext attribute; prefer
# spark.sparkContext.getConf() where available.
conf = spark.sparkContext._conf
# Print the configuration keys most relevant to sizing this session.
print('\n\n## Initial Spark Configuration ##\n')
print('spark.app.name = ', conf.get("spark.app.name"))
print('spark.driver.cores = ', conf.get("spark.driver.cores"))
print('spark.driver.memory = ', conf.get("spark.driver.memory"))
print('spark.executor.cores = ', conf.get("spark.executor.cores"))
print('spark.executor.memory = ', conf.get("spark.executor.memory"))
print('spark.cores.max = ', conf.get("spark.cores.max"))
print('spark.python.profile = ', conf.get("spark.python.profile"))
print('spark.pyspark.python = ', conf.get("spark.pyspark.python"))
print('spark.hadoop.fs.s3a.impl = ', conf.get("spark.hadoop.fs.s3a.impl"))
# + [markdown] toc-hr-collapsed=true
# <a id="modify-the-spark-cfg"></a>
# ### Modify the Spark Configuration (Optional)
#
# You may need to modify the default Spark configuration to match your specific requirements and resources and optimize performance.
# The nature of your datasets and data models, the data-access methods that you select to use, and your hardware resources are all relevant factors in selecting your configuration.
# The [Test the SQL Performance on a Partitioned NoSQL Table with Different Spark Configurations](#test-sql-perf-on-partitioned-nosql-table-w-different-spark-cfgs) section of this tutorial demonstrates how to test Spark SQL performance on a partitioned NoSQL table in the platform with different Spark configurations.
#
# The following Spark configuration properties are especially worth noting:
# - `spark.driver.cores`
# - `spark.driver.memory`
# - `spark.executor.cores`
# - `spark.executor.memory`
# - `spark.cores.max`
# - `spark.python.profile`
# - `spark.pyspark.python`
#
# To access data in an AWS S3 bucket, perform the following configurations; replace the `<...>` placeholders with your specific data:
# - `spark.hadoop.fs.s3a.impl=org.apache.hadoop.fs.s3a.S3AFileSystem`
# - `spark.hadoop.fs.s3a.access.key=<your access key>`
# - `spark.hadoop.fs.s3a.secret.key=<your secret key>`
# - `spark.hadoop.fs.s3a.fast.upload=true`
#
# For detailed information about configuring Spark, read the [Spark documentation](https://spark.apache.org/docs/2.3.1/configuration.html#dynamically-loading-spark-properties).<br>
# For further performance services and support, contact Iguazio's [customer-success team](https://www.iguazio.com/support/).
#
# The following code demonstrates how to modify the Spark configuration.
# +
# Modify the default Spark configurations, as needed.
# The following example uses a single m5.2xlarge application node (8 CPUs, 32 GB):
'''
conf = spark.sparkContext._conf\
    .set("spark.hadoop.fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")
'''
# Batch-set session resources; driver cores can only be set in cluster
# deploy mode, hence the commented entry.
conf = spark.sparkContext._conf\
    .setAll([('spark.app.name', 'Spark SQL for Analytics'), \
             # ('spark.driver.cores', '2'), \ # Only in cluster mode.
             ('spark.driver.memory','2g'),
             ('spark.executor.cores', '2'), \
             ('spark.executor.memory', '4g'), \
             ('spark.cores.max', '3'), \
             ('spark.python.profile', 'true'), \
             ('spark.pyspark.python', 'true'), \
             ("spark.hadoop.fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")])

# Stop the current Spark Session
spark.sparkContext.stop()

# Create a Spark Session with new configurations
spark = SparkSession.builder.config(conf=conf).appName("Spark SQL Analytics - ipynb").getOrCreate()

# Confirm the overrides took effect.
print('\n\n## Modified Spark Configuration ##\n')
print('spark.app.name = ', conf.get("spark.app.name"))
print('spark.driver.cores = ', conf.get("spark.driver.cores"))
print('spark.driver.memory = ', conf.get("spark.driver.memory"))
print('spark.executor.cores = ', conf.get("spark.executor.cores"))
print('spark.executor.memory = ', conf.get("spark.executor.memory"))
print('spark.cores.max = ', conf.get("spark.cores.max"))
print('spark.python.profile = ', conf.get("spark.python.profile"))
print('spark.pyspark.python = ', conf.get("spark.pyspark.python"))
print('spark.hadoop.fs.s3a.impl = ', conf.get("spark.hadoop.fs.s3a.impl"))
print('\n')
# + [markdown] toc-hr-collapsed=false
# <a id="load-data"></a>
# ## Load Data into a Spark DataFrame
#
# The Spark Data Sources API supports a pluggable mechanism for integration with structured data sources. It is a unified API designed to support two major operations:
#
# 1. Loading structured data from an external data source into Spark.
# 2. Storing structured data from Spark into an external data source.
# -
# <a id="load-data-from-amazon-s3"></a>
# ### Load Data from Amazon S3
#
# Load a file from an Amazon S3 bucket into a Spark DataFrame.<br>
# The URL of the S3 file should be of the form `s3a://bucket/path/to/file`.
# +
# # !pip install botocore
# +
import botocore.session

# Pick up AWS credentials from the standard botocore lookup chain
# (env vars, config files, instance metadata).
session = botocore.session.get_session()
credentials = session.get_credentials()

# Create a Spark Session configured for S3A access.
# Fix: the original split the builder chain across bare lines with no
# continuation characters, which is a SyntaxError in Python; the chain is
# now wrapped in parentheses for implicit line joining.
spark = (SparkSession
         .builder
         .config(
             'spark.driver.extraClassPath',
             '/spark/3rd_party/aws-java-sdk-1.11.335.jar:'
             '/spark/3rd_party/hadoop-aws-2.8.4.jar')
         .config('fs.s3a.access.key', credentials.access_key)
         .config('fs.s3a.secret.key', credentials.secret_key)
         .appName("Load from S3")
         .getOrCreate())

# S3 object 's3a://bucket/path/to/file'
# NOTE(review): an s3a URI takes the bare bucket name, not the
# "<bucket>.s3.amazonaws.com" hostname form — confirm this path resolves.
s3Obj = "s3a://iguazio-sample-data.s3.amazonaws.com/2018-03-26_BINS_XETR08.csv"

# Load the CSV into a Spark DataFrame, inferring the schema from the data.
df = spark.read \
    .format("csv") \
    .option("header", "true") \
    .option("inferSchema", "true") \
    .load(s3Obj)

# Stop Spark Session
spark.stop()
# + [markdown] toc-hr-collapsed=true
# #### Copy a File from an AWS S3 Bucket to the Platform
#
# Alternatively, you can first copy the data to a platform data container.
# -
# ##### Create a Directory in a Platform Data Container
#
# Create a directory (`DIR1`) in your user home directory in the "users" platform data container (`V3IO_HOME`).
# !mkdir -p /v3io/${V3IO_HOME}/${DIR1}
# ##### Copy a CSV file from an AWS S3 Bucket to the Platform
#
# Copy a CSV file from an Amazon Simple Storage (S3) bucket to a **stocks.csv** file in a platform data container.
# !curl -L "iguazio-sample-data.s3.amazonaws.com/2018-03-26_BINS_XETR08.csv" > /v3io/${V3IO_HOME}/${DIR1}/stocks.csv
# ##### List Files in a Platform Data-Container Directory
# !ls -altr /v3io/${V3IO_HOME}/${DIR1}
# ### Define Platform File-Path Variables
file_path = os.path.join(os.getenv('V3IO_HOME_URL')+'/examples')
file = os.path.join(file_path+'/stocks.csv')
# ### Load a File from a Platform Data Container into a Spark DataFrame
#
# Read the CSV file that you saved to the platform data container into a Spark DataFrame.<br>
# The following code example uses the `inferSchema` option to automatically infer the schema of the read data (recommended).
# Alternatively, you can define the schema manually:
#
# ```python
# schema = StructType([
# StructField("<field name>", <field type>, <is Null>),
# ...])
# df = spark.read.schema(schema)
# ...
# ```
# +
# %%time
df = spark.read\
.format("csv")\
.option("header", "true")\
.option("inferSchema", "true")\
.load(file)
# -
# ### Print the Schema
df.printSchema()
# ### List Columns
df.columns
# + [markdown] toc-hr-collapsed=false
# <a id="load-data-from-external-table"></a>
# ### Load Data from an External Table
#
# In this section, let's walk through two examples:
#
# 1. Use the PyMySQL Python MySQL client library and a pandas DataFrame to load data from a MySQL database.
# 2. Use Spark JDBC to read a table from AWS Redshift.
#
#
# For more details read [read-external-db](read-external-db.ipynb) and [Spark JDBC to Databases](SparkJDBCtoDBs.ipynb)
# + [markdown] toc-hr-collapsed=true
# <a id="load-data-from-external-table-mysql"></a>
# #### Use MySQL as an External Data Source
# -
# ##### Create a MySQL Database Connection
#
# Read from a MySQL database as a bulk operation using pandas DataFrames.
#
# > **AWS Cloud Note:** If you're running the notebook code from the AWS cloud, note that AWS S3 provides **eventual consistency**.
# Therefore, newly persisted data and software packages may take some time to become visible to all users.
# +
import os
import pymysql
import pandas as pd

# Connect to the (public) Rfam MySQL server and bulk-load selected columns
# of the "family" table into a pandas DataFrame. All connection parameters
# can be overridden through environment variables; the defaults point at
# the public EBI Rfam mirror. DB_PORT is now configurable too (it was
# previously hard-coded while every other parameter was not).
conn = pymysql.connect(
    host=os.getenv('DB_HOST', 'mysql-rfam-public.ebi.ac.uk'),
    port=int(os.getenv('DB_PORT', 4497)),
    user=os.getenv('DB_USER', 'rfamro'),
    passwd=os.getenv('DB_PASSWORD', ''),
    db=os.getenv('DB_NAME', 'Rfam'),
    charset='utf8mb4')
try:
    pdfMySQL = pd.read_sql_query(
        "select rfam_acc,rfam_id,auto_wiki,description,author,seed_source FROM family",
        conn)
finally:
    # Always release the server connection, even if the query fails
    # (the original leaked it).
    conn.close()
pdfMySQL.tail(3)
# -
# ##### Create a Spark DataFrame from a pandas DataFrame
# Convert the pandas DataFrame into a Spark DataFrame so it can be used
# with Spark SQL.
dfMySQL = spark.createDataFrame(pdfMySQL)
# ##### Display Table Records
#
# Display a few records of the "family" table that was read into the `dfMySQL` DataFrame in the previous steps.
dfMySQL.show(5)
# ##### Print the Table Schema
#
# Print the schema of the "family" table that was read into the `dfMySQL` DataFrame.
dfMySQL.printSchema()
# ##### Register as a Table for Spark SQL Queries
#
# Define a temporary Spark view for running Spark SQL queries on the "family" table that was read into the `dfMySQL` DataFrame.
dfMySQL.createOrReplaceTempView("family")
# ##### Count Table Records
#
# Use Spark SQL to count the number of records in the "family" table.
spark.sql("SELECT COUNT(*) FROM family").show()
# ##### Check for a Unique Key
#
# Check whether the `auto_wiki` column can serve as a unique key (attribute) of the "family" table.
spark.sql("SELECT COUNT(distinct(auto_wiki)) FROM family").show()
# + [markdown] toc-hr-collapsed=true
# <a id="load-data-from-external-table-amazon-redshift"></a>
# #### Use Amazon Redshift as an External Data Source
#
# The **spark-redshift** library is a data source API for [Amazon Redshift](https://aws.amazon.com/redshift/).
#
# **Spark driver to Redshift:** The Spark driver connects to Redshift via JDBC using a username and password.
# Redshift doesn't support the use of IAM roles to authenticate this connection.
#
# **Spark to AWS S3:** S3 acts as a middleman to store bulk data when reading from or writing to Redshift.
# -
# ##### Create an Amazon S3 Bucket
#
# Create an Amazon S3 bucket named "redshift-spark".
# Temporary S3 staging location used by spark-redshift for bulk transfers.
tmpS3Dir = "s3n://redshift-spark/tmp/"
# ##### Set Up Your Redshift Environment
# Replace the placeholders below with your Redshift cluster's details.
redshiftDBName = '<your_redshift_DB_name>'
redshiftTableName = '<your_redshift_Table_name>'
redshiftUserId = '<your_redshift_User_ID>'
redshiftPassword = '<your_redshift_Password>'
redshifturl = '<your_redshift_URL>'
# Build the JDBC connection string.
# NOTE: the original line used Scala-style interpolation (s"...$var"),
# which is a SyntaxError in Python; an f-string produces the same URL.
jdbcURL = f"jdbc:redshift://{redshifturl}/{redshiftDBName}?user={redshiftUserId}&password={redshiftPassword}"
# ##### Load a Redshift Table into a Spark DataFrame
#
# The `.format("com.databricks.spark.redshift")` line tells the Spark Data Sources API that you're using the **spark-redshift** package.<br>
# Enable **spark-redshift** to use the **tmpS3Dir** temporary location in the S3 bucket to store temporary files generated by **spark-redshift**.
# Read the Redshift table through the spark-redshift data source.
# NOTE: the original chained the calls across lines without continuation
# characters, which is a SyntaxError in Python; wrapping the chain in
# parentheses fixes it.
dfRDSHFT = (spark.read
            .format("com.databricks.spark.redshift")
            .option("url", jdbcURL)
            .option("tempdir", tmpS3Dir)
            .option("dbtable", redshiftTableName)
            .load())
# ##### Check the Table
#
# Print the table schema and show a few records.<br>
# `spark-redshift` automatically reads the schema from the Redshift table and maps its types back to Spark SQL's types.
dfRDSHFT.printSchema()
dfRDSHFT.show(3)
# ##### Persist the Redshift Table Data into the Platform's NoSQL Store
# Persist the Redshift data to a NoSQL table in the platform.
# Fixes three errors in the original cell:
# - `spark.write` doesn't exist; the writer hangs off the DataFrame.
# - `key` and `sorting-key` were bare (undefined) names; the option values
#   must be attribute-name strings. Replace the placeholders below with the
#   actual key and sorting-key columns of your Redshift table.
# - `save()` returns None, so assigning its result back to `dfRDSHFT`
#   clobbered the DataFrame; the assignment is dropped.
dfRDSHFT.write\
    .format("io.iguaz.v3io.spark.sql.kv")\
    .mode("append")\
    .option("key", "<key-attribute>")\
    .option("sorting-key", "<sorting-key-attribute>")\
    .option("allow-overwrite-schema", "true")\
    .save(os.path.join(os.getenv('V3IO_HOME'))+'/rdshfttable/')
# <a id="load-data-from-semi-structured-file"></a>
# ### Load Data from a Semi-Structured File
# +
# Replace PATH_TO_A_JSON by the full URL of a JSON file, and remove the comment sign.
# dfJSON = spark.read.json("PATH_TO_A_JSON")
# Path to a sample multi-line JSON document in the platform data container.
jsonFile = os.path.join(os.getenv('V3IO_HOME_URL')+'/examples/mLines.json')
# Read the JSON file; "multiline" lets a single record span several lines.
dfJSON = (spark.read
          .option("multiline", "true")
          .json(jsonFile))
# -
dfJSON.printSchema()
# <a id="load-data-from-unstructured-file"></a>
# ### Load Data from an Unstructured File
#
# > **Note:** Beginning with version 2.4, Spark supports loading images.
# +
# Replace PATH_TO_AN_IMAGE by the full URL of an image file, and remove the comment sign.
# dfImage = spark.read.format("image").option("dropInvalid", true).load("PATH_TO_AN_IMAGE")
# Path to a sample image in the platform data container.
imageFile = os.path.join(os.getenv('V3IO_HOME_URL')+'/examples/CoffeeTime.jpg')
# Load the image through Spark's image data source (Spark >= 2.4).
dfImage = spark.read.format("image").option("dropInvalid", "true").load(imageFile)
# NOTE: `truncate` takes a Python boolean; the original passed the
# undefined name `false` (lowercase), which raises a NameError.
dfImage.select("image.origin", "image.width", "image.height").show(truncate=False)
# -
# <a id="overwrite-table-schema"></a>
# ### Overwrite the Table Schema
#
# The following example creates a table named mytable with AttrA and AttrB attributes of type string and an AttrC attribute of type long, and then overwrites the table schema to change the type of AttrC to double:
# +
# Create "mytable" with AttrC as a long, then overwrite the table schema so
# that AttrC becomes a double.
KV_FORMAT = "io.iguaz.v3io.spark.sql.kv"
mytable_path = os.path.join(file_path) + '/mytable/'

# First write: AttrC holds integers, so the inferred type is long.
dfOWSchema = spark.createDataFrame(
    [("a", "z", 123), ("b", "y", 456)],
    ["AttrA", "AttrB", "AttrC"])
dfOWSchema.write.format(KV_FORMAT) \
    .option("key", "AttrA") \
    .mode("overwrite") \
    .save(mytable_path)

# Second write: AttrC now holds floats; "allow-overwrite-schema" lets the
# append replace the stored schema (long -> double).
dfOWSchema = spark.createDataFrame(
    [("c", "x", 32.12), ("d", "v", 45.2)],
    ["AttrA", "AttrB", "AttrC"])
dfOWSchema.write.format(KV_FORMAT) \
    .option("key", "AttrA") \
    .option("allow-overwrite-schema", "true") \
    .mode("append") \
    .save(mytable_path)
# + [markdown] toc-hr-collapsed=true
# <a id="spark-sql"></a>
# ## Use Spark SQL
#
# Now, some Spark SQL queries to analyze the dataset that was loaded into `df` Spark DataFrame.<br>
# The first SQL queries list a few lines of selected columns in the dataset and retrieve some statistics of numerical columns.
# + [markdown] toc-hr-collapsed=false
# <a id="spark-sql-on-object"></a>
# ### Spark SQL on an Object
# -
df.select("ISIN", "Mnemonic", "SecurityDesc", "SecurityType").show(3)
# #### Retrieve Data from the First Rows
df.select("ISIN", "Mnemonic", "SecurityDesc").head(3)
# #### Summary and Descriptive Statistics
#
# The function `describe` returns a DataFrame containing information such as the number of non-null entries (`count`), mean, standard deviation (`stddev`), and the minimum (`min`) and maximum (`max`) values for each numerical column.
df.describe("TradedVolume").show()
# + [markdown] toc-hr-collapsed=true
# <a id="spark-sql-on-table"></a>
# ### Spark SQL on a Table
# -
# #### Register a Table View for Further Analytics
df.createOrReplaceTempView("stock")
# #### Select a Few Columns and Only Print a Few Lines
# NOTE: `show()` prints and returns None, so q, q1, ... below are all None;
# the assignments are kept only for notebook readability.
q = spark.sql("SELECT ISIN, SecurityDesc, SecurityID FROM stock limit 3").show()
# #### Analyze Data to Identify Unique-Key Columns
q1 = spark.sql("SELECT COUNT(ISIN) FROM stock").show()
q2 = spark.sql("SELECT COUNT(DISTINCT(ISIN)) FROM stock").show()
q4 = spark.sql("SELECT COUNT(SecurityID) FROM stock").show()
q5 = spark.sql("SELECT COUNT(DISTINCT(SecurityID)) FROM stock").show()
# A combination of `ISIN`, `Date`, and `Time` can serve as a unique key:
q6 = spark.sql("SELECT COUNT(DISTINCT(ISIN, Date, Time)) FROM stock").show()
# #### Concatenate Date and Time Columns
# `concat` and `col` are presumably imported from pyspark.sql.functions
# earlier in the notebook — TODO confirm.
df.select(concat(col("Date"), col("Time"))).head(2)
df.withColumn("datetime", concat(col("Date"), col("Time")))\
    .select("Date", "Time", "datetime").head(3)
# #### Register Another Table with a Unique Key
df.withColumn("datetime", concat(df["Date"], df["Time"])).createOrReplaceTempView("stock_UUID")
# #### Verify that the Key is Unique
q7 = spark.sql("SELECT COUNT(DISTINCT(ISIN, datetime)) FROM stock_UUID").show()
# #### Get Distinct Values on a Column
# %%time
q8 = spark.sql("SELECT COUNT(DISTINCT(datetime)) FROM stock_UUID").show()
# Results show that **all data in this dataset is of the same date.**
# %time
q9 = spark.sql("SELECT COUNT(DISTINCT(Time)) FROM stock_UUID").show()
# + [markdown] toc-hr-collapsed=false
# <a id="spark-sql-on-platform-nosql-data"></a>
# ### Spark SQL on Platform NoSQL Data
# -
# #### Persist Data from a Spark DataFrame to a Platform NoSQL Table
#
# The following code demonstrates how to write data from a Spark DataFrame to a NoSQL table in the persistent memory in a platform data container.
#
# Note:
# - The data-source format for the platform's NoSQL data store is `io.iguaz.v3io.spark.sql.kv`.
# - The path to the NoSQL table that is associated with the DataFrame should be defined as a fully qualified path of the format `v3io://<data container>/<table path>` — where `<data container>` is the name of the table's parent data container and `<table path>` is the relative path to the data within the specified container.
# - You must use the `key` option to define the table's primary key attribute (column). Note that the value of the primary-key attributes must be unique.<br>
# You can also optionally use the platform's custom `sorting-key` option to define a sorting-key attribute for the table (which enables performing range scans).<br>
# For more information, see the [platform documentation](https://www.iguazio.com/docs/concepts/latest-release/containers-collections-objects/#sharding-n-sorting-keys).
# +
# %%time
# Define the path to your NoSQL table
kvStore = os.path.join(file_path+'/stocks_kv')
# UUID: key.sorting-key
# key: ISIN
# sorting-key: Date + Time
# Add a "datetime" column (Date+Time concatenated) and persist the
# DataFrame to the platform's NoSQL store with ISIN as the primary key
# and datetime as the sorting key.
df.withColumn("datetime", concat(df["Date"], df["Time"]))\
    .write\
    .format("io.iguaz.v3io.spark.sql.kv")\
    .mode("append")\
    .option("key", "ISIN")\
    .option("sorting-key", "datetime")\
    .option("allow-overwrite-schema", "true")\
    .save(kvStore)
# -
# <a id="read-data-from-nosql-table-to-spark-df"></a>
# #### Load Data from a NoSQL Table into a Spark DataFrame
df2 = spark.read.format("io.iguaz.v3io.spark.sql.kv").load(kvStore)
# %%time
df2.select("ISIN", "datetime").head(1)
df2.createOrReplaceTempView("stock_kv")
# +
# %%time
q10 = spark.sql("SELECT ISIN, SUM(TradedVolume) FROM stock_kv GROUP BY ISIN").show(5)
# -
# <a id="write-data-to-partitioned-nosql-table"></a>
# #### Persist Data to a Partitioned NoSQL Table
#
# Partitions are first by `Date`, and then by `Time`. <br>
# +
# %%time
# Set the partitioned NoSQL (KV) store path
kvStorePartition = os.path.join(file_path+'/stocks_kv_partition')
# UUID = key.sorting-key
# key: ISIN
# partition : Date, Time
df.write\
    .format("io.iguaz.v3io.spark.sql.kv")\
    .mode("append")\
    .option("key", "ISIN")\
    .option("partition", "Date, Time")\
    .save(kvStorePartition)
# -
# <a id="read-data-from-another-nosql-table-to-spark-df"></a>
# #### Load Data from Another NoSQL Table into a Spark DataFrame
df3 = spark.read.format("io.iguaz.v3io.spark.sql.kv").load(kvStorePartition)
# +
# %%time
df3.select("ISIN", "Date", "Time").show(3)
# + [markdown] toc-hr-collapsed=true
# <a id="test-sql-perf-on-partitioned-nosql-table-w-different-spark-cfgs"></a>
# #### Test the SQL Performance on a Partitioned NoSQL Table with Different Spark Configurations
#
# Start out by running the following code to test the performance when using Spark SQL to access a partitioned NoSQL table in the platform with the default [Spark configuration](#initiate-a-spark-session-and-configure-spark).
#
# > **Note:** The default Spark configuration doesn't provide good support for the partition data model of the test table, and therefore the test query is expected to hang.
# -
# Register the partitioned table as a view. NOTE: the view name keeps the
# original "partintion" spelling because later cells reference it verbatim.
df3.createOrReplaceTempView("stock_kv_partintion")
# %%time
# 1*m5.2xlarge: Spark 4 executors, 2 cores, and 1 GB per executor; never returns results
# 1*m5.2xlarge: Spark 2 executors, 2 cores, and 4 GB per executor; never returns results
# %debug
q11 = spark.sql("SELECT ISIN, SUM(TradedVolume) FROM stock_kv_partintion GROUP BY ISIN").show(5)
# `explain` is a method; the original referenced it without calling it,
# which silently did nothing. Call it to actually print the query plan.
spark.sql("SELECT ISIN, SUM(TradedVolume) FROM stock_kv_partintion GROUP BY ISIN").explain()
# <a id="test-sql-perf-on-partitioned-nosql-table-spark-cfg-experiments"></a>
# ##### Experiment with Different Spark Configurations
#
# The following experiments run the same query with different Spark configurations.
# To perform the experiments, you need to modify your Spark configuration according to the information in the comments of each job.
# For information on how to modify the Spark configuration, see the [Modify the Spark Configuration](#modify-the-spark-cfg) section of this tutorial.
# +
# %%time
# NOTE: each of the following cells runs the identical aggregation; only
# the Spark cluster configuration (noted in each cell's comment) differs
# between runs. `show()` prints the result and returns None.
# 1*m5.2xlarge: Spark 4 executors, 2 cores, and 1 GB per executor
q12 = spark.sql("SELECT Date, SUM(TradedVolume) FROM stock_kv_partintion GROUP BY Date").show()
# +
# %%time
# 1*m5.2xlarge: Spark 2 executors, 2 cores, and 4 GB per executor
q12 = spark.sql("SELECT Date, SUM(TradedVolume) FROM stock_kv_partintion GROUP BY Date").show()
# +
# %%time
# 1*m5.2xlarge: Spark 4 executors, 1 core, and 4 GB per executor
q12 = spark.sql("SELECT Date, SUM(TradedVolume) FROM stock_kv_partintion GROUP BY Date").show()
# +
# %%time
# 1*m5.2xlarge: Spark 3 executors, 1 core, and 8 GB per executor
q12 = spark.sql("SELECT Date, SUM(TradedVolume) FROM stock_kv_partintion GROUP BY Date").show()
# +
# %%time
# 1*m5.2xlarge: Spark 6 executors, 1 core, and 4 GB per executor
q12 = spark.sql("SELECT Date, SUM(TradedVolume) FROM stock_kv_partintion GROUP BY Date").show()
# +
# %%time
# 1*m5.2xlarge: Spark 3 executors, 1 core, and 8 GB per executor
q12 = spark.sql("SELECT Date, SUM(TradedVolume) FROM stock_kv_partintion GROUP BY Date").show()
# +
# %%time
# 1*m5.2xlarge: Spark 1 executor, 1 core, and 20 GB per executor
q12 = spark.sql("SELECT Date, SUM(TradedVolume) FROM stock_kv_partintion GROUP BY Date").show()
# -
# <a id="spark-sql-join"></a>
# ### Spark SQL Join
# Build two tiny DataFrames that share a SecurityID value, then join them —
# first with the DataFrame API, then with an equivalent SQL query.
dfL = spark.createDataFrame([("2504271", "LU0378438732")], ["SecurityID", "ISIN"])
dfR = spark.createDataFrame([("2504271", "JOIN in Spark SQL")], ["SecurityID", "SQL Query"])
# DataFrame-API join (note: show() returns None, so dfJoin is None).
dfJoin = dfL.join(dfR, dfL.SecurityID == dfR.SecurityID).show()
dfL.createOrReplaceTempView("t1")
dfR.createOrReplaceTempView("t2")
# Same join expressed in SQL over the two temporary views.
qJoin = spark.sql("SELECT * FROM t1, t2 where t1.SecurityID=t2.SecurityID").show()
# <a id="spark-sql-on-parquet"></a>
# ### Spark SQL on a Parquet File
# #### Persist Data into Iguazio Data Container in Parquet format
#
# Use the same stock dataset to store in Parquet format.
# +
# %%time
# Persist the stocks DataFrame as a Parquet dataset (replacing any previous
# copy), then read it back and peek at the first row.
parqFile = os.path.join(file_path + '/stocks_parq')
df.write.mode("overwrite").parquet(parqFile)
# -
dfPARQ = spark.read.parquet(parqFile)
dfPARQ.select("ISIN", "Date").head()
# + [markdown] toc-hr-collapsed=true
# <a id="spark-sql-on-partitioned-table"></a>
# ### Spark SQL on a Partitioned Table
# -
# #### Create a Partitioned Table
# This examples creates a partitioned "weather" table. The `option("partition", "year, month, day")` write option partitions the table by the year, month, and day item attributes. As demonstrated in the following image, if you browse the container in the dashboard after running the example, you'll see that the weather directory has **year=<value>/month=<value>/day=<value>** partition directories that match the written items. If you select any of the nested day partition directories, you can see the written items and their attributes. For example, the first item (with attribute values 2016, 3, 25, 6, 16, 0.00, 55) is saved to a 20163256 file in a **weather/year=2016/month=3/day=25** partition directory.
# +
# Path of the partitioned "weather" NoSQL table.
table_path = os.path.join(os.getenv('V3IO_HOME_URL')+'/examples/weather/')
# NOTE(review): this rebinds `df` (previously the stocks DataFrame) to the
# new weather DataFrame; subsequent cells operate on the weather data.
df = spark.createDataFrame([
    (2016, 3, 25, 17, 18, 0.2, 62),
    (2016, 7, 24, 7, 19, 0.0, 52),
    (2016, 12, 24, 9, 10, 0.1, 47),
    (2017, 5, 7, 14, 21, 0.0, 70),
    (2017, 11, 1, 10, 15, 0.0, 34),
    (2017, 12, 12, 16, 12, 0.0, 47),
    (2017, 12, 24, 17, 11, 1.0, 50),
    (2018, 1, 18, 17, 10, 2.0, 45),
    (2018, 5, 20, 21, 20, 0.0, 59),
    (2018, 11, 1, 11, 11, 0.1, 65)
], ["year", "month", "day", "hour", "degrees_cel", "rain_ml", "humidity_per"])
# Build a unique "time" primary key by concatenating year/month/day/hour.
df_with_key = df.withColumn(
    "time", concat(df["year"], df["month"], df["day"], df["hour"]))
# Partition the table by year, month, day, and hour attributes.
df_with_key.write.format("io.iguaz.v3io.spark.sql.kv") \
    .mode("overwrite") \
    .option("key", "time") \
    .option("partition", "year, month, day, hour") \
    .save(table_path)
# -
# #### Reading from partition table
#
# Following is the output of the example's show commands for each read. The filtered results are gathered by scanning only the partition directories that match the filter criteria.
# ##### Perform A Full Table Scan
# Full table scan of the partitioned weather table.
readDF = spark.read.format("io.iguaz.v3io.spark.sql.kv").load(table_path)
readDF.show()
# ##### Retrieve all data in the last six months of each year:
#
# Filter: month > 6
# NOTE: `show()` returns None, so the original `readDF = ....show()`
# assignments stored None; simply run each read-filter-show chain.
spark.read.format("io.iguaz.v3io.spark.sql.kv").load(table_path) \
    .filter("month > 6").show()
# ##### Retrieve all hours in Dec 24 of each year:
#
# Filter: month == 12 AND day == 24
spark.read.format("io.iguaz.v3io.spark.sql.kv").load(table_path) \
    .filter("month == 12 AND day == 24") \
    .show()
# ##### Retrieve data during 08:00-20:00 each day in the FIRST six months
# of each year (the filter is month < 7; the original heading said "last").
spark.read.format("io.iguaz.v3io.spark.sql.kv").load(table_path) \
    .filter("month < 7 AND hour >= 8 AND hour <= 20") \
    .show()
# + [markdown] toc-hr-collapsed=true
# <a id="conditional-update"></a>
# ## Perform Conditional Data Updates
#
# This example demonstrates how to conditionally update NoSQL table items by using a conditional write option.
# Each `write` command in the example is followed by matching `read` and `show` commands to read and display the value of the updated item in the target table after the write operation.
# -
# <a id="conditional-update-generate-data"></a>
# ### Generate Data
#
# The first write command writes an item (row) to a "cars" table. The item's `reg_license` primary-key (identity-column) attribute is set to 7843321, the `model` attribute is set to "Honda", and the `odometer` attribute is set to `29321`. The `overwrite` save mode is used to overwrite the table if it already exists and create it otherwise. Reading the item from the table produces this output:
# +
# Create (or overwrite) the "cars" table with a single item keyed by
# reg_license.
writeDF = spark.createDataFrame([("7843321", "Honda", 29321)],
                                ["reg_license", "model", "odometer"])
writeDF.write.format("io.iguaz.v3io.spark.sql.kv") \
    .option("key", "reg_license") \
    .mode("overwrite") \
    .save(os.path.join(os.getenv('V3IO_HOME_URL'))+'/cars/')
# `show()` returns None, so the original `readDF = ....show()` stored None.
# Keep the DataFrame and display it as separate steps.
readDF = spark.read.format("io.iguaz.v3io.spark.sql.kv") \
    .load(os.path.join(os.getenv('V3IO_HOME_URL'))+'/cars/')
readDF.show()
# -
# <a id="conditional-update-perform-update"></a>
# ### Conditionally Update the Data
#
# Update the odometer to `31718` on the condition that the new odometer value is greater than the old value.
# This ensures that the `odometer` attribute (column) reflects the most updated value of the odometer.
# +
# Conditionally update the item: the "condition" option only applies the
# write when the new odometer value (${odometer}) exceeds the stored one.
writeDF = spark.createDataFrame([("7843321", "Honda", 31718)],
                                ["reg_license", "model", "odometer"])
writeDF.write.format("io.iguaz.v3io.spark.sql.kv") \
    .option("key", "reg_license") \
    .option("condition", "${odometer} > odometer") \
    .mode("append") \
    .save(os.path.join(os.getenv('V3IO_HOME_URL'))+'/cars/')
# `show()` returns None, so the original `readDF = ....show()` stored None.
# Keep the DataFrame and display it as separate steps.
readDF = spark.read.format("io.iguaz.v3io.spark.sql.kv") \
    .load(os.path.join(os.getenv('V3IO_HOME_URL'))+'/cars/')
readDF.show()
# -
# <font color=green> **Congratulations!**</font> You've completed the Spark SQL Analytics with the Iguazio Data Science Platform tutorial.
# + [markdown] toc-hr-collapsed=true
# ## Cleanup
#
# Prior to exiting, let's do housekeeping to release disk space, computation and memory resources taken by this session.
# -
# ### Remove Data
#
# When you are done, uncomment the remove command in the following code to remove the example directory:
# +
# Uncomment the following line to remove the examples directory:
# # rm -rf /v3io/${V3IO_HOME}/examples/*
# + [markdown] toc-hr-collapsed=false
# <a id="stop-spark-session"></a>
# ### Stop the Spark Session
#
# Run the following command to release the computation and memory resources that are being consumed by your Spark session:
# -
spark.stop()
| getting-started/spark-sql-analytics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
#
# # Compute LCMV beamformer on evoked data
#
#
# Compute LCMV beamformer solutions on evoked dataset for three different choices
# of source orientation and stores the solutions in stc files for visualisation.
#
#
#
# +
# Author: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import numpy as np
import mne
from mne.datasets import sample
from mne.beamformer import lcmv
print(__doc__)
data_path = sample.data_path()
# File locations inside the MNE sample dataset (Python 2 kernel — stick to
# plain string concatenation).
meg_dir = data_path + '/MEG/sample'
raw_fname = meg_dir + '/sample_audvis_raw.fif'
event_fname = meg_dir + '/sample_audvis_raw-eve.fif'
fname_fwd = meg_dir + '/sample_audvis-meg-eeg-oct-6-fwd.fif'
label_name = 'Aud-lh'
fname_label = meg_dir + '/labels/' + label_name + '.label'
subjects_dir = data_path + '/subjects'
# -
# Get epochs
#
#
# +
# Epoching parameters: auditory/left event, 200 ms baseline, 500 ms window.
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True, proj=True)
raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
                       exclude='bads', selection=left_temporal_channels)
# Pick the channels of interest
raw.pick_channels([raw.ch_names[pick] for pick in picks])
# Re-normalize our empty-room projectors, so they are fine after subselection
raw.info.normalize_proj()
# Read epochs
proj = False  # already applied
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                    baseline=(None, 0), preload=True, proj=proj,
                    reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()
# Forward solution with surface-oriented source space.
forward = mne.read_forward_solution(fname_fwd, surf_ori=True)
# Compute regularized noise and data covariances
noise_cov = mne.compute_covariance(epochs, tmin=tmin, tmax=0, method='shrunk')
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
                                  method='shrunk')
plt.close('all')
# Compute an LCMV solution for each source-orientation choice and plot the
# label-averaged time course of each.
pick_oris = [None, 'normal', 'max-power']
names = ['free', 'normal', 'max-power']
descriptions = ['Free orientation', 'Normal orientation', 'Max-power '
                'orientation']
colors = ['b', 'k', 'r']
for pick_ori, name, desc, color in zip(pick_oris, names, descriptions, colors):
    stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.01,
               pick_ori=pick_ori)
    # View activation time-series
    # NOTE(review): the label read is loop-invariant and could be hoisted.
    label = mne.read_label(fname_label)
    stc_label = stc.in_label(label)
    plt.plot(1e3 * stc_label.times, np.mean(stc_label.data, axis=0), color,
             hold=True, label=desc)
plt.xlabel('Time (ms)')
plt.ylabel('LCMV value')
plt.ylim(-0.8, 2.2)
plt.title('LCMV in %s' % label_name)
plt.legend()
plt.show()
# Plot last stc in the brain in 3D with PySurfer if available
brain = stc.plot(hemi='lh', subjects_dir=subjects_dir)
brain.set_data_time_index(180)
brain.show_view('lateral')
| 0.12/_downloads/plot_lcmv_beamformer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="aNgmujIaGLP6"
# # Introducción
#
# En este listado de ejercicios propuestos podrás hacerte una idea del tipo de consultas que se pueden pedir en la prueba.
#
# Recuerda consultar la referencia de los Models de Odoo para poder resolverlos. Una vez tengas el modo Desarrollador activado en los Ajustes, la referencia se encuentra en el menú **Técnico** → **Modelos** (dentro del apartado **Estructura de la base de datos**).
#
# Asegúrate de contar con los datos de prueba en tu sistema.
#
# Los resultados que aparecen debajo de los bloques de código son orientativos, para que puedas hacerte una idea de lo que debes obtener. Pueden variar dependiendo de la información que tengas en tu base de datos.
# + [markdown] id="Bed3rOhSH4if"
# # Conexión con la API
#
# Recuerda que para poder realizar la conexión, necesitamos los siguientes parámetros de nuestra instancia de Odoo:
#
#
# * **URL**: Por ejemplo, `https://edu-test.odoo.com`.
# * **Nombre de la BD**: Por ejemplo, `edu-test`.
# * **Nombre de usuario**: El correo electrónico con el que iniciamos sesión en nuestra instancia. Por ejemplo, `<EMAIL>.es`.
# * **Contraseña del usuario**: Por ejemplo, `password`.
#
# **Importante**: Cuando damos de alta una instancia dominio.odoo.com, la contraseña que tenemos es de la cuenta en odoo.com, pero no se genera automáticamente una contraseña para el usuario administrador. Por lo tanto, tenemos que generar una (aconsejo que sea la misma, para evitar confusiones posteriores). Para ello, seguimos los siguientes pasos:
#
#
# 1. Iniciamos sesión en nuestra instancia de Odoo.
# 2. Pulsamos en la aplicación `Ajustes`.
# 3. En el apartado `OPCIONES GENERALES` → `Usuarios`, pulsamos en `Administrar usuarios`.
# 4. Hacemos clic en nuestro usuario.
# 5. Pulsamos en `Acción` → `Cambiar la contraseña`.
# 6. Establecemos la contraseña que queramos.
#
# Una vez lo hemos hecho, ya podemos almacenar en variables los datos necesarios para la conexión.
#
# Modifica el siguiente bloque de código con los datos de tu instancia y ejecutálo.
#
#
#
# + id="QJ9Rm6zLLFLR"
# Connection parameters for the Odoo instance; replace the placeholders
# with your own instance's URL, database name, login, and password.
url = 'https://edu-test.odoo.com'
db = 'edu-test'
username = '<EMAIL>'
password = 'password'
# + [markdown] id="yPWtV3s38zy1"
# Ejecuta ahora el siguiente para poder comenzar a realizar las peticiones con el uid:
# + id="1msuOARpMaR1"
import xmlrpc.client
# The "common" endpoint handles authentication and returns the user id
# (uid) that every subsequent call must pass along with the password.
common = xmlrpc.client.ServerProxy('{}/xmlrpc/2/common'.format(url))
uid = common.authenticate(db, username, password, {})
# The "object" endpoint exposes model methods through execute_kw.
models = xmlrpc.client.ServerProxy('{}/xmlrpc/2/object'.format(url))
# + [markdown] id="s1GxAnKk7C29"
# # 1. Cuenta las ventas mayores de 1000
#
# Muestra cuántas ventas se han realizado con una cantidad total mayor que 1000.
# + colab={"base_uri": "https://localhost:8080/"} id="5Et0HPcl67oR" outputId="fd4e45bd-b003-451e-cda4-9f9a585f7f1a"
# Count the sale orders whose total amount exceeds 1000.
domain = [['amount_total', '>', '1000']]
models.execute_kw(db, uid, password, 'sale.order', 'search_count', [domain])
# + [markdown] id="gJ7JTs56MSvn"
# # 2. Dos ventas mayores de 1000
#
# Muestra los identificadores que ocupan el lugar 3º y 4º de las ventas con una cantidad total mayor que 1000 e indica cuáles han sido esas cantidades.
#
#
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="kp9YGMaPMhn5" outputId="d88c03a5-6656-4f35-b3ce-dc5888ca7abf"
# Fetch the 3rd and 4th matching sales (offset 2, limit 2) with their totals.
domain = [['amount_total', '>', '1000']]
opciones = {'fields': ['amount_total'], 'offset': 2, 'limit': 2}
models.execute_kw(db, uid, password, 'sale.order', 'search_read',
                  [domain], opciones)
# + [markdown] id="k9fQ12P_8Ako"
# Calcula la suma de ambas cantidades:
# + colab={"base_uri": "https://localhost:8080/"} id="rMx0rPn87zgN" outputId="c4e2e8d0-9498-4741-ece5-b094573a8403"
ventas = models.execute_kw(db, uid, password,
'sale.order', 'search_read',
[[['amount_total', '>', '1000']]],
{'fields': ['amount_total'], 'offset': 2, 'limit': 2})
print(ventas[0]['amount_total']+ventas[1]['amount_total'])
# + [markdown] id="9Rlv1fWlH6s3"
# # 3. Escasez de productos
#
# Muestra el nombre de los productos de los que haya un stock menor de 15 y mayor o igual que 1.
# + colab={"base_uri": "https://localhost:8080/"} id="d98UNt6uMmgn" outputId="24b47326-ea8a-4000-e37c-6bd2a4864d07"
# Names of products whose stock is below 15 but at least 1.
domain = [['qty_available', '<', 15], ['qty_available', '>=', 1]]
productos = models.execute_kw(db, uid, password,
                              'product.product', 'search_read',
                              [domain], {'fields': ['name']})
for producto in productos:
    print(producto['name'])
# + [markdown] id="IW_Ed_G3IvO-"
# # 4. Contactos estadounidenses
#
# Muestra los correos electrónicos de los contactos estadounidenses.
# + colab={"base_uri": "https://localhost:8080/"} id="uG2omK1aMqFt" outputId="5c3f09ea-2754-431c-ac69-0ca42076f288"
# Email addresses of contacts from the United States (country id 233).
contactos = models.execute_kw(db, uid, password,
                              'res.partner', 'search_read',
                              [[['country_id', '=', 233]]],
                              {'fields': ['email']})
for contacto in contactos:
    print(contacto['email'])
# + [markdown] id="rXQ2SMi2JLwO"
# # 5. Llama a los despistados
#
# Muestra los números de teléfono de aquellos contactos que tienen algún mensaje sin leer.
# + colab={"base_uri": "https://localhost:8080/"} id="X392ajeSMsgP" outputId="9286654d-8252-41de-d2b2-7aa936707210"
# Phone numbers of contacts that have unread messages.
contactos = models.execute_kw(db, uid, password,
                              'res.partner', 'search_read',
                              [[['message_unread', '=', True]]],
                              {'fields': ['phone', 'message_unread']})
for contacto in contactos:
    # For some unknown reason the search filter is not applied server-side,
    # so re-check the flag client-side with a simple conditional.
    if contacto['message_unread']:
        print(contacto['phone'])
# + [markdown] id="FjJfKUrQZ_S2"
# # 6. Brecha de seguridad
#
# Ha habido una fuga de datos y parece que algunas cuentas de los usuarios han sido comprometidas. Muestra el nombre, el correo electrónico y el teléfono de aquellos que no tengan configurado el doble factor de autenticación para avisarles inmediatamente.
# + colab={"base_uri": "https://localhost:8080/"} id="t_0oVbi6Z_cr" outputId="f9350a34-45ee-4f90-c1ed-997cf7760338"
# Name, email, and phone of users without two-factor authentication enabled.
campos = {'fields': ['name', 'email', 'phone']}
models.execute_kw(db, uid, password, 'res.users', 'search_read',
                  [[['totp_enabled', '=', False]]], campos)
# + [markdown] id="w4F9zYNZeUYi"
# # 7. Agregando productos
#
# La empresa va a comenzar a vender consolas. Crea los productos con sus respectivos precios usando la API y almacena los identificadores en una lista llamada `ids`. Los productos son:
# - Nombre: Playstation 5. Precio 499€
# - Nombre: Nintendo Switch. Precio 299€
# - Nombre: Xbox Series S. Precio 299€
# - Nombre: Xbox Series X. Precio 499€
# - Nombre: Playstation 4. Precio 299€
# + colab={"base_uri": "https://localhost:8080/"} id="77MA07q7eUku" outputId="a2dd4e19-f12a-45cd-c5a6-11edffa7d184"
# Create the console products and collect their record ids in `ids`.
products = [('Playstation 5', 499), ('Nintendo Switch', 299),
            ('Xbox Series S', 299), ('Xbox Series X', 499),
            ('Playstation 4', 299)]
ids = []
for name, price in products:
    # `create` returns the id of the new record. Use `new_id` rather than
    # the original `id`, which shadowed the builtin.
    new_id = models.execute_kw(db, uid, password, 'product.product', 'create', [{
        'name': name, 'price': price,
    }])
    ids.append(new_id)
print(ids)
# + [markdown] id="ewxY-TzaL-K0"
# # 8. Actualizando productos
#
# Actualiza los productos añadidos en el ejercicio anterior, aumentando su precio en 100, ya que comienza a haber escasez de los mismos.
# + colab={"base_uri": "https://localhost:8080/"} id="_8LLbK9OM_tr" outputId="f91647dc-313f-4a93-bd82-bcc77651e6e1"
# The exercise asks to raise every price by 100: 499 -> 599 and 299 -> 399.
# (The original wrote 499 for the 299 group — a +200 increase.)
models.execute_kw(db, uid, password, 'product.product', 'write', [[ids[0], ids[3]], {
    'price': 599
}])
models.execute_kw(db, uid, password, 'product.product', 'write', [[ids[1], ids[2], ids[4]], {
    'price': 399
}])
# + [markdown] id="1jqbuiTEMj6J"
# # 9. Elimina los productos
#
# A un empleado de la compañía se le ha olvidado declarar que ibáis a comenzar a vender consolas. Mientras se soluciona el problema legal, es mejor eliminar esos productos del inventario.
# + colab={"base_uri": "https://localhost:8080/"} id="E_ISedrANDr_" outputId="c578890f-196c-4a1e-ae31-93a036565169"
# Delete the previously created products by id.
models.execute_kw(db, uid, password, 'product.product', 'unlink', [ids])
# + [markdown] id="fRqrWmkonhC7"
# # 10. Mi primer script: Un buscador de productos por precio
#
# Elabora un script en Visual Studio Code que realice lo siguiente:
# 1. Pregunte al usuario por el precio que desea buscar.
# 2. Pregunte al usuario si desea mostrar los productos que tengan un precio igual, superior o menor a ese precio de referencia.
# 3. Muestre al usuario los nombres de los productos que cumplen esa condición.
#
# A continuación, copia y pega ese script en el siguiente bloque de código:
# + id="5-WPyZGNnhRc"
# Interactive price-search script: ask for a reference price and whether to
# list products priced above ('mayor'), below ('menor'), or equal to it.
url = 'https://edu-test.odoo.com'
db = 'edu-test'
username = '<EMAIL>'
password = 'password'

import xmlrpc.client

common = xmlrpc.client.ServerProxy('{}/xmlrpc/2/common'.format(url))
uid = common.authenticate(db, username, password, {})
models = xmlrpc.client.ServerProxy('{}/xmlrpc/2/object'.format(url))


def _search_products(operador, precio):
    """Return the products whose list_price compares to *precio* with *operador* ('>', '<' or '=')."""
    return models.execute_kw(db, uid, password,
                             'product.product', 'search_read',
                             [[['list_price', operador, precio]]],
                             {'fields': ['name']})


precio = float(input("¿Cuál es el precio que desea buscar?: "))
comparacion = input("Indica si desea localizar los productos que tengan un precio mayor, menor o igual: ")

# Map each answer to the search operator and the exact messages to show.
# (The original duplicated the whole query-and-print logic three times;
# the user-facing strings are preserved verbatim.)
if comparacion == 'mayor':
    operador = '>'
    encabezado = "Los productos con un precio mayor que "+str(precio)+" son:"
    vacio = "No hay ningún producto con un precio mayor que ese"
elif comparacion == 'menor':
    operador = '<'
    encabezado = "Los productos con un precio menor que "+str(precio)+" son:"
    vacio = "No hay ningún producto con un precio menor que ese"
else:
    operador = '='
    encabezado = "Los productos con el precio "+str(precio)+" son:"
    vacio = "No hay ningún producto con ese precio"

productos = _search_products(operador, precio)
if len(productos) > 0:
    print(encabezado)
    for producto in productos:
        print(producto['name'])
else:
    print(vacio)
# + [markdown] id="OrSfCmp9sYSM"
# # 11. Mi segundo script: El límite es tu imaginación
#
# Elabora otro script usando la API de Odoo que realice lo que quieras. Cuanto más complejo sea el reto que te propongas, más aprenderás.
#
# Escribe a continuación la descripción de tu script y copia/pega el código en el bloque contiguo:
#
#
# + id="hB4P7J6CsYaa"
| api-odoo/Ejercicios_API_externa_de_Odoo_resueltos.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="ezGsF9F5lXfg"
# # Lista de Exercício 03:
# + [markdown] id="NkiEc2WRlTk_"
# ## Q01: Faça um programa que peça uma nota, entre zero e dez. Mostre uma mensagem caso o valor seja inválido e continue pedindo até que o usuário informe um valor válido.
# + colab={"base_uri": "https://localhost:8080/"} id="B66-Af7plgEU" outputId="21c90fb5-2e6f-4935-a76b-ea6af26b2edd"
# Keep asking until the user types an integer between 0 and 10 (inclusive).
while True:
    try:
        nota = int(input('Digite uma nota de 0 a 10: '))
        if 0 <= nota <= 10:  # chained comparison instead of bitwise '&'
            print("Ok, validado.")
            break
        else:
            print('Valor não valido')
    except ValueError:  # narrow: only non-numeric input (a bare except also swallowed Ctrl-C)
        print('Digite um número valido')
# + [markdown] id="x59FZ9Cml24p"
# ## Q02 - Faça um programa que leia um nome de usuário e a sua senha e não aceite a senha igual ao nome do usuário, mostrando uma mensagem de erro e voltando a pedir as informações.
# + colab={"base_uri": "https://localhost:8080/"} id="LQfu2NJRmHB8" outputId="8b703719-5cbc-41b7-ec3d-31bf7fee7db2"
# Prompt repeatedly; accept the pair only when the password differs from the username.
while True:
    user = input('Digite seu usario: ')
    senha = input('Digite sua senha: ')
    if user != senha:
        print('Validado')
        break
    print('Usuario e senhas iguais, digite novamente.')
# + [markdown] id="iYt94yQMmot-"
# ## Q03 - Supondo que a população de um país A seja da ordem de 80000 habitantes com uma taxa anual de crescimento de 3% e que a população de B seja 200000 habitantes com uma taxa de crescimento de 1.5%. Faça um programa que calcule e escreva o número de anos necessários para que a população do país A ultrapasse ou iguale a população do país B, mantidas as taxas de crescimento
#
# + colab={"base_uri": "https://localhost:8080/"} id="YtzkPBAPmw8U" outputId="e965c4de-f273-42ad-ec3c-9331d08ebafd"
# Country A: 80000 inhabitants growing 3%/year (the problem statement says
# 80000; the original code had 8000). Country B: 200000 growing 1.5%/year.
# Count the years until A reaches or passes B ("ultrapasse ou iguale").
pop_A = 80000
pop_B = 200000
ano = 0
while True:
    ano += 1
    pop_A = pop_A*1.03
    pop_B = pop_B*1.015
    if pop_A >= pop_B:  # >=: equal populations also satisfy the statement
        break
print('Em {} anos a Cidade A terá mais habitantes que a cidade B'.format(ano))
# + [markdown] id="CEHGGEUEnM_E"
# ## Q04 - A seqüência de Fibonacci é a seguinte: 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, ... Sua regra de formação é simples: os dois primeiros elementos são 1; a partir de então, cada elemento é a soma dos dois anteriores. Faça um algoritmo que leia um número inteiro calcule o seu número de Fibonacci. F1 = 1, F2 = 1, F3 = 2, etc.
#
# + colab={"base_uri": "https://localhost:8080/"} id="ae_kEohSnelz" outputId="4db5573d-94b0-4a2e-d3d2-78c62ad28156"
# Build the first `num` Fibonacci terms (F1 = F2 = 1), echoing the partial
# list after each step, then print the requested term.
num = int(input('Digite o número: '))
fib = []
for i in range(num):
    fib.append(1 if i < 2 else fib[-1] + fib[-2])
    print(fib)
print(fib[-1])
# + [markdown] id="_VLUPOehnvFZ"
# ## Q05 - Dados dois números inteiros positivos, determinar o máximo divisor comum entre eles usando o algoritmo de Euclides.
# + colab={"base_uri": "https://localhost:8080/"} id="obmVPv30oHh9" outputId="6ae89dec-62f8-4dad-e79e-2e01c24a6868"
# Euclid's algorithm: repeatedly replace (a, b) with (b, a mod b) until the
# remainder is zero; the last non-zero divisor is the GCD.
num1 = int(input('Digite o primeiro número: '))
num2 = int(input('Digite o segundo número: '))
dividendo, divisor = num1, num2
resto = dividendo % divisor
while resto != 0:
    dividendo, divisor = divisor, resto
    resto = dividendo % divisor
print("O Mdc de {} com {} é {}".format(num1, num2, divisor))
| 01 - Python para Zumbis/PPZ_03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Ranjith-arch/LetsupgradeDataScience/blob/main/Day2_Data_Science.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="xi3Cb38iWI7l"
# # Question1
# + id="29yLoSorWQUY" outputId="9407bccf-b963-40ec-a1c9-53788470b8f9" colab={"base_uri": "https://localhost:8080/"}
# Read 10 integers from the user and keep only the even ones.
l = []
for _ in range(10):
    a = int(input())
    if a % 2 == 0:
        l.append(a)
print(l)
# + [markdown] id="Hft9sBGfWnJW"
# #Question2
# + id="YkuFEjhIWqEW" outputId="ff13870f-35d2-4998-eca0-3100a609e087" colab={"base_uri": "https://localhost:8080/"}
# Demo of several ways to build small integer lists, separated by rules.
divider = "------------------------"
print(divider)
l = list(range(1, 11))  # first 10 natural numbers
print(l)
print(divider)
l = [i & 1 for i in range(10)]  # parity: 0 for even, 1 for odd
print(l)
print(divider)
l = [i * i for i in range(10)]  # squares of 0..9
print(l)
print(divider)
l = list(range(0, 10, 2))  # even numbers below 10
print(l)
print(divider)
l = list(range(1, 10, 2))  # odd numbers below 10
print(l)
print(divider)
# + [markdown] id="_JcN8e43WxQB"
# #Question3
# + id="Z14fVVaJWzuZ" outputId="00f93643-2067-45b7-838e-2de433ba8880" colab={"base_uri": "https://localhost:8080/"}
# Map each integer 1..n to its square and print the resulting dict.
n = int(input())
d = {i: i ** 2 for i in range(1, n + 1)}
print(d)
# + [markdown] id="AmJOBoC7W6Ut"
# #Question4
# + id="WyNmXOJdWLZ3" outputId="a219ad55-6ab2-4b4e-ca9c-37f437760a02" colab={"base_uri": "https://localhost:8080/"}
# Robot-walk puzzle: start at the origin, apply n movement commands of the
# form "RIGHT/LEFT/UP/DOWN <steps>", and print a rounded distance.
x=0
y=0
dis=0
n=int(input())
for i in range(n):
    l=input().upper().split()  # e.g. "up 5" -> ["UP", "5"]
    if l[0]=='RIGHT':
        x+=int(l[1])
    elif l[0]=='LEFT':
        x-=int(l[1])
    elif l[0]=='UP':
        y+=int(l[1])
    elif l[0]=='DOWN':
        y-=int(l[1])
    # NOTE(review): 'dis' accumulates the distance-from-origin after EVERY
    # move. If the intended answer is the final distance from the start,
    # the sqrt should be computed once after the loop — confirm against the
    # original problem statement.
    dis=dis+(((x**2)+(y**2))**0.5)
print(round(dis))
| Day2_Data_Science.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:miniconda3-python-tutorial]
# language: python
# name: conda-env-miniconda3-python-tutorial-py
# ---
import pandas as pd
import datetime
import numpy as np
import argparse
import math
from pysplit.trajectory_generator import generate_bulktraj
# +
# Command-line interface for batch (non-notebook) runs.
parser = argparse.ArgumentParser(description='Calculating hysplit trajs.')
parser.add_argument("--climate", choices=["hist", "pgw"], required=True, type=str, help="This is the hist or pgw choice.")
parser.add_argument("--month", required=True, type=int, help="Storm month for trajectory calculation.")
parser.add_argument("--ens", required=True, type=int, help="Ensemble number.")
# NOTE(review): parse_args() reads sys.argv, so inside a notebook without
# these flags it raises SystemExit; the cells below also hard-code
# which_climate / which_month / ens_number and never read `args` — confirm
# which configuration source is intended.
args=parser.parse_args()
# -
# Hard-coded run configuration; these shadow the argparse values above
# (args.climate / args.month / args.ens are never read in this file).
which_climate='pgw'
which_month=5
ens_number=1
# +
# Work-directory bookkeeping for 10 groups x 36 workers (360 flat slots).
# work_help1[k] is the group index (0..9, each repeated 36 times) and
# work_help2[k] is the worker index (1..36, cycled 10 times) — the same
# arrays the original built from 20 hand-written np.hstack rows.
work_help1 = np.repeat(np.arange(10), 36)
work_help2 = np.tile(np.arange(1, 37), 10)
# -
# Pick the HYSPLIT working directory for flat slot 36 ("work<worker>_<group>");
# slot 36 is the first slot of group 1, so this yields "work1_1".
which_working=f"work{work_help2[36]}_{work_help1[36]}"
print(which_working)
print(which_climate)
print(which_month)
print(ens_number)
# +
#which_climate='hist'
# +
# Load the storm catalogue for the chosen climate, split the timestamp into
# calendar columns, and keep only rows from the target month.
csv_file = pd.read_csv(f'/glade/work/bpoujol/Moisture_tracking/trajectory_information_{which_climate}.csv')
ready_dates = pd.DatetimeIndex(
    pd.to_datetime(csv_file['TIME (UTC)'], format='%Y-%m-%d_%H:%M:%S'))
csv_file['YEAR'] = ready_dates.year
csv_file['MONTH'] = ready_dates.month
csv_file['DAY'] = ready_dates.day
csv_file['HOUR'] = ready_dates.hour
csv_file = csv_file[csv_file['MONTH'] == which_month]
# -
csv_file
def ens_create(ens_num, lat, lon):
    """Return the displaced (lat, lon) for ensemble member `ens_num`.

    The 49 members form a 7x7 grid of horizontal offsets around the storm
    centre: 0-3 km east/west combined with 0-3 km north/south. Member 0 is
    the undisplaced centre; members 1-12 are single-axis offsets (E, W, S,
    N, each at 1/2/3 km); members 13-48 are the two-axis combinations,
    applied as a north/south displacement followed by an east/west
    displacement — exactly the sequence the original 49-branch if-chain
    performed, so the floating-point results are identical.
    """
    # The original looked the index up in 7 concatenated copies of
    # range(49); for 0 <= ens_num < 343 that is simply modulo 49 (and this
    # no longer raises IndexError for larger indices).
    ens_num = ens_num % 49
    if ens_num == 0:
        return compute_displacement(lat, lon)
    if ens_num <= 12:
        # Single displacement: groups of three (dist 1, 2, 3 km) toward
        # E (bear 90), W (270), S (180), N (0), in that order.
        group, step = divmod(ens_num - 1, 3)
        bearing = (90, 270, 180, 0)[group]
        return compute_displacement(lat, lon, dist=step + 1, bear=bearing)
    # Double displacement: members 13-30 start north (bear 0), 31-48 start
    # south (bear 180); within each half the first nine finish east (90),
    # the next nine finish west (270). Distances cycle 1-3 km on each axis.
    first_bear = 0 if ens_num <= 30 else 180
    second_bear = 90 if (ens_num - 13) % 18 < 9 else 270
    idx = (ens_num - 13) % 9
    first_dist = idx // 3 + 1
    second_dist = idx % 3 + 1
    newlat, newlon = compute_displacement(lat, lon, dist=first_dist, bear=first_bear)
    return compute_displacement(newlat, newlon, dist=second_dist, bear=second_bear)
def compute_displacement(lat, lon, dist=None, bear=None):
    """Great-circle destination point.

    Move `dist` km from (lat, lon) along the initial bearing `bear`
    (degrees clockwise from north) on a sphere of radius 6378.1 km.
    With no `dist` (None or 0) the input point is returned unchanged.
    Returns (lat, lon) in degrees.
    """
    if not dist:
        # No displacement requested: identity (the original then had a
        # redundant `if dist:` wrapping the rest of the body).
        return lat, lon
    R = 6378.1                  # Earth radius (km)
    brng = math.radians(bear)   # bearing in radians (the old comment wrongly said "90 degrees")
    lat1 = math.radians(lat)
    lon1 = math.radians(lon)
    # Standard spherical destination-point formulas.
    lat2 = math.asin(math.sin(lat1) * math.cos(dist / R) +
                     math.cos(lat1) * math.sin(dist / R) * math.cos(brng))
    lon2 = lon1 + math.atan2(math.sin(brng) * math.sin(dist / R) * math.cos(lat1),
                             math.cos(dist / R) - math.sin(lat1) * math.sin(lat2))
    return math.degrees(lat2), math.degrees(lon2)
def height_generator(ens_num, altitude):
    """Return the release altitude for ensemble member `ens_num`.

    The 343 members are split into 7 consecutive bands of 49; each band
    scales the given altitude by 1.00, 0.95, 0.90, 0.85, 0.80, 0.75, 0.70
    respectively (same table the original built from 7 hstack rows).
    """
    fraction = np.repeat([1.0, 0.95, 0.9, 0.85, 0.8, 0.75, 0.7], 49)
    return altitude * fraction[ens_num]
which_working
# +
############################################################################
############################################################################
#where is hysplit working folder?
working_dir = f'/glade/scratch/molina/hysplit/trunk/{which_working}'
#where is arl format meteo data?
meteo_dir = f'/glade/scratch/molina/basile/{which_climate}'
#where is hysplit model executable?
hysplit_dir=r'/glade/scratch/molina/hysplit/trunk/exec/hyts_std'
#where to put trajs?
output_dir=f'/glade/scratch/molina/basile/{which_climate}_traj/'
############################################################################
############################################################################
# Negative run length: presumably a 240-hour *back* trajectory — confirm
# against the pysplit generate_bulktraj documentation.
runtime = -240
basename = []
years = []
months = []
hours = []
location = []
altitudes = []
#66.736100 -151.93335 261.719970
for i in range(len(csv_file)):
    print(i)
    # Output file prefix: storm id, subregion, and ensemble member.
    basename = 'trajid'+str(csv_file.iloc[i][0])+'_subregion'+str(csv_file.iloc[i][2])+'_'+'ens'+str(ens_number)+'_'
    # NOTE(review): year/month/hour/location are hard-coded (2005-05, 06Z,
    # 66.7N 151.9W) instead of taken from the csv row — looks like a debug
    # override; confirm before production runs.
    years = [2005]
    months = [5]
    hours = [6]
    location = (66.7, -151.9)
    # Release altitude scaled for this ensemble member (csv column 6).
    altitudes = [height_generator(ens_num=ens_number, altitude=csv_file.iloc[i][6])]
    # monthslice window: the day before the storm day through the storm day.
    day1 = (csv_file.iloc[i][10]-1)
    day2 = csv_file.iloc[i][10]
    #break
    generate_bulktraj(basename=basename,
                      hysplit_working=working_dir,
                      output_dir=output_dir,
                      meteo_dir=meteo_dir,
                      years=years,
                      months=months,
                      hours=hours,
                      altitudes=altitudes,
                      coordinates=location,
                      run=runtime,
                      meteoyr_2digits=False, outputyr_2digits=False,
                      monthslice=slice(day1, day2, 1),
                      meteo_bookends=([1] , [1]),
                      get_reverse=False, get_clipped=False,
                      hysplit=hysplit_dir)
    # NOTE(review): this break stops after the first csv row, so only one
    # trajectory set is generated per invocation — remove to process all rows.
    break
############################################################################
############################################################################
############################################################################
# -
working_dir
years
day1
'trajid11_subregion6_ens1_jul731.13293summer2003071700'
'trajid11_subregion6_ens1_jul731.13293summer2003071700
trajid95_subregion3_ens1_may678.9016spring2005051606
trajid26_subregion3_ens1_aug420.9251summer2003081206
ens_create(ens_num=ens_number, lat=csv_file.iloc[i][4], lon=csv_file.iloc[i][5])
round(height_generator(ens_num=ens_number, altitude=csv_file.iloc[i][6]),1)
basename
csv_file
csv_file
| alaska_storms/alaska_hysplitgen.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia Speedy Plots 1.6.0
# language: julia
# name: julia-speedy-plots-1.6
# ---
# ### Solve system of equations using Gaussian elimination method:
# $$
# q - r + 2s = -3
# $$
#
# $$
# 4q + 4r -2s = 1
# $$
#
# $$
# -2q +2r -4s = 6
# $$
# +
#=
Put into augmented matrix:
[1 -1 2 -3]
|4 4 -2 1|
[-2 2 -4 6]
=#
# -
# Augmented matrix [A | b] for the 3x3 system; Float64 so the row
# operations below work in floating point.
m = (Float64)[1 -1 2 -3;4 4 -2 1;-2 2 -4 6]
# R3 <- R3 / 2, making row 3 the exact negative of row 1
m[3,:] = m[3,:] ./ 2
display(m)
# R3 <- R3 + R1: row 3 becomes all zeros (the system is dependent)
m[3,:] = m[3,:] .+ m[1,:]
display(m)
# R2 <- R2 - 4*R1: clear the 4 below the first pivot
m[2,:] = m[2,:] .- (4 .* m[1,:])
display(m)
# R2 <- R2 / 8: normalise the second pivot to 1
m[2,:] = m[2,:] ./ 8
display(m)
# R1 <- R1 + R2: eliminate the entry above the second pivot (reduced row form)
m[1,:] .+= m[2,:]
display(m)
# #### Thus we can say
# $$q + 0.75s = -1.375$$
# $$r - 1.25s = 1.625$$
#
# #### s is a free variable, which could have any value, and would not affect the rest of the system.
# #### if
# $$s = 0$$
#
# #### then
# $$ q = -1.375; r = 1.625$$
#
# and if s is another value, q and r are affected
| Gaussian elimination - infinite solutions.ipynb |
# # Adaptive Boosting (AdaBoost)
#
# In this notebook, we present the Adaptive Boosting (AdaBoost) algorithm. The
# aim is to get intuitions regarding the internal machinery of AdaBoost and
# boosting in general.
#
# We will load the "penguin" dataset. We will predict penguin species from the
# culmen length and depth features.
# +
import pandas as pd

# Load the penguins dataset: the task is to predict the species from the
# two culmen (bill) measurements.
penguins = pd.read_csv("../datasets/penguins_classification.csv")
culmen_columns = ["Culmen Length (mm)", "Culmen Depth (mm)"]
target_column = "Species"
data, target = penguins[culmen_columns], penguins[target_column]
# Plotting range per feature: data range padded by 1 mm on each side.
range_features = {
    feature_name: (data[feature_name].min() - 1, data[feature_name].max() + 1)
    for feature_name in data.columns}
# -
# <div class="admonition note alert alert-info">
# <p class="first admonition-title" style="font-weight: bold;">Note</p>
# <p class="last">If you want a deeper overview regarding this dataset, you can refer to the
# Appendix - Datasets description section at the end of this MOOC.</p>
# </div>
# In addition, we are also using the function used in the previous notebook
# to plot the decision function of the tree.
# +
import numpy as np
import matplotlib.pyplot as plt
def plot_decision_function(fitted_classifier, range_features, ax=None):
    """Plot the boundary of the decision function of a classifier."""
    from sklearn.preprocessing import LabelEncoder

    feature_names = list(range_features.keys())
    # Sample the 2-D feature plane on a regular grid.
    step = 0.02
    grid_x, grid_y = np.meshgrid(
        np.arange(*range_features[feature_names[0]], step),
        np.arange(*range_features[feature_names[1]], step),
    )
    # Predict every grid point, encode the labels as integers, and restore
    # the grid shape for contour plotting.
    samples = np.c_[grid_x.ravel(), grid_y.ravel()]
    encoded = LabelEncoder().fit_transform(fitted_classifier.predict(samples))
    encoded = encoded.reshape(grid_x.shape)
    # Draw the filled decision regions on the requested (or a fresh) axis.
    if ax is None:
        _, ax = plt.subplots()
    ax.contourf(grid_x, grid_y, encoded, alpha=0.4, cmap="RdBu")
    return ax
# -
# We will purposefully train a shallow decision tree. Since it is shallow,
# it is unlikely to overfit and some of the training examples will even be
# misclassified.
# +
import seaborn as sns

from sklearn.tree import DecisionTreeClassifier

# Color palette used by the scatter plots below.
palette = ["tab:red", "tab:blue", "black"]

# Deliberately shallow tree (depth 2): a weak learner that will leave some
# training samples misclassified.
tree = DecisionTreeClassifier(max_depth=2, random_state=0)
tree.fit(data, target)
# -
# We can predict on the same dataset and check which samples are misclassified.
# Locate the training samples the shallow tree gets wrong.
target_predicted = tree.predict(data)
misclassified_samples_idx = np.flatnonzero(target != target_predicted)
data_misclassified = data.iloc[misclassified_samples_idx]
# +
# plot the original dataset
sns.scatterplot(data=penguins, x=culmen_columns[0], y=culmen_columns[1],
                hue=target_column, palette=palette)
# plot the misclassified samples as black crosses on top
ax = sns.scatterplot(data=data_misclassified, x=culmen_columns[0],
                     y=culmen_columns[1], label="Misclassified samples",
                     marker="+", s=150, color="k")
# overlay the tree's decision regions on the same axes
plot_decision_function(tree, range_features, ax=ax)
plt.legend(bbox_to_anchor=(1.04, 0.5), loc="center left")
_ = plt.title("Decision tree predictions \nwith misclassified samples "
              "highlighted")
# -
# We observe that several samples have been misclassified by the classifier.
#
# We mentioned that boosting relies on creating a new classifier which tries to
# correct these misclassifications. In scikit-learn, learners have a
# parameter `sample_weight` which forces it to pay more attention to
# samples with higher weights during the training.
#
# This parameter is set when calling
# `classifier.fit(X, y, sample_weight=weights)`.
# We will use this trick to create a new classifier by 'discarding' all
# correctly classified samples and only considering the misclassified samples.
# Thus, misclassified samples will be assigned a weight of 1 and well
# classified samples will be assigned a weight of 0.
# +
# Boosting trick: weight 1 on previously misclassified samples, 0 elsewhere,
# so this second tree trains only on the first tree's mistakes.
sample_weight = np.zeros_like(target, dtype=int)
sample_weight[misclassified_samples_idx] = 1
tree = DecisionTreeClassifier(max_depth=2, random_state=0)
tree.fit(data, target, sample_weight=sample_weight)
# +
# Re-plot: the previously misclassified samples against the re-weighted
# tree's decision regions.
sns.scatterplot(data=penguins, x=culmen_columns[0], y=culmen_columns[1],
                hue=target_column, palette=palette)
ax = sns.scatterplot(data=data_misclassified, x=culmen_columns[0],
                     y=culmen_columns[1],
                     label="Previously misclassified samples",
                     marker="+", s=150, color="k")
plot_decision_function(tree, range_features, ax=ax)
plt.legend(bbox_to_anchor=(1.04, 0.5), loc="center left")
_ = plt.title("Decision tree by changing sample weights")
# -
# We see that the decision function drastically changed. Qualitatively, we see
# that the previously misclassified samples are now correctly classified.
# +
# Samples misclassified by BOTH trees: intersect the old and new mistake sets.
target_predicted = tree.predict(data)
newly_misclassified_samples_idx = np.flatnonzero(target != target_predicted)
remaining_misclassified_samples_idx = np.intersect1d(
    misclassified_samples_idx, newly_misclassified_samples_idx
)
print(f"Number of samples previously misclassified and "
      f"still misclassified: {len(remaining_misclassified_samples_idx)}")
# -
# However, we are making mistakes on previously well classified samples. Thus,
# we get the intuition that we should weight the predictions of each classifier
# differently, most probably by using the number of mistakes each classifier
# is making.
#
# So we could use the classification error to combine both trees.
# Training accuracy of each tree, used as an informal ensemble weight:
# trust the more accurate learner more when combining predictions.
ensemble_weight = [
    (target.shape[0] - len(misclassified_samples_idx)) / target.shape[0],
    (target.shape[0] - len(newly_misclassified_samples_idx)) / target.shape[0],
]
ensemble_weight
# The first classifier was 94% accurate and the second one 69% accurate.
# Therefore, when predicting a class, we should trust the first classifier
# slightly more than the second one. We could use these accuracy values to
# weight the predictions of each learner.
#
# To summarize, boosting learns several classifiers, each of which will
# focus more or less on specific samples of the dataset. Boosting is thus
# different from bagging: here we never resample our dataset, we just assign
# different weights to the original dataset.
#
# Boosting requires some strategy to combine the learners together:
#
# * one needs to define a way to compute the weights to be assigned
# to samples;
# * one needs to assign a weight to each learner when making predictions.
#
# Indeed, we defined a really simple scheme to assign sample weights and
# learner weights. However, there are statistical theories (like in AdaBoost)
# for how these sample and learner weights can be optimally calculated.
#
# We will use the AdaBoost classifier implemented in scikit-learn and
# look at the underlying decision tree classifiers trained.
# +
from sklearn.ensemble import AdaBoostClassifier

# Three boosting rounds of depth-3 trees with the discrete SAMME algorithm.
# NOTE(review): the `base_estimator` argument was renamed `estimator` in
# scikit-learn 1.2 and removed in 1.4 — update if running a recent version.
base_estimator = DecisionTreeClassifier(max_depth=3, random_state=0)
adaboost = AdaBoostClassifier(base_estimator=base_estimator,
                              n_estimators=3, algorithm="SAMME",
                              random_state=0)
adaboost.fit(data, target)
# -
# Show the decision boundary learned by each of the three boosting rounds.
for boosting_round, tree in enumerate(adaboost.estimators_):
    plt.figure()
    # we create a new figure to plot the decision function of each round
    ax = sns.scatterplot(x=culmen_columns[0], y=culmen_columns[1],
                         hue=target_column, data=penguins,
                         palette=palette)
    plot_decision_function(tree, range_features, ax=ax)
    plt.legend(bbox_to_anchor=(1.04, 0.5), loc="center left")
    _ = plt.title(f"Decision tree trained at round {boosting_round}")

# Learner weights and weighted errors as computed by AdaBoost (SAMME).
print(f"Weight of each classifier: {adaboost.estimator_weights_}")
print(f"Error of each classifier: {adaboost.estimator_errors_}")
# We see that AdaBoost learned three different classifiers, each of which
# focuses on different samples. Looking at the weights of each learner, we see
# that the ensemble gives the highest weight to the first classifier. This
# indeed makes sense when we look at the errors of each classifier. The first
# classifier also has the highest classification generalization performance.
#
# While AdaBoost is a nice algorithm to demonstrate the internal machinery of
# boosting algorithms, it is not the most efficient.
# This title is handed to the gradient-boosting decision tree (GBDT) algorithm,
# which we will discuss after a short exercise.
| notebooks/ensemble_adaboost.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assignment 5
# #### Student ID: *Double click here to fill the Student ID*
#
# #### Name: *Double click here to fill the name*
# ## 2
# We will now derive the probability that a given observation is part of a bootstrap sample. Suppose that we obtain a bootstrap sample from a set of n observations.
# (a) What is the probability that the first bootstrap observation is *not* the $j$th observation from the original sample? Justify your answer.
# > Ans: *double click here to answer the question.*
# (b) What is the probability that the second bootstrap observation is *not* the $j$th observation from the original sample?
# > Ans: *double click here to answer the question.*
# (c) Argue that the probability that the $j$th observation is *not* in the bootstrap sample is $(1 − 1/n)^n$.
# > Ans: *double click here to answer the question.*
# (d) When $n = 5$, what is the probability that the $j$th observation is in the bootstrap sample?
# > Ans: *double click here to answer the question.*
# (e) When $n = 100$, what is the probability that the $j$th observation is in the bootstrap sample?
# > Ans: *double click here to answer the question.*
# (f) When $n = 10, 000$, what is the probability that the $j$th observation is in the bootstrap sample?
# > Ans: *double click here to answer the question.*
# (g) Create a plot that displays, for each integer value of $n$ from $1$ to $100, 000$, the probability that the $j$th observation is in the bootstrap sample. Comment on what you observe.
# +
# coding your answer here.
# -
# > Ans: *double click here to answer the question.*
# (h) We will now investigate numerically the probability that a bootstrap sample of size $n = 100$ contains the $j$th observation. Here $j = 4$. We repeatedly create bootstrap samples, and each time we record whether or not the fourth observation is contained in the bootstrap sample.
#
# ```Python
# > from sklearn.utils import resample
# > store = np.repeat(np.nan, 10000)
# > for i in range(10000):
# > store[i] = np.sum(
# > np.array(resample(range(1,101), replace=True, n_samples=100, random_state=i)) == 4
# > )>0
# > store.mean()
# ```
#
# Comment on the results obtained.
# +
# coding your answer here.
# -
# > Ans: *double click here to answer the question.*
# ## 3
# We now review k-fold cross-validation.
# (a) Explain how k-fold cross-validation is implemented.
# > Ans: *double click here to answer the question.*
# (b) What are the advantages and disadvantages of k-fold crossvalidation relative to:
# > i. The validation set approach?
# >> Ans: *double click here to answer the question.*
# > ii. LOOCV?
# >> Ans: *double click here to answer the question.*
# ## 8
# We will now perform cross-validation on a simulated data set.
# (a) Generate a simulated data set as follows:
#
# ```Python
# > np.random.seed(1)
# > x = np.random.normal(size=100)
# > y = x - 2*x**2 + np.random.normal(size=100)
# ```
#
# In this data set, what is $n$ and what is $p$? Write out the model used to generate the data in equation form.
# +
# coding your answer here.
# -
# > Ans: *double click here to answer the question.*
# (b) Create a scatterplot of $X$ against $Y$ . Comment on what you find.
# +
# coding your answer here.
# -
# > Ans: *double click here to answer the question.*
# (c) Set a random seed, and then compute the $\operatorname{LOOCV}$ errors that result from fitting the following four models using least squares:
#
# > i. Y = $\beta_0+\beta_1X+\epsilon$<br>
# > ii. Y = $\beta_0+\beta_1X+\beta_2X^2+\epsilon$<br>
# > iii. Y = $\beta_0+\beta_1X+\beta_2X^2+\beta_3X^3+\epsilon$<br>
# > iv. Y = $\beta_0+\beta_1X+\beta_2X^2+\beta_3X^3+\beta_4X^4+\epsilon$.
#
# Note you may find it helpful to use the <span style="color:red">pd.DataFrame()</span> function to create a single data set containing both X and Y .
# +
# coding your answer here.
# -
# (d) Repeat (c) using another random seed, and report your results. Are your results the same as what you got in (c)? Why?
# +
# coding your answer here.
# -
# > Ans: *double click here to answer the question.*
# (e) Which of the models in (c) had the smallest $\operatorname{LOOCV}$ error? Is this what you expected? Explain your answer.
# > Ans: *double click here to answer the question.*
# (f) Comment on the statistical significance of the coefficient estimates that results from fitting each of the models in (c) using least squares. Do these results agree with the conclusions drawn based on the cross-validation results?
# +
# coding your answer here.
# -
# > Ans: *double click here to answer the question.*
# ## 9
# We will now consider the <span style="color:red">Boston</span> housing data set.
# (a) Based on this data set, provide an estimate for the population mean of <span style="color:red">medv</span>. Call this estimate $\hat{\mu}$.
# +
# coding your answer here.
# -
# (b) Provide an estimate of the standard error of $\hat{\mu}$. Interpret this result.
#
# *Hint: We can compute the standard error of the sample mean by dividing the sample standard deviation by the square root of the number of observations.*
# +
# coding your answer here.
# -
# (c) Now estimate the standard error of $\hat{\mu}$ using the bootstrap. How does this compare to your answer from (b)?
# +
# coding your answer here.
# -
# > Ans: *double click here to answer the question.*
# (d) Based on your bootstrap estimate from (c), provide a $95\%$ confidence interval for the mean of <span style="color:red">medv</span>. Compare it to the results obtained using T-test of Boston\$medv.
#
# *Hint: You can approximate a $95\%$ confidence interval using the formula $[\hat{\mu}-2SE(\hat{\mu}),\hat{\mu}+2SE(\hat{\mu})]$.*
#
# *Note: The 95\% confidence interval for T-test of Boston\\$medv are $\hat{\mu}$ \& $SE(\hat{\mu})$ in (b), (c).*
# +
# coding your answer here.
# -
# > Ans: *double click here to answer the question.*
# (e) Based on this data set, provide an estimate, $\hat{\mu}_{med}$, for the median value of <span style="color:red">medv</span> in the population.
# +
# coding your answer here.
# -
# (f) We now would like to estimate the standard error of $\hat{\mu}_{med}$. Unfortunately, there is no simple formula for computing the standard error of the median. Instead, estimate the standard error of the median using the bootstrap. Comment on your findings.
# +
# coding your answer here.
# -
# > Ans: *double click here to answer the question.*
| static_files/assignments/Assignment5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={} colab_type="code" id="77pK2GLwsjbd"
import pandas as pd
import numpy as np # For mathematical calculations
import matplotlib.pyplot as plt # For plotting graphs
import scipy.stats as stats
import random
from datetime import datetime # To access datetime
from pandas import Series # To work on series
# %matplotlib inline
import warnings # To ignore the warnings
warnings.filterwarnings("ignore")
# + colab={"base_uri": "https://localhost:8080/", "height": 69, "resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY> "headers": [["content-type", "application/javascript"]], "ok": true, "status": 200, "status_text": ""}}} colab_type="code" id="7U6mMtC9sktJ" outputId="b11c52b2-b0d7-4a1b-fbcb-aa3d09c1387a"
#If using Google Colab platform enable the below lines
# from google.colab import files
# uploaded = files.upload()
# Sample product data: buyer estimates, prices, sales and actual demand.
train=pd.read_csv("products_sample_data.csv")
# NOTE(review): `train_input` aliases `train` (no .copy()); later cells that
# assign columns through either name mutate the same DataFrame — confirm intended.
train_input = train
# + colab={"base_uri": "https://localhost:8080/", "height": 173} colab_type="code" id="aeuYhOInxx3J" outputId="8346b519-95c6-4e3a-b5c7-c91475143601"
# Check Data in your file
# Bare expression: displays the DataFrame when run as a notebook cell.
train
# + [markdown] colab_type="text" id="JYqH09RHly5W"
# Financial based on data
# + colab={} colab_type="code" id="6Uqwb1z0l1QP"
# Calculate cost based on available data
# Consensus buy: rounded mean of the four buyers' estimates; spread is the
# population standard deviation (ddof=0) across those estimates.
train["Buy_Quantity"] = train[['Buyer1','Buyer2','Buyer3','Buyer4']].mean(axis=1).round()
train["Standard_Dev"] = train[['Buyer1','Buyer2','Buyer3','Buyer4']].std(ddof=0,axis=1)
# Per-unit economics: underbuy cost = margin forgone on a missed sale,
# overbuy cost = loss on a unit that must be marked down.
train['Underbuy_Cost'] = train['Normal_Price'] - train['Cost_Price']
train['Overbuy_Cost'] = train['Cost_Price'] - train['Markdown_Price']
#Calculate Gross Sales
def gross_margin(row):
    """Gross margin earned on a row: units sold times the per-unit margin."""
    units_sold = row['Sales']
    per_unit_margin = row['Underbuy_Cost']
    return units_sold * per_unit_margin
#Markdown Losses
def markdown_loss(row):
    """Loss from marking down unsold units; zero when nothing is left over."""
    leftover = row['Buy_Quantity'] - row['Sales']
    if leftover <= 0:
        return 0
    return leftover * row['Overbuy_Cost']
#Calculate Net profit
def net_profit(row):
    """Net profit for a row: gross margin less markdown losses."""
    margin, losses = row['Gross_Margin'], row['Markdown_Losses']
    return margin - losses
#Calculate Lost Sales
def lost_sales(row):
    """Units of demand that could not be served because the buy was too small."""
    shortfall = row['Actual_Demand'] - row['Buy_Quantity']
    return shortfall if shortfall > 0 else 0
#Calculate Lost Margin
def lost_margin(row):
    """Margin forgone on the sales that were lost to under-buying."""
    forgone_units = row['Lost_Sales']
    return forgone_units * row['Underbuy_Cost']
# Find new sales based on new buy quantity
def new_sales(row):
    """Sales after a buy-quantity change: capped at stock when demand exceeds it."""
    demand, stock = row['Actual_Demand'], row['Buy_Quantity']
    if demand <= stock:
        return row['Sales']
    return stock
# -
# Baseline buy evaluation: derive P&L columns row-by-row from the helpers above.
train['Gross_Margin'] = train.apply(gross_margin, axis=1)
train['Markdown_Losses'] = train.apply(markdown_loss, axis=1)
train['Net_Profit'] = train.apply(net_profit, axis=1)
train['Lost_Sales'] = train.apply(lost_sales, axis=1)
train['Lost_Margin'] = train.apply(lost_margin, axis=1)
#Profit/Loss in this in the buy
buy_forecast = train.loc[:,['Product','Buy_Quantity','Actual_Demand','Sales','Gross_Margin','Markdown_Losses','Net_Profit','Lost_Sales','Lost_Margin']]
# Append a 'Total' summary row over the monetary/unit columns.
buy_forecast.loc['Total'] = pd.Series(buy_forecast[['Sales','Gross_Margin','Markdown_Losses','Net_Profit','Lost_Sales','Lost_Margin']].sum(), index = ['Sales','Gross_Margin','Markdown_Losses','Net_Profit','Lost_Sales','Lost_Margin'])
buy_forecast
# What-if scenario: scale the buy quantity by a user-entered percentage.
# NOTE(review): `train_input` aliases `train` (no .copy()), so this scenario
# also mutates the original frame — confirm intended before reusing `train`.
buy_forecast_chg = pd.DataFrame()
buy_forecast_chg = train_input
# Interactive prompt: expects an integer percentage change (e.g. 10 for +10%).
input_change = input()
change_to_add = 1 + (int(input_change)/100)
buy_forecast_chg['Buy_Quantity'] = round(buy_forecast_chg['Buy_Quantity'] * change_to_add) #Scale the buy by the entered percentage
buy_forecast_chg
# Re-derive sales (capped at the new buy quantity) and per-unit economics.
buy_forecast_chg['Sales'] = buy_forecast_chg.apply(new_sales, axis=1)
buy_forecast_chg['Underbuy_Cost'] = buy_forecast_chg['Normal_Price'] - buy_forecast_chg['Cost_Price']
buy_forecast_chg['Overbuy_Cost'] = buy_forecast_chg['Cost_Price'] - buy_forecast_chg['Markdown_Price']
#Calculate P&L
buy_forecast_chg['Gross_Margin'] = buy_forecast_chg.apply(gross_margin, axis=1)
buy_forecast_chg['Markdown_Losses'] = buy_forecast_chg.apply(markdown_loss, axis=1)
buy_forecast_chg['Net_Profit'] = buy_forecast_chg.apply(net_profit, axis=1)
buy_forecast_chg['Lost_Sales'] = buy_forecast_chg.apply(lost_sales, axis=1)
buy_forecast_chg['Lost_Margin'] = buy_forecast_chg.apply(lost_margin, axis=1)
# Keep only reporting columns and append a 'Total' summary row.
buy_forecast_chg = buy_forecast_chg.loc[:,['Product','Buy_Quantity','Actual_Demand','Sales','Gross_Margin','Markdown_Losses','Net_Profit','Lost_Sales','Lost_Margin']]
buy_forecast_chg.loc['Total'] = pd.Series(buy_forecast_chg[['Sales','Gross_Margin','Markdown_Losses','Net_Profit','Lost_Sales','Lost_Margin']].sum(), index = ['Sales','Gross_Margin','Markdown_Losses','Net_Profit','Lost_Sales','Lost_Margin'])
buy_forecast_chg
# + colab={} colab_type="code" id="E5yYcBdNzyE-"
# Risk Adjusted Model based on input demand
# NOTE(review): assigning `train_input` here aliases the original DataFrame,
# so the columns added below also appear on `train` — confirm intended.
risk_adjusted_layer = pd.DataFrame()
risk_adjusted_layer = train_input
risk_adjusted_layer["Buy_Quantity"] = risk_adjusted_layer[['Buyer1','Buyer2','Buyer3','Buyer4']].mean(axis=1).round()
risk_adjusted_layer["Standard_Dev"] = risk_adjusted_layer[['Buyer1','Buyer2','Buyer3','Buyer4']].std(ddof=0,axis=1)
# Mixes `risk_adjusted_layer` and `train` columns; equivalent here only
# because both names refer to the same underlying frame.
risk_adjusted_layer['Underbuy_Cost'] = risk_adjusted_layer['Normal_Price'] - train['Cost_Price']
risk_adjusted_layer['Overbuy_Cost'] = risk_adjusted_layer['Cost_Price'] - train['Markdown_Price']
#Assuming if demand were having these values
demand1 = 86
demand2 = 89
demand3 = 102
demand_arr = [demand1,demand2,demand3] #Put it in a loop within function for dynamic generation
def expected_cost(row):
demand_range_min = round(row['Actual_Demand'] * 0.6) #Setting lower limits
demand_range_max = round(row['Actual_Demand'] * 1.4) #Setting higher limits
demand_range_arr = list(range(demand_range_min,demand_range_max))
costing_range = []
for ii in range(len(demand_range_arr)):
cost = []
for i in range(len(demand_arr)):
if (demand_range_arr[ii] < demand_arr[i]):
cost.append(row['Underbuy_Cost'] * (demand_arr[i]-demand_range_arr[ii]))
else:
cost.append(row['Overbuy_Cost'] * (demand_range_arr[ii]- demand_arr[i]))
best_cost = cost[0]*0.25 + cost[1]*0.25 +cost[2]*0.5 #Applying weights based on buying figures
costing_range.append(best_cost)
min_qty_index = np.argmin(costing_range)
return demand_range_arr[min_qty_index]
# Apply the scenario-weighted cost minimization to choose each row's buy quantity.
risk_adjusted_layer['Buy_Quantity'] = risk_adjusted_layer.apply(expected_cost,axis=1)
# -
risk_adjusted_layer
# Re-derive sales and per-unit economics under the risk-adjusted buys.
risk_adjusted_layer['Sales'] = risk_adjusted_layer.apply(new_sales, axis=1)
risk_adjusted_layer['Underbuy_Cost'] = risk_adjusted_layer['Normal_Price'] - risk_adjusted_layer['Cost_Price']
risk_adjusted_layer['Overbuy_Cost'] = risk_adjusted_layer['Cost_Price'] - risk_adjusted_layer['Markdown_Price']
#Calculate P&L
risk_adjusted_layer['Gross_Margin'] = risk_adjusted_layer.apply(gross_margin, axis=1)
risk_adjusted_layer['Markdown_Losses'] = risk_adjusted_layer.apply(markdown_loss, axis=1)
risk_adjusted_layer['Net_Profit'] = risk_adjusted_layer.apply(net_profit, axis=1)
risk_adjusted_layer['Lost_Sales'] = risk_adjusted_layer.apply(lost_sales, axis=1)
risk_adjusted_layer['Lost_Margin'] = risk_adjusted_layer.apply(lost_margin, axis=1)
# Reporting view plus a 'Total' summary row.
risk_adjusted_layer = risk_adjusted_layer.loc[:,['Product','Buy_Quantity','Actual_Demand','Sales','Gross_Margin','Markdown_Losses','Net_Profit','Lost_Sales','Lost_Margin']]
risk_adjusted_layer.loc['Total'] = pd.Series(risk_adjusted_layer[['Sales','Gross_Margin','Markdown_Losses','Net_Profit','Lost_Sales','Lost_Margin']].sum(), index = ['Sales','Gross_Margin','Markdown_Losses','Net_Profit','Lost_Sales','Lost_Margin'])
risk_adjusted_layer
# + colab={"base_uri": "https://localhost:8080/", "height": 164} colab_type="code" id="Ic0kwnOn1G-J" outputId="b6f0d46e-83cb-4e65-c5c1-62813f3fbe2a"
# Risk Adjusted Continuous Gamma Model
# NOTE(review): again aliases `train_input`/`train` rather than copying.
risk_adjusted_gamma_layer = pd.DataFrame()
risk_adjusted_gamma_layer = train_input
# Buyer consensus: rounded mean for the buy; raw mean and population std-dev
# feed the gamma demand model's method-of-moments fit.
risk_adjusted_gamma_layer["Buy_Quantity"] = risk_adjusted_gamma_layer[['Buyer1','Buyer2','Buyer3','Buyer4']].mean(axis=1).round()
risk_adjusted_gamma_layer["Mean"] = risk_adjusted_gamma_layer[['Buyer1','Buyer2','Buyer3','Buyer4']].mean(axis=1)
risk_adjusted_gamma_layer["Standard_Dev"] = risk_adjusted_gamma_layer[['Buyer1','Buyer2','Buyer3','Buyer4']].std(ddof=0,axis=1)
risk_adjusted_gamma_layer['Underbuy_Cost'] = risk_adjusted_gamma_layer['Normal_Price'] - train['Cost_Price']
risk_adjusted_gamma_layer['Overbuy_Cost'] = risk_adjusted_gamma_layer['Cost_Price'] - train['Markdown_Price']
def expected_cost_gamma(row):
prb = []
# calculate alpha and beta
alpha = (row['Mean'] / row['Standard_Dev'])**2
beta = ((row['Standard_Dev'])**2)/(row['Mean'])
demand_g_range_min = round(row['Actual_Demand'] * 0.70) #Setting lower limits
demand_g_range_max = round(row['Actual_Demand'] * 1.30) #Setting higher limits
demand_g_range_arr = list(range(demand_g_range_min,demand_g_range_max))
costing_g_range = []
cost_g = 0
cal_cost_g = 0
gamma_cal_val = 0
for ik in range(len(demand_g_range_arr)):
gamma_cal_val = 0
for ig in range(len(demand_g_range_arr)):
prob = stats.gamma.pdf(demand_g_range_arr[ig], a=alpha, scale=beta)
if (demand_g_range_arr[ik] < demand_g_range_arr[ig]):
cost_g = row['Overbuy_Cost'] * (demand_g_range_arr[ig] - demand_g_range_arr[ik])
else:
cost_g = (demand_g_range_arr[ik] - demand_g_range_arr[ig]) * row['Underbuy_Cost']
cal_cost_g = (prob * cost_g)
gamma_cal_val = cal_cost_g + gamma_cal_val
costing_g_range.append(gamma_cal_val)
expected_cost_gamma.plot = costing_g_range
min_qty_g_index = np.argmin(costing_g_range)
expected_cost_gamma.plot = costing_g_range
print ("Optimal Buy quuantity for",row['Product'],"-",demand_g_range_arr[min_qty_g_index])
return demand_g_range_arr[min_qty_g_index]
# Pick each row's buy quantity by minimizing expected cost under the gamma model.
risk_adjusted_gamma_layer['Buy_Quantity'] = risk_adjusted_gamma_layer.apply(expected_cost_gamma,axis=1)
# +
# risk_adjusted_gamma_layer
# -
# NOTE(review): plain assignment — `_fore` is the same DataFrame, not a copy.
risk_adjusted_gamma_layer_fore = risk_adjusted_gamma_layer
risk_adjusted_gamma_layer_fore['Sales'] = risk_adjusted_gamma_layer_fore.apply(new_sales, axis=1)
risk_adjusted_gamma_layer_fore['Underbuy_Cost'] = risk_adjusted_gamma_layer_fore['Normal_Price'] - risk_adjusted_gamma_layer_fore['Cost_Price']
risk_adjusted_gamma_layer_fore['Overbuy_Cost'] = risk_adjusted_gamma_layer_fore['Cost_Price'] - risk_adjusted_gamma_layer_fore['Markdown_Price']
#Calculate P&L
risk_adjusted_gamma_layer_fore['Gross_Margin'] = risk_adjusted_gamma_layer_fore.apply(gross_margin, axis=1)
risk_adjusted_gamma_layer_fore['Markdown_Losses'] = risk_adjusted_gamma_layer_fore.apply(markdown_loss, axis=1)
risk_adjusted_gamma_layer_fore['Net_Profit'] = risk_adjusted_gamma_layer_fore.apply(net_profit, axis=1)
risk_adjusted_gamma_layer_fore['Lost_Sales'] = risk_adjusted_gamma_layer_fore.apply(lost_sales, axis=1)
risk_adjusted_gamma_layer_fore['Lost_Margin'] = risk_adjusted_gamma_layer_fore.apply(lost_margin, axis=1)
# Reporting view plus a 'Total' summary row.
risk_adjusted_gamma_layer_fore = risk_adjusted_gamma_layer_fore.loc[:,['Product','Buy_Quantity','Actual_Demand','Sales','Gross_Margin','Markdown_Losses','Net_Profit','Lost_Sales','Lost_Margin']]
risk_adjusted_gamma_layer_fore.loc['Total'] = pd.Series(risk_adjusted_gamma_layer_fore[['Sales','Gross_Margin','Markdown_Losses','Net_Profit','Lost_Sales','Lost_Margin']].sum(), index = ['Sales','Gross_Margin','Markdown_Losses','Net_Profit','Lost_Sales','Lost_Margin'])
risk_adjusted_gamma_layer_fore
# +
# Markdown Model - Estimate best markdowns
# markdown_price = train
# Candidate markdown percentages: 5%, 10%, ..., 80%.
markdown_range = list(range(5,85,5))
# print(markdown_range)
# Demo: lift factor for a single 5% markdown (e approximated as 2.718).
calc_lift = 2.718**(2.5*0.05)
print(calc_lift)
sales_lift_array = []
def sales_lift(percent_input):
    """Exponential sales-lift factor for a markdown given as a percentage."""
    fraction = percent_input / 100
    return 2.718 ** (2.5 * fraction)
# Sales-lift factor for every candidate markdown percentage.
for ik in range(len(markdown_range)):
    sales_lift_factor = sales_lift(markdown_range[ik])
    sales_lift_array.append(sales_lift_factor)
# print(sales_lift_array)
# -
import pandas as pd
import numpy as np
# df_ = pd.DataFrame(index=index, columns=columns)
# df_ = df_.fillna(0) # with 0s rather than NaNs
# Markdown evaluation table: for each markdown percentage, the lift factor,
# the discounted price, the remaining (residual) sales and resulting revenue.
selling_price = 60
col_names = ['Markdown', 'Sales_Lift_Factor', 'Sale_price','Rest_Sales','Rest_Revenue']
markdown_df = pd.DataFrame(columns = col_names)
markdown_df['Markdown'] = markdown_range
markdown_df['Sales_Lift_Factor'] = sales_lift_array
markdown_df['Sale_price'] = selling_price * (1-markdown_df['Markdown']/100)
markdown_df['Rest_Sales'] = [340,385,436,495,560,635,720,815,924,1047,1187,1345,1524,1726,1956,2000]#Actual Sales
markdown_df['Rest_Revenue'] = markdown_df['Rest_Sales'] * markdown_df['Sale_price']
plt.plot(markdown_df['Markdown'], markdown_df['Sales_Lift_Factor'])
markdown_df
# +
x = np.linspace (0, 100, 200)
# Gamma pdf with shape alpha=29 and scale beta=3. scipy's `scale` is the
# gamma scale parameter; the original `loc=3` only shifted the curve and did
# not match the beta in the label (or the scale=beta convention used above).
y1 = stats.gamma.pdf(x, a=29, scale=3)
plt.plot(x, y1, "y-", label=(r'$\alpha=29, \beta=3$'))
plt.ylim([0,0.08])
plt.xlim([0,150])
plt.show()
| Inventory Optimization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from ipyfilechooser import FileChooser
# Two interactive pickers: the API-description JSON file and the output folder.
json_api_file = FileChooser()
output_loc = FileChooser()
print("Select JSON file location:")
display(json_api_file)
print("Select output folder:")
display(output_loc)
# +
import json
import panel as pn
json_loc = json_api_file.selected
pn.extension()
api_list = {}     # testset name -> key
button_list = {}  # testset name -> Checkbox widget
# Parse the selected JSON file; `with` guarantees the file handle is closed
# (the previous open(...).read() leaked the handle).
with open(json_loc, 'r', encoding='utf-8') as json_file:
    PARSED_JSON = json.load(json_file)
# One checkbox per testset so the user can pick which APIs to process.
for api in PARSED_JSON.get("testsets"):
    name = api.get("name")
    api_list[name] = api.get("key")
    button_list[name] = pn.widgets.Checkbox(name=name)
    display(button_list.get(name))
# -
# Collect the names of all checked testsets into a space-separated string.
apis_to_parse = ""
for api in button_list.keys():
    if button_list.get(api).value == True:
        apis_to_parse = f"{apis_to_parse}{api} "
# BUG FIX: str.strip returns a new string; the result was previously
# discarded, leaving a trailing space in the arguments passed below.
apis_to_parse = apis_to_parse.strip(" ")
print(apis_to_parse)
json_file_loc = json_api_file.selected
output_folder = output_loc.selected_path
# %run -i vectorize_docs.py $json_file_loc $output_folder $apis_to_parse
| vectorize_docs/VectorizeDocs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
# import the wine dataset
# 178 samples, 3 classes, 13 chemical features (UCI Wine dataset).
df_wine = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', header=None)
df_wine.columns = ['Class label', 'Alcohol', 'Malic acid',
                   'Ash', 'Alcalinity of ash', 'Magnesium',
                   'Total phenols', 'Flavanoids', 'Nonflavanoid phenols',
                   'Proanthocyanins', 'Color intensity', 'Hue',
                   'OD280/OD315 of diluted wines', 'Proline'
                  ]
print('Class labels', np.unique(df_wine['Class label']))
# -
df_wine.head()
# randomly partition the wine dataset for test & training datasets
from sklearn.model_selection import train_test_split
# First column is the class label; the remaining 13 columns are features.
X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values
# 70/30 split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
# +
# normalizing data via min-max scaling
from sklearn.preprocessing import MinMaxScaler
mms = MinMaxScaler()
# Fit scaling parameters on the training set only, then reuse them on test data.
X_train_norm = mms.fit_transform(X_train)
# NOTE(review): `X_test_norn` looks like a typo for `X_test_norm`; renaming is
# safe only after confirming no later cell references the misspelled name.
X_test_norn = mms.transform(X_test)
# data standardization
from sklearn.preprocessing import StandardScaler
stdsc = StandardScaler()
X_train_std = stdsc.fit_transform(X_train)
X_test_std = stdsc.transform(X_test)
# +
# L1 regularization using 'l1' penalty parameter for the Wine dataset
from sklearn.linear_model import LogisticRegression
# The solver must be set explicitly: newer scikit-learn defaults to 'lbfgs',
# which does not support the L1 penalty ('liblinear' was the old default).
lr = LogisticRegression(penalty='l1', C=0.1, solver='liblinear')
lr.fit(X_train_std, y_train)
print('Training accuracy:', lr.score(X_train_std, y_train))  # fixed 'Trainig' typo
# -
print('Test accuracy:', lr.score(X_test_std, y_test))
# Bare expressions display the fitted parameters when run as notebook cells.
lr.intercept_
lr.coef_
# +
# plotting the regularization path
import matplotlib.pyplot as plt
fig = plt.figure()
ax = plt.subplot(111)
colors = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'pink', 'lightgreen', 'lightblue',
          'gray', 'indigo', 'orange']
weights, params = [], []
# Sweep C over 10^-4 .. 10^5. Use a float range: with integer `c`, 10**c
# raises "Integers to negative integer powers are not allowed" in NumPy.
for c in np.arange(-4., 6.):
    # 'liblinear' supports the L1 penalty; newer scikit-learn's default
    # solver ('lbfgs') does not.
    lr = LogisticRegression(penalty='l1', C=10**c, random_state=0, solver='liblinear')
    lr.fit(X_train_std, y_train)
    weights.append(lr.coef_[1])   # coefficients of the second class (one-vs-rest)
    params.append(10**c)
weights = np.array(weights)
# One curve per feature: weight vs. regularization strength.
for column, color in zip(range(weights.shape[1]), colors):
    plt.plot(params, weights[:, column], label=df_wine.columns[column+1], color=color)
plt.axhline(0, color='black', linestyle='--', linewidth=3)
plt.xlim([10**(-5), 10**5])
plt.ylabel('weight coefficient')
plt.xlabel('C')
plt.xscale('log')
plt.legend(loc='upper center', bbox_to_anchor=(1.38, 1.03), ncol=1, fancybox=True)
plt.show()
| ch04/.ipynb_checkpoints/03-partitioning-dataset-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=[] toc-hr-collapsed=true
# # Machine learning compilation of quantum circuits -- experiments
# > Flexible and efficient learning with JAX+numpy
# - toc: true
# - badges: true
# - comments: true
# - categories: [qiskit, JAX, machine learning, compilation]
# - image: images/pisa.svg
# + [markdown] tags=[]
# # Introduction
# + tags=[]
#collapse
# If you are running this notebook in Colab, you might need to restart
# the environment after the installations.
from functools import partial
import matplotlib.pyplot as plt
import numpy as np
import jax.numpy as jnp
from jax import random, value_and_grad, jit, vmap, grad, lax
from scipy.stats import unitary_group
try:
import optax
except ImportError:
# !pip install optax
import optax
try:
import qiskit
except ImportError:
# !pip install qiskit
# !pip install pylatexenc # required for circuit drawing.
import qiskit
from qiskit import QuantumCircuit, transpile
from qiskit.quantum_info import Operator, Statevector
from qiskit.circuit import Parameter
from qiskit.transpiler.passes.synthesis import UnitarySynthesis
from qiskit.transpiler import PassManager
from qiskit.converters import circuit_to_gate
# + [markdown] tags=[]
# ## Motivation
# Ever since I read the paper by L.Madden and A.Simonetto ([original preprint](http://arxiv.org/abs/2106.05649), [my review](https://idnm.github.io/blog/machine%20learning/compilation/qiskit/paper%20review/2021/07/22/Machine-learning-compilation-of-quantum-circuits.html)) I knew I want to do this kind of experiments myself. At first I hoped that there is a well-developed software framework where I can easily build quantum circuits and then optimize them efficiently. However, [I was not able to find a good fit for my problem](https://quantumcomputing.stackexchange.com/questions/20718/software-tools-to-train-quantum-circuits-with-parameters). For example, to the best of my knowledge `qiskit` currently only provides acess to zero-order optimization routines. I later found [quimb](https://quimb.readthedocs.io/en/latest/index.html) which [might do what I want](https://quimb.readthedocs.io/en/latest/examples/ex_tn_train_circuit.html), but in the end I'm glad I worked things out from scratch. Eventually I went for `numpy`+`JAX` combination which while being quite low-level was not a big problem to get working and shows a decent speed. I owe a ton to [<NAME>](https://github.com/LuchnikovI) for introducing me to the framework and helping throught.
#
# In this post I will give a walk thorough this implementation and show experiments with compilation of random unitaries. However, in my opinion truly interesting stuff is concerned with the compilation of special gates, say multi-controlled Toffolis on restricted connectivity. I intend to look at this kind problems in detail in a future blog post. You may wish to take a look at [this preprint](http://arxiv.org/abs/2109.13223) for advances in that direction.
#
# > *NOTE*: While I was working on my experiments another [preprint appeared](http://arxiv.org/abs/2109.06770), by P.Rakyta and Z.Zimborás, which is very similar to the work of M&S in terms of numerical results. Despite the striking similarities these works are independent. As a bonus R&Z also provide a numerical package [SQUANDER](https://zenodo.org/record/4508680#.YVw-uYBBxNi) that allows to play with their framework for compilation of unitaries. You might want to check that out if you are interested in doing some experiments yourself.
#
#
# ## The problem
# OK, so first a brief recap of what is the compilation problem. Given a quantum circuit we need to find an equivalent one, which satisfies certain requirements. A typical restrictions are to use only some specific two-qubits gates and to be compatible with limited connectivity. I gave a more detailed intro [here](https://idnm.github.io/blog/machine%20learning/compilation/qiskit/paper%20review/2021/07/22/Machine-learning-compilation-of-quantum-circuits.html). Here is a nearly-trivial example: a simple $CNOT$ gate
# -
#collapse
# A two-qubit circuit containing a single CNOT (control qubit 0, target qubit 1).
qc = QuantumCircuit(2)
qc.cx(0, 1)
qc.draw(output='mpl')
# can be decomposed in terms of the entangling $cz$ gate and single-qubit gates $rx, ry, rz$ as follows
#collapse
# Recompile the CNOT into the {cz, rx, ry, rz} basis at the highest optimization level.
qc_compiled = transpile(qc, basis_gates=['cz', 'rx', 'ry', 'rz'], optimization_level=3)
qc_compiled.draw(output='mpl')
# Now, for generic $n$-qubit unitaries one needs exponentially many entangling gates for the compilation. More precisely, there is a [theoretical lower bound](https://dl.acm.org/doi/10.5555/968879.969163) $\#CNOTs\ge \frac14 \left(4^n-3n-1\right)$ on the amount of $CNOT$s required for compilation of any $n-$qubit unitary outside a measure zero set. Crucially, this measure zero set might in fact be of principal interest to quantum computing as it includes many operators featuring in most algorithms (such as multi-controlled gates). In this post I will only adress compilation of random unitaries and discuss compilation of special cases in a future post. For later reference here is the function computing the theoretical lower bound.
# +
def TLB(n):
    """Theoretical lower bound on the number of CNOTs needed to compile a
    generic n-qubit unitary: floor((4**n - 3*n - 1) / 4) + 1.

    Uses integer arithmetic so the result stays exact for large n, where the
    previous float division of 4**n would lose precision (n >~ 26).
    """
    return (4**n - 3*n - 1) // 4 + 1
for n in range(1, 7):
    print('TLB for {}-qubit unitary is {}'.format(n, TLB(n)))
# -
# Now, there is an algorithm called [quantum Shannon decomposition](https://arxiv.org/abs/quant-ph/0406176) to decompose an arbitary $n$-qubit unitary into a sequence of $CNOT$s and single-qubit rotations which requires roughly twice as many $CNOT$s as the theoretical lower bound implies. In complexity-theoretic terms this is definitely good enoough, the overhead is just a small constant factor. However, for NISQ devices doubling the amount of gates is not a trivial matter. Is it possible to do better?
#
# ## 3-qubit example
# As papers [M&S](http://arxiv.org/abs/2106.05649) and [R&Z](http://arxiv.org/abs/2109.06770) show, one can do better and eliminate the 2x overhead, at least numerically. Namely, it seems that precisely at the theoretical lower bound the exact or nearly-exact compilation of any unitary is possible. Here is a real-life example. Consider the following 3-qubit circuit with $TLB(3)=14$ $CNOT$ gates
#
# <img src="myimages/3qubitsequ.png" alt="Drawing" style="width: 800px;"/>
#
# The claim is that with the appropriate choice of angles in rotation gates it can morph into *any* 3-qubit unitary (and in fact at least this many $CNOT$s are needed for almost all 3-qubit unitaries). To find the corresponding angles it is sufficient to run a numerical optimization maximizing the fidelity between this circuit's unitary and the target unitary.
# To me this is rather impressive, but raises several questions. Why choose $CNOT$ gates of all entangling gates? Why place them in that exact order as shown in the figure? It appears to be an empirical fact that precise location of entangling gates as well as their choice ($CNOT$, $cz$, etc) makes little difference. Moreover, even restricted connectivity does not seem to force an overhead for compilation. It is my main goal to back up these claims with numerical experiments in an interactive way. In particular, I will illustrate the following points.
#
# 1. Exactly at the theoretical lower bound a nearly-exact compilation seems to always be possible (at least for up to 6 qubits). This is a 2x improvement over the best theoretical decomposition.
# 1. Both $cz$ and $CNOT$ gates perform equally well. It is tempting to guess that any entangling gate will perform similarly.
# 1. The maximum fidelity is a monotonic function of the number of entangling gates. This implies that simply counting 2-qubit gates gives a good measure of circuits expressivity.
# 1. The most remarkable for me is the fact that even a restricted topology seems to cause no overhead on compilation cost. I will show that even on a chain topology the same amount of $CNOT$s is sufficient to reach good fidelity.
#
# ## What you'll find if you keep reading
# The rest of this post is divided into two parts. In the first I write some `numpy`/`JAX`/`qiskit` code that allows to construct and efficiently optimize parametrized circuits. I try to give some explanations of the underlying numerical framework, but please take into account that my own understanding is rather limited. Still, the resulting performance seems to be good enough to reproduce results of the existing preprints. I advise to skip this part if you are only interested in the results.
#
# In the second part of the post I will do a number of experiments compiling random unitaries with varying numbers of qubits, different types of entangling gates, restricted connectivity and try to draw some general lessons from them. I tried to make this part independent of the first, although I didn't stop all the implementation details from sinking trough.
#
# > *NOTE*: This blog post is also a fully functional jupyter notebook. You can open it in Colab or download locally and perform more experiments yourself!
#
# <img src="myimages/mlexperiments/pisa.svg" alt="Drawing" style="width: 400px;"/>
# + [markdown] tags=[]
# # Numerical framework
# + [markdown] tags=[]
# ## Entangling blocks
# -
# First let us define the basic 1- and 2-qubit gates in matrix form. For now you can safely ignore the use `jnp` arrays instead of `np` arrays.
# +
# Matrix representations of CNOT, CZ and single-qubit rotations.
# Controlled-NOT (controlled-X): flips the target when the control is 1.
cx_mat = jnp.array([[1, 0, 0, 0],
                    [0, 1, 0, 0],
                    [0, 0, 0, 1],
                    [0, 0, 1, 0]])
# Controlled-Z: phase-flips the |11> component.
cz_mat = jnp.array([[1, 0, 0, 0],
                    [0, 1, 0, 0],
                    [0, 0, 1, 0],
                    [0, 0, 0, -1]])
# Pauli matrices.
x_mat = jnp.array([[0, 1],
                   [1, 0]])
y_mat = jnp.array([[0, -1j],
                   [1j, 0]], dtype=jnp.complex64)
z_mat = jnp.array([[1, 0],
                   [0, -1]])
# Rotation gates exp(-i*angle/2 * pauli), expanded via cos/sin.
def _pauli_rotation(pauli, angle):
    """Single-qubit rotation about `pauli` by `angle` (cos/sin expansion)."""
    return jnp.cos(angle/2)*jnp.identity(2)-1j*pauli*jnp.sin(angle/2)

def rx_mat(a):
    return _pauli_rotation(x_mat, a)

def ry_mat(a):
    return _pauli_rotation(y_mat, a)

def rz_mat(a):
    return _pauli_rotation(z_mat, a)
# -
# The circuits that we are going to train will be built out of two types of 2-qubit blocks, the controlled-Z and the controlled-NOT. Here are the definitions:
class block():
    """Two-qubit entangling block: one entangling gate followed by single-qubit
    ry/rx rotations on each wire.

    Methods:
        circuit: equivalent `qiskit` circuit.
        unitary: `jax.numpy` unitary matrix of the circuit.
    """
    def __init__(self, gate_name, angles):
        self.gate_name = gate_name
        self.angles = angles
    def circuit(self):
        """Quantum circuit in `qiskit` corresponding to this block."""
        qc = QuantumCircuit(2)
        if self.gate_name == 'cz':
            qc.cz(0, 1)
        elif self.gate_name == 'cx':
            qc.cx(0, 1)
        else:
            print("Gate '{}' not yet supported'".format(self.gate_name))
        theta = np.array(self.angles)  # JAX array -> numpy array, if applicable.
        # Same gate order as before: ry then rx on qubit 0, then on qubit 1.
        for rotation, qubit, value in ((qc.ry, 0, theta[0]), (qc.rx, 0, theta[1]),
                                       (qc.ry, 1, theta[2]), (qc.rx, 1, theta[3])):
            rotation(value, qubit)
        return qc
    def unitary(self):
        """JAX-compatible unitary corresponding to this block."""
        if self.gate_name == 'cz':
            entangler = cz_mat
        elif self.gate_name == 'cx':
            entangler = cx_mat
        else:
            print("Gate '{}' not yet supported'".format(self.gate_name))
        x_rotations = jnp.kron(rx_mat(self.angles[1]), rx_mat(self.angles[3]))
        y_rotations = jnp.kron(ry_mat(self.angles[0]), ry_mat(self.angles[2]))
        return x_rotations @ y_rotations @ entangler
# Here is how they look:
# `cz` block
# Symbolic qiskit Parameters let us draw the block with named angles.
a0, a1, a2, a3 = [Parameter(a) for a in ['a0', 'a1', 'a2', 'a3']]
block('cz', [a0, a1, a2, a3]).circuit().draw(output='mpl')
# and `cx` block
block('cx', [a0, a1, a2, a3]).circuit().draw(output='mpl')
# Our `block` class can return a `qiskit` circuit and the corresponding unitary matrix. Of course we could have extracted the unitary from the circuit itself via `qiskit` API, but this would make the matrix representation incompatible with `JAX` which will be our workhorse for optimization. To the best of my knowledge currently it is only possible to use zero-order methods directly from `qiskit` which is a serious limitation. So at this point we needed a bit of wheel reinvention. Let's check that our implementation is consistent with `qiskit`:
# +
# That's how you use random numbers with JAX. Don't worry if this is not familiar, not essential for our purposes.
angles = random.uniform(random.PRNGKey(0), shape=(4,), minval=0, maxval=2*jnp.pi)
# Sanity check: for both gate types, our hand-built unitary must match the
# unitary qiskit extracts from the equivalent circuit.
for gate in ['cx', 'cz']:
    b = block(gate, angles)
    qc = b.circuit()
    qs_unitary = Operator(qc.reverse_bits()).data # Yes, we need to reverse bits in qiskit to match our conventions.
    our_unitary = b.unitary()
    print('qiskit unitary is the same as our unitary for block with gate {}: {}'.format(gate, jnp.allclose(qs_unitary, our_unitary)))
# -
# To match matrix representations of quantum circuits might be a headache as I discussed in [another post](https://idnm.github.io/blog/qiskit/tensor%20networks/quantum%20concepts/2021/08/18/Matrix-representation-of-quantum-circuits.html), so this was a necessary check to do.
#
# Our two building blocks (`cz` and `cx`) only differ by the type of the two-qubit gate. The circuits that we are going to build seem to do equally well for any choice of two-qubit gate. I will mostly use `cz` gate because it is symmetric under the swap of qubits, but I will also occasionally bring up the `cx` gate to illustrate that it has the same performance. Angles $a_0$-$a_3$ are going to be optimized.
# + [markdown] tags=[] toc-hr-collapsed=true
# ## Optimization with `JAX`
# + [markdown] tags=[]
# ### A word about `JAX`
# -
# What is `JAX`? Well, I personally think of it as `numpy` on steroids. You can check out [the official documentation](https://jax.readthedocs.io/en/latest/notebooks/quickstart.html) or numerous nice overwievs on the web. For our purposes two key features of `JAX` are
# 1. Autograd.
# 2. JIT or just-in-time compilation.
#
# Autograd allows to define functions the same way you do in `numpy` and have analytic derivatives available with no extra coding on your side. At the moment `grad` function can only be applied to real scalars. For example, let us define the absolute value of the trace of `cx` block as function of rotations gate angles
def block_tr_abs(angles):
    """Absolute value of the trace of a 'cx' entangling block with the given angles."""
    unitary_matrix = block('cx', angles).unitary()
    return jnp.abs(jnp.trace(unitary_matrix))
# Since everything so far has been defined using `jax.numpy` we have immediate access to the gradient of this function
# Analytic gradient of `block_tr_abs` w.r.t. the four angles, at a sample point.
grad(block_tr_abs)([0.,1.,2.,3.])
# Autograd feature of `JAX` allows us to just define the loss function associated with our circuit in plain `numpy` terms and use advanced first-order optimizers such as Adam out of the box.
#
# The next crucial ingredient is `jit`-compilation. When used with a bit of care, it allows to speed up evaluation of similar expression by orders of magnitude. For example let us compare runtimes of the `jit`ted and un`jit`ted versions of our trace function. Let's first define a sample of random
# angles
# 1000 random samples of the four block angles, for benchmarking.
test_angles = random.uniform(random.PRNGKey(0), shape=(1000, 4), minval=0, maxval=2*jnp.pi)
# and now time evaluation of un`jit`ted trace function
# %%time
# Un-jitted: each call dispatches the ops one by one.
for angles in test_angles:
    block_tr_abs(angles)
# Now awe to the power of `jit`!
# %%time
# Jitted: compiled to XLA on the first call; subsequent calls reuse it.
jit_block_tr_abs = jit(block_tr_abs)
for angles in test_angles:
    jit_block_tr_abs(angles)
# What happened here is that during the first call to the `jit`ted function it's efficient `XLA` version was compiled and then used to evaluate all subsequent calls.
# + [markdown] tags=[]
# ### Gradient descent
# -
# We will use the following measure of discrepancy between two unitaries $disc(U, V) = 1-\frac1{N}\left|\operatorname{Tr}\left( U^\dagger V\right)\right|$ where $U,V$ are $N\times N$ matrices. It is normalized so that $disc(U,U)=0$ and $disc(U,V)=1$ when $U$ and $V$ are orthogonal with respect to the trace inner product. Note that this measure is insensitive to global phases.
def disc(U, U_target):
    """Phase-insensitive discrepancy 1 - |Tr(U^dagger U_target)| / N between N x N matrices.

    Zero when U equals U_target up to a global phase; one when they are orthogonal.
    """
    dim = U_target.shape[0]
    overlap = jnp.sum(jnp.conj(U) * U_target)  # elementwise form of Tr(U^dagger U_target)
    return 1 - jnp.abs(overlap) / dim
# Here is the optimization routine that we are
# going to use. It is pretty straightforward and I will not give much explanations, but illustrate with an example.
# +
@partial(jit, static_argnums=(0, 1, ))  # <--- Here is where the magic happens!
# Remove this line and everything will run 1000 times slower:)
# `loss_and_grad` and `opt` are marked static: they are non-array (callable/object)
# arguments, so JAX must treat them as compile-time constants.
def unitary_update(loss_and_grad, opt, opt_state, angles):
    """Single update step.

    Args:
        loss_and_grad: function mapping angles to a (loss, gradients) pair.
        opt: optax optimizer (e.g. optax.adam).
        opt_state: current optimizer state.
        angles: current parameter values.
    Returns:
        Tuple (angles, opt_state, loss) with updated parameters, updated
        optimizer state, and the loss evaluated at the *previous* angles.
    """
    loss, grads = loss_and_grad(angles)
    updates, opt_state = opt.update(grads, opt_state)
    angles = optax.apply_updates(angles, updates)
    return angles, opt_state, loss
def unitary_learn(U_func, U_target, n_angles,
                  init_angles=None, key=random.PRNGKey(0),
                  learning_rate=0.01, num_iterations=5000,
                  target_disc=1e-10):
    """Use Adam optimizer to minimize discrepancy between a parametrized unitary and a target unitary.

    Args:
        U_func: function of angles returning a unitary matrix.
        U_target: unitary matrix to approximate.
        n_angles: total number of angles (parameters) in U_func.
        init_angles: initial angles for gradient descent. If not provided, chosen at random.
        key: random key used for initialization of the initial angles.
        learning_rate: learning rate in Adam optimizer.
        num_iterations: maximum number of iterations.
        target_disc: stop optimization if discrepancy drops below target_disc.
    Returns: tuple (angles_history, loss_history) where
        angles_history: list of angles (parameters) at each iteration step.
        loss_history: values of loss function at each iteration step.
    """
    # If initial angles are not provided, generate them at random.
    # Bug fix: use the caller-supplied `key`; previously it was unconditionally
    # overwritten with random.PRNGKey(0), making the `key` argument a no-op.
    if init_angles is None:
        angles = random.uniform(key, shape=(n_angles,), minval=0, maxval=2*jnp.pi)
    else:
        angles = init_angles
    # Loss function to minimize is the discrepancy defined above.
    loss_func = lambda angles: disc(U_func(angles), U_target)
    loss_and_grad = value_and_grad(loss_func)
    # Optimizer is taken from the `optax` library and its use is self-explanatory.
    opt = optax.adam(learning_rate)
    opt_state = opt.init(angles)
    # Optimization cycle: record parameters and loss at every step,
    # stopping early once the target discrepancy is reached.
    angles_history = []
    loss_history = []
    for _ in range(num_iterations):
        angles, opt_state, loss = unitary_update(loss_and_grad, opt, opt_state, angles)
        angles_history.append(angles)
        loss_history.append(loss)
        if loss < target_disc:
            break
    return angles_history, loss_history
# -
# OK, now a very simple example. Say we want to find a $ZXZ$ decomposition of $Y$-gate. Define:
def zxz_ansatz(angles):
    """ZXZ Euler ansatz: product Rz(angles[0]) Rx(angles[1]) Rz(angles[2])."""
    first = rz_mat(angles[0])
    middle = rx_mat(angles[1])
    last = rz_mat(angles[2])
    return first @ middle @ last
# Learning is now very simple: we give `unitary_learn` the ansatz unitary as function of angles, the target unitary and also explicitly the number of parameters to be trained:
angles_history, loss_history = unitary_learn(zxz_ansatz, y_mat, 3)
# We can visualize the learning progress as follows:
plt.plot(loss_history)
plt.yscale('log')
# The learned angles in $ZXZ$ decomposition are
angles_history[-1]
# It is not difficult to check directly that the result is equal to the $Y$ matrix up to a global phase with reasonable accuracy, indeed
jnp.around(1j*zxz_ansatz(angles_history[-1]), 3)
# + [markdown] tags=[]
# ## Quantum circuits with `numpy`
# -
# Now it's time to build full quantum circuits. We will think of a quantum circuit on $n$ qubits as a tensor with $2n$ legs. The first $n$ legs correspond to *output* and the last $n$ legs to *input*. This is illustrated in the picture.
#
# <img src="myimages/mlexperiments/tensor.svg" alt="Drawing" style="width: 540px;"/>
#
# It is natural for input legs to be on the left because in matrix notation a unitary $U$ acts on a state $\psi$ by left multiplication $U\psi$. On the other hand note that quantum circuits are usually drawn left-to-right and to compare the two descriptions a left-right reflection must be made.
#
# Suppose now that given an $n-$qubit circuit $U$ we want to append an additional $m-$qubit gate $V$ at the end. Here is a concrete example (a picture is worth a thousand words!)
#
# <img src="myimages/mlexperiments/tensor_contraction.svg" alt="Drawing" style="width: 540px;"/>
# Several things to keep in mind:
#
# 1. To append gate $V$ at the end in quantum circuit notation, we need to draw it on the left here.
# 1. Tensor legs are joined by `numpy`'s `tensordot` operation. Which axes to contract is clear from the picture -- we need to join axes 2, 3 of $V$ to 1, 3 of $U$.
# 1. In the resulting tensor the output legs are not in the correct order. Instead of being numbered from top to bottom after `tensordot` first several axes are those of $V$ and the remaining are uncontracted output axes of $U$ (take a look at the leftmost column of numbers). This needs to be corrected by explicit transposition of output axes.
# 1. The final caveat is that if some of the legs connecting gate to the circuit are twisted the output legs needs to be transposed accordingly. Here is an example
#
# <img src="myimages/mlexperiments/tensor_contraction_twisted.svg" alt="Drawing" style="width: 640px;"/>
#
# Here is the code that implements this program.
# +
def gate_transposition(placement):
    """Determine the transposition associated with the initial placement of a gate.

    Returns the gate-leg indices ordered by their placement position
    (stable, so ties keep their original relative order).
    """
    return sorted(range(len(placement)), key=lambda leg: placement[leg])
def transposition(n_qubits, placement):
    """Return a transposition that relabels tensor axes correctly.

    Example (from the figure above): n=6, placement=[1, 3] gives [2, 0, 3, 1, 4, 5].
    Twisted: n=6, placement=[3, 1] gives [2, 1, 3, 0, 4, 5].
    """
    width = len(placement)
    # Gate output legs ordered by their placement position (inlined gate_transposition).
    insertions = sorted(range(width), key=lambda leg: placement[leg])
    # Start with the uncontracted circuit axes, then splice gate axes into place.
    axes = list(range(width, n_qubits))
    for target_pos, gate_axis in zip(sorted(placement), insertions):
        axes.insert(target_pos, gate_axis)
    return axes
def apply_gate_to_tensor(gate, tensor, placement):
    """Append `gate` to `tensor` along the legs given by `placement`, reordering output axes.

    Both `gate` and `tensor` are (2, ..., 2) tensors whose first half of axes
    are output legs and second half are input legs.
    """
    gate_width = len(gate.shape) // 2
    tensor_width = len(tensor.shape) // 2
    # Contract the gate's input legs (last half of its axes) with the
    # circuit's output legs selected by `placement`.
    gate_input_axes = list(range(gate_width, 2 * gate_width))
    contracted = jnp.tensordot(gate, tensor, axes=[gate_input_axes, placement])
    # After tensordot the gate's output axes come first; restore the proper
    # top-to-bottom order. The circuit's input legs (last half) are untouched.
    axes_order = transposition(tensor_width, placement) + list(range(tensor_width, 2 * tensor_width))
    return jnp.transpose(contracted, axes=axes_order)
# -
# Now, using this tensor language we will construct unitary matrices corresponding to our ansatz circuits. To specify the ansatz we must supply the number of qubits in the circuit, type of entangling blocks to use and arrangement of these blocks.
#
# The simplest way to specify arrangement would be to just give a list like `[[0,1], [1, 3], [2, 1]]` etc of pairs of qubits to put entangling blocks on to. However for performance reasons I need to make it more complicated. To construct a matrix for our quantum circuit we basically need to loop over all entangling gates and append them one by one. When using `JAX` plain python loops are simply unrolled and then compiled. For large loops this leads to very large compilation times. If there is no structure in how we place our gates in the circuit this is probably the best one can do. However, we can be more efficient than that if there is a structure. Take a look at this picture
# +
#collapse
# Demo circuit: CNOTs stand in for generic entangling blocks, arranged as a
# repeating pattern of identical layers (separated by barriers).
qc = QuantumCircuit(4)
i = 0
for _ in range(11):
    qc.cx(i,i+1)
    i = (i+1) % 3  # cycle the block position 0 -> 1 -> 2 -> 0 ...
    if i % 3 == 0:
        qc.barrier()  # mark the end of a complete layer
qc.draw()
# -
# Here $CNOT$s are just placeholders for any entangling block of our interest. There is a regular pattern. Most of the circuit consists of identical layers up to a couple of final gates. Construction and optimization of such circuits with `JAX` can be made way more efficient by using `lax.fori_loop` ([see here for docs](https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.fori_loop.html)) or a similar construct. This allows to exploit the regularity and reduce the compilation time dramatically.
#
# The price to pay is a bit of a hassle in separating all gates into regular ones and the remainder. My core function `build_unitary` accepts the regular layers as an argument `layer_placements=[layer, number_of_repetitions]` and the remainder gates are described by `free_placements`. Also, we need some way to access all parameters (angles) in our circuit. I chose the simplest approach here, to supply angles as a 1d array, but internally they play a bit different roles so there is also a function `split_angles` to separate a 1d array of all angles into several logical blocks.
#
# OK, so here is the code. Examples are found in the end of this section.
# +
def split_angles(angles, num_qubits, layer_len, num_layers, free_placements_len):
    """Split a 1d array of all angles in a circuit into four groups.

    Args:
        angles: all angles in a circuit as a 1d array.
        num_qubits: number of qubits in a circuit.
        layer_len: length (depth) of a single layer in a circuit.
        num_layers: number of repeated layers.
        free_placements_len: number of entangling blocks not in layers.
            (Kept for interface compatibility; the split is fully determined
            by the other arguments.)
    Returns: a tuple (surface_angles, block_angles, layers_angles, free_block_angles) where
        surface_angles: angles in initial single-qubit blocks.
        block_angles: angles of all entangling blocks.
        layers_angles: angles for entangling blocks that are parts of complete layers.
        free_block_angles: angles of remaining entangling blocks.
    """
    # 3 angles per qubit for the initial single-qubit rotations.
    surface_angles = angles[:3*num_qubits].reshape(num_qubits, 3)
    # 4 angles per entangling block.
    block_angles = angles[3*num_qubits:].reshape(-1, 4)
    # Blocks belonging to complete layers, grouped by layer.
    layers_angles = block_angles[:layer_len*num_layers].reshape(num_layers, layer_len, 4)
    # Remaining blocks that are not part of any complete layer.
    free_block_angles = block_angles[layer_len*num_layers:]
    return surface_angles, block_angles, layers_angles, free_block_angles
def build_unitary(num_qubits, block_type, angles, layer_placements=((), 0), free_placements=()):
    """
    Builds a `JAX`-compatible unitary matrix of a quantum circuit.
    Arguments specify structure of the circuit and values of parameters.

    Args:
        num_qubits: number of qubits.
        block_type: type of entangling block to use. Currently only 'cx' and 'cz' are supported.
        angles: 1d array of all angle parameters in the circuit.
        layer_placements: a tuple (single_layer, n) where `single_layer` specifies
            positions of several entangling blocks and `n` how many times to repeat the layer.
        free_placements: positions of entangling blocks that do not belong to layers.
    Returns:
        A `jax.numpy` unitary matrix of the quantum circuit.
    """
    layer, num_layers = layer_placements
    layer_depth = len(layer)  # NOTE(review): currently unused.
    num_blocks = len(layer)*num_layers+len(free_placements)  # Count all entangling blocks. NOTE(review): currently unused.
    # Divides 1d array of all angles into logically distinct groups.
    surface_angles, _, layers_angles, free_block_angles = split_angles(angles, num_qubits,
                                                                      len(layer), num_layers, len(free_placements))
    # Initializes an identity matrix of the proper size, reshaped into a
    # (2, ..., 2) tensor: num_qubits output legs followed by num_qubits input legs.
    u = jnp.identity(2**num_qubits).reshape([2]*num_qubits*2)
    # Unitary matrix is built in three steps.
    # First, 3 single-qubit gates are applied to each qubit.
    # Second, all entangling blocks that are parts of layers are applied.
    # Finally, remainder blocks that are not part of any layer are applied.

    # Initial round of single-qubit gates: a ZXZ Euler rotation on each qubit.
    for i, a in enumerate(surface_angles):
        gate = rz_mat(a[2]) @ rx_mat(a[1]) @ rz_mat(a[0])
        u = apply_gate_to_tensor(gate, u, [i])
    # Sequence of layers wrapped in `fori_loop`.
    # Using `fori_loop` instead of plain `for` loop reduces the compilation time significantly.
    # To use `fori_loop` it is convenient to define a separate function that applies a whole layer of gates.
    def apply_layer(i, u, layer, layers_angles):
        """Apply one layer of entangling gates to the circuit tensor.
        Supplying the totality of `layers_angles` makes
        the function compatible with `fori_loop`.

        Args:
            i: index of the layer.
            u: tensor to apply gates to.
            layer: positions of all gates in the layer.
            layers_angles: angles of all layers.
        """
        layer_angles = layers_angles[i]
        for block_angles, position in zip(layer_angles, layer):
            gate = block(block_type, block_angles).unitary().reshape(2,2,2,2)
            u = apply_gate_to_tensor(gate, u, position)
        return u
    if num_layers>0:
        u = lax.fori_loop(0, num_layers, lambda i, u: apply_layer(i, u, layer, layers_angles), u)
    # Adds the remaining (free) entangling blocks.
    for angles, position in zip(free_block_angles, free_placements):
        gate = block(block_type, angles).unitary().reshape(2,2,2,2)
        u = apply_gate_to_tensor(gate, u, position)
    # Collapse the tensor legs back into a 2^n x 2^n matrix.
    return u.reshape(2**num_qubits, 2**num_qubits)
# + [markdown] tags=[]
# ## Layers
# -
# Here are a couple of simple functions to help define gate arrangements. The basic layer is `sequ_layer`, which consists of entangling gates applied to each possible pair of qubits, enumerated by pairs $(i,j)$ with $i<j$.
# +
def sequ_layer(num_qubits):
    """One entangling block for every qubit pair [i, j] with i < j, in lexicographic order."""
    pairs = []
    for first in range(num_qubits):
        for second in range(first + 1, num_qubits):
            pairs.append([first, second])
    return pairs
def fill_layers(layer, depth):
    """Split `depth` entangling blocks into repeated complete layers plus a remainder.

    Returns ([layer, repetitions], incomplete_layer) where `incomplete_layer`
    holds the leftover block positions that do not fill a whole layer.
    """
    repetitions, leftover = divmod(depth, len(layer))
    complete_layers = [layer, repetitions]
    incomplete_layer = layer[:leftover]
    return complete_layers, incomplete_layer
# -
# Function `fill_layers` allows to specify how much entangling gates we want in total and splits them into complete layers (to be used as `layer_placements`) and possible remainder gates (that become `free_placements`). For example, a `sequ_layer` on three qubits consists of three gates at positions
sequ_layer(3)
# If we want to have the sequ pattern and 10 entangling gates in total we can put three complete layers and a final single gate. `fill_layers` does just that
layer_placements, free_placements = fill_layers(sequ_layer(3), 10)
print(layer_placements)
print(free_placements)
# + [markdown] tags=[]
# ## Packing everything together: ansatz circuits
# -
# Now that we have defined our building blocks and convenience functions to assemble them it is time to pack everything together and reap the harvest.
#
# I will define `ansatz` class that assembles our building blocks according to a predefined pattern. It's `circuit` method gives a `qiskit` circuit which can be used for visualization and cross-checks. It's `unitary` attribute returns fully `jax`-compatible matrix representation of the same circuit. Finally, its `learn` method uses our optimization routine to approximate a target unitary. First the code, then an example.
class Ansatz():
    """Parametric quantum circuit.

    Ansatz/parametric circuit is defined by the type of entangling blocks and their arrangement.
    Concrete values of parameters are not considered part of the ansatz. Class provides access
    to both the `qiskit` version of the circuit and the `jax.numpy` unitary matrix.

    Attributes:
        num_qubits: number of qubits.
        block_type: type of entangling blocks.
        num_angles: total number of angles (parameters) in the circuit.
        unitary: `jax.numpy` unitary matrix of the circuit as function of angles.
    Methods:
        circuit: `qiskit` version of the circuit.
        learn: numerical approximation of the target unitary.
    """
    # Fix: defaults were mutable lists ([[], 0] and []) shared across calls --
    # a classic Python pitfall. Immutable tuples match build_unitary's defaults.
    def __init__(self, num_qubits, block_type, layer_placements=((), 0), free_placements=()):
        self.num_qubits = num_qubits
        self.block_type = block_type
        self.layer, self.num_layers = layer_placements
        self.free_placements = free_placements
        # list(...) accepts both list and tuple placements.
        self.all_placements = list(self.layer)*self.num_layers + list(free_placements)
        # 3 single-qubit angles per qubit plus 4 angles per entangling block.
        self.num_angles = 3*num_qubits + 4*len(self.all_placements)
        self.unitary = lambda angles: build_unitary(self.num_qubits, self.block_type, angles,
                                                    layer_placements=[self.layer, self.num_layers],
                                                    free_placements=self.free_placements)

    def circuit(self, angles=None):
        """`qiskit` version of the circuit. If angles are not specified a parametric circuit is constructed."""
        if angles is None:
            # Symbolic qiskit Parameters stand in for concrete angle values.
            angles = np.array([Parameter('a{}'.format(i)) for i in range(self.num_angles)])
        surface_angles, block_angles, _, _ = split_angles(angles, self.num_qubits,
                                                          len(self.layer), self.num_layers,
                                                          len(self.free_placements))
        qc = QuantumCircuit(self.num_qubits)
        # Initial round of single-qubit gates.
        for n, a in enumerate(surface_angles):
            qc.rz(a[0], n)
            qc.rx(a[1], n)
            qc.rz(a[2], n)
        # Entangling gates according to placements.
        for a, p in zip(block_angles, self.all_placements):
            qc_block = block(self.block_type, a).circuit()
            qc = qc.compose(qc_block, p)
        return qc

    def learn(self, u_target, **kwargs):
        """Use numerical optimization to approximate u_target."""
        u_func = self.unitary
        return unitary_learn(u_func, u_target, self.num_angles, **kwargs)
# Here is an example that should illustrate how all this can be used.
# + tags=[]
n_qubits = 3
block_type = 'cx'
# For technical reasons all entangling gates are divided into 'layers' and 'free' gates.
single_layer = [[0, 1], [2, 1]]  # We make a single layer consisting of a 'cx' block on qubits [0,1]
# followed by a reversed 'cx' block on qubits [1,2].
layers = [single_layer, 3]  # The layer is repeated 3 times.
free_placements = [[1, 0], [0, 1], [1, 2], [2, 1]]  # Append the remaining free placements.
anz = Ansatz(n_qubits, block_type, layer_placements=layers, free_placements=free_placements)
# -
# Here is what resulting circuit looks like.
anz.circuit().draw(output='mpl')
# Just to make sure let us check that the unitary matrix of this circuit extracted from qiskit agrees with our own implementation for a random set of angles.
# +
angles = random.uniform(random.PRNGKey(0), shape=(anz.num_angles,), minval=0,maxval=2*jnp.pi)
qs_u = Operator(anz.circuit(angles).reverse_bits()).data # qiskit matrix representation
our_u = anz.unitary(angles) # our matrix representation
print(jnp.allclose(qs_u, our_u, rtol=1e-6, atol=1e-7))
# + [markdown] tags=[] toc-hr-collapsed=true
# # Experiments
# -
# Now that the hard work is behind we can sit back and reap the benefits. I will go through a series of examples. Primary goal is to back up the claims from the introduction about reaching the theoretical lower bound, agile performance on restricted topology etc. But I will also try to make clear how my code can be used if you wish to do a little experimenting with it yourself.
# + [markdown] tags=[]
# ## Learning 2-qubit random unitary
# -
# Let's start by learning a random 2-qubits unitary. First, define one.
u_target = unitary_group.rvs(4, random_state=0)
# Here is the parametrized circuit we are going to use. `cz` means that the entangling gate is controlled-Z while `free_placements` are just positions where to put these entangling gates. There isn't much choice for 2 qubits as you could guess. I will explain why I call these `free_placements` a bit later.
anz = Ansatz(2, 'cz', free_placements=[[0,1], [0,1], [0, 1]])
anz.circuit().draw(output='mpl') # anz.circuit() is a fully-functional `qiskit` version of our ansatz.
# + active=""
# The learning process is easy as pie:
# +
# %%time
angles_history, loss_history = anz.learn(u_target)
plt.plot(loss_history)
plt.yscale('log')
# -
# The graph shows that we achieve great fidelity in under 500 iterations.
#
# Don't believe me? Is there a way to tell if this plot indeed reflects a successful compilation without looking under the hood? OK OK, since you're asking, I will double-check using pure `qiskit`:
angles = angles_history[-1] # Last(=best) angles in the optimization process.
qc = anz.circuit(angles) # genuine qiskit circuit.
u_qs = Operator(qc.reverse_bits()).data # qiskit API to extract the unitary matrix.
disc(u_qs, u_target) # OK, I guess here you have believe I've implemented the cost function properly.
# If you want to compare the matrices component-wise, fine with me.
# Similar checks can be done in more complicated scenarios below.
#
# You can move forward to other examples or try some experiments here. Some ideas:
# 1. Changing gate type from `cz` to `cx` (should not affect the result).
# 1. Decreasing the number of layers (fidelity won't be nearly as good).
# 1. Increasing the number of layers (same fidelity with less iterations).
# + [markdown] tags=[]
# ## Learning 3-qubit random unitary
# -
# I advertised in the introduction that with just 14 entangling gates any 3-qubit unitary can be nearly perfectly approximated. Let me back up this claim. Here is how we can construct the corresponding ansatz.
# +
num_qubits = 3
block_type = 'cz'
depth = 14
# Fix: the result was previously assigned to a misspelled name (`layer_placemets`),
# so a stale `layer_placements` value from an earlier cell was silently passed
# to Ansatz and the circuit did not actually have 14 entangling gates.
layer_placements, free_placements = fill_layers(sequ_layer(num_qubits), depth)
anz = Ansatz(num_qubits, block_type, layer_placements=layer_placements, free_placements=free_placements)
anz.circuit().draw(output='mpl')
# -
# The way gate placements are passed to `Ansatz` here requires a bit of unpacking. This is an implementation detail I didn't take
# enough care to hide. For technical reasons I explained in the numerical section optimization is much faster when gates are arranged in a regular pattern. The pattern we use here is called `sequ_layer` and for three qubits it is simply
sequ_layer(num_qubits)
# i.e. it just lists all possible pairs of three qubits. However, since 14 % 3 = 2 the two last gates do not fit into the regular pattern and require a bit of a special treatment. This is what the function `fill_layers` does for us. Indeed
layer_placements, free_placements = fill_layers(sequ_layer(num_qubits), depth)
print('basic layer is repeated four times:', layer_placements)
print('remaining blocks reside at positions:', free_placements)
# I hope that did explain the way that gate positions are passed to the `Ansatz`. Instead of `sequ_layer` you can pass any arrangment of gates to be periodically repeated. We will do just that when considering a restricted topology.
#
# Now let's run the optimization.
# +
# %%time
u_target = unitary_group.rvs(2**num_qubits, random_state=0)
angles_history, loss_history = anz.learn(u_target)
plt.plot(loss_history)
plt.yscale('log')
# -
# OK, I hope this does convince you that our ansatz was indeed good enough! Another interesting thing to do is to make a sweep to see how the fidelity increases (error drops) with the number of layers.
# +
# %%time
best_loss = [[], []]
for depth in range(15):  # TLB(3)=14
    # Fix: use this section's `num_qubits` rather than the stale `n_qubits`
    # defined in an earlier example cell (both happen to equal 3 here, but
    # relying on that coincidence is fragile). Also fixes the `layer_placemets`
    # misspelling.
    layer_placements, free_placements = fill_layers(sequ_layer(num_qubits), depth)
    for i, block_type in enumerate(['cx', 'cz']):
        anz = Ansatz(num_qubits, block_type, layer_placements=layer_placements, free_placements=free_placements)
        angles, loss_history = anz.learn(u_target, target_disc=10e-4)
        best_loss[i].append(min(loss_history))
plt.plot(best_loss[0], label='cx loss')
plt.plot(best_loss[1], label='cz loss')
plt.ylabel('error')
plt.xlabel('number of entangling gates')
plt.legend()
# -
# One lesson here is that both types of two-qubit gates perform similarly well at all depths. This is not surprising because `cx` and `cz` gates can be related by single-qubit Hadamard transformations. It would be interesting to see if other two-qubit gates perform differently.
#
# Another important observation is that the best fidelity is a monotonic function of the the amount of two-qubit gates. There is some work on variational algorithms testing various metrics that would adequately reflect expressivity of the ansatz. I think that plain number of $CNOT$ gates should in fact be a fantastic and simple metric for this.
# + [markdown] tags=[]
# ## Learning 6-qubit random unitary
# -
# I do know that 3 is followed by 4, but shall we perhaps get more ambitious? Let's try to compile a 6-qubit random unitary (you can try to go higher if your machine allows):
# +
# %%time
num_qubits = 6
depth = TLB(num_qubits) # 1020 for 6 qubits
layer_placements, free_placements = fill_layers(sequ_layer(num_qubits), depth)
u_target = unitary_group.rvs(2**num_qubits, random_state=0)
anz = Ansatz(num_qubits, 'cz', layer_placements=layer_placements, free_placements=free_placements)
angles_history, loss_history = anz.learn(u_target, num_iterations=5000)
plt.title('number of qubits: {}'.format(num_qubits))
plt.xlabel('number of iterations')
plt.ylabel('error')
plt.plot(loss_history)
plt.yscale('log')
# -
# Note that depth of the theoretical lower bound for 6 qubits is $TLB(6)=1020$ which implies that there are $\approx 4000$ parameters in our ansatz. On my modest laptop the training completes in about 10 minutes. Of course I would not claim this to be the cutting edge, but our `JAX` setup seems to be competitive at the scale (3-6 qubits) addressed in the literature so far.
# + [markdown] tags=[]
# ## Restricted topology
# -
# One of the most remarkable features of this approach is that topology restrictions do not seem to bring any overhead to compilation of random unitaries. To make the point and illustrate this claim I will consider the least connected topology I can think of, the chain topology. The corresponding layer consists of all pairs of adjacent qubits.
def chain_layer(num_qubits):
    """Return the couplings of adjacent qubits for a 1D chain topology."""
    pairs = []
    for q in range(num_qubits - 1):
        pairs.append((q, q + 1))
    return pairs
# Here is a 6-qubit illustration.
Ansatz(6, 'cx', layer_placements=[chain_layer(6), 1]).circuit().draw(output='mpl')
# Here I drew a single layer consisting of 5 blocks. To reach the theoretical lower bound requires to stack together 1020/5=204 layers. Let's do that and see how the learning goes.
# +
# %%time
# Same 6-qubit target as before, but the ansatz is restricted to chain connectivity.
num_qubits = 6
depth = TLB(num_qubits)
layer_placements, free_placements = fill_layers(chain_layer(num_qubits), depth)

# Same seed as the fully connected run, so the targets are identical.
u_target = unitary_group.rvs(2**num_qubits, random_state=0)

anz = Ansatz(num_qubits, 'cx',
             layer_placements=layer_placements,
             free_placements=free_placements)
angles_history_chain, loss_history_chain = anz.learn(u_target)
# -
# Let's compare the results with the previously considered fully connected topology.
# Overlay the learning curves of both topologies on a log scale.
for curve, label in ((loss_history, 'fully connected'),
                     (loss_history_chain, 'chain')):
    plt.plot(curve, label=label)
plt.yscale('log')
plt.legend()
plt.title(f'number of qubits: {num_qubits}')
plt.xlabel('number of iterations')
plt.ylabel('error')
# As you can see, the chain topology performs only slightly worse than the fully connected topology which seems truly remarkable.
# + [markdown] tags=[] toc-hr-collapsed=true
# # Final remarks
# The main goal was to illustrate that numerical compilation of small-scale random unitaries can be very efficient in terms of gate count, and seems to reach the theoretical lower bound in all cases considered, regardless of topological restrictions.
#
# It is interesting to note that a variety of optimization procedures are used in the literature. In [M&S](http://arxiv.org/abs/2106.05649) a simple version of the gradient descent is used, in [R&Z](http://arxiv.org/abs/2109.06770) an interesting procedure of one-qubit gate decoupling is used (I must admit I do not understand exactly what it does), and in [KTS preprint](http://arxiv.org/abs/2109.13223) a funny one-angle-at-a-time optimization is used (because as a function of each angle the circuit is a simple trigonometric function, it is trivial to optimize one parameter at a time). Here we used a slightly more advanced version of the gradient descent, the Adam algorithm. All approaches seem to work well on random unitaries.
#
# My preliminary investigations show that for special gates things get much more complicated than for generic random unitaries. But this is where the most interesting stuff is found, e.g. compilation of multi-component Toffoli gates on restricted connectivity. I hope to address these cases in a future blog post!
| _notebooks/2021-12-13-Machine learning compilation of quantum circuits -- experiments.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/luisgs7/Monitoria/blob/main/aula_01.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="hBQlfKHkIhiC"
# # Monitoria 01 - VAMOS LÁ!!!!!!!
#
# 01 - Desenvolva uma calculadora que permita realizar a soma de dois numeros inteiros informados pelo usuário.
# + colab={"base_uri": "https://localhost:8080/"} id="t_WwOxem9nxI" outputId="be115667-e51b-4507-ff90-c043a704ce80"
# Variable declaration and data input: read the two integers to be added.
primeiro_valor = int(input("Digite o primeiro valor a ser somado: "))
segundo_valor = int(input("Digite o segundo valor a ser somado: "))
# Sum of the two values. (The former `soma = 0` initialization was dead code:
# it was immediately overwritten by this assignment, so it has been removed.)
soma = primeiro_valor + segundo_valor
print("O resultado da soma eh: ", soma)
# + [markdown] id="D4RLy_sLEuWn"
# 02 - Desenvolva um algoritmo que com base na idade informada, apresente ao usuário se ele poderá ou não votar, de acordo com o seguintes critérios:
# - Menor de 16 anos, apresente a mensagem: "Não Vota".
# - A partir de 16 anos e menor que 18, apresente a mensagem: "Voto Opcional".
# - A partir de 18 anos e menor que 70, apresente a mensagem: "Voto Obrigatório"
# - Maior de 70 anos, apresente a mensagem: "Voto Opcional"
# + colab={"base_uri": "https://localhost:8080/"} id="vb3SJZm5Asi4" outputId="fe083809-e1d8-4455-fb3e-840453de7b9b"
# Voting-eligibility message by age, per the exercise's rules.
idade = int(input("Digite a sua idade: "))
# Each branch only runs when the previous ones failed, so the lower bounds
# of the original compound conditions are implied and can be dropped.
if idade < 16:
    print("Não Vota")
elif idade < 18:
    print("Voto Opcional")
elif idade < 70:
    print("Voto Obrigatório")
else:
    print("Voto Opcional")
# + [markdown] id="q9tc9LsFJqj6"
# 03 - Imagine que de acordo com a nota que você tirar na prova da disciplina de Lógica de Programação e Introdução a Programação, você poderá realizar as seguintes atividades no Domingo:
#
# * Caso a sua nota seja menor ou igual a 8 ficará em casa
# * Se tirar até 9 na prova, poderá tomar apenas sorvete.
# * Se você tirar até 9.5 na prova, poderá tomar sorvete e ir ao cinema
# * Se tirar mais de 9.5 na prova da disciplina de Lógica de Programação e Introdução a Programação, você poderá tomar sorvete, ir ao cinema e almoçar no shopping.
#
#
#
# + id="yHx0p1L-LOpt" colab={"base_uri": "https://localhost:8080/"} outputId="6bdfd7a2-a22e-4c1e-b66c-a7b34cb7a393"
# Sunday activity allowed as a function of the exam grade.
nota = float(input("Digite a sua nota da prova: "))
# Thresholds are checked in increasing order; the first match wins.
faixas = (
    (8, "Ficará em casa"),
    (9, "Poderá tomar sorvete"),
    (9.5, "Poderá tomar sorvete e ir ao cinema"),
)
for limite, mensagem in faixas:
    if nota <= limite:
        print(mensagem)
        break
else:
    print("Poderá tomar sorvete, ir ao cinema e almoçar no shopping")
# + [markdown] id="djjVTNLOLX8Z"
# 4 - Informe a nota das duas avaliações da disciplina de Lógica de Programação e Introdução a Programação, em seguida calcule a média e verifique as condições abaixo para informar o status do estudante:
#
# - Caso a média seja menor ou igual a 6: imprima "Reprovado"
# - Caso a média seja menor que 7: imprima "Recuperação"
# - Caso a média seja menor ou igual a 8: imprima "Aprovado"
# - Caso a média das notas seja menor que 9: imprima "Parabéns"
# - Caso a média das notas for maior ou igual a 9: imprima "Excelente"
#
# + id="S1ZuUjnIM6u2" colab={"base_uri": "https://localhost:8080/"} outputId="7a9de74d-0d71-478b-e87b-366a28618290"
# Read the two exam grades and report the student's status from the average.
primeira_nota = float(input("Digite a nota da primeira prova: "))
segunda_nota = float(input("Digite a nota da segunda prova: "))
media = (primeira_nota + segunda_nota) / 2
# Select the status first, then print once. The mix of <= and < bounds is
# kept exactly as stated in the exercise.
if media <= 6:
    status = "Reprovado"
elif media < 7:
    status = "Recuperação"
elif media <= 8:
    status = "Aprovado"
elif media < 9:
    status = "Parabéns"
else:
    status = "Excelente"
print(status)
# + [markdown] id="MQK3xj1TNZie"
# # REVISEM A ATIVIDADE E OS SLIDES PESSOAL, TENDO DÚVIDAS POSTE NO GRUPO DA DISCIPLINA.
# # SUCESSO!!!!
| algoritmos/aula-01-25_05_2021.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] origin_pos=0
# # Natural Language Processing: Applications
# :label:`chap_nlp_app`
#
# We have seen how to represent tokens in text sequences and train their representations in :numref:`chap_nlp_pretrain`.
# Such pretrained text representations can be fed to various models for different downstream natural language processing tasks.
#
# In fact,
# earlier chapters have already discussed some natural language processing applications
# *without pretraining*,
# just for explaining deep learning architectures.
# For instance, in :numref:`chap_rnn`,
# we have relied on RNNs to design language models to generate novella-like text.
# In :numref:`chap_modern_rnn` and :numref:`chap_attention`,
# we have also designed models based on RNNs and attention mechanisms for machine translation.
#
# However, this book does not intend to cover all such applications in a comprehensive manner.
# Instead,
# our focus is on *how to apply (deep) representation learning of languages to addressing natural language processing problems*.
# Given pretrained text representations,
# this chapter will explore two
# popular and representative
# downstream natural language processing tasks:
# sentiment analysis and natural language inference,
# which analyze single text and relationships of text pairs, respectively.
#
# 
# :label:`fig_nlp-map-app`
#
# As depicted in :numref:`fig_nlp-map-app`,
# this chapter focuses on describing the basic ideas of designing natural language processing models using different types of deep learning architectures, such as MLPs, CNNs, RNNs, and attention.
# Though it is possible to combine any pretrained text representations with any architecture for either application in :numref:`fig_nlp-map-app`,
# we select a few representative combinations.
# Specifically, we will explore popular architectures based on RNNs and CNNs for sentiment analysis.
# For natural language inference, we choose attention and MLPs to demonstrate how to analyze text pairs.
# In the end, we introduce how to fine-tune a pretrained BERT model
# for a wide range of natural language processing applications,
# such as on a sequence level (single text classification and text pair classification)
# and a token level (text tagging and question answering).
# As a concrete empirical case,
# we will fine-tune BERT for natural language inference.
#
# As we have introduced in :numref:`sec_bert`,
# BERT requires minimal architecture changes
# for a wide range of natural language processing applications.
# However, this benefit comes at the cost of fine-tuning
# a huge number of BERT parameters for the downstream applications.
# When space or time is limited,
# those crafted models based on MLPs, CNNs, RNNs, and attention
# are more feasible.
# In the following, we start by the sentiment analysis application
# and illustrate the model design based on RNNs and CNNs, respectively.
#
# :begin_tab:toc
# - [sentiment-analysis-and-dataset](sentiment-analysis-and-dataset.ipynb)
# - [sentiment-analysis-rnn](sentiment-analysis-rnn.ipynb)
# - [sentiment-analysis-cnn](sentiment-analysis-cnn.ipynb)
# - [natural-language-inference-and-dataset](natural-language-inference-and-dataset.ipynb)
# - [natural-language-inference-attention](natural-language-inference-attention.ipynb)
# - [finetuning-bert](finetuning-bert.ipynb)
# - [natural-language-inference-bert](natural-language-inference-bert.ipynb)
# :end_tab:
#
| d2l/tensorflow/chapter_natural-language-processing-applications/index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
#
# ## Risk Parity Portfolio Selection using MOSEK Optimizer API
#
#
# Portfolio selection (or portfolio management) is the art and science of making decisions about investment mix and policy, matching investments to objectives, asset allocation for individuals and institutions, and balancing risk against performance.
#
# This tutorial demonstrates the use of exponential cones in modeling logarithmic terms using Python Fusion. It is largely based on <NAME>., <NAME>., <NAME>. (2013), Least-squares approach to risk parity in portfolio selection. We assume basic familiarity with the Markowitz risk-return model.
# # Risk Parity
#
# Consider the problem of investing in $n$ different assets. Let $x_i$ be the percentage of money invested in assets $i$, and $\mu_i$ its expected return. Let assume we know the covariance matrix $\Sigma$ that link the assets together. We can then define two global measures of the portfolio performance, expected return:
#
# $$
# \mu =\sum_{i=1}^n \mu_i x_i,
# $$
#
# and risk:
#
# $$
# \sigma^2 = \sum_{i=1}^n \sum_{j=1}^n \Sigma_{i,j}x_i x_j = x^T \Sigma x.
# $$
#
# For different choices of $x_i$ the investor will get different combinations of $\mu$ and $\sigma^2$. The set of all possible ($\sigma^2$, $\mu$) combinations is called the $\textit{attainable set}$. The theory assumes that investors prefer to minimize risk: given the choice of two portfolios with equal returns, investors will choose the one with the least risk.
#
# In a fully-invested portfolio, every asset $i$ has a contribution in terms of risk. Let $RC_i(x)$ be this contribution, we have that the total risk of the invested portfolio is:
#
# $$
# \mathcal{R}(x) = \sum_{i=1}^n RC_i(x)
# $$
#
# where
#
# $$
# RC_i(x) = x_i \frac{\partial \mathcal{R}(x)}{\partial x_i}
# $$
#
# Let $b=(b_1,..,b_n)$ be a vector of $\textbf{budgets}$ such as $b_i > 0$ and $\sum_{i=1}^n b_i = 1$. We define a **risk parity** (or risk budgeting) portfolio as the solution of the following set of equations:
#
# $$
# \begin{aligned}
# &\begin{cases}
# RC_1(x) = b_1 \mathcal{R}(x)\\
# ..\\
# RC_i(x) = b_i \mathcal{R}(x)\\
# ..\\
# RC_n(x) = b_n \mathcal{R}(x)
# \end{cases}
# \end{aligned}
# $$
#
# From the above equations, we can write the risk parity constraint as:
#
# $$
# \frac{x_i}{b_i} \frac{\partial \mathcal{R}(x)}{\partial x_i} = \frac{x_j}{b_j} \frac{\partial \mathcal{R}(x)}{\partial x_j} \hspace{1em} \forall i,j
# $$
#
# The Risk Parity portfolio requires that each asset has the same total contribution to risk, that is:
#
# $$
# \begin{aligned}
# &\frac{x_i}{b_i} \frac{\partial \mathcal{R}(x)}{\partial x_i} = \frac{x_j}{b_j} \frac{\partial \mathcal{R}(x)}{\partial x_j} && \forall i,j\\
# &x_i \geq 0, &&b_i > 0 \\
# &\sum_{i=1}^n x_i = 1, &&\sum_{i=1}^n b_i = 1
# \end{aligned}
# $$
#
# If we set
#
# $$
# \mathcal{R}(x) = \sqrt{x^T\Sigma x}
# $$
#
# solving the following problem that incorporates a logarithmic barrier in the objective function is equivalent to find a Risk Parity solution:
#
# $$
# \begin{equation}
# \label{eq:11}
# \begin{array}{l}
# \min_x \frac{1}{2} x^T \Sigma x - c \sum_{i=1}^{n} b_i ln(x_i)\\
# \text{s.t.}\\
# x > 0
# \end{array}
# \end{equation}
# $$
#
# where $b_i > 0$, $\sum_i b_i = 1$ and $c$ is a positive constant.
#
# ### Proof
#
# Since $\Sigma$ is positive semidefinite and the logarithm function is strictly concave, the objective function is $\textbf{strictly convex}$. From the first order condition, the unique solution is at the point where the gradient of the objective function is zero:
#
# $$
# \Sigma x - c b_i x^{-1} = 0
# $$
#
# Hence, at optimality we have
#
# $$
# (\Sigma x)_i = \frac{c b_i}{x_i} \Rightarrow \frac{x_i(\Sigma x)_i}{b_i} = \frac{x_j(\Sigma x)_j}{b_j}, \quad \forall i,j,
# $$
#
# since
#
# $$
# \frac{\partial \mathcal{R}(x)}{\partial x_i} = (\Sigma x)_i.
# $$
#
# # MOSEK Implementation
# We begin by translating the problem in conic form. Assume that we have a factorization $\Sigma=F^TF$, then $x^T\Sigma x=\|Fx\|_2^2$ and we can write an equivalent model as:
#
# \begin{equation}
# \begin{array}{ll}
# \min_x & r - c b^Tt \\
# \text{s.t.}& (1,r,Fx)\in \mathcal{Q}_r\\
# & t_i\leq \mathrm{ln} x_i,\ i=1,\ldots,n \\
# & x \geq 0
# \end{array}
# \end{equation}
#
# Indeed, the first constraint involving the rotated quadratic cone $\mathcal{Q}_r$ means
#
# $$ 2\cdot 1\cdot r\geq \|Fx\|^2 = x^T \Sigma x $$
#
# and moreover each inequality $t_i\leq \mathrm{ln} x_i$ can be expressed with an exponential cone as
#
# $$ (x_i, 1, t_i)\in K_{\mathrm{exp}}. $$
#
# We proceed with implementation of this model in Fusion:
# +
from mosek.fusion import *
def parityModel(n, F, c, b):
    """Build the Fusion model for risk parity:

        min  r - c * b^T t
        s.t. (1, r, F x) in the rotated quadratic cone
             (x_i, 1, t_i) in the exponential cone, i = 1..n

    n is the number of assets, F a factor matrix with Sigma = F^T F,
    c a positive constant and b the budget vector. Returns the Model.
    """
    model = Model('parity')

    # Decision variables: weights x, log bounds t, quadratic-risk bound r.
    x = model.variable('x', n)
    t = model.variable(n)
    r = model.variable()

    # Objective: minimize r - c * b^T t.
    objective = Expr.sub(r, Expr.mul(c, Expr.dot(b, t)))
    model.objective(ObjectiveSense.Minimize, objective)

    # Rotated quadratic cone (1, r, Fx): enforces 2r >= ||Fx||^2 = x^T Sigma x.
    model.constraint(Expr.vstack(1, r, Expr.mul(F, x)), Domain.inRotatedQCone())

    # Exponential cones (x_i, 1, t_i): enforce t_i <= log(x_i), hence x > 0.
    model.constraint(Expr.hstack(x, Expr.constTerm(n, 1.0), t), Domain.inPExpCone())

    return model
# -
# Now, we create some sample data.
# +
import sys

import numpy as np

# Random problem data: k factors, n assets.
n = 20
k = 8
F = np.random.sample([k, n])
Sigma = np.dot(F.transpose(), F)  # used later to verify the risk contributions
b = np.random.sample(n)
b = b / np.sum(b)  # normalize the budget vector so it sums to one
c = 1.0

# Build the risk-parity model and solve it, streaming the solver log to stdout.
PM = parityModel(n, F, c, b)
PM.setLogHandler(sys.stdout)
PM.solve()
# -
# We can access, normalize and plot the optimal solution
# +
# Extract the optimal weights, normalize to a fully invested portfolio, plot.
xx = PM.getVariable('x').level()
xx = xx / sum(xx)
# %matplotlib inline
import matplotlib.pyplot as plt

plt.bar(np.arange(n), xx)
plt.show()
# -
# We can also verify that the total contribution of each asset towards total risk is proportional to the budgeting vector $b$, as required:
# Risk contribution of each asset, RC_i = x_i * (Sigma x)_i; the ratio RC/b
# should be (approximately) flat across assets at a risk-parity solution.
RC = xx * np.dot(Sigma, xx)
plt.bar(np.arange(n), RC / b)
plt.show()
# <a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/80x15.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</a>. The **MOSEK** logo and name are trademarks of <a href="http://mosek.com">Mosek ApS</a>. The code is provided as-is. Compatibility with future release of **MOSEK** or the `Fusion API` are not guaranteed. For more information contact our [support](mailto:<EMAIL>).
| portfolio-riskparity/riskparity.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Illustrates linear complementarity problem
#
# **<NAME>, PhD**
#
# This demo is based on the original Matlab demo accompanying the <a href="https://mitpress.mit.edu/books/applied-computational-economics-and-finance">Computational Economics and Finance</a> 2001 textbook by <NAME> and <NAME>.
#
# Original (Matlab) CompEcon file: **demslv10.m**
#
# Running this file requires the Python version of CompEcon. This can be installed with pip by running
#
# # !pip install compecon --upgrade
#
# <i>Last updated: 2021-Oct-01</i>
# <hr>
#
import matplotlib.pyplot as plt
# Use the seaborn style sheet for all figures in this demo.
plt.style.use('seaborn')
def basicsubplot(ax, title, yvals, solution):
    """Draw one complementarity panel on ax: the line f through yvals over
    the interval [a, b] (mapped to [0, 1]) and a red dot at the solution."""
    ax.set(title=title,
           xlabel='',
           ylabel='',
           xlim=[-0.05, 1.05],
           ylim=[-2, 2],
           xticks=[0, 1],
           xticklabels=['a', 'b'],
           yticks=[-2, 0, 2],
           yticklabels=['', '0', ''])
    # Horizontal zero axis.
    ax.plot([0, 1], [0, 0], 'k-', linewidth=1.5)
    # Dotted verticals marking the interval endpoints a and b.
    for edge in (0, 1):
        ax.plot([edge, edge], [-2, 2], 'k:', linewidth=2.5)
    # The function f and the solution marker.
    ax.plot([0, 1], yvals)
    ax.plot(solution[0], solution[1], 'r.', ms=18)
# ## Possible Solutions to Complementarity Problem, $f$ Strictly Decreasing
# Three qualitative cases for a strictly decreasing f on [a, b]; in each
# panel the red dot marks the complementarity solution.
fig1, axs = plt.subplots(1,3,figsize=[9,4])
basicsubplot(axs[0],'f(a) > f(b) > 0', [1.5, 0.5], [1.0,0.5])
basicsubplot(axs[1],'f(a) > 0 > f(b)', [0.5, -0.5], [0.5,0.0])
basicsubplot(axs[2],'0 > f(a) > f(b)', [-0.5, -1.5],[0.0,-0.5])
# ## Possible Solutions to Complementarity Problem, $f$ Strictly Increasing
# Three qualitative cases for a strictly increasing f; the middle panel gets
# two extra red dots below, marking additional solutions at the endpoints.
fig2, axs = plt.subplots(1,3,figsize=[9,4])
basicsubplot(axs[0],'f(a) < f(b) < 0', [-1.5, -0.5], [0.0,-1.5])
basicsubplot(axs[1],'f(a) < 0 < f(b)', [-0.5, 0.5], [0.5,0.0])
basicsubplot(axs[2],'0 < f(a) < f(b)', [0.5, 1.5],[1.0,1.5])
axs[1].plot(0.0,-0.5,'r.',ms=18)
axs[1].plot(1.0,0.5,'r.',ms=18)
| _build/jupyter_execute/notebooks/slv/10 Illustrates linear complementarity problem.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Contributing to any of the **paso** Projects
# The public [project](https://github.com/bcottman/paso) is on GitHub .
#
# You can contribute to the **paso** Project in many ways. Below are listed some of the areas:
#
# - Fix a typo(s) in documentation.
# - Improve some of the documentation.
# - Fix typo or/and improve a docstring(s).
# - Report a documentation bug.
# - Improve existing test or/and code more tests.
# - Execute test suite for a distribution candidate and report any test failures.
# - Post a new issue.
# - Fix a bug in a issue.
# - Re-factor some code to add functionality or improve performance.
# - Suggest a new feature.
# - Implement a new feature.
# - Improve or/and add a lesson.
#
# Remember to post in issues the proposed change and if accepted it will be closed.
# See [issues](https://github.com/bcottman/paso/issues) or [projects](https://github.com/bcottman/paso/projects/1) for open issues for the **paso** project.
#
# You can find a issue , or you can post a new issue, to work on. Next, or your alternative first step, you need to set-up your local **paso** development environment. Code, Documentation and Test can have separate or shared environments. Each one is discussed in the following sections.
# ## Contributing to the **paso** Documentation
# One of the best ways to start CONTRIBUTE-ing to the **paso** project is by accomplishing a documentation task.
#
# The **paso** project uses ``Sphinx``. Sphinx, written by <NAME> and licensed under the BSD license, was originally created for the Python documentation. Unfortunately, you will be wrestling with ``.rst`` unless you choose the much simpler markup of Jupyter notebooks or even the more powerful ``LaTex``.
#
# Once you finish your documentation task, please Submit a GitHub pull request, as show in
#
# Creating a push request
#
# in [Github pull request](https://github.com/bcottman/paso/tree/master/docs/nbdoc/Contributing.ipynb)
#
# Documentation sources can be found [Doc sources](https://github.com/bcottman/paso/docs/nbdoc/)
#
# **paso** Documentation can be found [Docs.](https://paso.readthedocs.io)
# ### Create a Documentation environment using conda
# Most of what you need in both ``Sphinx`` packages and environment creation are in the [Anaconda](https://www.anaconda.com) distribution.
#
# First you should create an environment for **paso** documenation. Detailed instuctions are found [here.](https://conda.io/docs/user-guide/tasks/manage-environments.html).
#
# Almost all of the packages you need for documentation are included in the Anaconda distribution. You only need add to your environment (for this and other examples we call our environment **paso**).
#
# >>> (paso) pip install paso
# >>> (paso) pip install sphinx
# >>> (paso) pip install sphinx-rtd-theme
# >>> (paso) pip install sphinx-apidoc
# ### Create your own docs
#
# The ``bcottman/paso project has created the ``docs``directory and the initial infrastructure. You will finish creating your local doc development environment.
#
# Clone the ``bcottman/paso to the local directory you have selected using ``git``,
#
# Go to the paso project directory
#
# >>> (paso) cd ../paso/docs
#
# Create ``rst`` files from docstrings in ``py`` files.:
#
# >>> (paso) sphinx-apidoc -o generated/ -d 4 -fMa ../paso
#
# Generate documnentation:
#
# >>> (paso) clear;make clean;make html
#
# The HTML of the documentation can be found [here.](.../paso/docs/_build/html/index.html)
# ### How to use notebooks (``ipynb`` ) for documentation
#
# Python notebooks (``ipynb`` ) can be used to document instead of or with ``rst`` files. ``nbsphinx`` is a Sphinx extension that enables ``ipynb`` files.
#
# To Install ``nbsphinx``:
#
# >>> (**paso**) pip install nbsphinx --user
#
# in your **paso** doc environment.
#
# Edit your ``conf.py`` and change ``source_suffix``:
#
# source_suffix = ['.rst', '.ipynb']
#
# Edit your ``index.rst`` and add the names of your *.ipynb files to the toctree.
#
# More detailed information is found [here.](https://nbsphinx.readthedocs.io/en/0.2.8/rst.html)
# ### Sphinx and other Documentation Resources
# - [Overview](https://pythonhosted.org/an_example_pypi_project/sphinx.html)
# - [yao](https://codeandchaos.wordpress.com/2012/07/30/sphinx-autodoc-tutorial-for-dummies/)
# + [markdown] heading_collapsed=true
# ## Contributing to the **paso** Issues and Reviews
# + [markdown] hidden=true
#
# - Answering queries on the issue tracker.
# - Investigating, isolating or/and implementing code that demostrates a bug that other(s) have reported on [issues](https://github.com/bcottman/paso/issues).
# - Review other developers’ pull requests especially merges that could cause conflicts.
# - Report any issues that you may encounter.
# - Reference the project from your blog aor publications that you may write.
# - Link to it from your social media account or website
# - Use your imagination. Let us know as we will add to this list.
#
# -
# ## Contributing to the **paso** Tests
# The next suggested step (sic.) to **CONTRIBUTE** to the various **paso** projects is testing. Create a testing enhancement, then Submit a GitHub pull request.
#
# Test sources can be found [**paso** unit test suite](https://github.com/bcottman/**paso**/tests). Developing a [lesson](https://github.com/bcottman/paso/paso/lessons) serves also as integration test.
#
# Adding more tests for existing **paso** objects, and other supporting code is a great method to familiarize yourself and make your starting contributions to the **paso** project.
#
# Also,it will be not be possible for your contributed code to be merged into the master **paso** repo without accompanying docstring and unit tests that provide coverage for the critical parts of your contribution.
#
# You can expect your contribution to not past review unless tests are provided to cover edge cases and test for error conditions. Remember, you are asking people to use your contributed code.
# + [markdown] heading_collapsed=true
# ### **paso** Test Guidelines
# + [markdown] hidden=true
# Some of these guidelines have been adapted from [writing tests](https://docs.python-guide.org/writing/tests/) and
# [pandas testing](https://github.com/pandas-dev/pandas/wiki/Testing).
#
#
#
# - (RECOMMENDED) Learn your tools and learn how to run a single test or a test case. Then, when developing a function inside a module, run this function’s tests frequently, ideally automatically when you save the code.
#
# - (REQUIRED) Each test unit must be fully independent. Each test must be able to run alone, and also within the test suite, regardless of the order that they are called. The implication of this rule is that each test must be loaded with a fresh dataset and may have to do some cleanup afterwards. The standard is that this is handled by setUp() and tearDown() methods. (If you use ``pytest`` it will take care of this for you.)
#
# - (RECOMMENDED) Run the full test suite before a coding session, and run it again after.
#
# - (RECOMMENDED) The first step when you are debugging your code is to write a new test pinpointing the bug. While it is not always possible to do, those bug catching tests are among the most valuable pieces of code in your project.
#
# - (RECOMMENDED) Use long and descriptive names for testing functions. These function names are displayed when a test fails, and should be as descriptive as possible.
#
# - (REQUIRED) When something goes wrong or has to be changed, and if your code has a good set of tests, you or other maintainers will rely largely on the testing suite to fix the problem or modify a given behavior. Therefore the testing code will be read as much as or even more than the running code.
#
# - (RECOMMENDED) Testing code serves as an introduction for new developers. When someone has to work on the code base, running and reading the related testing code is often the best way to start. They will discover the hot spots, where most difficulties arise, and the corner cases. If they have to add some functionality, the first step should be to add a test to ensure that the new functionality is not already a working path that has not been plugged into the interface.
# + [markdown] heading_collapsed=true
# ### Create a testing environment
# + [markdown] hidden=true
# We recommend you create a virtual environment for your testing. Use your favorite tool to create a virtual environment.
#
# Use or source activate (on mac or ubuntu) the virtual environment named paso:
#
# >>> source activate paso
#
# install the packages you will need to develop test for paso. The following are the standard packages we use:
#
# (paso)>>> pip install paso
# (paso)>>> pip install pytest
# (paso)>>> pip install pandas
# (paso)>>> pip install coverage
#
# You may already have pandas as part of your environment. What you will need to import into python is:
#
# import pytest
# # paso imports
# import joblib
# from paso.pasoBase import pasoFunctionBase, pasoModelBase,pasoError
# from paso.common.PipeLine import get_paso_log
# <any other needed paso files>
#
# + [markdown] heading_collapsed=true
# ### Recommended Testing Resources
# + [markdown] hidden=true
# - https://docs.python-guide.org/writing/tests/
# - https://semaphoreci.com/community/tutorials/testing-python-applications-with-pytest
# - http://pythontesting.net/framework/nose/nose-introduction/
# - https://ymichael.com/2014/12/17/python-testing-with-nose-for-beginners.html
# - https://github.com/pandas-dev/pandas/wiki/Testing
# + [markdown] heading_collapsed=true
# ### Coverage tool for **paso**
# + [markdown] hidden=true
# Coverage measurement is typically used to measure the effectiveness of tests. It can show which parts of your code are being exercised by tests, and which are not. You can use any coverage tool you wish. We recommend
#
# Coverage.py (see documentation for installation and usage)
#
# ,a tool for measuring code coverage of Python programs. It monitors your test suite, noting which parts of the code have been executed, then analyzes the source to identify code that could have been executed but was not.
#
# Also a good introduction to Coverage.py is:
#
# https://www.blog.pythonlibrary.org/2016/07/20/an-intro-to-coverage-py/
# + [markdown] hidden=true
# #### Branch Coverage
# + [markdown] hidden=true
#
# You can use another powerful feature of coverage.py: branch coverage. Testing every possible branch path through code, while a great goal to strive for, is a secondary goal to getting 100% line coverage for the entire **paso** package.
#
# If you decide you want to try to improve branch coverage, add the ``--branch`` flag to your coverage run:
#
# /python COVERAGEDIR run --pylib --branch <arguments>
#
# This will result in a report stating not only what lines were not covered, but also what branch paths were not executed.
# -
# ## Contributing to **paso** Distributions
# This contribution consists of running the test suite on configuration of the underlying environment of the distribution.
# ## Contributing to the **paso** Code
# 1. You want to propose a new Feature and implement it post about your intended feature (under issues or projects) and project management shall discuss the design and implementation. Once we agree that the plan looks good, go ahead and implement it.
#
# 1. You want to implement a feature refactor or bug-fix for an outstanding issue. Look at the outstanding [issues](https://github.com/bcottman/paso/issues). Pick an issue and comment on the task that you want to work on this feature. If you need more context on a particular issue, please ask and someone will provide a suggestion on how to proceed.
#
# Code sources can be found under the [source code](https://github.com/bcottman/paso/paso)/ directory.
#
# Once you finish your feature enhancement, please Submit a GitHub pull request.
# ## Contributing to the **paso** lessons
# **paso** enables both documentation and learning by **paso** **lessons**. Tasks for **lessons** include:
#
# 1. Add to/improve a lesson.
# 1. Implement new lesson.
#
#
# ## Creating a push request
# Navigate to the [paso repo](https://github.com/bcottman/paso).
#
# 1. Click the “Fork” button in the top-right part of the page, right under the main navigation. A copy of the **paso** repo in your Github account.
# 1. Clone your Github account copy of the of the **paso** repo on your local client machine where you will do your development enhancements for the **paso** project.
#
# cd <local client machine development directory>
# git clone https://github.com/<your github username>paso.git
#
# Locally, create a branch for your work
#
# git checkout -b <branch-name>
#
# Locally, accomplish your changes to N files named \<file-1\>...\<file-n\>
#
# git add <file-1>
# .
# .
# git add <file-n>
#
# commit locally N-files from staging area
#
# git commit -a -m '<message-documentation-change>'
#
# show associated remote repo on GitHub
#
# git remote -v
#
# push to remote GitHub, aliased as origin, from local branch \<branch-name\>
#
# git push origin <branch-name>
#
# 1. on your remote Github account repo, change to branch
#
# <branch-name>
#
# 1. Navigate to the base repo at [paso repo](https://github.com/bcottman/paso) issues and click the “New Pull Request” button
#
# what you are doing is: “I have some code in my fork of the ****paso**** project in ``<branch-name>`` that I want to merge into the ****paso**** base repo
#
| docs/nbdoc/Contributing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sos
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: SoS
# language: sos
# name: sos
# ---
# # Quick Start Guide
# Before we start our tutorial, it is important to understand a SoS script can be executed from command line in batch mode, or interactively in Jupyter notebook. This tutorial is written in Jupyter but you can execute the scripts in batch mode if you save them to disk and execute them using command `sos run` or `sos-runner`.
#
# The following commands create some fake input files and fake commands in order to demonstrate the commands. Please refer to [Chapter Notebook Interface](../documentation/Notebook_Interface.html) of the SoS documentation if you would like to learn more about them.
# + kernel="SoS"
# %set -v2 -s default
# %preview --off
!touch mutated.fastq control.fastq ctrl.fastq case.fastq
!cp STAR Rscript ~/.sos/bin/
!sos remove --signature -v 0
# -
# ## Multi-Language Notebook
# Let us assume that you are a bioinformaticist needed to compare the expression levels between two samples. You can use a Jupyter notebook with SoS kernel to perform the analysis using different script, the trick here is to select the right kernel for each cell. For example, you can run the following cell in bash if you choose `bash` as the kernel.
# + kernel="Bash"
# Put the stub STAR binary (copied earlier into ~/.sos/bin) on PATH.
export PATH=$HOME/.sos/bin:$PATH
# Build a STAR index of the reference genome.
STAR --runMode genomeGenerate --genomeFastaFile human38.fastq \
  --genomeDir STAR_index
# Align each sample's reads to the reference, producing sorted BAM output
# and per-gene counts under aligned/.
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
  --readFilesIn control.fastq --quantMode GeneCounts \
  --outFileNamePrefix aligned/control
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
  --readFilesIn mutated.fastq --quantMode GeneCounts \
  --outFileNamePrefix aligned/mutated
# -
# The first command (supposedly) builds an index of the reference genome in preparation for the latter steps, the second command aligns reads from the first sample to the reference genome, and the third command aligns reads from the second sample to the reference genome. Do not panic if you do not know what these commands are doing, this is just an example.
#
# These commands generate, among other files, two files named ``aligned/control.out.tab`` and ``aligned/mutated.out.tab`` with expression counts of all genes. You then wrote a [R](https://www.r-project.org/) script to analyze the results, something like
# + kernel="R"
control.count <- read.table('aligned/control.out.tab')
mutated.count <- read.table('aligned/mutated.out.tab')
# normalize, compare, output etc, ignored.
pdf('myfigure.pdf')
# plot results
dev.off()
# -
# ## String interpolation
# When you work with multiple kernels in the same SoS notebook, the SoS kernel is called the **master kernel** and the rest are called **subkernels**. The SoS kernel is based on a Python syntax so you can define any variables and execute any Python statements such as
# + kernel="SoS"
INDEXDIR = 'STAR_index'
ALIGNDIR = 'aligned'
# -
# Then, a trick to "compose" scripts in subkernels is to use string interpolation to expand expressions inside `{ }` with their values. For example, with the above definitions, you can execute the commands as
# + kernel="Bash"
# %expand
# using a fake STAR in ~/
export PATH=$HOME/.sos/bin:$PATH
# index reference genome
STAR --runMode genomeGenerate --genomeFastaFile human38.fastq \
--genomeDir {INDEXDIR}
# align reads to the reference genome
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn control.fastq --quantMode GeneCounts \
--outFileNamePrefix {ALIGNDIR}/control
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn mutated.fastq --quantMode GeneCounts \
--outFileNamePrefix {ALIGNDIR}/mutated
# + kernel="R"
# %expand
control.count <- read.table('{ALIGNDIR}/control.out.tab')
mutated.count <- read.table('{ALIGNDIR}/mutated.out.tab')
# normalize, compare, output etc, ignored.
pdf('myfigure.pdf')
# plot results
dev.off()
# -
# using magic `%expand`. Here we use the default method to expand expressions inside sigil `{ }`, but if you scripts already have braces, you can use magics such as `%expand ${ }` to use an alternative sigil.
# ## Data Exchange among subkernels
# String interpolation is useful for composing scripts to be executed by subkernels, but it would be awkward to pass a large amount of information in this way, and it disallows passing information among subkernels.
#
# SoS provides a few `magics` to facilitate data exchange among subkernels. For example, the `%get` magic retrieves information from the SoS kernel to a subkernel, so your shell script could be written as:
# + kernel="Bash"
# %get INDEXDIR ALIGNDIR
# index reference genome
STAR --runMode genomeGenerate --genomeFastaFile human38.fastq \
--genomeDir $INDEXDIR
# align reads to the reference genome
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn control.fastq --quantMode GeneCounts \
--outFileNamePrefix $ALIGNDIR/control
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn mutated.fastq --quantMode GeneCounts \
--outFileNamePrefix $ALIGNDIR/mutated
# -
# after retrieving `INDEXDIR` and `ALIGNDIR` from the SoS kernel and creating two native shell variables `INDEXDIR` and `ALIGNDIR`. Similarly, your R script could be written as
# + kernel="R"
# %get ALIGNDIR
control.count <- read.table(paste0(ALIGNDIR, '/control.out.tab'))
mutated.count <- read.table(paste0(ALIGNDIR, '/mutated.out.tab'))
# normalize, compare, output etc, ignored.
pdf('myfigure.pdf')
# plot results
dev.off()
# -
# The subkernels are persistent so the passed variables can be used in later cells using the same kernel, for example, variable `ALIGNDIR` can be used in the Bash kernel in new commands as follows:
# + kernel="Bash"
echo Align directory is $ALIGNDIR
# -
# Note that SoS magics and multiple live subkernels are only supported by Jupyter notebook and cannot be used in SoS batch mode.
# ## Your first SoS script
# The project completed successfully and you needed to archive the scripts for later reference. The Jupyter notebook is a perfect format to record both the commands and the results. However, if you would like to only save the steps of your analysis, you can save the notebook in `.sos` format (`File`->`Download As`->`.sos`) or copy/paste the commands to a single SoS script named ``myanalysis.sos``, with content
# + kernel="SoS"
# %run
#!/usr/bin/env sos-runner
#fileformat=SOS1.0
# This script aligns raw reads of a control and a mutated sample
# to the reference genome and compare the expression values
# of the samples at genes A, B and C.
run:
# index reference genome
STAR --runMode genomeGenerate --genomeFastaFile human38.fastq \
--genomeDir STAR_index
# align reads to the reference genome
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn control.fastq --quantMode GeneCounts \
--outFileNamePrefix aligned/control
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn mutated.fastq --quantMode GeneCounts \
--outFileNamePrefix aligned/mutated
R:
control.count <- read.table('aligned/control.out.tab')
mutated.count <- read.table('aligned/mutated.out.tab')
# normalize, compare, output etc, ignored.
pdf('myfigure.pdf')
# plot results
dev.off()
# -
# Here **`run`** and **`R`** are SoS actions that execute the following scripts in `bash` and `R` respectively. The scripts are included verbatim and end after reaching another SoS action or directive. For the sake of time, we created fake `STAR` and `R` commands that simply state the files generated.
#
# The script is executed in Jupyter when the notebook (this documentation) is executed. In command line, you would need to run the script using command
#
# ```
# % sos run myscript
# ```
#
# or
#
# ```
# % myscript.sos
# ```
#
# if you make `myscript.sos` executable (by running `chmod +x myscript.sos`).
#
# As you can see, this sos script simply executes the embedded shell and R script and generates the four output files.
# Although it is convenient to embed scripts verbatim, it is often clearer to indent the script and write your SoS script as:
# + kernel="SoS"
# %run
#!/usr/bin/env sos-runner
#fileformat=SOS1.0
# This script aligns raw reads of a control and a mutated sample
# to the reference genome and compare the expression values
# of the samples at genes A, B and C.
run:
# index reference genome
STAR --runMode genomeGenerate --genomeFastaFile human38.fastq\
--genomeDir STAR_index
# align reads to the reference genome
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn control.fastq --quantMode GeneCounts \
--outFileNamePrefix aligned/control
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn mutated.fastq --quantMode GeneCounts \
--outFileNamePrefix aligned/mutated
# compare expression values
R:
control.count <- read.table('aligned/control.out.tab')
mutated.count <- read.table('aligned/mutated.out.tab')
# normalize, compare, output etc, ignored.
pdf('myfigure.pdf')
# plot results
dev.off()
# -
# The scripts in this case end with the end of indentation so that you can add some comments for the `R` script without being considered as part of the previous script. The script works identically to the one without indentation.
# ## Separate scripts into steps
# Because the scripts perform different tasks, it is logically clearer to separate them into different **steps**. Actually, because the first reference-generating command is not a data processing step, it makes sense to separate it into two scripts. Now we can insert step headers to the script as follows:
# + kernel="SoS"
# %run
#!/usr/bin/env sos-runner
#fileformat=SOS1.0
# This script aligns raw reads of a control and a mutated sample
# to the reference genome and compare the expression values
# of the samples at genes A, B and C.
[1]
# index reference genome
run:
STAR --runMode genomeGenerate --genomeFastaFile human38.fastq \
--genomeDir STAR_index
[2]
# align reads to the reference genome
run:
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn control.fastq --quantMode GeneCounts \
--outFileNamePrefix aligned/control
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn mutated.fastq --quantMode GeneCounts \
--outFileNamePrefix aligned/mutated
[3]
# compare expression values
R:
control.count <- read.table('aligned/control.out.tab')
mutated.count <- read.table('aligned/mutated.out.tab')
# normalize, compare, output etc, ignored.
pdf('myfigure.pdf')
# plot results
dev.off()
# -
# Now, when you execute the script with command
#
# ```
# sos run myscript
# ```
#
# SoS will display `default_1`, `default_2` and `default_3` to report progress. The comments after section heads are considered step comments and will also be displayed during execution.
# It is worth noting that you can write SoS workflows in Jupyter notebook as long as you specify a header for each step. For example, you can create a Jupyter notebook with the following cells
# + kernel="SoS"
[1]
# index reference genome
run:
STAR --runMode genomeGenerate --genomeFastaFile human38.fastq \
--genomeDir STAR_index
# + kernel="SoS"
[2]
# align reads to the reference genome
run:
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn control.fastq --quantMode GeneCounts \
--outFileNamePrefix aligned/control
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn mutated.fastq --quantMode GeneCounts \
--outFileNamePrefix aligned/mutated
# + kernel="SoS"
[3]
# compare expression values
R:
control.count <- read.table('aligned/control.out.tab')
mutated.count <- read.table('aligned/mutated.out.tab')
# normalize, compare, output etc, ignored.
pdf('myfigure.pdf')
# plot results
dev.off()
# -
# and execute the workflow with command
#
# ```
# sos run myscript.ipynb
# ```
#
# The notebook can contain markdown cells and cells with other kernels, and sos cells without section header, and the `sos run` (and other) commands will ignore all such cells and only extract and execute workflows defined in the notebook.
# ## Make the script work for other input files
# After a while, before you almost forgot about this analysis, you needed to analyze another pair of samples. You could copy ``myanalysis.sos`` to ``myanalysis2.sos``, change filenames and run it, but an easier way is to change your SoS file to accommodate other input files. This can be done by defining a command line argument and passing files name to a **SoS variable**:
# + kernel="SoS"
# %run
#!/usr/bin/env sos-runner
#fileformat=SOS1.0
# This script aligns raw reads of a control and a mutated sample
# to the reference genome and compare the expression values
# of the samples at genes A, B and C.
# Two input files in .fastq formats. The first one for control sample
# and the second one for mutated sample.
parameter: fastq_files=['control.fastq', 'mutated.fastq']
[1]
# index reference genome
run:
STAR --runMode genomeGenerate --genomeFastaFile human38.fastq \
--genomeDir STAR_index
[2]
# align reads to the reference genome
run: expand=True
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn {fastq_files[0]} --quantMode GeneCounts \
--outFileNamePrefix aligned/control
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn {fastq_files[1]} --quantMode GeneCounts \
--outFileNamePrefix aligned/mutated
[3]
# compare expression values
R: expand=True
control.count <- read.table('aligned/control.out.tab')
mutated.count <- read.table('aligned/mutated.out.tab')
# normalize, compare, output etc, ignored.
pdf('myfigure.pdf')
# plot results
dev.off()
# -
# A command line argument `fastq_files` is defined in with `parameter` keyword. With this definition, you can pass two filenames to variable `fastq_files` from command line
#
# ```bash
# sos run myanalysis --fastq_files ctrl.fastq case.fastq
# ```
#
# `{fastq_files[0]}` and `{fastq_files[1]}` in command `STAR --genomeDir ...` will be replaced with their values before the commands are executed. Here `fastq_files[0]` and `fastq_files[1]` are Python expressions that will be evaluated during execution.
#
# In Jupyter notebook, we can execute the previous cell with additional option using magic `%rerun`.
# + kernel="SoS"
# %rerun --fastq_files ctrl.fastq case.fastq
# -
# Note that the `parameter` statement is defined before any SOS step, actually in a special `[global]` section although the section is usually ignored in `.sos` format.
# ## Ignore steps that do not need to be rerun
# Although the SoS script now accepts command line arguments, it is still no more than a compilation of scripts and you immediately realized that it is a waste of time to execute the first command each time. To solve this problem, you can convert the SoS script to a real workflow by telling SoS the input and output of each step:
# + kernel="SoS"
# %run -v4
#!/usr/bin/env sos-runner
#fileformat=SOS1.0
# This script aligns raw reads of a control and a mutated sample
# to the reference genome and compare the expression values
# of the samples at genes A, B and C.
# Two input files in .fastq formats. The first one for control sample
# and the second one for mutated sample.
parameter: fastq_files=['control.fastq', 'mutated.fastq']
[1]
# create a index for reference genome
output: 'STAR_index/chrName.txt'
run:
STAR --runMode genomeGenerate --genomeFastaFile human38.fastq \
--genomeDir STAR_index
[2]
# align the reads to the reference genome
input: fastq_files
depends: 'STAR_index/chrName.txt'
output: ['aligned/control.out.tab', 'aligned/mutated.out.tab']
run: expand=True
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn {_input[0]} --quantMode GeneCounts \
--outFileNamePrefix aligned/control
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn {_input[1]} --quantMode GeneCounts \
--outFileNamePrefix aligned/mutated
[3]
# compare expression values
print(input)
output: 'myfigure.pdf'
R: expand=True
control.count <- read.table('{input[0]}')
mutated.count <- read.table('{input[1]}')
# normalize, compare, output etc, ignored.
pdf('{_output}')
# plot results
dev.off()
# -
# Here we
#
# - Use **output directive** to specify the expected output of all steps.
# - Use **input directive** to specify the input of step 2. Step 1 by default has no input and input for step 3 by default is the output of step 2, its previous step.
# - Use **depends directive** to let step 2 depend on the output of step 1.
# - Use `{_input[0]}` and `{_input[1]}` in step 2 and 3 because these steps now have properly-defined `_input`. This variable is defined by step input as the input file of the step.
#
# With such information, when you run the same command with another set of input file
#
# ```bash
# sos run myanalysis --input ctrl.fastq case.fastq
# ```
#
# SoS will ignore step 1 if this step has been run with output `STAR_index/chrName.txt`. The same happens to step 2 and 3 so all steps will be ignored if you run the script repeatedly with the same input and processing scripts. SoS uses **runtime signature** for each step and will re-run the step if and only if the content or filename of input, output files or the processing scripts are changed.
# + kernel="SoS"
# %rerun --fastq-files ctrl.fastq case.fastq
# -
# An added benefit of specifying input and output of steps is it allows you to create an archive of your project with all input, output, and intermediate files. This can be achieved using command
#
# ```bash
# % sos pack -o myanalysis.sar
# ```
#
# if only one workflow has been executed under the current directory. The output is a compressed archive with extension `.sar` and can be examined and unpacked using command `sos unpack`.
# ## Use make-rule to define resource-providing steps
# Instead of using runtime signature to avoid re-running the first step, we can also make step 1 an optional step that will be executed only necessary. That is to say, we can define this step as an `auxiliary step` that will only be called when the file it **provides** does not exist.
# + kernel="SoS"
# %run
#!/usr/bin/env sos-runner
#fileformat=SOS1.0
# This script aligns raw reads of a control and a mutated sample
# to the reference genome and compare the expression values
# of the samples at genes A, B and C.
# Two input files in .fastq formats. The first one for control sample
# and the second one for mutated sample.
parameter: fastq_files=['control.fastq', 'mutated.fastq']
[build_index: provides='STAR_index/chrName.txt']
# create a index for reference genome
run:
STAR --runMode genomeGenerate --genomeFastaFile human38.fastq \
--genomeDir STAR_index
[1]
# align the reads to the reference genome
input: fastq_files
depends: 'STAR_index/chrName.txt'
output: ['aligned/control.out.tab', 'aligned/mutated.out.tab']
run: expand=True
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn {_input[0]} --quantMode GeneCounts \
--outFileNamePrefix aligned/control
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn {_input[1]} --quantMode GeneCounts \
--outFileNamePrefix aligned/mutated
[2]
# compare expression values
output: 'myfigure.pdf'
R: expand=True
control.count <- read.table('{input[0]}')
mutated.count <- read.table('{input[1]}')
# normalize, compare, output etc, ignored.
pdf('{_output}')
# plot results
dev.off()
# -
# The new script consists of two steps, and an auxiliary step `build_index` that will only be executed when `STAR_index/chrName.txt` is not available. A slight difference between this version and the previous one is that while the previous version will always execute the first step, this version will not execute it if `STAR_index/chrName.txt` has been generated before in any way, perhaps not by SoS.
# + kernel="SoS"
# %rerun
# -
# ## Execute long-running jobs externally
# The first step will take a long time to execute. Instead of executing them by SoS, you might want to submit the commands to a job-queue to be executed and monitored externally. This is especially useful when you need to execute multiple SoS workflows on a cluster-based system. Without going through the details on how to set up your job-queue (see another tutorial for details), it is almost trivial to modify your script to define part of a **step process** to a **task**:
# + kernel="SoS"
# %run
#!/usr/bin/env sos-runner
#fileformat=SOS1.0
# This script aligns raw reads of a control and a mutated sample
# to the reference genome and compare the expression values
# of the samples at genes A, B and C.
# Two input files in .fastq formats. The first one for control sample
# and the second one for mutated sample.
parameter: fastq_files=['control.fastq', 'mutated.fastq']
[build_index: provides='STAR_index/chrName.txt']
# create a index for reference genome
run:
STAR --runMode genomeGenerate --genomeFastaFile human38.fastq \
--genomeDir STAR_index
[1]
# align the reads to the reference genome
input: fastq_files
depends: 'STAR_index/chrName.txt'
output: ['aligned/control.out.tab', 'aligned/mutated.out.tab']
task:
run: expand=True
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn {_input[0]} --quantMode GeneCounts \
--outFileNamePrefix aligned/control
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn {_input[1]} --quantMode GeneCounts \
--outFileNamePrefix aligned/mutated
[2]
# compare expression values
output: 'myfigure.pdf'
R: expand=True
control.count <- read.table('{input[0]}')
mutated.count <- read.table('{input[1]}')
# normalize, compare, output etc, ignored.
pdf('{_output}')
# plot results
dev.off()
# -
# As you can see, the only difference is the insertion of `task:` directive before `run`. Now, when you execute the command, the `STAR` commands will be executed externally and you can monitor the status of the jobs using your web browser. If no job-queue is set up, the command will be executed in a separate process.
# ## Execute steps in parallel
# Although the two steps of this example have to be executed sequentially, the first step runs the `STAR` command twice on two input files, and can be executed in parallel. You can tell this to SoS by modifying the script as follows
# + kernel="SoS"
#!/usr/bin/env sos-runner
#fileformat=SOS1.0
# This script aligns raw reads of a control and a mutated sample
# to the reference genome and compare the expression values
# of the samples at genes A, B and C.
# Two input files in .fastq formats. The first one for control sample
# and the second one for mutated sample.
parameter: fastq_files=['control.fastq', 'mutated.fastq']
[build_index: provides='STAR_index/chrName.txt']
# create a index for reference genome
run:
STAR --runMode genomeGenerate --genomeFastaFile human38.fastq \
--genomeDir STAR_index
[1]
# align the reads to the reference genome
sample_type = ['control', 'mutated']
input: fastq_files, group_by='single', paired_with='sample_type'
depends: 'STAR_index/chrName.txt'
output: f"aligned/{_sample_type}.out.tab"
task: concurrent=True
run: expand=True
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn {_input:q} --quantMode GeneCounts \
--outFileNamePrefix aligned/{_sample_type}
[2]
# compare expression values
output: 'myfigure.pdf'
R: expand=True
control.count <- read.table('{input[0]}')
mutated.count <- read.table('{input[1]}')
# normalize, compare, output etc, ignored.
pdf('{_output}')
# plot results
dev.off()
# -
# Here we
#
# 1. Use option `group_by='single'` to pass input one by one to action. The action will be executed twice with `_input` set to the first and second input file respectively.
# 2. Define a variable `sample_type` and pair it with input files (option `paired_with`). This will generate a variable `_sample_type` for each input file so `_sample_type` will be `control` for the first input file, and `mutated` for the second.
# 3. Use `{_input}` and `{_sample_type}` to define partial `output`.
# 4. Use `{_input:q}` instead of `{_input}` in the script. This is a small trick to shell-quote filenames so that filenames with spaces and other special characters can be properly quoted in shell commands.
#
# Now, if you execute the script with option `-j 2` (2 concurrent processes),
#
# ```bash
# sos run myanalysis.sos --input control1.fastq control2.fastq -j 2
# ```
#
# the second step submits two jobs to the job-queue, or executes them in two separate processes if a job-queue is not set up.
# ## Generating a report
# In addition to functions such as `run` and `R` that execute scripts in different languages, SoS provides a number of other functions (called actions) that can be used to, for example, generate reports using command `pandoc` or `Rmarkdown`. For example, by adding a step with a `pandoc` action, you can compose a script in Markdown format and generate a nice-looking report in HTML format.
# + kernel="SoS"
# %preview report.html
# %run
#!/usr/bin/env sos-runner
#fileformat=SOS1.0
# This script aligns raw reads of a control and a mutated sample
# to the reference genome and compare the expression values
# of the samples at genes A, B and C.
# Two input files in .fastq formats. The first one for control sample
# and the second one for mutated sample.
parameter: fastq_files=['control.fastq', 'mutated.fastq']
[build_index: provides='STAR_index/chrName.txt']
# create a index for reference genome
run:
STAR --runMode genomeGenerate --genomeFastaFile human38.fastq \
--genomeDir STAR_index
[1]
# align the reads to the reference genome
sample_type = ['control', 'mutated']
input: fastq_files, group_by='single', paired_with='sample_type'
depends: 'STAR_index/chrName.txt'
output: f"aligned/{_sample_type[0]}.out.tab"
run: expand=True
STAR --genomeDir STAR_index --outSAMtype BAM SortedByCoordinate \
--readFilesIn {_input:q} --quantMode GeneCounts \
--outFileNamePrefix aligned/{_sample_type[0]}
report: output='align.md', active=-1, expand=True
## Alignment of reads
* input file: {_input}
* output file: {_output}
[2]
# compare expression values
output: 'myfigure.pdf'
R: expand=True
control.count <- read.table('{_input[0]}')
mutated.count <- read.table('{_input[1]}')
# normalize, compare, output etc, ignored.
pdf('{_output}')
# plot results
dev.off()
report: output='analysis.md', active=-1, expand=True
## Statistical analysis
* input file: {_input}
* Figure: {_output}
[3]
pandoc: input=['align.md', 'analysis.md'], output='report.html'
# An analysis of two RNA Seq samples
# -
# In this example, partial reports are prepared whenever a step completes, and a final step is used combine all partial reports (with a header) and process it with pandoc.
#
# Please refer to [the report generation tutorial](Generating_Reports.html) for details.
# We have showed you multiple versions of the same SoS script, each using more features of SoS. This actually demonstrates one of the advantages of the SoS system, namely you can start using SoS in minutes without knowing any of its advanced features, and gradually improve your script if needed.
| src/tutorials/Quick_Start_Guide.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import gym
import random
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from collections import deque
print("Gym:", gym.__version__)
print("Tensorflow:", tf.__version__)
class QNetwork():
    """Pair of Q networks ("local" and "target") for Double DQN, in TF1 graph mode.

    Placeholders take a batch of states, the actions taken, the TD targets,
    and per-sample importance weights (supplied by the prioritized replay
    buffer). The loss is the importance-weighted mean squared TD error, and
    `updater` soft-copies local weights toward the target network at rate
    `tau` (Polyak averaging).
    """
    def __init__(self, state_dim, action_size, tau=0.01):
        # Fresh graph per agent: placeholders/variables below are rebuilt each time.
        tf.reset_default_graph()
        self.state_in = tf.placeholder(tf.float32, shape=[None, *state_dim])
        self.action_in = tf.placeholder(tf.int32, shape=[None])
        self.q_target_in = tf.placeholder(tf.float32, shape=[None])
        # Importance-sampling weights from the prioritized replay buffer.
        self.importance_in = tf.placeholder(tf.float32, shape=[None])
        action_one_hot = tf.one_hot(self.action_in, depth=action_size)
        self.q_state_local = self.build_model(action_size, "local")
        self.q_state_target = self.build_model(action_size, "target")
        # Select Q(s, a) for the action actually taken via the one-hot mask.
        self.q_state_action = tf.reduce_sum(tf.multiply(self.q_state_local, action_one_hot), axis=1)
        self.error = self.q_state_action - self.q_target_in
        # Importance-weighted MSE, as prescribed by prioritized experience replay.
        self.loss = tf.reduce_mean(tf.multiply(tf.square(self.error), self.importance_in))
        self.optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(self.loss)
        # Variables are grouped by the scope names used in build_model.
        self.local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="local")
        self.target_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="target")
        # Soft update: target <- target + tau * (local - target).
        self.updater = tf.group([tf.assign(t, t + tau*(l-t)) for t,l in zip(self.target_vars, self.local_vars)])
    def build_model(self, action_size, scope):
        """Build one Q network (single 100-unit ReLU hidden layer) under `scope`."""
        with tf.variable_scope(scope):
            hidden1 = tf.layers.dense(self.state_in, 100, activation=tf.nn.relu)
            q_state = tf.layers.dense(hidden1, action_size, activation=None)
            return q_state
    def update_model(self, session, state, action, q_target, importance):
        """Run one optimizer step plus a target soft-update; return per-sample TD errors."""
        feed = {self.state_in: state, self.action_in: action, self.q_target_in: q_target, self.importance_in: importance}
        error, _, _ = session.run([self.error, self.optimizer, self.updater], feed_dict=feed)
        return error
    def get_q_state(self, session, state, use_target=False):
        """Return Q-values for `state` from the target (if use_target) or local network."""
        q_state_op = self.q_state_target if use_target else self.q_state_local
        q_state = session.run(q_state_op, feed_dict={self.state_in: state})
        return q_state
class PrioritizedReplayBuffer():
    """Replay buffer that samples experiences proportionally to their priority.

    Experiences and their priorities are kept in two deques of the same
    maxlen and are always appended together, so index i refers to the same
    transition in both.
    """
    def __init__(self, maxlen):
        self.buffer = deque(maxlen=maxlen)
        self.priorities = deque(maxlen=maxlen)
    def add(self, experience):
        """Store an experience with the current maximum priority (1 if empty)."""
        self.buffer.append(experience)
        self.priorities.append(max(self.priorities, default=1))
    def get_probabilities(self, priority_scale):
        """Return sampling probabilities: priority ** scale, normalized to sum to 1."""
        scaled = np.array(self.priorities) ** priority_scale
        return scaled / sum(scaled)
    def get_importance(self, probabilities):
        """Importance-sampling weights, normalized so the largest weight is 1."""
        raw = 1/len(self.buffer) * 1/probabilities
        return raw / max(raw)
    def sample(self, batch_size, priority_scale=1.0):
        """Draw up to batch_size experiences by priority.

        Returns (columns, importance_weights, indices) where columns is an
        iterator of per-field lists (states, actions, ...).
        """
        k = min(len(self.buffer), batch_size)
        probs = self.get_probabilities(priority_scale)
        chosen = random.choices(range(len(self.buffer)), k=k, weights=probs)
        picked = np.array(self.buffer)[chosen]
        weights = self.get_importance(probs[chosen])
        return map(list, zip(*picked)), weights, chosen
    def set_priorities(self, indices, errors, offset=0.1):
        """Set priority to |TD error| + offset (offset keeps priorities non-zero)."""
        for idx, err in zip(indices, errors):
            self.priorities[idx] = abs(err) + offset
class DDQNAgent():
    """Double DQN agent with prioritized experience replay, run in a TF1 session."""
    def __init__(self, env):
        self.state_dim = env.observation_space.shape
        self.action_size = env.action_space.n
        self.q_network = QNetwork(self.state_dim, self.action_size)
        self.replay_buffer = PrioritizedReplayBuffer(maxlen=100000)
        self.gamma = 0.97  # discount factor
        self.eps = 1.0  # epsilon-greedy exploration rate, decayed per episode in train()
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
    def get_action(self, state):
        """Epsilon-greedy action: random with probability eps, else argmax Q(s, .)."""
        q_state = self.q_network.get_q_state(self.sess, [state])
        action_greedy = np.argmax(q_state)
        action_random = np.random.randint(self.action_size)
        action = action_random if random.random() < self.eps else action_greedy
        return action
    def get_env_action(self, action):
        # Identity hook; subclasses may map a discrete index to the env's action format.
        return action
    def train(self, state, action, next_state, reward, done, use_DDQN=True, a=0.0):
        """Store the transition, then train on a prioritized batch of 50.

        Double DQN: next actions chosen by the local network, evaluated by
        the target network (when use_DDQN). `a` is the prioritization
        exponent passed to the replay buffer's sampling.
        """
        self.replay_buffer.add((state, action, next_state, reward, done))
        (states, actions, next_states, rewards, dones), importance, indices = self.replay_buffer.sample(50, priority_scale=a)
        # Action selection from the local (online) network.
        next_actions = np.argmax(self.q_network.get_q_state(self.sess, next_states, use_target=False), axis=1)
        # Action evaluation from the target network when use_DDQN is True.
        q_next_states = self.q_network.get_q_state(self.sess, next_states, use_target=use_DDQN)
        # Terminal states contribute no future value.
        q_next_states[dones] = np.zeros([self.action_size])
        q_next_states_next_actions = q_next_states[np.arange(next_actions.shape[0]), next_actions]
        q_targets = rewards + self.gamma * q_next_states_next_actions
        # importance**(1-eps) anneals the importance-sampling correction as eps decays.
        errors = self.q_network.update_model(self.sess, states, actions, q_targets, importance**(1-self.eps))
        self.replay_buffer.set_priorities(indices, errors)
        # Decay exploration once per episode, floored at 0.1.
        if done: self.eps = max(0.1, 0.98*self.eps)
    def __del__(self):
        # Release the TF session when the agent is garbage-collected.
        self.sess.close()
class DiscretizedDQNAgent(DDQNAgent):
    """DDQN agent that discretizes a continuous action space into n_actions bins."""
    def __init__(self, env, n_actions=10):
        # A genuinely discrete space is used as-is; otherwise patch the space
        # so the parent agent sees n_actions discrete choices, and precompute
        # the evenly spaced continuous actions they map to.
        self.is_discrete = type(env.action_space) == gym.spaces.discrete.Discrete
        if not self.is_discrete:
            env.action_space.n = n_actions
            self.actions = np.linspace(env.action_space.low, env.action_space.high, n_actions)
        super().__init__(env)
    def get_env_action(self, action):
        """Translate a discrete action index into the env's native action format."""
        if self.is_discrete:
            return action
        return [self.actions[action, 0]]
# Candidate Gym environments; indices 2 and 3 have continuous action spaces.
env_names = ['CartPole-v0',
             'MountainCar-v0',
             'MountainCarContinuous-v0',
             'Pendulum-v0',
             'Acrobot-v1']
# Index 4 selects Acrobot-v1.
env = gym.make(env_names[4])
print("Observation space:", env.observation_space)
print("Action space:", env.action_space)
# +
# Run num_runs independent training runs and collect per-episode total rewards.
num_runs = 1
run_rewards = []
for n in range(num_runs):
    print("Run {}".format(n))
    ep_rewards = []
    # Drop the previous agent before building a new one for this run.
    agent = None
    agent = DiscretizedDQNAgent(env)
    num_episodes = 100
    for ep in range(num_episodes):
        state = env.reset()
        total_reward = 0
        done = False
        while not done:
            action = agent.get_action(state)
            next_state, reward, done, info = env.step(agent.get_env_action(action))
            # Train on every transition; a=0.7 is the prioritization exponent.
            agent.train(state, action, next_state, reward, done, a=0.7)
            env.render()
            total_reward += reward
            state = next_state
        ep_rewards.append(total_reward)
        print("Episode: {}, total_reward: {:.2f}".format(ep, total_reward))
    run_rewards.append(ep_rewards)
env.close()
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# Plot the trailing-100-episode average reward for each run.
# Fix: the original passed `color=col` with `col` undefined, which raised a
# NameError; rely on matplotlib's default color cycle (one color per run).
for n, ep_rewards in enumerate(run_rewards):
    x = range(len(ep_rewards))
    cumsum = np.cumsum(ep_rewards)
    # Cumulative mean for the first 100 episodes, then a 100-episode window.
    avgs = [cumsum[ep]/(ep+1) if ep<100 else (cumsum[ep]-cumsum[ep-100])/100 for ep in x]
    plt.plot(x, avgs, label=n)
plt.title("Prioritized Replay performance")
plt.xlabel("Episode")
plt.ylabel("Last 100 episode average rewards")
plt.legend()
| agents/test/Discretized_Prioritized_Exp_Replay_hillClimb_DDQN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Flagon Python3
# language: python
# name: flagon-conda-env
# ---
# Cross Time Sampling
# ===
#
# Sampling from January 2014 (the start of the sampled data period; see `start_date` below) to December 2019.
#
# A sample is generated for each month. (n=100000)
#
# Each sample is saved in a mock json file to be used for `revscoring extract`.
#
# Then, `revscoring extract` and `revscoring dump` are used to generate TSV feature files for each of the samples.
#
# Note that each section of this notebook is designed to be run independently, with no dependencies from other sections other than this intro section.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import os
from tqdm import tqdm
import bz2
import gzip
import json
import re
import hashlib
from datetime import datetime
import nltk
import scipy.stats
import para
from itertools import groupby
from collections import Counter
# git_root_dir = !git rev-parse --show-toplevel
# (jupytext-escaped IPython shell magic; returns a one-element list)
git_root_dir = git_root_dir[0]
git_root_dir
raw_data_dir = "/export/scratch2/wiki_data"
derived_data_dir = os.path.join(git_root_dir, "data", "derived")
raw_data_dir, derived_data_dir
working_dir = os.path.join(derived_data_dir, 'stub-history-all-revisions')
os.makedirs(working_dir, exist_ok=True)
working_dir
# Sample period boundaries (Jan 2014 to Jan 2020) as Unix timestamps,
# used below to filter revisions by rev_timestamp.
start_date = datetime.fromisoformat('2014-01-01')
start_timestamp = int(start_date.timestamp())
end_date = datetime.fromisoformat('2020-01-01')
end_timestamp = int(end_date.timestamp())
start_timestamp, end_timestamp
# ### Load in all revisions
#
# This is a 29GB text file!
#
# We load it in, before sorting and filtering it and saving it back to a more efficient format.
# Time the load: this reads only the two needed columns of the 29GB CSV.
start = datetime.now()
rev_ids_filepath = os.path.join(working_dir, 'rev_ids.csv')
df = pd.read_csv(rev_ids_filepath,
                 header=None,
                 names=['page_id', 'rev_id', 'rev_timestamp', 'user_text', 'user_id', 'is_revert_target', 'is_reverted', 'is_reverting'],
                 usecols=['rev_id', 'rev_timestamp']
)
print(f"{datetime.now() - start}")
len(df)
start = datetime.now()
df = df.sort_values(by='rev_timestamp')
print(f"{datetime.now() - start}")
# Keep only revisions inside [start_timestamp, end_timestamp].
total_revisions = len(df)
df = df[(df.rev_timestamp >= start_timestamp)&(df.rev_timestamp <= end_timestamp)]
revisions_in_sample_period = len(df)
print(f"{revisions_in_sample_period} / {total_revisions} ({revisions_in_sample_period / total_revisions * 100:.2f}%) revisions in the sample period.")
# save the dataframe as a pickle file
start = datetime.now()
revisions_filepath = os.path.join(working_dir, 'rev_ids_2014to2019.pkl')
df.to_pickle(revisions_filepath)
print(f"{datetime.now() - start}")
print("Finished.")
# save the dataframe as a csv file
start = datetime.now()
revisions_filepath = os.path.join(working_dir, 'rev_ids_2014to2019_sorted.csv')
df.to_csv(revisions_filepath, index=False)
print(f"{datetime.now() - start}")
print("Finished.")
# ### Process the data to generate the samples
# read the dataframe with the revision ids and timestamps
start = datetime.now()
revisions_filepath = os.path.join(working_dir, 'rev_ids_2014to2019.pkl')
df = pd.read_pickle(revisions_filepath)
print(f"{datetime.now() - start}")
len(df)
df.head()
# TODO if this process will be repeated, it would greatly benefit from parallelization
# Derive an integer YYYYMM month key (UTC) for each revision, e.g. 201401.
start = datetime.now()
df['month_id'] = df.rev_timestamp.map(lambda ts: int(datetime.utcfromtimestamp(ts).strftime('%Y%m')))
print(f"{datetime.now() - start}")
month_ids_in_sample = set(df.month_id)
len(month_ids_in_sample)
# the first month in the sample (Jan 2014) has approximately 3 million revisions
len(df[df.month_id == 201401])
month_sample_n = 100000
# Ensure the output directory exists before writing sample files
# (only `working_dir` itself was created earlier).
sample_dir = os.path.join(working_dir, "month_sample/rev_ids")
os.makedirs(sample_dir, exist_ok=True)
for month_id, group in tqdm(df.groupby(by='month_id'), total=len(month_ids_in_sample)):
    # month_id is an int like 201401; split it back into year and month strings.
    month_id_str = str(month_id)
    year = month_id_str[:4]
    month = month_id_str[4:]
    if len(group) < month_sample_n:
        # Months with too few revisions (e.g. partial edge months) are skipped.
        print(f"Not sampling for month {month}/{year}, which only has {len(group)} revisions.")
        continue
    # Uniform random sample of rev_ids for this month, one id per line.
    month_sample = group.sample(n=month_sample_n)
    sample_rev_ids_filepath = os.path.join(sample_dir, f"rev_ids_month_sample_{year}_{month}.txt")
    with open(sample_rev_ids_filepath, 'w') as outfile:
        for rev_id in month_sample.rev_id:
            outfile.write(str(rev_id) + "\n")
# Collect per-month revision counts and "MM/YY" tick labels for plotting.
xtick_labels = []
month_revisions_list = []
for month_id, group in tqdm(df.groupby(by='month_id'), total=len(month_ids_in_sample)):
    month_id_str = str(month_id)
    year = month_id_str[:4]
    month = month_id_str[4:]
    xtick_labels.append(f"{month}/{year[2:]}")
    month_revisions_list.append(len(group))
# The final month is dropped ([:-1]) -- presumably because it may be partial; confirm.
fig, ax = plt.subplots(1, 1, figsize=(10,5))
plt.plot(range(len(month_revisions_list)-1), month_revisions_list[:-1])
plt.xticks(range(0, len(month_revisions_list)-1, 6), xtick_labels[:-1][::6])
plt.title("Enwiki revision counts by month")
plt.xlabel("Month")
plt.ylabel("Total revisions")
plt.show()
# ### Generate mock JSON
#
# Generate mock JSON for use with `revscoring` and generate the appropriate revscoring bash script.
#
# +
# now generate mock JSON for month samples
rev_ids_dir = os.path.join(working_dir, 'month_sample/rev_ids')
mock_json_dir = os.path.join(working_dir, 'month_sample/mock_json')
revscoring_scripts_dir = os.path.join(working_dir, 'month_sample/revscoring_scripts')
revscoring_cache_dir = os.path.join(working_dir, 'month_sample/revscoring_cache')
revscoring_features_dir = os.path.join(working_dir, 'month_sample/revscoring_features')
# Ensure every output directory exists before files are written into them below.
for _out_dir in (mock_json_dir, revscoring_scripts_dir, revscoring_cache_dir, revscoring_features_dir):
    os.makedirs(_out_dir, exist_ok=True)
# One mock label line per revision; only rev_id matters for `revscoring extract`,
# the label fields are placeholders.
mock_template = '{"rev_id": %d, "auto_labeled": false, "damaging": false, "goodfaith": true, "autolabel": {}}'
# Bash template for the per-month extraction script (filled in via str.format below).
script_template = """#!/bin/bash
# Auto-generated script to use revscoring to extract features for a set of revisions
if [ -f {revscoring_features_filepath} ]; then
    echo "Skipping '$(basename {mock_json_filepath})' with existing features file."
else
    cat {mock_json_filepath} | revscoring extract editquality.feature_lists.enwiki.damaging editquality.feature_lists.enwiki.goodfaith --host https://en.wikipedia.org --extractors 32 --verbose > {revscoring_cache_filepath} \\
        || (echo "Failed to build feature caches" && exit 1)
    revscoring dump_cache --input {revscoring_cache_filepath} --output {revscoring_features_filepath} editquality.feature_lists.enwiki.damaging damaging \\
        || (echo "Failed to dump cache" && exit 1)
    echo "Successfully extracted '$(basename {mock_json_filepath})'."
fi
"""
# For each month sample file: write a mock-labeled JSON and a per-month
# extraction script, collecting the script paths for the driver script below.
revscoring_script_filepaths = []
for filename in tqdm(os.listdir(rev_ids_dir), total=72):
    if not filename.endswith(".txt"):
        continue
    rev_ids_filepath = os.path.join(rev_ids_dir, filename)
    rev_ids_list = []
    with open(rev_ids_filepath) as infile:
        for line in infile:
            if line.strip() != "":
                rev_ids_list.append(int(line.strip()))
    # Derive all output paths from the sample file's base name.
    fname_base = os.path.splitext(filename)[0]
    mock_json_filepath = os.path.join(mock_json_dir, fname_base + ".mock.json")
    revscoring_script_filepath = os.path.join(revscoring_scripts_dir, fname_base + '_revscoring.sh')
    revscoring_cache_filepath = os.path.join(revscoring_cache_dir, fname_base + ".mock.w_cache.json")
    revscoring_features_filepath = os.path.join(revscoring_features_dir, fname_base + ".mock.damaging.tsv")
    # One mock JSON line per revision id.
    with open(mock_json_filepath, 'w') as outfile:
        for rev_id in rev_ids_list:
            line = mock_template % rev_id
            outfile.write(line + "\n")
    # Fill in the bash template and write the per-month script.
    script_text = script_template.format(
        mock_json_filepath=mock_json_filepath,
        revscoring_cache_filepath=revscoring_cache_filepath,
        revscoring_features_filepath=revscoring_features_filepath
    )
    with open(revscoring_script_filepath, 'w') as outfile:
        outfile.write(script_text)
    revscoring_script_filepaths.append(revscoring_script_filepath)
# +
# Generate a single driver script that chains all per-month extraction scripts.
revscoring_all_script_base = """#!/bin/bash
# Auto-generated script that calls the sub-scripts to extract features for all month samples
cd /export/scratch2/levon003/repos/revscoring && conda activate wiki-revscoring
"""
revscoring_all_script = revscoring_all_script_base
for revscoring_script_filepath in revscoring_script_filepaths:
    # BUG FIX: this previously appended " && \\ \n" (backslash, SPACE, newline).
    # In bash, the backslash escapes the space rather than the newline, breaking
    # the line continuation; the backslash must be the last character on the line.
    revscoring_all_script += revscoring_script_filepath + " && \\\n"
revscoring_all_script += 'echo "Success." && exit 0\n'
revscoring_all_script += 'echo "Failure." && exit 1\n'
revscoring_all_script_filepath = os.path.join(working_dir, "month_sample/revscoring_extract_all.sh")
with open(revscoring_all_script_filepath, 'w') as outfile:
    outfile.write(revscoring_all_script)
print("Finished.")
# -
# Then, the permissions on all of these scripts should be manually set.
#
# i.e. `chmod +x *.sh`
| notebook/CrossTimeSampling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/chrismarkella/Kaggle-access-from-Google-Colab/blob/master/Nominal_one_column.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="A6gWYK0vgOFA" colab_type="code" colab={}
import numpy as np
import pandas as pd
# + id="kngt_WI0gTKe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="575dfc33-ea44-49aa-9aab-0a59f25d1a0f"
# Build a small demo frame with a single nominal (categorical) column,
# indexed A through E.
nominal_df = pd.DataFrame(
    {'Color': ['Red', 'Yellow', 'Blue', 'Green', 'Yellow']},
    index=list('ABCDE'),
)
# Work on a copy so the original stays pristine for the later encoding cells.
df = nominal_df.copy()
df
# + id="BstRClfVglSb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="f5ea2219-56b7-4104-82f7-6a7f269927ea"
# Label-encode the nominal column: each distinct color becomes an integer code.
# NOTE(review): this imposes an arbitrary ordering on an unordered categorical --
# fine for tree models, potentially misleading for linear ones.
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
df['Color encoded'] = label_encoder.fit_transform(df['Color'])
df
# + id="7-yGbL-KgzL0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="a830958d-15e6-48ac-e9c7-4f4aa4d46f2e"
# Start again from a fresh copy of the nominal frame.
df = nominal_df.copy()
df
# + id="YPruC0MvhXg7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="6c4a16aa-ed05-4beb-e068-36fb81b5cc10"
from sklearn.preprocessing import OneHotEncoder
# NOTE(review): `sparse` was renamed `sparse_output` in scikit-learn 1.2 --
# confirm the pinned sklearn version before upgrading.
one_hot_encoder = OneHotEncoder(sparse=False)
one_hot_encoded = one_hot_encoder.fit_transform(df)
one_hot_encoded
# + id="Q-q0Mj2the4S" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="89287cd6-5c42-46ad-d1ec-30e3f1a4449d"
# Use the learned categories as column names; list() replaces the redundant
# identity comprehension `[_ for _ in ...]`.
one_hot_df = pd.DataFrame(one_hot_encoded,
                          columns=list(one_hot_encoder.categories_[0]))
one_hot_df.index = df.index
one_hot_df
# + id="PUIDwPWwiMHJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="949e6245-c5db-4cfe-83f1-6defde0943a0"
# Side-by-side view of the original column and its one-hot encoding.
pd.concat([df, one_hot_df], axis='columns')
# + id="XBdXWWcsjA54" colab_type="code" colab={}
| Nominal_one_column.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 - AzureML
# language: python
# name: python3-azureml
# ---
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the MIT License.
# # Building a basic predictive maintenance model
# Simply put, **predictive maintenance (PdM)** is about pre-emptively finding and fixing flaws in a system (as long as it collects data over time, using sensors for example) in order to reduce downtime. Given a failure in some component or part of the system, we are asking how likely it is that this would result in system failure and downtime soon after.
# ## Loading and examining the data
# +
import os # standard lib for OS operations
import urllib.request # for downloading data
# plotting libs
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(rc={'figure.figsize':(15,8)}) # set figure size
# ML classifiers and the like
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn import svm
# metrics for evaluating results
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
# -
# Download the predictive-maintenance CSVs into ../data (created if missing).
os.makedirs('../data', exist_ok=True)
container = 'https://sethmottstore.blob.core.windows.net/predmaint/'
for csv_name in ('telemetry.csv', 'maintenance.csv', 'machines.csv',
                 'failures.csv', 'errors.csv', 'anoms.csv'):
    urllib.request.urlretrieve(container + csv_name, filename='../data/' + csv_name)
#urllib.request.urlretrieve(container + 'telemetry_w_anoms.csv', filename='../data/telemetry_w_anoms.csv')
# The relevant data sources for predictive maintenance include, but are not limited to:
# - **Machine operating conditions:** data of the equipment health over time (usually sensor-based and streaming). We will refer to this data as machine *telemetry data*.
# - **Error history:** this data contains logs of *non-breaking* errors that happen throughout a machine's operation and which parts of the machine they came from
# - **Failure history:** this data contains logs of severe errors that broke the machine down (requiring maintenance to operate again) and parts of the machine that caused it
# - **Maintenance/repair history:** what parts were fixed/replaced (as part of scheduled maintenance or due to failure) and when
# - **Equipment metadata:** anything we know about equipment and parts (such as make, model, etc.)
# ### Quiz
# <div class="alert alert-info">
# Pick two of the use cases [mentioned earlier](#usecases), and provide examples of the four kinds of data needed to perform PdM for those use cases.
# </div>
# +
# write solution here
# -
# From now on we will adopt to following consistent terminology to avoid confusion:
#
# - A system as a whole will be called a **machine** and its parts are called **components**
# - A machine can experience **errors** when anomalies happen. Errors do NOT result in shutdown, and they are NOT tied to any particular components, but they can cause one or several component to *eventually* fail.
# - A machine can experience **failure** when one of its components shuts down. This requires the component to be replaced before the machine can be operational again.
# - For our purposes, **maintenance** means a component was replaced. This can be either as part of a routine schedule or because the component failed (prior to its scheduled maintenance).
# Let's now begin loading all the data and looking at the kind of information it contains. We begin with the telemetry data.
# +
import pandas as pd

# Per-machine sensor readings over time (columns include volt, rotate,
# pressure, vibration -- see the error mapping further below).
df_telemetry = pd.read_csv('../data/telemetry.csv', header=0)
# Timestamps are US-style 12-hour strings, e.g. "1/1/2015 06:00:00 AM".
df_telemetry['datetime'] = pd.to_datetime(df_telemetry['datetime'], format="%m/%d/%Y %I:%M:%S %p")
df_telemetry.head()
# -
# Here's an example of the voltage for one machine over time.
ax = sns.lineplot(x="datetime", y="volt", data=df_telemetry.loc[df_telemetry['machineID'] == 1, ])
# Next we have the error logs, which contain information about **non-breaking** errors that happened over the course of the machine running.
# (anoms.csv holds the anomaly-detection-derived error log used here.)
df_errors = pd.read_csv('../data/anoms.csv', header=0)
df_errors['datetime'] = pd.to_datetime(df_errors['datetime'])
df_errors.head()
# We used **anomaly detection** to find errors in the above dataset. There are four kinds of errors, one for each of the telemetry variables we collect, namely voltage, rotation, pressure and vibration. There is a lot to be said about the topic of error detection. For examples, the errors we have here are univariate, meaning that we detect anomalies separately for each telemetry variable. We can also try a multi-variate anomaly detection algorithm. In this case, we could use a method like principal component analysis (PCA) to detect anomalies on the most important principal component(s).
# ### Lab
# + [markdown] hide_input=false solution2="hidden" solution2_first=true
# <div class="alert alert-info">
# A simple question we can ask is this: Do some errors happen more often in some machines than others? In other words, what is the distribution of errors across machines?
#
# Use `pd.crosstab` to answer the above question. Hint: use the `normalize` argument.
# </div>
# -
# Map telemetry-variable anomaly names to the error codes used downstream.
rep_dir = {"volt":"error1", "rotate":"error2","pressure":"error3","vibration":"error4"}
df_errors = df_errors.replace({"errorID": rep_dir})
# Error distribution across machines, normalized within each error type.
ct = pd.crosstab(df_errors['machineID'], df_errors['errorID'], rownames=['device'], colnames=['error'], normalize='columns')
# + hide_input=false solution2="hidden"
# %cat ../solutions/crosstab.py
# -
# <div class="alert alert-info">
# With so many machines, it may be easier to answer our question visually. We can pass the output of `pd.crosstab` directly to `sns.heatmap` to generate a heat map. How would you answer the question based on the heatmap below? Please provide examples.
# </div>
ax = sns.heatmap(ct, xticklabels=2, yticklabels=False)
# %cat ../solutions/heatmap.py
# ### End of lab
# We can visualize the errors that happen on a given machine to get a sense of how they spread over time.
# One machine's errors over 2015, visualized along a timeline.
df_subset = df_errors.loc[(df_errors.datetime.between('2015-01-01', '2016-01-01')) & (df_errors.machineID == 1)]
df_subset.head()
ax = sns.stripplot(x="datetime", y="errorID", data=df_subset, jitter=0)
del df_subset
# Let's now move on to the dataset that logs failures. As we can see, failures are logged by component (although any component failing will result in the machine as a whole failing).
df_fails = pd.read_csv('../data/failures.csv', header=0)
df_fails['datetime'] = pd.to_datetime(df_fails['datetime'], format="%m/%d/%Y %I:%M:%S %p")
df_fails.head()
# Now we look at the dataset of maintenance logs, which is also by component.
df_maint = pd.read_csv('../data/maintenance.csv', header=0)
df_maint['datetime'] = pd.to_datetime(df_maint['datetime'], format="%m/%d/%Y %I:%M:%S %p")
df_maint.head()
# ### Lab
# + [markdown] hide_input=false solution2="hidden" solution2_first=true
# <div class="alert alert-info">
# For each component, find the percentage of replacements that are due to component failure (as opposed to scheduled maintenance).
# </div>
# +
# Replacement counts (maintenance log) vs failure counts (failure log) per
# component; their ratio estimates the share of replacements that were failure-driven.
df_counts = pd.DataFrame({'replacements' : df_maint.groupby(['comp']).count()['machineID'],
                          'failures' : df_fails.groupby(['failure']).count()['machineID']})
df_counts['percent_due_to_failure'] = df_counts['failures'] / df_counts['replacements']
df_counts
# + solution2="hidden"
# %cat ../solutions/percent_replacements.py
# -
# ### End of lab
# We can obtain the same answer in a more detailed way by doing an **outer join** of the maintenance logs and the failure logs to see how many records matched and where they came from (in `pd.merge` we can use the `indicator=True` argument to get a column called `_merge` that indicates if the keys were present in the left, right, or both datasets).
# Outer join of maintenance and failure logs on (datetime, machineID, comp);
# `indicator=True` adds a `_merge` column recording which log(s) each record came from.
df_join = pd.merge(left=df_maint, right=df_fails.rename(columns={'failure':'comp'}), how = 'outer', indicator=True,
                   on=['datetime', 'machineID', 'comp'], validate='one_to_one')
df_join.head()
# - If a record is present in the left dataset only, it represents a working component being replaced due to scheduled maintenance.
# - If a record is present in the right dataset only, it represents a failed component that was not replaced immediately. This case should be rare since it would result in downtime.
# - If a record is present in both datasets, it represents a failed component that was immediately replaced (we can also call this **un-**scheduled maintenance).
# We can run `pd.crosstab` to get counts for each of the above categories, broken up by component.
ct = pd.crosstab(df_join['comp'], df_join['_merge'], margins=True)
ct.rename(columns={"left_only":"not_failed_but_replaced", "right_only":"failed_not_replaced", "both":"failed_and_replaced"})
# We can confirm that the second category is rare. This is usually the case in cases where downtime can result in significant costs.
# The last dataset we look at is the machine metadata. In this case, we only have information about the model and age of the machine.
# Machine metadata: model and age for each machineID.
df_machines = pd.read_csv('../data/machines.csv', header=0)
df_machines.head()
# We are now ready to move on to the next phase, where we gradually combine our datasets into one dataset that will be used for modeling and contains the features we think will be useful.
# ## Feature engineering
# Our approach to getting the data ready for modeling will consist mainly of two things:
#
# - for the telemetry data, we get rolling aggregates (means and standard deviation)
# - for the error, failure and maintenance logs, we get obtain the number of hours since each of these events happened
#
# We then combine the result of the above two datasets into one, and add the machine metadata at the end. For the most part the feature engineering steps described above are relatively straight-forward, but in some cases we need to process the data in creative ways to get the results we want.
# +
df_left = df_telemetry.loc[:, ['datetime', 'machineID']] # key table (datetime, machineID) that all feature sets are joined onto
# strip the text prefixes so event codes become small ints, giving consistent auto-generated column names
df_errors['error'] = df_errors['errorID'].apply(lambda x: int(x[-1]))
df_maint['comp'] = df_maint['comp'].apply(lambda x: int(x[-1]))
df_fails['failure'] = df_fails['failure'].apply(lambda x: int(x[-1]))
# -
# Let's begin with a function that will give us rolling mean and standard deviation for the telemetry data.
# +
import numpy as np
def get_rolling_aggregates(df, colnames, suffixes, window, on, groupby, lagon = None):
    """
    Calculate per-group rolling means and standard deviations.

    Arguments:
    df -- dataframe to run it on
    colnames -- names of columns we want rolling statistics for
    suffixes -- two-element list of suffixes for the new columns: [mean_suffix, sd_suffix]
    window -- the lag (number of rows) over which rolling statistics are calculated
    on -- keep every `on`-th row of the result (the sampling interval)
    groupby -- the column used to group results by
    lagon -- the name of the datetime column used to compute lags (if none specified it defaults to row number)

    Returns:
    a dataframe with rolling statistics over a specified lag calculated at the specified interval
    """
    rolling_colnames = [c + suffixes[0] for c in colnames]
    df_rolling_mean = df.groupby(groupby).rolling(window=window, on=lagon)[colnames].mean()
    df_rolling_mean.columns = rolling_colnames
    # reset_index keeps the group key and datetime as columns on the mean frame
    df_rolling_mean.reset_index(inplace=True)
    rolling_colnames = [c + suffixes[1] for c in colnames]
    # standard deviation computed as sqrt of the rolling variance
    df_rolling_sd = df.groupby(groupby).rolling(window=window, on=lagon)[colnames].var()
    df_rolling_sd.columns = rolling_colnames
    df_rolling_sd = df_rolling_sd.apply(np.sqrt)
    # drop=True discards the sd frame's group index so the positional concat
    # below lines up row-for-row with the reset mean frame
    df_rolling_sd.reset_index(inplace=True, drop=True)
    df_res = pd.concat([df_rolling_mean, df_rolling_sd], axis=1)
    # subsample: keep every `on`-th row (rows on-1, 2*on-1, ...)
    df_res = df_res.loc[df_res.index % on == on-1]
    return df_res
# -
# We will apply this function twice, once to get rolling aggregates using a sliding window of 3 hours collected every 3 hours, and a second time to get rolling aggregates using a sliding window of 12 hours also collected every 3 hours.
# +
# The last four telemetry columns are the sensor readings.
cols_to_average = df_telemetry.columns[-4:]
# 3-hour window, sampled every 3 hours.
df_telemetry_rolling_3h = get_rolling_aggregates(df_telemetry, cols_to_average,
                                                 suffixes = ['_ma_3', '_sd_3'],
                                                 window = 3, on = 3,
                                                 groupby = 'machineID', lagon = 'datetime')
# df_telemetry_rolling_3h.head(20)
# -
# 12-hour window, also sampled every 3 hours.
df_telemetry_rolling_12h = get_rolling_aggregates(df_telemetry, cols_to_average,
                                                  suffixes = ['_ma_12', '_sd_12'],
                                                  window = 12, on = 3,
                                                  groupby = 'machineID', lagon = 'datetime')
# df_telemetry_rolling_12h.head(20)
# We can combine both results into a single table and back-fill any missing values.
df_telemetry_rolling = pd.concat([df_telemetry_rolling_3h, df_telemetry_rolling_12h.drop(['machineID', 'datetime'], axis=1)],
                                 axis=1, sort = True)
# df_telemetry_rolling.head()
df_telemetry_feat_roll = df_left.merge(df_telemetry_rolling, how="inner", on=['machineID', 'datetime'], validate = "one_to_one")
# Back-fill NaNs introduced at the start of each rolling window.
df_telemetry_feat_roll.fillna(method='bfill', inplace=True)
df_telemetry_feat_roll.head()
# Free the intermediate frames.
del df_telemetry_rolling, df_telemetry_rolling_3h, df_telemetry_rolling_12h
# We now write a function that takes care of extracting features showing when events (errors, failures, replacements) occured. The data is then passed to the same 3-hour sliding filter as the telemetry data. Using a rolling max function, we compute if there was an event sometime in the last 3 hours. Finally we compute time elapsed since the last event.
#
# We use the following naming convention for the column names in the final dataset. For a given machine at a given date and time:
#
# - `e_1` is a flag indicating if error 1 occured, likewise for `e_2` through `e_5`
# - `de_1` is a numeric feature that represents the hours elapsed since the last time error 1 occured, likewise for `de_2` through `de_5`
# - `m_1` is a flag indicating if component 1 was replaced, likewise for `m_2` through `m_4`
# - `dm_1` is a numeric feature that represents the hours elapsed since the last time component 1 was replaced, likewise for `dm_2` through `dm_4`
# - `f_1` is a flag indicating if component 1 failed, likewise for `f_2` through `f_4`
# - `df_1` is a numeric feature that represents the hours elapsed since the last time component 1 failed, likewise for `df_2` through `df_4`
#
# Finally, we will use `f_1` through `f_4` to create the targets `y_1` through `y_4`:
# - `y_1` is a flag indicating if component 1 is about to fail, likewise for `y_2` through `y_4`
def get_datetime_diffs(df_left, df_right, catvar, prefix, window, on, lagon = None, diff_type = 'timedelta64[h]', validate = 'one_to_one', show_example = True):
    """
    Find the last time an event (error, failure, maintenance) happened over a
    sliding window and the time elapsed since.

    Arguments:
    df_left -- dataframe with keys (machineID, datetime)
    df_right -- dataframe with events (in this case: errors, failures, or maintenance)
    catvar -- the column in df_right which encodes events
    prefix -- prefix to add to new column names
    window -- the lag (number of rows) over which the rolling max is calculated
    on -- keep every `on`-th row of the result (the sampling interval)
    lagon -- the name of the datetime column used to compute lags (if none specified it defaults to row number)
    diff_type -- the format to convert time differences to (hours is the default)
    validate -- set to 'one_to_one' to ensure the validity of the merge operation
    show_example -- prints a randomly chosen example so we can eyeball the results

    Returns:
    the dataframe with, for each event category:
    - a dummy column (prefix_k) flagging that the event happened in the window
    - a difference column (dprefix_k) with the time elapsed since the event last occurred
    """
    # create dummy columns and merge them with left data
    keys = ['machineID', 'datetime']
    df_dummies = pd.get_dummies(df_right[catvar], prefix=prefix)
    df_wide = pd.concat([df_right.loc[:, keys], df_dummies], axis=1)
    df_wide = df_wide.groupby(keys).sum().reset_index()
    df = df_left.merge(df_wide, how="left", on=keys, validate = validate).fillna(0)
    # run a rolling window through event flags to aggregate data
    dummy_col_names = df_dummies.columns
    df = df.groupby('machineID').rolling(window=window, on=lagon)[dummy_col_names].max()
    df.reset_index(inplace=True)
    # keep every `on`-th row, matching the telemetry sampling interval
    df = df.loc[df.index % on == on-1]
    df.reset_index(inplace=True, drop=True)
    # first remaining row per machine; used to seed the "last event" columns
    df_first = df.groupby('machineID', as_index=False).nth(0)
    # calculate the time of the last event and the time elapsed since
    for col in dummy_col_names:
        whenlast, diffcol = 'last_' + col, 'd' + col
        df.loc[:, col].fillna(value = 0, inplace=True)
        # let's assume an event happened in row 0, so we don't have missing values for the time elapsed
        df.iloc[df_first.index, df.columns.get_loc(col)] = 1
        # record the event's own timestamp wherever the flag is set
        df.loc[df[col] == 1, whenlast] = df.loc[df[col] == 1, 'datetime']
        # for the first occurence we don't know when it last happened, so we assume it happened then
        df.iloc[df_first.index, df.columns.get_loc(whenlast)] = df.iloc[df_first.index, df.columns.get_loc('datetime')]
        # forward-fill so every row knows the most recent event time
        df[whenlast].fillna(method='ffill', inplace=True)
        # df.loc[df[whenlast] > df['datetime'], whenlast] = np.nan
        df.loc[df[whenlast] <= df['datetime'], diffcol] = (df['datetime'] - df[whenlast]).astype(diff_type)
        df.drop(columns = whenlast, inplace=True)
    if show_example == True:
        # show a random flagged row with a few rows of context
        col = np.random.choice(dummy_col_names, size = 1)[0]
        idx = np.random.choice(df.loc[df[col] == 1, :].index.tolist(), size = 1)[0]
        print('Example:\n')
        print(df.loc[df.index.isin(range(idx-3, idx+5)), ['datetime', col, 'd' + col]])
    return df
# Error-event features: e_* flags plus de_* hours-since-last-error columns.
df_errors_feat_roll = get_datetime_diffs(df_left, df_errors, catvar='errorID', prefix='e', window = 6, lagon = 'datetime', on = 3)
df_errors_feat_roll.tail()
df_errors_feat_roll.loc[df_errors_feat_roll['machineID'] == 2, :].head()
# Maintenance features: m_* flags plus dm_* hours-since-replacement columns.
df_maint_feat_roll = get_datetime_diffs(df_left, df_maint, catvar='comp', prefix='m',
                                        window = 6, lagon = 'datetime', on = 3, show_example=False)
df_maint_feat_roll.tail()
df_maint_feat_roll.loc[df_maint_feat_roll['machineID'] == 2, :].head()
# Failure features: f_* flags plus df_* hours-since-failure columns.
df_fails_feat_roll = get_datetime_diffs(df_left, df_fails, catvar='failure', prefix='f',
                                        window = 6, lagon = 'datetime', on = 3, show_example=False)
df_fails_feat_roll.tail()
# ### Combine features in one dataset
#
# We now combine all four datasets into one dataset called `df_all`. First we check of course that all data frames have the same dimensions.
# All four feature frames must have the same number of rows before a
# positional column-wise concat is safe.
assert(df_errors_feat_roll.shape[0] == df_fails_feat_roll.shape[0] == df_maint_feat_roll.shape[0] == df_telemetry_feat_roll.shape[0])
# +
df_all = pd.concat([df_telemetry_feat_roll,
                    df_errors_feat_roll.drop(columns=['machineID', 'datetime']),
                    df_maint_feat_roll.drop(columns=['machineID', 'datetime']),
                    df_fails_feat_roll.drop(columns=['machineID', 'datetime'])], axis = 1, verify_integrity=True)
# df_all = pd.merge(left=df_telemetry_feat_roll, right=df_all, on = ['machineID', 'datetime'], validate='one_to_one')
# Attach machine metadata (model, age) many-to-one on machineID.
df_all = pd.merge(left=df_all, right=df_machines, how="left", on='machineID', validate = 'many_to_one')
# -
# Free the intermediate frames to reduce memory pressure.
del df_join, df_left
del df_telemetry_feat_roll, df_errors_feat_roll, df_fails_feat_roll, df_maint_feat_roll
# ### Lab
# + [markdown] hide_input=false solution2="shown" solution2_first=true
# <div class="alert alert-info">
# This may be a good place to stop and look at the correlation matrix for all the features we have in the data. We expect some obvious correlations, but let's see if we get any less obvious ones too. We will use `sns.heatmap` to visualize the correlation matrix.
# </div>
# -
import seaborn as sns
corr = df_all.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
# %cat ../solutions/correlation_matrix.py
# + [markdown] hide_input=false
# <div class="alert alert-info">
# Describe what you see in the correlation matrix. What would relatively high correlations between `m_1` through `m_4` suggest? What about the relatively high correlations between `m_1` through `m_4` and `f_1` through `f_4`?
# </div>
# + [markdown] hide_input=false
# <div class="alert alert-info">
# We can export the data for one of the machines to a CSV file. Export the subset of the data corresponding to the machine with ID 51 to CSV, then download the CSV file and open it in Excel to examine its content. Comment on what you see.
# </div>
# -
df_all.loc[(df_all['machineID'] == 51), :].sort_values(['datetime', 'machineID']).to_csv('bla.csv')
# %cat ../solutions/export_csv.py
# ### End of lab
# Let's look at all the features we've so far added to the data.
df_all.info()
# The last step in data prep is for us to create labels for the PdM model. You might wonder why we don't just use `f_1` through `f_4` as our labels, since they indicate when a machine failed. In fact we could, but PdM is not about predicting when a machine fails, but predicting when it's **about to fail**. So it's better to create labels that indicate the state of the machine shortly prior to failure (how far back we want to go is something we need to determine).
# ### Lab
# <div class="alert alert-info">
# This is a difficult coding exercise, so we've done part of the work for you already. So far we know that each machine has four components, and we have a feature column for each, called `f_1`, `f_2`, `f_3`, and `f_4` which tell us when a component failed. Using these features, we want to create four labels called `y_1`, `y_2`, `y_3`, and `y_4` which tell us when a component is about to fail. To be more precise, initialize with `y_1 = 0` and for a given machine, let `y_1 = 1` whenever the date and time is anywhere between 3 hours and 2 days prior to a failure occurring. Similarly compute `y_2`, `y_3`, and `y_4`. Places where you need to enter code are marked as `## YOUR CODE GOES HERE`. HINT: Use the `Timedelta` method for `datetime` column types.
# </div>
for i in range(1, 5): # iterate over the four components
# find all the times a component failed for a given machine
df_temp = df_all.loc[df_all['f_' + str(i)] == 1, ['machineID', 'datetime']]
label = 'y_' + str(i) # name of target column (one per component)
## YOUR CODE GOES HERE (initialize y_i = 0)
for n in range(df_temp.shape[0]): # iterate over all the failure times
machineID, datetime = df_temp.iloc[n, :]
## YOUR CODE GOES HERE (set y_i = 1 whenever datetime is between 2 days and 3 hours prior to failure)
# # %load ../solutions/compute_labels.py
# Solution: derive the labels y_1..y_4 from the failure indicators f_1..f_4.
# For every recorded failure of component i, set y_i = 1 on all rows of the
# same machine whose timestamp falls in the window from 2 days to 3 hours
# before the failure (the "about to fail" period we want to predict).
for i in range(1, 5): # iterate over the four components
    # find all the times a component failed for a given machine
    df_temp = df_all.loc[df_all['f_' + str(i)] == 1, ['machineID', 'datetime']]
    label = 'y_' + str(i) # name of target column (one per component)
    df_all[label] = 0  # initialize: default is "not about to fail"
    for n in range(df_temp.shape[0]): # iterate over all the failure times
        machineID, datetime = df_temp.iloc[n, :]
        dt_end = datetime - pd.Timedelta('3 hours') # 3 hours prior to failure
        dt_start = datetime - pd.Timedelta('2 days') # 2 days prior to failure
        if n % 500 == 0:  # log progress only every 500 failures
            print("a failure occured on machine {0} at {1}, so {2} is set to 1 between {4} and {3}".format(machineID, datetime, label, dt_end, dt_start))
        df_all.loc[(df_all['machineID'] == machineID) &
                   (df_all['datetime'].between(dt_start, dt_end)), label] = 1
# <div class="alert alert-info">
# To run the above script change the magic `%cat` to `%load` which will load the content of the script into the cell. Then select the cell a second time and run it.
# </div>
# ### End of lab
# +
import itertools
ct = pd.concat([pd.crosstab(df_all['y_' + str(i)], df_all['f_' + str(i)]) for i in range(1, 5)], axis=1)
ct.columns = ['f' + str(i) + '=' + str(j) for i, j in itertools.product(range(1, 5), range(2))]
ct
# -
# A word of caution here is in order. We should more carefully examine the distribution of the labels across machines. A brief glance at it for 10 randomly chosen machines shows that the distribution for `y_3` and `y_4` is not evenly distributed and that many machines contain only negative labels (because `f_3` and `f_4` are 0) while the machines with positive labels show a large number of failures. Problems like this can cause bias in our models, even when such differences can be legitimately explained away by differences in the underlying components. As an example of the kind of problem we may run into, consider this: If in the modeling phase we choose to split the data into training and testing by machine ID (assign some machine IDs to training and the rest to testing), we will need to ensure that machines with both positive and negative labels are well represented in both datasets.
# +
import itertools
ct = pd.concat([pd.crosstab(df_all['machineID'],
df_all['y_' + str(i)]) for i in range(1, 5)], axis=1)
ct.columns = ['y_' + str(i) + '=' + str(j) for i, j in itertools.product(range(1, 5), range(2))]
ct.loc[np.random.randint(1, 100, 10)]
# -
# ## Modeling
# See [here](https://docs.microsoft.com/en-us/azure/machine-learning/team-data-science-process/cortana-analytics-playbook-predictive-maintenance#modeling-techniques-for-predictive-maintenance) for more about modeling approaches for PdM.
# We constructed a binary label that can be used to predict the probability that the system will fail in the next $T$ time steps (48 hours, based on our specified choice). If explanability is also a goal here, then we should prefer models that can also help us explain the root cause of a failure.
#
# We have two ways of splitting the data into training and testing:
# - we choose a cut-off time $T_c$ such that the training data is all the data before $T_c - w$ and the test data is all the data after $T_c$, where $w$ is a safe margin to make sure that we avoid leakage into the training data when we label the data
# - we split training and test set based on machine ID so that assets show up in one or the other split
# For your benefit, here is a list of [solution templates for predictive maintenance](https://docs.microsoft.com/en-us/azure/machine-learning/team-data-science-process/cortana-analytics-playbook-predictive-maintenance#solution-templates-for-predictive-maintenance).
df_all.columns
# Let's begin by splitting the data into training and test sets, based on a date cut-off.
# +
X_drop = ['datetime', 'machineID', 'f_1', 'f_2', 'f_3', 'f_4', 'y_1', 'y_2', 'y_3', 'y_4', 'model']
Y_keep = ['y_1', 'y_2', 'y_3', 'y_4']
X_train = df_all.loc[df_all['datetime'] < '2015-10-01', ].drop(X_drop, axis=1)
y_train = df_all.loc[df_all['datetime'] < '2015-10-01', Y_keep]
X_test = df_all.loc[df_all['datetime'] > '2015-10-15', ].drop(X_drop, axis=1)
y_test = df_all.loc[df_all['datetime'] > '2015-10-15', Y_keep]
# -
# %store X_train ../data
# %store X_test ../data
# %store y_train ../data
# %store y_test ../data
# ### Lab
# <div class="alert alert-info">
# Report the number of failures that occur in the training and test data. Do you think the split is adequate or should we split based on a different cut-off? If so, do you recommend a higher or lower cut-off?
# </div>
print(pd.DataFrame({"train": y_train.apply(sum, axis = 0), "test": y_test.apply(sum, axis = 0)}))
# %cat ../solutions/train_test_failures.py
# ### End of lab
# We can now train our model. We have chosen an MLP (multi-layer perceptron) as our model, which is a basic neural network model.
pipeline = Pipeline([('scaler', StandardScaler()), ('classifier', MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 10), random_state=1))])
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_test)
# ### Lab
# <div class="alert alert-info">
# Print the confusion matrix and precision and recall for each of the four failure types. You can use the functions `confusion_matrix` and `classification report` to do the computation for you. The rows in the matrix represent actual cases of non-failure and failure. The columns represent predicted cases. How is precision and recall calculated from the confusion matrix?
# </div>
# +
# write solution here
# +
# # %load ../solutions/confusion_matrix.py
print("confusion matrix:")
for y_idx in range(4):
print("---------------- for y_" + str(y_idx+1))
print(confusion_matrix(y_test.values[:, y_idx], y_pred[:, y_idx]))
print("\nclassification report:")
print(classification_report(y_test, y_pred))
print("AUC = {}".format(roc_auc_score(y_test, y_pred, average='weighted')))
# -
# ### End of lab
# Finally, let's create ROC plots for each type of failure.
sns.set(rc={'figure.figsize':(18,5)})
# +
from sklearn.metrics import auc, roc_curve

plt.close('all')

# One ROC panel per failure type, sharing axes so the curves are comparable.
fig, axs = plt.subplots(ncols=4, sharex=True, sharey=True)
for y_idx in range(4): # choose one of the outcomes
    fpr, tpr, thresholds = roc_curve(y_test.values[:, y_idx], y_pred[:, y_idx])
    roc_auc = auc(fpr, tpr)
    # The targets are named y_1..y_4, so offset the zero-based column index
    # in the title (previously this mislabeled the panels as y_0..y_3).
    axs[y_idx].set_title('ROC of y_' + str(y_idx + 1))
    axs[y_idx].set_ylabel('TPR')
    axs[y_idx].set_xlabel('FPR')
    axs[y_idx].plot(fpr, tpr, 'b', label = 'AUC = {0:.2f}'.format(roc_auc))
    axs[y_idx].legend(loc = 'lower right')
    axs[y_idx].plot([0, 1], [0, 1],'r--')  # chance diagonal for reference
plt.show()
# -
# Let's store the training and test data so we can reload it in the next Notebook.
# # The end
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the MIT License.
| lab02.1_PdM_Model_Development/train_basic_PdM_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + code_folding=[0]
# Import packages needed & load image
# Standard library
import gc
import pickle
import platform
from tkinter import Tk
from tkinter.filedialog import askopenfilename, asksaveasfilename
from tkinter.messagebox import showerror  # used by the error dialogs below

# Third-party
import cv2
import IPython.display as Disp
import matplotlib
import matplotlib.image as mpimg
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import mplcursors
import numpy as np
import pandas as pd
from ipywidgets import widgets
from matplotlib.widgets import RectangleSelector, PolygonSelector
from osgeo import gdal
from PIL import Image
from skimage import io, draw

# Local
import general_funcs
# OS related settings: pick an interactive matplotlib backend per platform.
if platform.system() == 'Windows':
    # # %matplotlib nbagg
    # Sometimes tk/qt will not let cells rerun after an ERROR occurs
    # # %matplotlib tk
    # %matplotlib qt
    # `pass` keeps this branch valid in the .py (script) form, where the
    # magics above are plain comments and the body would otherwise be empty.
    pass
elif platform.system() == 'Darwin':
    Tk().withdraw()
    # %matplotlib osx
elif platform.system() == 'Linux':
    # Fixed: the original compared the `platform` *module* to the strings
    # 'linux'/'linux2' (always False), and used a bare `;` which is a
    # SyntaxError. No backend override is needed on Linux.
    pass
# This line of "print" must exist right after %matplotlib command,
# otherwise JN will hang on the first import statement after this.
print('Interactive plot activated')
# Extend width of Jupyter Notebook Cell to the size of browser
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
# image_file = askopenfilename(title='Load image file', initialdir='./data/field_image')
# image_file_ref = askopenfilename(title='Load image file', initialdir='./data/field_image')
# mtp_file = askopenfilename(title='Load mtp file', initialdir='./data/mtp')
# mtp_file_ref = askopenfilename(title='Load mtp file', initialdir='./data/mtp')
image_file = "/Users/lj/Library/Mobile Documents/com~apple~CloudDocs/Developer/RGB-IR_Field_Image_Processing/data/field_image/BRC/BRC_20190904_121517_Thermal.tif"
image_ref_file = "/Users/lj/Library/Mobile Documents/com~apple~CloudDocs/Developer/RGB-IR_Field_Image_Processing/data/field_image/BRC/BRC_20190814_141033_RGB.tif"
mtp_file = "/Users/lj/Library/Mobile Documents/com~apple~CloudDocs/Developer/RGB-IR_Field_Image_Processing/data/mtp/BRC_20190904_121517_Thermal_mtp.pkl"
mtp_ref_file = "/Users/lj/Library/Mobile Documents/com~apple~CloudDocs/Developer/RGB-IR_Field_Image_Processing/data/mtp/BRC_20190814_141033_RGB_mtp.pkl"
# Load the manual tie points (mtp) for the target image.
try:
    with open(mtp_file, 'rb') as f:
        mtp = pickle.load(f)
except Exception as e:
    # NOTE(review): `showerror` is not imported in this notebook's import
    # cell -- it needs `from tkinter.messagebox import showerror`; confirm
    # it is not provided elsewhere (e.g. via general_funcs).
    showerror(type(e).__name__, str(e))
# Load the manual tie points for the reference image.
try:
    with open(mtp_ref_file, 'rb') as f:
        mtp_ref = pickle.load(f)
except Exception as e:
    showerror(type(e).__name__, str(e))
ds_ref = gdal.Open(image_ref_file)
gt_ref = ds_ref.GetGeoTransform()
mtp_ref_geo = []
for point in mtp_ref:
geo_loc = general_funcs.pix2geo(point, gt_ref)
mtp_ref_geo.append(geo_loc)
ds = gdal.Open(image_file)
gt = ds.GetGeoTransform()
mtp_geo = []
for point in mtp:
geo_loc = general_funcs.pix2geo(point, gt)
mtp_geo.append(geo_loc)
mtp_ref_geo = np.array(mtp_ref_geo)
mtp_geo = np.array(mtp_geo)
shift_geo = mtp_ref_geo - mtp_geo
# plot_loc_file_ref = askopenfilename(title='Load plot location file', initialdir='./data/plot_location')
plot_loc_file_ref = "/Users/lj/Library/Mobile Documents/com~apple~CloudDocs/Developer/RGB-IR_Field_Image_Processing/data/plot_location/BRC_20190814_141033_RGB_plot_loc_TEST.pkl"
try:
with open(plot_loc_file_ref, 'rb') as f:
interested_area_ref = pickle.load(f)
plot_vertices_gps_ref = pickle.load(f)
plot_notes_ref = pickle.load(f)
except Exception as e:
showerror(type(e).__name__, str(e))
plot_vertices_ref = general_funcs.plotVGPS2plotV(plot_vertices_gps_ref, gt_ref)
plot_vertices = {}
plot_vertices_before_transform = {}
for plot_name in plot_vertices_ref.keys():
one_plot_vertices_ref = plot_vertices_ref[plot_name]
one_plot_vertices = []
one_plot_vertices_before_transform = []
for pix_loc_ref in one_plot_vertices_ref:
geo_loc_ref = general_funcs.pix2geo(pix_loc_ref, gt_ref)
point_shift_geo = general_funcs.find_point_in_img_ref(pix_loc_ref, mtp, mtp_ref, shift_geo)
geo_loc = geo_loc_ref - point_shift_geo
pix_loc = general_funcs.geo2pix(geo_loc, gt)
pix_loc_before_transform = general_funcs.geo2pix(geo_loc_ref, gt)
one_plot_vertices.append(pix_loc)
one_plot_vertices_before_transform.append(pix_loc_before_transform)
one_plot_vertices = np.array(one_plot_vertices)
one_plot_vertices_before_transform = np.array(one_plot_vertices_before_transform)
plot_vertices[plot_name] = one_plot_vertices
plot_vertices_before_transform[plot_name] = one_plot_vertices_before_transform
# plot_vertices = np.array(plot_vertices)
# plot_vertices_before_transform = np.array(plot_vertices_before_transform)
img = io.imread(image_file)
# img_ref = io.imread(image_ref_file)
layer_RGB, layer_IR, layer_mask = general_funcs.extract_layers(img)
# layer_RGB_ref, layer_IR_ref, layer_mask_ref = general_funcs.extract_layers(img_ref)
h, w, d = img.shape
# h_ref, w_ref, d_ref = img_ref.shape
# +
fig, ax = plt.subplots(figsize=(7, 7))
plt.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95)
if d == 5 or d == 4:
myax = ax.imshow(layer_RGB)
elif d == 2:
mask_not_0_inds = np.where(layer_mask > 0)
vmin, vmax = general_funcs.cal_vmin_vmax(layer_IR, layer_mask)
myax = ax.imshow(layer_IR, cmap='gist_gray', vmin=vmin, vmax=vmax)
cbar = fig.colorbar(myax)
for plot_name in plot_vertices.keys():
one_plot_vertices = plot_vertices[plot_name]
polygon = patches.Polygon(one_plot_vertices, True, facecolor = matplotlib.colors.to_rgba('red', 0.05), edgecolor=matplotlib.colors.to_rgba('orange', 0.5))
ax.add_patch(polygon)
text_loc = np.mean(one_plot_vertices, 0)
axtx = ax.text(text_loc[0], text_loc[1], plot_name, ha='center', va='center')
| Image_Processing_X-Plot_Loc_Transform.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="X0dDyOnbzIxr"
# #%config Completer.use_jedi = False
# + id="A3NlZoWlzIx0"
import pandas as pd
import numpy as np
# + [markdown] id="EijJS1m6zIx2"
# ###### Loading and exploring data, spanish is the original language of dataset
# + id="sOxzPUa1zIx6"
properties = pd.read_excel('outputs/propiedades_pasto_11_05_21_3.xlsx',engine='openpyxl', index_col=0)
# + id="d1X5taejzIx8" outputId="c2047e94-4278-4728-9a8f-954248d15148"
properties.head(4)
# + id="45RRW_RlzIx-" outputId="87099bd8-acca-4291-e4a0-3a3c84b1d95a"
properties.info()
# + id="zz3Eu5jnzIyA" outputId="80041856-479d-420b-9609-c7b910b1d7a0"
properties.describe()
# + id="_msPIfKozIyD" outputId="8a1e9795-e56e-4800-8c14-0b7bdce7c087"
apartments = properties.where(properties['v_type'] == "Apartamento").dropna(subset=['v_coordinate_x','v_coordinate_y']).reset_index().drop(columns='index')
apartments.info()
# + id="Lcjgiut-zIyH" outputId="5679bccb-05f1-44e4-ebec-34380ad66061"
apartments[~apartments['v_size'].notnull()]
# + id="lUUh8x3lzIyK" outputId="846ffb14-a959-465d-d27e-1d7dccf0c6db"
sub_apartments = apartments[["v_price","v_size"]].dropna()
sub_apartments.info()
# + [markdown] id="GbeiXXa6zIyM"
# We can see, just a few apartments have information about size, we are going to explore "price" and "size" dimensions to view a possible solution to complement the null values
# + id="TtBw1DiqzIyN"
import matplotlib.pyplot as plt
import seaborn as sns
# + id="yBeIwonMzIyO" outputId="48b652e3-b1ac-45eb-b920-e5c094a5652f"
plt.figure()
plt.scatter(sub_apartments.v_price/10000000,sub_apartments.v_size)
plt.title("Apartments Price vs Apartment size")
plt.xlabel("Price * 10^-7 COP")
plt.ylabel("Size(square meter)")
plt.show()
print("We can see something like a linear correlation between both variables")
# + id="j-lMIzh8zIyP" outputId="b977a00b-64a4-43c8-d7ea-4c4c2abe0cba"
sub_apartments.corr()
# + [markdown] id="XAJbIMklzIyQ"
# 0.78 is a strong positive correlation, we are going to try a linear regression between both variables
# + id="21aJ7htQzIyQ"
from sklearn.model_selection import train_test_split
# + id="VXHol5oozIyR"
X_train, X_test, y_train, y_test = train_test_split((sub_apartments['v_price']/10000000), sub_apartments['v_size'],test_size=0.2)
X_train = np.array(X_train).reshape(len(X_train),1)
X_test = np.array(X_test).reshape(len(X_test),1)
y_train = np.array(y_train).reshape(len(y_train),1)
y_test = np.array(y_test).reshape(len(y_test),1)
# + id="-Lt_I52yzIyS"
# Fixed mangled name `LinearRegressiongression`, which raised ImportError;
# the cell below instantiates `LinearRegression()`.
from sklearn.linear_model import LinearRegression
# + id="MbT3oNE1zIyT"
reg = LinearRegression()
reg = reg.fit(X_train,y_train)
# + id="JaBpJGNUzIyU"
y_train_pred = reg.predict(X_train)
y_test_pred = reg.predict(np.array(X_test).reshape(len(X_test),1))
# + id="OS7_3-8PzIyV"
#Now we see if our results are trustworthy
def kpi_ML(y_train, y_train_pred, y_test, y_test_pred, name=''):
df = pd.DataFrame(columns=['MAE','RMSE','Bias'], index=['Train','Test'])
df.index.name = name
df.loc['Train','MAE'] = 100*np.mean(abs(y_train - y_train_pred))/np.mean(y_train)
df.loc['Train','RMSE'] = 100*np.sqrt(np.mean((y_train - y_train_pred)**2))/np.mean(y_train)
df.loc['Train','Bias'] = 100*np.mean((y_train - y_train_pred))/np.mean(y_train)
df.loc['Test','MAE'] = 100*np.mean(abs(y_test - y_test_pred))/np.mean(y_test)
df.loc['Test','RMSE'] = 100*np.sqrt(np.mean((y_test - y_test_pred)**2))/np.mean(y_test)
df.loc['Test','Bias'] = 100*np.mean((y_test - y_test_pred))/np.mean(y_test)
df = df.astype(float).round(1)
print(df)
# + id="zL-juNOzzIyW" outputId="5ec9a947-f301-495e-9bf9-e74339ffc2eb"
kpi_ML(y_train, y_train_pred, y_test, y_test_pred, name='Linear Regression')
# + id="D6PFhvpczIyY" outputId="fea4ed92-9021-4bfe-a71b-5a5044b1b931"
plt.figure()
plt.scatter(sub_apartments.v_price/10000000,sub_apartments.v_size, c='blue')
plt.scatter(X_train,y_train_pred, c='green')
plt.scatter(X_test,y_test_pred, c='red')
plt.title("Apartments Price vs Apartment size")
plt.xlabel("Price * 10^-7 [COP]")
plt.ylabel("Size[square meter]")
plt.show()
# + id="mffAsxO7zIyZ" outputId="98cded8a-72ef-4cd0-ef4e-16777a56b92e"
X = (sub_apartments['v_price']/10000000)
y = sub_apartments['v_size']
X = np.array(X).reshape(-1,1)
y = np.array(y).reshape(-1,1)
R_sq = reg.score(X,y)
print('This model explains {}% of real variable'.format(round(R_sq*100,2)))
# + [markdown] id="4TzWBJFzzIya"
# ###### As we can see, this model has some KPIs far from being perfect, but they are acceptable to apply to our dataset's apartment sizes
# + id="vGyiW9iNzIyb"
apartments_new = apartments.copy()
apartments_new = apartments_new.dropna(subset=['v_price']).reset_index()
# + id="qbp-tGGgzIyc"
# Impute the missing apartment sizes from price using the fitted regression.
indexes = apartments_new[~apartments_new['v_size'].notnull()].index.to_list()
for row in indexes:
    price = apartments_new.loc[row,'v_price']/10000000  # same 10^-7 scaling used at training time
    size = reg.predict(np.array(price).reshape(-1,1))   # predict() expects a 2-D array
    apartments_new.loc[row,'v_size'] = size[0][0]
# NOTE(review): drop() is not in-place here -- this only *displays* the frame
# without the 'index' column; apartments_new itself still keeps it. Confirm
# whether `apartments_new = apartments_new.drop(...)` was intended.
apartments_new.drop(columns=['index'])
# + id="yBAKGEwWzIyc" outputId="b174a2b0-77a9-417a-8376-3f6878b5925f"
apartments_new.info()
# + id="HSU6xw74zIyd"
apartments_new['v_price_red'] = apartments_new['v_price']/10000000
# + [markdown] id="t6h4Ep9EzIyd"
# Now we have an almost full dataset, and we are going to clusterize our apartments, before data normalization
# + id="7CXVLLElzIyd" outputId="933f6e18-336e-42b1-bdf9-90ece0553a80"
apartments_new.columns
# + id="LMx14P4ezIye" outputId="dd51d6c4-79be-44be-abf4-98ca065fd21e"
sns.set(style='ticks',color_codes=True)
sns.pairplot(apartments_new, vars=['v_coordinate_x', 'v_coordinate_y', 'v_size', 'v_price_red'])
plt.show()
# + id="YD9ryx9GzIye"
def scaler(df):
    """Normalize each column: subtract its mean, divide by its range.

    Constant columns have a zero range; the resulting 0/0 entries become
    NaN and are replaced with 0, so such columns come out as all zeros.
    """
    col_range = df.max(axis=0) - df.min(axis=0)
    centered = df - df.mean(axis=0)
    return (centered / col_range).fillna(0)
# + id="LxP7SfpWzIyf" outputId="1379f542-3a50-4dd4-9e52-a1ad1d3014e4"
apartments_new[['v_coordinate_x', 'v_coordinate_y', 'v_size',
'v_rooms', 'v_baths', 'v_price_red']].dropna().info()
# + id="2cLBtnxEzIyf"
apartments_norm = scaler(apartments_new[['v_coordinate_x', 'v_coordinate_y', 'v_size',
'v_rooms', 'v_baths', 'v_price_red']].dropna())
# + id="o0RIX-8JzIyg" outputId="461a3fc4-c654-446d-b40c-5a5faa18c043"
sns.boxplot(data = apartments_norm)
# + id="wIJfMFtXzIyg" outputId="211ae3fd-0ac4-4b3c-ac37-38244ec328c4"
apartments_norm.columns
# + [markdown] id="vyWBh2EhzIyh"
# #### CLUSTERING
# + id="_1FZ6XHPzIyh"
from sklearn.cluster import KMeans
# + [markdown] id="YfTYbiXwzIyi"
# ##### KMEANS
# + [markdown] id="cigXKsLxzIyi"
# ###### Inertia
# + id="_A-IYUvazIyi" outputId="949abd09-ade5-4fe8-bbe9-c8151f2358ab"
results = []
for n in range(1,10):
kmeans = KMeans(n_clusters=n, random_state=0).fit(apartments_norm)
results.append([n,kmeans.inertia_])
results = pd.DataFrame(data=results,columns=['Number of clusters','Inertia']).set_index('Number of clusters')
results.plot()
# + id="mEED8oTHzIyj" outputId="5b155f48-d015-46a5-a985-8b2624d12c21"
kmeans = KMeans(n_clusters=5).fit(apartments_norm)
r_group = kmeans.predict(apartments_norm)
r_group = pd.Series(data=r_group, index=apartments_norm.index)
r_group
# + id="1cDEtzSIzIyj" outputId="388b2feb-8c07-4b88-8bbb-e89aa4848e13"
centers = pd.DataFrame(data=kmeans.cluster_centers_,
columns=['v_coordinate_x', 'v_coordinate_y', 'v_size', 'v_rooms', 'v_baths', 'v_price_red'])
centers
# + id="KtPrANESzIyk"
apartments_new['KM_Group'] = r_group
# + id="RSSOyF_uzIyk" outputId="3365994e-0d62-4689-f6b8-58fd52519759"
apartments_new.groupby("KM_Group").mean()
# + id="B3GnDzG_zIyl" outputId="de94acab-7cba-4887-e556-71924cc6ee99"
apartments_new.groupby("KM_Group").median()
# + id="zVLny-tJzIyl" outputId="c4722a0b-1851-4ddd-e497-5a029005ab76"
apartments_new[apartments_new['KM_Group'].isnull()]
# + id="25NgHUaGzIyl" outputId="57618464-aeb4-4ffa-97dd-c3152c792871"
apartments_new['KM_Group'].value_counts()
# + id="XQ8sJxftzIym"
# New File with the information to upload to Tableau
apartments_new[['url','coordinates','v_coordinate_x', 'v_coordinate_y', 'v_price','v_size', 'v_rooms', 'v_baths','KM_Group']].to_csv('outputs/apartments_clust0.csv')
| 03_final_clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # What you will learn
# - Filters
# - Filters in relation to lambdas and maps
# ## What is a filter in Python?
#
# The filter() method filters the given sequence with the help of a function that tests each element in the sequence to be true or not.
#
# +
DUE = 1586062848.417618 # TSE time stamp
def testTime(timeStamp):
if timeStamp <= DUE:
return True
if __name__=="__main__":
someTimeStamps = [1586062838.417618, 1586062839.417618, 1586062840.417618, 1586062841.417618, 1586062842.417618, 1586062843.417618, 1586062844.417618, 1586062845.417618, 1586062846.417618, 1586062847.417618, 1586062848.417618, 1586062849.417618, 1586062850.417618, 1586062851.417618, 1586062852.417618, 1586062853.417618, 1586062854.417618, 1586062855.417618, 1586062856.417618, 1586062857.417618]
results = filter(testTime, someTimeStamps)
for thing in results:
print(thing)
# -
# ### Is this like a map?
#
# Well a little - but really no. Let me show you what this would look like if we used map
# +
DUE = 1586062848.417618 # TSE time stamp
def testTime(timeStamp):
if timeStamp <= DUE:
return True
if __name__=="__main__":
someTimeStamps = [1586062838.417618, 1586062839.417618, 1586062840.417618, 1586062841.417618, 1586062842.417618, 1586062843.417618, 1586062844.417618, 1586062845.417618, 1586062846.417618, 1586062847.417618, 1586062848.417618, 1586062849.417618, 1586062850.417618, 1586062851.417618, 1586062852.417618, 1586062853.417618, 1586062854.417618, 1586062855.417618, 1586062856.417618, 1586062857.417618]
results = map(testTime, someTimeStamps)
for thing in results:
print(thing)
# -
# ## Filter vs Map
#
# - The filter took our iterable object (a list) and ran it against a test function. Each item in our iterable object is checked against the test function and if it passes that test it will be added to the results
#
# - Map, on the other hand, will simply take whatever was returned from the function and add it to the results
# ### When is this useful
#
# Have you ever tried to search for something online? Maybe, on Door Dash you only wanted Asian food tonight. How does it return only those results... yes, some form of a filter!
#
# Want to only select students that are failing? Use a filter. Want to find what tests produced results that were outside of tolerance? Use a filter.
# ## Filters and lambdas
# +
# rewritting the above code with lambdas in place of functions
DUE = 1586062848.417618 # TSE time stamp
if __name__=="__main__":
someTimeStamps = [1586062838.417618, 1586062839.417618, 1586062840.417618, 1586062841.417618, 1586062842.417618, 1586062843.417618, 1586062844.417618, 1586062845.417618, 1586062846.417618, 1586062847.417618, 1586062848.417618, 1586062849.417618, 1586062850.417618, 1586062851.417618, 1586062852.417618, 1586062853.417618, 1586062854.417618, 1586062855.417618, 1586062856.417618, 1586062857.417618]
results = filter(lambda time: time <= DUE, someTimeStamps)
for thing in results:
print(thing)
# -
# #### Note
# - We can use lambda functions here in place of a function
# - This can help clean up code
# # What you need to do
#
# Below is the task from week 2 lession 6 - Time. Redo it using lambdas, maps, and filters. You can use
#
#
# + active=""
# if __name__=="__main__":
# -
# and
# + active=""
# def run():
# -
# as functions but only those two. The rest should be lambdas, maps, and filters
# #### Why am I reusing past assignments
#
# I am not trying to be lazy writing these lessons. Yes coming up with good assignments is the most time-consuming part but I want you to realize there are many ways to write a program. Each approach will have advantages and disadvantages of course. I hope that by rewriting code you can get a feeling for what paths of attack suit a particular problem best.
#
# I also hope that if you redo a problem you will solve it in a better way and realize mistakes you made the first time.
# ## The task
#
# At the bottom of this module there is a variable called hw. This is a dict that contains names of students and info
# about their homework including the grade they earned and what time they submitted it.
#
# Some students turned their homework in late. For each hour they turned it in late remove 5% of their grade. Create a master grade dict with the adjusted grades. If a student forgot to turn in their homework (the grade is None) then give them a 0.
#
# The homework was due on November 1st at 6pm (don't worry about the year).
#
# Your output should look like ...
# {
# "Amy": {
# "Final Grade": 39
# },
# "Brad": {
# "Final Grade: 0
# }, ...
# }
hw ={
"Amy": {
"grade": 64,
"Time": 1572676823.0
},
"Brad": {
"grade": None,
"Time": 1572677809.0
},
"Alex": {
"grade": 60,
"Time": 1572590298.0
},
"Dillon": {
"grade": None,
"Time": 1572591061.0
},
"Rhianna": {
"grade": 89,
"Time": 1572590301.0
},
"Sara": {
"grade": None,
"Time": 1572590639.0
},
"Jordan": {
"grade": 97,
"Time": 1572674570.0
},
"Hannah": {
"grade": None,
"Time": 1572591401.0
},
"Abi": {
"grade": 68,
"Time": 1572589751.0
},
"Sam": {
"grade": 72,
"Time": 1572591009.0
},
"Cat": {
"grade": 85,
"Time": 1572588399.0
},
"Tomas": {
"grade": None,
"Time": 1572588220.0
},
"Ashly": {
"grade": None,
"Time": 1572591074.0
}
}
| Python_Jupyter_Training/Week_3/.ipynb_checkpoints/3_Filters-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# hide
# all_tutorial
! [ -e /content ] && pip install -Uqq mrl-pypi # upgrade mrl on colab
# # Adeno-Associated Virus Capsid Design
#
# >Using MRL to design AAV capsids
# ## Adeno-Associated Virus Capsid Design
#
# This tutorial runs an end to end workflow for designing AAV capsids
#
# Adeno-Associated Viruses (AAV) are small viruses that generally do not cause disease. The non-pathogenic nature of these viruses makes them an attractive target for gene therapy delivery vectors. One drawback to using AAV capsids for gene delivery is they can be neutralized by natural immunity. This sets up a protein engineering problem. We want to develop new variants of the virus that can evade the human immune system.
#
# We could approach this with a scanning mutation approach, but this would result in a high number of invalid sequences. Instead, we will build a score function based off laboratory data and use a generative model to exploit this score function. We hope this approach will give more realistic variants.
# ## Performance Notes
#
# The workflow in this notebook is more CPU-constrained than GPU-constrained due to the need to evaluate samples on CPU. If you have a multi-core machine, it is recommended that you uncomment and run the `set_global_pool` cells in the notebook. This will trigger the use of multiprocessing, which will result in 2-4x speedups.
#
# This notebook may run slow on Collab due to CPU limitations.
#
# If running on Collab, remember to change the runtime to GPU
# +
import sys
sys.path.append('..')
from mrl.imports import *
from mrl.core import *
from mrl.chem import *
from mrl.templates.all import *
from mrl.torch_imports import *
from mrl.torch_core import *
from mrl.layers import *
from mrl.dataloaders import *
from mrl.g_models.all import *
from mrl.vocab import *
from mrl.policy_gradient import *
from mrl.train.all import *
from mrl.model_zoo import *
from sklearn.metrics import r2_score
# -
os.makedirs('untracked_files', exist_ok=True)
# ## Data
#
# The dataset comes from the paper [Generative AAV capsid diversification by latent interpolation ](https://www.biorxiv.org/content/10.1101/2021.04.16.440236v1.full.pdf). The authors looked at a 28 amino acid section of the AAV2 VP3 protein shown to have immunological significance.
#
# The authors created a mutation library based on 564 naturally occurring sequences. They then trained a VAE model on this dataset, sampled new 28-AA sequences from this model, and tested them in the lab.
#
# We will use their laboratory data to build a score function. Then we will run a generative screen against that score function
# +
# download data
# # ! wget https://raw.githubusercontent.com/churchlab/Generative_AAV_design/main/Data/vae2021_processed_data.csv
# -
df = pd.read_csv('vae2021_processed_data.csv')
df.dropna(subset=['VAE_virus_S'], inplace=True)
df.head()
# Our metric of interest is `VAE_virus_S`. This is the log-2 ratio of the frequency of a variant in a virus pool relative to the frequency of the corresponding plasmid in the plasmid pool. Higher values indicate higher viability. The goal of the design is to produce variants predicted to beat the wildtype sequence. The dataset indicates which sequences are currently performing at this level
df.VAE_virus_S.hist(alpha=0.5, density=True, label='full_dataset')
df[(df.viable==1) & (df.beats_wt==1)].VAE_virus_S.hist(alpha=0.5, density=True, label='Beats WT')
plt.legend();
# ## Score Function
#
# Now we want to develop a score function for predicting capsid viability. We will use a CNN encoder with a MLP head to predict the score described above.
#
# Our input data will be token integers for amino acids. Note that fingerprint representations are a poor fit for peptide work because peptides contain many repeating substructures.
#
# We will train on 90% of the data and validate on the 10% held out.
train_df = df.sample(frac=0.9, random_state=42).copy()
valid_df = df[~df.index.isin(train_df.index)].copy()
# +
aa_vocab = CharacterVocab(AMINO_ACID_VOCAB)
train_ds = Text_Prediction_Dataset(train_df.aa.values, train_df.VAE_virus_S.values, aa_vocab)
test_ds = Text_Prediction_Dataset(valid_df.aa.values, valid_df.VAE_virus_S.values, aa_vocab)
# -
# This is the model we will use:
class Predictive_CNN(nn.Module):
def __init__(self,
d_vocab,
d_embedding,
d_latent,
filters,
kernel_sizes,
strides,
dropouts,
mlp_dims,
mlp_drops,
d_out,
outrange
):
super().__init__()
self.conv_encoder = Conv_Encoder(
d_vocab,
d_embedding,
d_latent,
filters,
kernel_sizes,
strides,
dropouts,
)
self.mlp_head = MLP(
d_latent,
mlp_dims,
d_out,
mlp_drops,
outrange=outrange
)
def forward(self, x):
encoded = self.conv_encoder(x)
out = self.mlp_head(encoded)
return out
# +
d_vocab = len(aa_vocab.itos)
d_embedding = 256
d_latent = 512
filters = [256, 512, 1024]
kernel_sizes = [7, 7, 7]
strides = [2,2,2]
dropouts = [0.2, 0.2, 0.2]
mlp_dims = [512, 256, 128]
mlp_drops = [0.2, 0.2, 0.2]
d_out = 1
outrange = [-10, 10]
virus_model = Predictive_CNN(
d_vocab,
d_embedding,
d_latent,
filters,
kernel_sizes,
strides,
dropouts,
mlp_dims,
mlp_drops,
d_out,
outrange
)
# -
r_agent = PredictiveAgent(virus_model, MSELoss(), train_ds, opt_kwargs={'lr':1e-3})
r_agent.train_supervised(32, 20, 1e-3)
# Optional: save score function weights
# +
# r_agent.save_weights('untracked_files/virus_predictor.pt')
# -
# Optional: to load the exact weights used, run the following:
# +
# r_agent.load_state_dict(model_from_url('virus_predictor.pt'))
# +
# validate
valid_dl = test_ds.dataloader(256, num_workers=0, shuffle=False)
r_agent.model.eval();
preds = []
targs = []
with torch.no_grad():
for i, batch in enumerate(valid_dl):
batch = to_device(batch)
x,y = batch
pred = r_agent.model(x)
preds.append(pred.detach().cpu())
targs.append(y.detach().cpu())
preds = torch.cat(preds).numpy()
targs = torch.cat(targs).numpy()
preds = preds.squeeze(-1)
# -
# Our score function has an r^2 value of about 0.883 on the validation dataset
# +
fig, ax = plt.subplots()
ax.scatter(targs, preds, alpha=0.5, s=1)
plt.xlabel('Target')
plt.ylabel('Prediction')
slope, intercept = np.polyfit(targs, preds, 1)
ax.plot(np.array(ax.get_xlim()), intercept + slope*np.array(ax.get_xlim()), c='r')
plt.text(5., 9., 'R-squared = %0.3f' % r2_score(targs, preds));
# -
# We should also take a look at the prediction distribution for our known actives. The predicted values will differ somewhat from the actual values. This will give us a sense of what score we want to see from the model
df['preds'] = r_agent.predict_data(df.aa.values).detach().cpu().numpy()
df.preds.max()
np.percentile(df.preds, 99)
# The maximum predicted value is `8.63`. A compound scoring `5.33` or higher would be in the top 1% of all known sequences
# ## Chemical Space
#
# Next we need to develop our chemical space. This is where we decide what sequences will be allowed and what sequences will be removed.
#
# Getting the right filtering parameters makes a huge difference in sequence quality. In practice, finding the right constraints is an iterative process. First run a generative screen. Then examine the highest scoring sequences and look for undesirable properties or structural features. Then update the template and iterate.
#
# For peptides, the presence of Arginine has shown to be toxic [[ref](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5625148/)]. We will apply a template filter for the number of Arginine residues per unit length.
#
# We will also limit the maximum residue frequency in a sample to 0.3. This prevents a common failure mode of seeing the same residue repeated multiple times (ie `MSSSSSSSRP`). This is a flaw in the simplistic score functions we are using
# +
aa_vocab = CharacterVocab(AMINO_ACID_VOCAB)
template = Template([ValidityFilter(),
CharacterCountFilter(['A'], min_val=0, max_val=0.1, per_length=True, mode='protein'),
CharacterCountFilter(aa_vocab.itos[4:], min_val=0, max_val=0.3,
per_length=True, mode='protein')],
[], fail_score=-10., log=False, use_lookup=False, mode='protein')
template_cb = TemplateCallback(template, prefilter=True)
# -
# ## Load Model
#
# We load the `LSTM_LM_Small_Swissprot` model. This is a basic LSTM-based language model trained on part of the Swissprot protein database
agent = LSTM_LM_Small_Swissprot(drop_scale=0.3, opt_kwargs={'lr':5e-5})
# ## Fine-Tune Model
#
# The pretrained model we loaded is a very general model that can produce a high diversity of structures. However, what we actually want are structures likely to form viable capsids. To induce this, we fine-tune on high scoring sequences.
#
# The dataset denotes two categories for high scoring sequences. The `viable` category contains ~3000 samples that were able to successfully assemble and package genetic material. The `beats_wt` category contains ~300 samples with higher expression than the wildtype variant.
#
# The `beats_wt` dataset feels a bit small for fine-tuning, so we will first fine-tune on the `viable` dataset, then the `beats_wt` dataset
# +
agent.update_dataset_from_inputs(df[df.viable==1].aa.values)
agent.train_supervised(32, 8, 5e-5)
agent.update_dataset_from_inputs(df[df.beats_wt==1].aa.values)
agent.train_supervised(32, 6, 5e-5)
agent.base_to_model()
# -
# Optional: save fine-tuned weights
# +
# agent.save_weights('untracked_files/finetuned_model.pt')
# +
# agent.load_weights('untracked_files/finetuned_model.pt')
# -
# # Reinforcement Learning
#
# Now we enter the reinforcement learning stage
# ### Loss
#
# We use `PPO` as our policy gradient loss
# +
pg = PPO(0.99,
0.5,
lam=0.95,
v_coef=0.5,
cliprange=0.3,
v_cliprange=0.3,
ent_coef=0.01,
kl_target=0.03,
kl_horizon=3000,
scale_rewards=True)
loss = PolicyLoss(pg, 'PPO',
value_head=ValueHead(256),
v_update_iter=2,
vopt_kwargs={'lr':1e-3})
# -
# ### Reward
#
# Here we pass the reward agent we trained earlier to a callback.
# +
aa_vocab = CharacterVocab(AMINO_ACID_VOCAB)
d_vocab = len(aa_vocab.itos)
d_embedding = 256
d_latent = 512
filters = [256, 512, 1024]
kernel_sizes = [7, 7, 7]
strides = [2,2,2]
dropouts = [0.2, 0.2, 0.2]
mlp_dims = [512, 256, 128]
mlp_drops = [0.2, 0.2, 0.2]
d_out = 1
outrange = [-10, 10]
reward_model = Predictive_CNN(
d_vocab,
d_embedding,
d_latent,
filters,
kernel_sizes,
strides,
dropouts,
mlp_dims,
mlp_drops,
d_out,
outrange
)
r_ds = Text_Prediction_Dataset(['M'], [0.], aa_vocab)
r_agent = PredictiveAgent(reward_model, MSELoss(), r_ds, opt_kwargs={'lr':1e-3})
r_agent.load_weights('untracked_files/virus_predictor.pt')
# r_agent.load_state_dict(model_from_url('virus_predictor.pt')) # optional - load exact weights
reward_model.eval();
freeze(reward_model)
reward_function = Reward(r_agent.predict_data, weight=1)
virus_reward = RewardCallback(reward_function, 'virus')
# -
# ### Optional Reward: Stability Metric
#
# There has been a lot of great work recently looking at large scale transformer language models for unsupervised learning of protein structures. One interesting thing that has emerged is a rough relationship between the protein sequence log probability given by a generative model and the stability of the protein sequence.
#
# We can use the log probability values from a pretrained protein transformer model as a proxy for stability. Including this as a reward function can help keep the generated peptides realistic.
#
# To include this as a reward term, run the code below to install the [ESM](https://github.com/facebookresearch/esm) library to access a pretrained protein transformer model.
#
# In the interest of time, we will use the smallest ESM model with 43M parameters, rather than the large scale 630M parameter model. Note that even with the smaller model, this reward term adds significantly to the training runtime
# +
# Optional: install ESM
# # ! pip install fair-esm
# -
import esm
protein_model, alphabet = esm.pretrained.esm1_t6_43M_UR50S()
batch_converter = alphabet.get_batch_converter()
class PeptideStability():
def __init__(self, model, alphabet, batch_converter):
self.model = model
to_device(self.model)
self.alphabet = alphabet
self.batch_converter = batch_converter
def __call__(self, samples):
data = [
(f'protein{i}', samples[i]) for i in range(len(samples))
]
batch_labels, batch_strs, batch_tokens = self.batch_converter(data)
with torch.no_grad():
results = self.model(to_device(batch_tokens))
lps = F.log_softmax(results['logits'], -1)
mean_lps = lps.gather(2, to_device(batch_tokens).unsqueeze(-1)).squeeze(-1).mean(-1)
return mean_lps
ps = PeptideStability(protein_model, alphabet, batch_converter)
stability_reward = Reward(ps, weight=0.1, bs=300)
stability_cb = RewardCallback(stability_reward, name='stability')
stability_reward(df.aa.values[:10])
# ### Samplers
#
# We create the following samplers:
# - `sampler1 ModelSampler`: this samples from the main model. This sample will add 1000 compounds to the buffer each buffer build, and sample 40% of each batch on the fly from the main model.
# - `sampler2 ModelSampler`: this samples from the baseline model and is not sampled live on each batch
# - `sampler3 LogSampler`: this samples high scoring samples from the log
# - `sampler4 TokenSwapSampler`: this uses token-swap combinatorial chemistry to generate new samples from high scoring samples
# - `sampler5 DatasetSampler`: this sprinkles in a small amount of high scoring samples into each buffer build.
# +
gen_bs = 1500
sampler1 = ModelSampler(agent.vocab, agent.model, 'live', 1000, 0., gen_bs)
sampler2 = ModelSampler(agent.vocab, agent.base_model, 'base', 1000, 0., gen_bs)
sampler3 = LogSampler('samples', 'rewards', 10, 98, 200)
sampler4 = TokenSwapSampler('samples', 'rewards', 10, 98, 200, aa_vocab, 0.2)
sampler5 = DatasetSampler(df[(df.beats_wt==1)].aa.values,
'data', buffer_size=6)
samplers = [sampler1, sampler2, sampler3, sampler4, sampler5]
# -
# ### Callbacks
#
# Additional callbacks
# - `SupervisedCB`: runs supervised training on the top 3% of samples every 400 batches
# - `MaxCallback`: prints the max reward for each batch
# - `PercentileCallback`: prints the 90th percentile score each batch
# +
supervised_cb = SupervisedCB(agent, 20, 0.5, 98, 1e-4, 64)
live_max = MaxCallback('rewards', 'live')
live_p90 = PercentileCallback('rewards', 'live', 90)
cbs = [supervised_cb, live_p90, live_max]
# -
# ## Environment and Train
#
# Now we can put together our Environment and run the training process
env = Environment(agent, template_cb, samplers=samplers, rewards=[virus_reward, stability_cb], losses=[loss],
cbs=cbs)
# +
# set_global_pool(min(12, os.cpu_count()))
# -
env.fit(128, 28, 400, 25)
env.log.plot_metrics()
# ## Evaluation
#
# Based on our score function, we determined a sequence with a predicted score of 5.33 or higher would be in the top 1% of sequences relative to the training data. A sequence with a predicted score of 8.63 or higher would beat out all sequences in the dataset
#
# We found 1670 sequences with a predicted score of 5.33 or higher and 13 sequences with a predicted score of 8.63 or higher
env.log.df[(env.log.df.virus>5.33) & ~(env.log.df.sources=='data_buffer')].shape
env.log.df[(env.log.df.virus>8.63) & ~(env.log.df.sources=='data_buffer')].shape
# We can generate logo stack plots to visualize what residues are favored
def plot_logo(seqs):
freqs = []
for i in range(len(seqs[0])):
aas = [j[i] for j in seqs]
counts = Counter(aas)
total = sum(counts.values())
norm_counts = defaultdict(lambda: 0)
for k in counts.keys():
norm_counts[k] = counts[k]/total
freqs.append(norm_counts)
aas = aa_vocab.itos[4:]
dfs = []
for i, f in enumerate(freqs):
df_iter = pd.DataFrame([[f[aa] for aa in aas]], columns=aas)
df_iter['Position'] = i
dfs.append(df_iter)
dfs = pd.concat(dfs)
dfs = dfs.set_index('Position')
return dfs.groupby('Position').mean().plot.bar(stacked=True, figsize=(12,8))
# Here's the logo plot for high scoring sequences in the dataset
plot_logo(df[(df.beats_wt==1) & (df.VAE_virus_S>7)].aa.values)
# Here's the residue plot for high scoring generated sequences
plot_logo(env.log.df[(env.log.df.virus>8.9) & ~(env.log.df.sources=='data_buffer')].samples.values)
# We can see similarities between dataset and generated sequences at many positions, with some significant differences (ie position 15)
#
# So are these residue changes real and meaningful? That depends on the quality of our score function. The only way to know is to test sequences in the lab
| nbs/tutorials/tutorials.proteins.capsid.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Description
# The Gaussian density function of $m$-dimensional vectors is:
# $g(x;\mu,C) = {1\over (\sqrt{2\pi})^m |C|^{1/2}} e^{-{1 \over 2} (x-\mu)^TC^{-1}(x-\mu)}$
# where $\mu$ is the distribution mean, $C$ is the covariance matrix. $|C|$ is the determinant of the matrix $C$.
# The $\mu$ and $C$ can be estimated from the data.
# $\mu = {\sum_{i=1}^n x_i \over n }$,
# $C = {\sum_{i=1}^n (x_i-\mu)(x_i-\mu)^T \over n-1 }$.
# # Discriminant function
# If $g(x;\mu_1,C_1)P(h_1) > g(x;\mu_2,C_2)P(h_2)$, then $x$ is classified as $C_1$.
# Problem: there may be no determinant of matrix $C$.
# Solution: $ (x-\mu_1)^TC_1^{-1}(x-\mu_1) + b < (x-\mu_2)^TC_2^{-1}(x-\mu_2)$, where $b$ is a threshold.
# # Implementation
# +
import numpy as np
import pandas as pd
import scipy
import math
import tool
class NaiveClassifier:
def __init__(self):
pass
def __prior(self):
'''
Calculate the probability for each class.
@information used: self.y, self.n
@ouput:self.priors
'''
self.priors = {}
counts = self.y.value_counts().to_dict()
for k, v in counts.items():
self.priors[k] = v / self.y.size
def __mean_variance(self):
'''
Calculate the mean, variance and so on for each class
'''
self.mean = {}
self.variance = {}
self.determinant = {}
for c in self.y.unique():
idxes = self.y==c
X = self.X[idxes,:]
# mean
mu = np.mean(X,0).reshape((-1,1))
self.mean[c] = mu
# covariance
Xc = X-mu.T
n,m = Xc.shape
# var = np.cov(Xc.T)
var = (Xc.T@Xc)/(n-1)
self.variance[c] = var
# determinant
self.determinant[c] = np.linalg.det(var)
# deal with Singular matrix
if np.linalg.det(var) <= 0:
# tool.printred('nonpositive determinant!!! ' + str(np.linalg.det(var)))
rank = np.linalg.matrix_rank(var)
D, V = tool.EVD(var)
D = D[:rank]
determinant = 1
for d in D:
determinant = determinant*d
self.determinant[c] = determinant
def __calculate_Gaussian_probability(self, x, c):
'''
x: the test data point
c: class
'''
u = self.mean[c]
C = self.variance[c]
determinant = self.determinant[c]
x = x.reshape((-1,1))
m = x.shape[0]
part1 = ((math.sqrt(2*math.pi))**m)*(determinant**0.5)
if part1 != 0:
part1 = 1/part1 # pay attention
md = (x-u).T@np.linalg.pinv(C)@(x-u)
part2 = (-1/2)*md
part2 = math.e**part2
return (part1*part2)[0,0]
def fit(self, X, y):
self.X = X
self.y = pd.Series(y)
self.n = X.shape[0]
self.__prior()
self.__mean_variance()
def predict(self, X_test):
n, m = X_test.shape
y_pre = []
for i in range(n):
x_i = X_test[i,:].reshape((-1,1))
P = {}
for c in self.y.unique():
p = self.__calculate_Gaussian_probability(x_i, c)
p = p*self.priors[c]
P[c] = p
P = tool.normalizeDict(P)
y_pre.append(tool.argmaxDict(P))
return y_pre
def predict_proba(self, X_test):
n, m = X_test.shape
y_pre = []
for i in range(n):
x_i = X_test[i,:].reshape((-1,1))
P = {}
for c in self.y.unique():
p = self.__calculate_Gaussian_probability(x_i, c)
p = p*self.priors[c]
P[c] = p
P = tool.normalizeDict(P)
return list(tool.sortDictbyKey(P).values())
# +
import numpy as np
import pandas as pd
import scipy
from sklearn.model_selection import KFold
import tool
import data
# read data
dataset_location = "Iris.csv"
X, y= data.read_csv(dataset_location, shuffle=False)
n, m = X.shape
print(X.shape)
k = 1 # reduced dimension
f = n # LEAVE ONE OUT
seed = -1
# split
if seed < 0:
kf = KFold(n_splits = f, shuffle = True)
else:
kf = KFold(n_splits = f, random_state = seed, shuffle = True)
idxesLists = kf.split(X)
splits = []
for trainidx, testindx in idxesLists:
splits.append((trainidx, testindx))
DEBUG = True
if DEBUG:
accs_imp = 0
accs_imp_reduce = 0
for trainidx, testindx in splits:
X_train = X[trainidx,:]
y_train = y[trainidx]
X_test = X[testindx,:]
y_test = y[testindx]
Xt_train = X_train.T
Xt_test = X_test.T
#1.preprocessing
# remove mean
mean = np.mean(Xt_train,1).reshape(m,-1)
Xt_train = Xt_train - mean
Xt_test = Xt_test - mean
X_train = Xt_train.T
X_test = Xt_test.T
# PCA: dimension reduction
D, V = tool.EVD(Xt_train@Xt_train.T)
V = V[:,:k]
Wt_train = V.T@Xt_train
W_train = Wt_train.T
Wt_test = V.T@Xt_test
W_test = Wt_test.T
#2. TEST
# my implementation: without PCA
clf = NaiveClassifier()
clf.fit(X_train, y_train)
y_pre = clf.predict(X_test)
diff = y_pre - y_test
acc = 1 - np.count_nonzero(diff)/len(y_test)
accs_imp += acc
# my implementation: with PCA
clf = NaiveClassifier()
clf.fit(W_train, y_train)
y_pre = clf.predict(W_test)
diff = y_pre - y_test
acc = 1 - np.count_nonzero(diff)/len(y_test)
accs_imp_reduce += acc
print('accs_imp :',accs_imp/f)
print('accs_imp pca :',accs_imp_reduce/f)
# -
| _posts/Gaussian_Naive_Bayesian.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## About Time Series Forecasting
# In this chapter we introduce and define the problem of forecasting time series, discuss how to properly evaluate the performance of a forecasting model, and get to know some metrics to quantify the performance.
# ## Preamble
# %matplotlib inline
import pandas
import seaborn
import matplotlib
import matplotlib.pyplot as plt
import numpy
import forecast_lab
seaborn.set_style("ticks")
matplotlib.pyplot.rcParams["axes.grid"] = True
matplotlib.pyplot.rcParams["figure.figsize"] = (20, 3)
# ## For Example: Forecasting Taxi Demand
# Consider for example the problem of forecasting demand for taxi rides in a city. Based on [a public dataset from the City of Chicago](https://catalog.data.gov/dataset/taxi-trips), we have extracted a time series of the number of taxi trips per day. This time series has several realistic properties, such as seasonal (e.g. weekly) patterns and a non-trivial trend.
taxi_trips = forecast_lab.datasets.read_chicago_taxi_trips_daily()
taxi_trips.head()
taxi_trips.plot()
# In the following, we are going to have a look at various statistical modelling and machine learning techniques that can be applied to forecasting a time series like this.
# ## Forecasting Concepts
# **A Forecast's Horizon**
#
# An important question to ask before building a forecast model: How far into the future do we need to look? The number of steps to forecast is called the forecast's **horizon**.
#
#
# A true forecasting model has the ability to **predict a time series $h$ steps ahead**, for the desired number of steps $h$. (This is a more difficult task than **one-step-ahead** prediction - given the previous $k$ points of the time series, predict the next value, which can be solved by supervised ML methods we already know at this point.)
#
# **Recursive and Direct Forecasting**
#
#
# We need to distinguish two different approaches to forecasting this:
# - **recursive forecasting**: The model has the ability to predict one step ahead - now we apply it recursively on its own predictions to forecast $h$ steps ahead.
# - **direct forecasting**: The model can directly predict the next $h$ steps without recursively using its own predictions as input.
#
#
# ## Evaluating Forecasting Models
# How good is the performance of my forecasting model? How do I set up an evaluation in order to produce a model that works in practice? This section introduces:
#
# - **error metrics for time series forecasts**
# - **splitting into training and test data**
# ### Metrics
# In general, a performance/error metric is a function $M$ that takes actual values of the time series $y$ and the corresponding forecasted values $\hat{y}$.
#
# $$M(y, \hat{y}) = \dots$$
# Measuring the error in time series forecasting is in many ways similar to how we do it with classical **regression** problems, so let's revisit the error metrics discussed in [📓 Machine Learning with Python: About Regression](../ml/ml-regression-intro.ipynb).
#
# - **Mean Absolute Error (MAE)**
# - **Root Mean Squared Error (RMSE)**
# - **$R^2$ score**
# - **Mean Absolute Percentage Error (MAPE)**
#
# ### Business Case-Specific Metrics
#
# While the general error metrics above are widely applicable, a metric specific to your business case for forecasting is even more appropriate and interpretable. It pays to spend some time on designing an appropriate performance metric - perhaps the error can be numbered in monetary terms, connected to an important KPI, etc.
#
# **Exercise: Pick a real-life forecasting problem, then brainstorm and discuss specific metrics that could be relevant!**
# ### Splitting the Data for Evaluation
# At this point we assume you already know about evaluation strategies like **train-test-split** and **cross-validation** and why they are necessary. You can read up on this in [📓 Machine Learning with Python: About Classification](../ml/ml-classification-intro.ipynb).
#
# When dealing with time series, we have to approach things somewhat differently: Here, randomly shuffling and splitting of the data points does not make sense. Rather, we want to use a past segment of the time series to predict a future segment.
#
# How large should these segments be? This is very much dependent on our application and use case. How far into the future do we need to look to make good decisions for our business case? What is more important - short-term or long-term accuracy?
# #### A Helper Class for Forecast Evaluation
# We have prepared some code to make evaluation of forecast performance more convenient: The `ForecastEvaluation` implements a couple of training and evaluation strategies. Here, we use it to:
#
# 1. Perform evaluation similar to _cross-validation_: Split the time series randomly into a training and adjacent test segment of given sizes. Fit the model to the training segment and forecast the test segment.
# 2. Evaluate the performance of the forecast through the given metrics.
# 3. Plot the forecast and diagnostic information.
metrics = {
"RMSE": forecast_lab.metrics.root_mean_squared_error,
"MAPE": forecast_lab.metrics.mean_absolute_percentage_error
}
import statsmodels.api as sm
# Our `mlts` module also provides a few wrapper classes so that we can evaluate forecasting with different approaches and libraries. For example, the `StatsmodelsWrapper` is used to package a `statsmodels`-style time series model. Note the parameters:
#
# - `estimator_params`: supplied to the constructor of the `estimator_class`
# - `fit_params`: supplied to the `fit` method call
forecast_lab.ForecastEvaluation(
ts=taxi_trips["Trips"],
metrics=metrics,
forecasting=forecast_lab.StatsmodelsWrapper(
estimator_class=sm.tsa.ARIMA,
estimator_params={
"order": (4,1,2)
},
fit_params={
"max_iter": 10
},
),
train_window_size=365,
test_window_size=100,
).evaluate(
k=2,
plot_segments=True,
plot_residuals=True,
).get_metrics().mean()
# ## Dummy Models
# When spending time on a sophisticated forecast using statistical modelling and machine learning, there is a good question that we should have an answer to: How does it compare to simple, "trivial" forecasts? We should be able to get significantly better performance to justify the engineering that goes into any ML-based solution.
#
# Here are implementations a few **dummy models** that are really simple, but can be surprisingly hard to beat with more advanced techniques.
# +
# forecast_lab.dummy.MeanForecast??
# +
# forecast_lab.dummy.LinearForecast??
# -
forecast_lab.ForecastEvaluation(
ts=taxi_trips["Trips"],
metrics=metrics,
forecasting=forecast_lab.dummy.MeanForecast(),
train_window_size=365,
test_window_size=100,
).evaluate(
k=2,
plot_segments=True,
plot_residuals=True
).get_metrics()
forecast_lab.ForecastEvaluation(
ts=taxi_trips["Trips"],
metrics=metrics,
forecasting=forecast_lab.dummy.LinearForecast(),
train_window_size=365,
test_window_size=100,
).evaluate(
k=2,
plot_segments=True,
plot_residuals=True
).get_metrics().mean()
# ## References
#
# - [Forecasting - Metrics for Time Series Forecasts](https://www.edscave.com/forecasting---time-series-metrics.html)
# - [Recursive and Direct Forecasting](https://stats.stackexchange.com/questions/346714/forecasting-several-periods-with-machine-learning)
# - [Simple Forecast Methods](https://otexts.com/fpp2/simple-methods.html)
# ---
# _This notebook is licensed under a [Creative Commons Attribution 4.0 International License (CC BY 4.0)](https://creativecommons.org/licenses/by/4.0/). Copyright © 2019 [Point 8 GmbH](https://point-8.de)_
#
#
| notebooks/forecasting-intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="XHbXDIxe4zJ9"
# # Importing libraries
#
# + id="On2CN5d34zJ-" executionInfo={"status": "ok", "timestamp": 1622355487443, "user_tz": -330, "elapsed": 351, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}}
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sn
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics
from sklearn.naive_bayes import GaussianNB
# + [markdown] id="leDfhYqz4zJ-"
# # Reading dataset
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="jF8rj6ug4zJ_" executionInfo={"status": "ok", "timestamp": 1622355236375, "user_tz": -330, "elapsed": 603, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="ce8cdf77-22dd-4307-c656-2f46339825c3"
# Load the COVID symptom survey (expects covid_symptoms.csv in the working
# directory) and display it.
df=pd.read_csv("covid_symptoms.csv")
df
# + colab={"base_uri": "https://localhost:8080/", "height": 669} id="kvxawba64zJ_" executionInfo={"status": "ok", "timestamp": 1622355236376, "user_tz": -330, "elapsed": 16, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="19e8f31b-61e4-4eeb-81ff-c2752a73401b"
df.head(20)
# + [markdown] id="P0BmrvLk4zKA"
# # Data Preprocessing
# + [markdown] id="8pcfIdg04zKA"
# ### Removing null values
#
# + colab={"base_uri": "https://localhost:8080/"} id="779U1_FE4zKB" executionInfo={"status": "ok", "timestamp": 1622355236376, "user_tz": -330, "elapsed": 15, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="9b609ee9-5d66-4641-8ea9-8f32e890d381"
df.isnull().sum()
# + id="irjV5zpP4zKB" executionInfo={"status": "ok", "timestamp": 1622355236377, "user_tz": -330, "elapsed": 12, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}}
df=df.dropna()
# + colab={"base_uri": "https://localhost:8080/"} id="j8V5ruWd4zKB" executionInfo={"status": "ok", "timestamp": 1622355236378, "user_tz": -330, "elapsed": 13, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="adddd3fa-c12e-4744-da15-5ff338b1b645"
df.isnull().sum()
# + [markdown] id="oErB7LZ34zKC"
# #### There are no null values
# + colab={"base_uri": "https://localhost:8080/"} id="oWBDmIxI4zKC" executionInfo={"status": "ok", "timestamp": 1622355236380, "user_tz": -330, "elapsed": 12, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="db0e7ac9-07ac-435d-b750-74a0000d0d16"
np.shape(df)
# + [markdown] id="7g5Luy7S4zKC"
# ### Replacing categorical variables with numbers
# + id="SKQS0ZkM4zKD" executionInfo={"status": "ok", "timestamp": 1622355236381, "user_tz": -330, "elapsed": 11, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}}
# Encode the categorical values numerically in one pass:
#   yes/no flags                     -> 1.0 / 0.0
#   gender "Male"/"Female"           -> 0.0 / 1.0
#   test_indication "Other"/"Abroad"/"Contact with confirmed" -> 1.0/2.0/3.0
# A single mapping dict scans the frame once instead of six times. Values do
# not cascade (each original value is replaced independently), so the result
# is identical to the original chain of df.replace(...) calls.
df = df.replace(
    {
        "No": 0.0,
        "Yes": 1.0,
        "Male": 0.0,
        "Female": 1.0,
        "Other": 1.0,
        "Abroad": 2.0,
        "Contact with confirmed": 3.0,
    }
)
# + colab={"base_uri": "https://localhost:8080/", "height": 359} id="ZF9gTs_14zKD" executionInfo={"status": "ok", "timestamp": 1622355237252, "user_tz": -330, "elapsed": 14, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="c585806b-1e8d-432b-a894-e2d34f48475c"
df.head(10)
# + [markdown] id="BLPlSbTu4zKE"
# # Distribution of positive and negative results
# + colab={"base_uri": "https://localhost:8080/", "height": 334} id="zXzlre7C4zKE" executionInfo={"status": "ok", "timestamp": 1622355237253, "user_tz": -330, "elapsed": 14, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="79a92e8e-b46f-4e87-a9db-04ace74dba2d"
sn.countplot(df["corona_result"])
plt.show()
# + [markdown] id="8FxXF8Eq4zKE"
# # Checking for relationship among variables
#
# + colab={"base_uri": "https://localhost:8080/", "height": 780} id="zVQjlCUT4zKF" executionInfo={"status": "ok", "timestamp": 1622355238048, "user_tz": -330, "elapsed": 806, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="f341908e-8c7d-4982-a51e-7255e8e08f78"
plt.figure(figsize=(9,6))
c=df.corr()
sn.heatmap(c,xticklabels=c.columns,yticklabels=c.columns,annot=True)
c
# + [markdown] id="QGN07vw_4zKF"
# # Train-Test split
# + colab={"base_uri": "https://localhost:8080/"} id="VxsHHf0a4zKF" executionInfo={"status": "ok", "timestamp": 1622355238049, "user_tz": -330, "elapsed": 12, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="9878c05c-6cb8-492d-9ac8-ed519a0bf84e"
df.columns
# + id="HXRBTO5t4zKF" executionInfo={"status": "ok", "timestamp": 1622355238050, "user_tz": -330, "elapsed": 10, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}}
X=df[['cough', 'fever', 'sore_throat', 'shortness_of_breath', 'head_ache',
'age_60_and_above', 'gender', 'test_indication']]
# + id="6MXJKRQi4zKG" executionInfo={"status": "ok", "timestamp": 1622355238050, "user_tz": -330, "elapsed": 10, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}}
y=df['corona_result']
# + id="FNwTPqRk4zKG" executionInfo={"status": "ok", "timestamp": 1622355238051, "user_tz": -330, "elapsed": 10, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}}
X_train, X_test, y_train, y_test = train_test_split(X,y)
# + [markdown] id="WuItZpOk6vo_"
# # Naive Bayes
# + colab={"base_uri": "https://localhost:8080/"} id="AXFkgx7r6PyY" executionInfo={"status": "ok", "timestamp": 1622355316594, "user_tz": -330, "elapsed": 563, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="68cf89bc-c5d3-4e47-b047-6e0b530e0f7b"
naives_model = GaussianNB()
naives_model.fit(X_train,y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="OXZwWyMx6bqZ" executionInfo={"status": "ok", "timestamp": 1622355333284, "user_tz": -330, "elapsed": 360, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="5594ea33-812b-4871-9852-48cc58feb436"
Y_hat = naives_model.predict(X_test)
Y_hat
# + colab={"base_uri": "https://localhost:8080/"} id="3tS07Efr6dmR" executionInfo={"status": "ok", "timestamp": 1622355347192, "user_tz": -330, "elapsed": 384, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="146428c2-ae78-4435-8336-409691f5dcd5"
# Held-out accuracy of the Gaussian naive Bayes model.
# NOTE(review): accuracy_score is imported from sklearn.metrics in the top
# import block of this notebook; in the original source order the import only
# appeared further down, which worked in the live notebook solely because the
# import cell had been executed first.
model_acc = accuracy_score(y_test, Y_hat)
print('The accuracy of our naive model is: %0.2f'% model_acc)
# + [markdown] id="FcgP1phx61aQ"
# # Decision Tree
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="6OJSxmdJ64-w" executionInfo={"status": "ok", "timestamp": 1622355506482, "user_tz": -330, "elapsed": 2192, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="0a0abdfd-f7d5-4fc6-d7b5-80ae08a0573a"
# Sweep tree depth 1..14 and record train/test accuracy at each depth to
# visualize the under-/overfitting trade-off.
training_accuracy = []
test_accuracy = []
max_dep = range(1,15)
for md in max_dep:
    tree = DecisionTreeClassifier(max_depth=md,random_state=0)
    tree.fit(X_train,y_train)
    training_accuracy.append(tree.score(X_train, y_train))
    test_accuracy.append(tree.score(X_test, y_test))
# Plot both curves: the original computed test_accuracy but never plotted it,
# which defeated the purpose of the sweep.
plt.plot(max_dep, training_accuracy, label='Accuracy of the training set')
plt.plot(max_dep, test_accuracy, label='Accuracy of the test set')
plt.ylabel('Accuracy')
plt.xlabel('Max Depth')
plt.legend()
# + colab={"base_uri": "https://localhost:8080/"} id="26SrWI8d7In5" executionInfo={"status": "ok", "timestamp": 1622355596834, "user_tz": -330, "elapsed": 655, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="94ef8560-049a-4d6e-cca5-89e30dfb66f7"
tree = DecisionTreeClassifier(max_depth=3,random_state=0)
tree.fit(X_train,y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="3pr2kRuR7MjQ" executionInfo={"status": "ok", "timestamp": 1622355598458, "user_tz": -330, "elapsed": 4, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="5d9291cc-b046-42f2-b5e7-f4f85bea13a2"
tree.score(X_test, y_test)
# + [markdown] id="NSCQj4v94zKJ"
# # Gradient Boosting
# + id="Ht3Zq5D34zKK" executionInfo={"status": "ok", "timestamp": 1622355238051, "user_tz": -330, "elapsed": 9, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}}
from sklearn.ensemble import GradientBoostingClassifier
clf=GradientBoostingClassifier()
# + colab={"base_uri": "https://localhost:8080/"} id="zkj6pGmr4zKK" executionInfo={"status": "ok", "timestamp": 1622355244819, "user_tz": -330, "elapsed": 6776, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="5e9ed642-15f7-447a-fd29-9e0b38acb02d"
clf.fit(X_train,y_train)
# + id="3Jgr5TQk4zKO" executionInfo={"status": "ok", "timestamp": 1622355244820, "user_tz": -330, "elapsed": 33, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}}
pred_gb=clf.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="oEw-Uo8I4zKO" executionInfo={"status": "ok", "timestamp": 1622355244822, "user_tz": -330, "elapsed": 31, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="ebd1330e-877a-47b2-c28f-ef8724027a76"
from sklearn.metrics import accuracy_score
accuracy_score(y_test,pred_gb)
# + colab={"base_uri": "https://localhost:8080/"} id="ttMEwbX44zKO" executionInfo={"status": "ok", "timestamp": 1622355244823, "user_tz": -330, "elapsed": 27, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="62892ed3-207d-4c1f-ff39-3802b536ff61"
# Confusion matrix of the gradient-boosting predictions; confusion_matrix is
# imported from sklearn.metrics in the top import block of this notebook.
confusion_matrix(y_test,pred_gb)
# + colab={"base_uri": "https://localhost:8080/"} id="VjuQHOqI4zKP" executionInfo={"status": "ok", "timestamp": 1622355245447, "user_tz": -330, "elapsed": 645, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="922188e8-df1c-4faf-ddef-eb29cf465544"
print(classification_report(y_test,pred_gb))
# + [markdown] id="Dz98Px5m4zKP"
# # RandomForest Classifier
# + id="6HaKlw844zKP" executionInfo={"status": "ok", "timestamp": 1622355245448, "user_tz": -330, "elapsed": 12, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}}
from sklearn.ensemble import RandomForestClassifier
rfc=RandomForestClassifier(max_depth=2)
# + colab={"base_uri": "https://localhost:8080/"} id="NqxX_SRf4zKP" executionInfo={"status": "ok", "timestamp": 1622355247941, "user_tz": -330, "elapsed": 2503, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="cc82b707-7df7-4a68-ba7b-4b3f7599b27e"
rfc.fit(X_train,y_train)
# + id="2JixeZro4zKQ" executionInfo={"status": "ok", "timestamp": 1622355247943, "user_tz": -330, "elapsed": 15, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}}
pred_rfc=rfc.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="Iooy6pRw4zKQ" executionInfo={"status": "ok", "timestamp": 1622355247943, "user_tz": -330, "elapsed": 13, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="8be124ab-73a3-4cc5-d9d6-26b1642fac23"
accuracy_score(y_test,pred_rfc)
# + colab={"base_uri": "https://localhost:8080/"} id="L876dVxA4zKQ" executionInfo={"status": "ok", "timestamp": 1622355248791, "user_tz": -330, "elapsed": 860, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="b9a08d05-4f7f-4e3d-fa46-d54435800428"
# Confusion matrix of the random-forest predictions; confusion_matrix is
# imported from sklearn.metrics in the top import block of this notebook.
confusion_matrix(y_test,pred_rfc)
# + colab={"base_uri": "https://localhost:8080/"} id="IOamL5LK4zKQ" executionInfo={"status": "ok", "timestamp": 1622355248791, "user_tz": -330, "elapsed": 12, "user": {"displayName": "Darshan", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiyJkiZzhve3Wsd2G6-G6RntYh6b_SlfCkxDPvzyg=s64", "userId": "06818158645734253554"}} outputId="66a5fd59-14ed-4a88-861d-325024e763f4"
print(classification_report(y_test,pred_rfc))
# + [markdown] id="00J_C3h04zKR"
# # Accuracy Results
# + [markdown] id="GVI2U9Uu4zKR"
# <b>
# <li>Naive Bayes- 83%
# <li>Decision Tree- 84.3%
# <li>Gradient Boosting- 86.27%
# <li>RandomForest Classifier- 84.58%
#
# + [markdown] id="lsTmmxlf4zKR"
# ### Gradient boosting gives the best results
| COVID Symptoms/COVIDsymptoms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# NumPy basics: array creation, attributes, slicing, and elementwise arithmetic.
import numpy as np
print(np.__version__)
# A plain Python list vs. a NumPy array built from the same values.
x = [2, 3, 4, 5, 6]
nums = np.array([2, 3, 4, 5, 6])
type(nums)
# 1D array
arr = np.arange(10)
arr
#> array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
# 2D array
b = np.arange(12).reshape(4,3)
print(b)
# 3D array
c = np.arange(24).reshape(2,3,4)
print(c)
# +
# more than one dimensions
a = np.array([[1, 2], [3, 4]])
print (a)
# +
# minimum dimensions (ndmin forces at least a 2-D result: shape (1, 5))
a = np.array([1, 2, 3,4,5], ndmin = 2)
print (a)
# +
# dtype parameter
a = np.array([1, 2, 3], dtype = complex)
print (a)
# -
# boolean array: each element is converted via its truthiness
# (0, None and '' become False; nonzero numbers, 'a' and True become True)
bool_arr = np.array([1, 0.5, 0, None, 'a', '', True, False], dtype=bool)
print(bool_arr)
# ndim: number of dimensions
a = np.array([(1,2,3),(4,5,6)])
print(a.ndim)
# itemsize: bytes per element
a = np.array([(1,2,3)])
print(a.itemsize)
# dtype
a = np.array([(1,2,3)])
print(a.dtype)
a = np.array([(1,2,3,4,5,6)])
print(a.size)
print(a.shape)
# slicing: all rows, column 2
a=np.array([(1,2,3,4),(3,4,5,6)])
print(a[0:,2])
# rows 0-1, column 1
a=np.array([(8,9),(10,11),(12,13)])
print(a[0:2,1])
# 10 evenly spaced values between 1 and 3 (both endpoints included)
a=np.linspace(1,3,10)
print(a)
a= np.array([1,2,3])
print(a.min())
a= np.array([1,2,3])
print(a.max())
a= np.array([1,2,3])
print(a.sum())
# elementwise square root
a=np.array([(1,2,3),(3,4,5,)])
print(np.sqrt(a))
# elementwise subtraction
x= np.array([(1,2,3),(3,4,5)])
y= np.array([(1,2,3),(3,4,5)])
print(x-y)
# elementwise multiplication
x= np.array([(1,2,3),(3,4,5)])
y= np.array([(1,2,3),(3,4,5)])
print(x*y)
# elementwise division
x= np.array([(1,2,3),(3,4,5)])
y= np.array([(1,2,3),(3,4,5)])
print(x/y)
# stack vertically (row-wise)
x= np.array([(1,2,3),(3,4,5)])
y= np.array([(1,2,3),(3,4,5)])
print(np.vstack((x,y)))
# stack horizontally (column-wise)
x= np.array([(1,2,3),(3,4,5)])
y= np.array([(1,2,3),(3,4,5)])
print(np.hstack((x,y)))
# flatten to 1-D
x= np.array([(1,2,3),(3,4,5)])
print(x.ravel())
# boolean-mask selection: keep only the odd entries
arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
arr[arr % 2 == 1]
# zero-filled arrays
array1d = np.zeros(3)
print(array1d)
array2d = np.zeros((2, 4))
print(array2d)
# logspace array: 10 points from base**5 to base**10
thearray = np.logspace(5, 10, num=10, base=10000000.0, dtype=float)
print(thearray)
# generate random number arrays
print(np.random.rand(3, 2))   # uniformly distributed on [0, 1)
print(np.random.randn(3, 2))  # standard-normally distributed values
# Uniformly distributed integers in a given range.
print(np.random.randint(2, size=10))
print(np.random.randint(5, size=(2, 4)))
# identity and diagonal arrays
print(np.identity(3))
print(np.diag(np.arange(0, 8, 2)))
print(np.diag(np.diag(np.arange(9).reshape((3,3)))))
# numpy indexing examples
array1d = np.array([1, 2, 3, 4, 5, 6])
print(array1d[0])   # Get first value
print(array1d[-1])  # Get last value
print(array1d[3])   # Get 4th value from first
print(array1d[-5])  # Get 5th value from last
# Get multiple values (fancy indexing with a list of indices)
print(array1d[[0, -1]])
# +
# numpy indexing in a multidimensional array
array3d = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
print(array3d)
print(array3d[0, 0, 0])
print(array3d[0, 0, 1])
print(array3d[0, 0, 2])
# +
# joining and stacking of numpy arrays
array1 = np.array([[1, 2, 3], [4, 5, 6]])
array2 = np.array([[7, 8, 9], [10, 11, 12]])
# Stack arrays in sequence horizontally (column wise).
arrayH = np.hstack((array1, array2))
print(arrayH)
# -
# Stack arrays in sequence vertically (row wise).
arrayV = np.vstack((array1, array2))
print(arrayV)
# Appending arrays after each other, along a given axis.
arrayC = np.concatenate((array1, array2))
print(arrayC)
# Append values to the end of an array.
arrayA = np.append(array1, array2, axis=0)
print(arrayA)
arrayA = np.append(array1, array2, axis=1)
print(arrayA)
# +
# numpy elementary mathematical functions (elementwise; angles in radians)
array1 = np.array([[10, 20, 30], [40, 50, 60]])
print(np.sin(array1))
# -
print(np.cos(array1))
print(np.tan(array1))
print(np.sqrt(array1))
print(np.exp(array1))
print(np.log10(array1))
# element-wise mathematical operations
array1 = np.array([[10, 20, 30], [40, 50, 60]])
array2 = np.array([[2, 3, 4], [4, 6, 8]])
array3 = np.array([[-2, 3.5, -4], [4.05, -6, 8]])
print(np.add(array1, array2))
# NOTE(review): integer power can overflow the platform int dtype for large
# results such as 60**8 — confirm this is acceptable for the demo.
print(np.power(array1, array2))
print(np.remainder((array2), 5))
print(np.reciprocal(array3))
print(np.sign(array3))
# +
print(np.ceil(array3))
# -
print(np.round(array3))
# +
# numpy aggregate and statistical functions
array1 = np.array([[10, 20, 30], [40, 50, 60]])
print("Mean: ", np.mean(array1))
# -
print("Std: ", np.std(array1))
print("Var: ", np.var(array1))
print("Sum: ", np.sum(array1))
print("Prod: ", np.prod(array1))
| NumpyAssigment1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Learn golang
#
# https://golang.org/doc/tutorial/getting-started
#
# ```brew install golang```
#
# ls
# cd code
# ls
# !go run .
# !go help
# !go mod init hello
# !go run .
# ### Create a project directory for your project and initialize the project:
# ls
# cd ..
# ls
# mkdir memcached-operator
# cd memcached-operator/
# !operator-sdk init --domain example.com --repo github.com/example/memcached-operator
# ### Create a simple Memcached API:
# !operator-sdk create api --group cache --version v1alpha1 --kind Memcached --resource --controller
# ### Use the built-in Makefile targets to build and push your operator. Make sure to define IMG when you call make:
# !export USERNAME=<quay-namespace>
# !export OPERATOR_IMG="quay.io/$USERNAME/memcached-operator:v0.0.1"
# !make docker-build docker-push IMG=$OPERATOR_IMG
# ### OLM deployment
#
# Install OLM
# !operator-sdk olm install
# ls
| golang.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Training Deep Neural Networks
#
# In the last lab note, we introduced neural networks and trained them. But it was a very shallow DNN, with only two hidden layers. What if we need to tackle a very complex problem, such as detecting hundreds of types of objects in high-resolution images? We would need to train a much deeper DNN, perhaps with 10 layers, each containing hundreds of neurons, connected by hundreds of thousands of connections. This would not be a walk in the park; here are the problems:
#
# - The **vanishing gradients** (or exploding gradients) problem that affects deep neural networks and makes lower layers very hard to train.
# - With such a large network, **training would be extremely slow**.
# - A model with millions of parameters would severely risk **overfitting the training set**.
#
# In the following sections, we will go through each of these problems in turn and present techniques to solve them.
# +
# Set up some basic helper functions.
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
def reset_graph(seed=42):
    """Reset the default TensorFlow graph and seed TF's and NumPy's RNGs.

    NOTE(review): relies on `tf` being imported by a later cell before the
    first call — confirm the TensorFlow import cell runs first.
    """
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)
# To plot pretty figures
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
# NOTE(review): combined with "images" in save_fig below this yields
# "./image/images/<chapter>/..." — confirm the doubled directory is intended.
PROJECT_ROOT_DIR = "./image/"
CHAPTER_ID = "deep"
def save_fig(fig_id, tight_layout=True):
    """Save the current matplotlib figure as <root>/images/<chapter>/<fig_id>.png.

    Creates the output directory on demand; the original raised an IOError /
    FileNotFoundError when the directory did not exist.
    """
    out_dir = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
    if not os.path.isdir(out_dir):  # py2-compatible guard (no exist_ok kwarg)
        os.makedirs(out_dir)
    path = os.path.join(out_dir, fig_id + ".png")
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format='png', dpi=300)
# -
# # Vanishing/Exploding Gradient Problems
#
# The backpropagation algorithm (BP) works by going from the output layer to the input layer, propagating the error gradient on the way. Once the algorithm has computed the gradient of the cost with regards to each parameter in the networks, it uses those gradients to update each parameter with **Gradient Descent** step.
#
# - Unfortunately, gradients often get smaller and smaller as the algorithm progresses down to the lower layers. As a result, the Gradient Descent update leaves the lower layer connection weights virtually unchanged, and training never converges to a good solution. This is called the vanishing gradients problem.
# - In some cases, the opposite can happen: the gradients can grow bigger and bigger, so many layers get insanely large weight updates and the algorithm diverges. This is the exploding gradients problem, which is mostly encountered in recurrent neural networks.
#
# More generally, deep neural networks suffer from unstable gradients; different layers may learn at widely different speeds.
#
# A paper titled “Understanding the Difficulty of Training Deep Feedforward
# Neural Networks” by Xavier Glorot and Yoshua Bengio found a few suspects, including the combination of the popular logistic sigmoid activation function and the weight initialization technique that was most popular at the time, namely random initialization using a normal distribution with a mean of 0 and a standard deviation of 1.
#
# In short, they showed that with this activation function and this initialization scheme, the variance of the outputs of each layer is much greater than the variance of its inputs.
#
# Going forward in the network, the variance keeps increasing after each layer until the activation function saturates at the top layers.
#
# This is actually made worse by the fact that the logistic function has a mean of 0.5, not 0 (the hyperbolic tangent function has a mean of 0 and behaves slightly better than the logistic function in deep networks).
def logit(z):
    """Logistic sigmoid 1 / (1 + e^(-z)), elementwise for array input.

    NOTE(review): the name is a misnomer — this is the sigmoid (the logit is
    its inverse). Kept as-is because later cells call it by this name.
    """
    exp_neg_z = np.exp(-z)
    return 1.0 / (1.0 + exp_neg_z)
# +
# Plot the logistic sigmoid and annotate its saturating and quasi-linear regions.
z = np.linspace(-5, 5, 200)
plt.plot([-5, 5], [0, 0], 'k-')    # horizontal asymptote y = 0
plt.plot([-5, 5], [1, 1], 'k--')   # horizontal asymptote y = 1
plt.plot([0, 0], [-0.2, 1.2], 'k-')
plt.plot([-5, 5], [-3/4, 7/4], 'g--')  # tangent at z = 0 (slope 1/4 through (0, 0.5))
plt.plot(z, logit(z), "b-", linewidth=2)
props = dict(facecolor='black', shrink=0.1)
plt.annotate('Saturating', xytext=(3.5, 0.7), xy=(5, 1), arrowprops=props, fontsize=14, ha="center")
plt.annotate('Saturating', xytext=(-3.5, 0.3), xy=(-5, 0), arrowprops=props, fontsize=14, ha="center")
plt.annotate('Linear', xytext=(2, 0.2), xy=(0, 0.5), arrowprops=props, fontsize=14, ha="center")
plt.grid(True)
plt.title("Sigmoid activation function", fontsize=14)
plt.axis([-5, 5, -0.2, 1.2])
#save_fig("sigmoid_saturation_plot")
plt.show()
# -
# you can see that when inputs become large (negative or positive), the function saturates at 0 or 1, with a derivative extremely close to 0. Thus when backpropagation kicks in, it has virtually no gradient to propagate back through the network, and what little gradient exists keeps getting diluted as ackpropagation progresses down through the top layers, so there is really nothing left for the lower layers.
# ## Xavier and He Initialization
#
# Note: the book uses `tensorflow.contrib.layers.fully_connected()` rather than `tf.layers.dense()` (which did not exist when this chapter was written). It is now preferable to use `tf.layers.dense()`, because anything in the contrib module may change or be deleted without notice. The `dense()` function is almost identical to the `fully_connected()` function. The main differences relevant to this chapter are:
# * several parameters are renamed: `scope` becomes `name`, `activation_fn` becomes `activation` (and similarly the `_fn` suffix is removed from other parameters such as `normalizer_fn`), `weights_initializer` becomes `kernel_initializer`, etc.
# * the default `activation` is now `None` rather than `tf.nn.relu`.
# * it does not support `tensorflow.contrib.framework.arg_scope()` (introduced later in chapter 11).
# * it does not support regularizer params (introduced later in chapter 11).
#
# +
import tensorflow as tf
reset_graph()
n_inputs = 28 * 28  # MNIST: 28x28 grayscale images, flattened
n_hidden1 = 300
# -
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
# He (variance-scaling) initialization for the layer weights; pairs well
# with ReLU-family activations.
he_init = tf.contrib.layers.variance_scaling_initializer()
hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu,
                          kernel_initializer=he_init, name="hidden1")
# ## Nonsaturating Activation Functions
# One of the insights in the 2010 paper by Glorot and Bengio was that the vanishing/exploding gradients problems were in part due to a poor choice of activation function.
#
# Until then most people had assumed that if Mother Nature had chosen to use
# roughly sigmoid activation functions in biological neurons, they must be an excellent choice. But it turns out that other activation functions behave much better in deep neural networks, in particular the ReLU activation function, mostly because it does not saturate for positive values (and also because it is quite fast to compute).
#
# Unfortunately, the ReLU activation function is not perfect. It suffers from a problem known as the dying ReLUs.
#
# To solve this problem, you may want to use a variant of the ReLU function, such as the leaky ReLU.
#
# ### Leaky ReLU
def leaky_relu(z, alpha=0.01):
    """Leaky ReLU: identity for positive z, slope `alpha` for negative z.

    Implemented as an elementwise maximum: for 0 < alpha < 1,
    max(alpha*z, z) equals z when z >= 0 and alpha*z when z < 0.
    """
    leaked = alpha * z
    return np.maximum(leaked, z)
# Plot the leaky ReLU (slope 0.05 for z < 0) over the same z grid.
plt.plot(z, leaky_relu(z, 0.05), "b-", linewidth=2)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([0, 0], [-0.5, 4.2], 'k-')
plt.grid(True)
props = dict(facecolor='black', shrink=0.1)
plt.annotate('Leak', xytext=(-3.5, 0.5), xy=(-5, -0.2), arrowprops=props, fontsize=14, ha="center")
plt.title("Leaky ReLU activation function", fontsize=14)
plt.axis([-5, 5, -0.5, 4.2])
#save_fig("leaky_relu_plot")
plt.show()
# Implementing Leaky ReLU in TensorFlow:
# +
reset_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
def leaky_relu(z, name=None):
    """TensorFlow leaky ReLU with fixed slope 0.01 (shadows the NumPy version above)."""
    return tf.maximum(0.01 * z, z, name=name)
hidden1 = tf.layers.dense(X, n_hidden1, activation=leaky_relu, name="hidden1")
# -
# Let's train a neural network on MNIST using the Leaky ReLU. First let's create the graph:
# +
# Build the full MNIST classification graph using leaky ReLU activations.
reset_graph()
n_inputs = 28 * 28  # MNIST: flattened 28x28 images
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
# NOTE(review): shape=(None) is just None (not a tuple), i.e. an unconstrained
# shape — confirm a 1-D label vector is what is fed here.
y = tf.placeholder(tf.int64, shape=(None), name="y")
# Two leaky-ReLU hidden layers, then a linear output layer producing logits.
with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=leaky_relu, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=leaky_relu, name="hidden2")
    logits = tf.layers.dense(hidden2, n_outputs, name="outputs")
# Softmax cross-entropy computed from the raw logits.
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
learning_rate = 0.01
with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
# Accuracy: fraction of samples whose top-1 logit matches the label.
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# +
from tensorflow.examples.tutorials.mnist import input_data
# Downloads MNIST into ./tmp/data/ on first use.
mnist = input_data.read_data_sets("./tmp/data/")
n_epochs = 40
batch_size = 50
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        # One pass over the training set in mini-batches.
        for iteration in range(mnist.train.num_examples // batch_size):
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        # Every 5 epochs: accuracy on the last mini-batch and on the
        # validation set (labelled "Validation accuracy" accordingly).
        if epoch % 5 == 0:
            acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
            acc_test = accuracy.eval(feed_dict={X: mnist.validation.images, y: mnist.validation.labels})
            print(epoch, "Batch accuracy:", acc_train, "Validation accuracy:", acc_test)
    save_path = saver.save(sess, "./my_model_final.ckpt")
# -
# #### ELU activation function
#
def elu(z, alpha=1):
    """ELU activation: z for z >= 0, alpha * (e^z - 1) for z < 0.

    Smooth everywhere and saturating at -alpha for large negative inputs.
    """
    negative_branch = alpha * (np.exp(z) - 1)
    return np.where(z < 0, negative_branch, z)
# +
# Plot the ELU (alpha = 1); it saturates at -alpha for large negative z.
plt.plot(z, elu(z), "b-", linewidth=2)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([-5, 5], [-1, -1], 'k--')  # asymptote y = -alpha
plt.plot([0, 0], [-2.2, 3.2], 'k-')
plt.grid(True)
plt.title(r"ELU activation function ($\alpha=1$)", fontsize=14)
plt.axis([-5, 5, -2.2, 3.2])
#save_fig("elu_plot")
plt.show()
# -
# It looks a lot like the ReLU function, with a few major differences:
#
# - First it takes on negative values when z < 0, which allows the unit to have an average output closer to 0. This helps alleviate the vanishing gradients problem, as discussed earlier.
# - It has a nonzero gradient for z < 0, which avoids the dying units issue.
# - Third, the function is smooth everywhere, including around z = 0, which helps speed up Gradient Descent, since it does not bounce as much left and right of z = 0.
#
# The main drawback of the ELU activation function is that it is slower to compute
# than the ReLU and its variants (due to the use of the exponential function), but during training this is compensated by the faster convergence rate.
#
# So which activation function should you use for the hidden layers of your deep neural networks? Although your mileage will vary, in general
#
# **ELU > leaky ReLU (and its variants) > ReLU > tanh > logistic**.
#
# If you care a lot about runtime performance, then you may prefer leaky ReLUs over ELUs. If you don’t want to tweak yet another hyperparameter, you may just use the default α values suggested earlier (0.01 for the leaky ReLU, and 1 for ELU).
# +
reset_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
# Use TensorFlow's built-in ELU as the hidden-layer activation.
hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.elu, name="hidden1")
# -
# ### SELU
#
# This activation function was proposed in this [great paper](https://arxiv.org/pdf/1706.02515.pdf) by Günter Klambauer, Thomas Unterthiner and Andreas Mayr, published in June 2017.
# It outperforms the other activation functions very significantly for deep neural networks, so you should really try it out.
# +
def selu(z,
         scale=1.0507009873554804934193349852946,
         alpha=1.6732632423543772848170429916717):
    """Scaled ELU (SELU) activation.

    Computes ``scale * elu(z, alpha)``. With the default constants from
    Klambauer et al. (2017) the activation is self-normalizing: it keeps
    activations near zero mean and unit variance across deep stacks.
    """
    return elu(z, alpha) * scale
# Plot the SELU curve; its lower asymptote is -scale*alpha ~= -1.758.
plt.plot(z, selu(z), "b-", linewidth=2)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([-5, 5], [-1.758, -1.758], 'k--')
plt.plot([0, 0], [-2.2, 3.2], 'k-')
plt.grid(True)
plt.title(r"SELU activation function", fontsize=14)
plt.axis([-5, 5, -2.2, 3.2])
#save_fig("selu_plot")
plt.show()
# -
# With this activation function, even a 100 layer deep neural network preserves roughly mean 0 and standard deviation 1 across all layers, avoiding the exploding/vanishing gradients problem:
# Empirically demonstrate SELU's self-normalizing property: push random data
# through 100 random dense layers and watch the per-row mean/std stay near 0/1.
np.random.seed(42)
Z = np.random.normal(size=(500, 100))
for layer in range(100):
    # LeCun-style initialization (variance 1/fan_in) is required for SELU.
    W = np.random.normal(size=(100, 100), scale=np.sqrt(1/100))
    Z = selu(np.dot(Z, W))
    means = np.mean(Z, axis=1)
    stds = np.std(Z, axis=1)
    if layer % 10 == 0:
        print("Layer {}: {:.2f} < mean < {:.2f}, {:.2f} < std deviation < {:.2f}".format(
            layer, means.min(), means.max(), stds.min(), stds.max()))
# Here's a TensorFlow implementation (there will almost certainly be a `tf.nn.selu()` function in future TensorFlow versions):
def selu(z,
         scale=1.0507009873554804934193349852946,
         alpha=1.6732632423543772848170429916717):
    """TensorFlow SELU: scale * (z when z >= 0, else alpha * (exp(z) - 1))."""
    negative_branch = alpha * tf.nn.elu(z)
    return scale * tf.where(z >= 0.0, z, negative_branch)
# SELUs can also be combined with dropout, check out [this implementation](https://github.com/bioinf-jku/SNNs/blob/master/selu.py) by the Institute of Bioinformatics, Johannes Kepler University Linz.
# +
# Let's create a neural net for MNIST using the SELU activation function:
# Build a 2-hidden-layer MNIST classifier using the SELU activation.
reset_graph()
n_inputs = 28 * 28 # MNIST
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int64, shape=(None), name="y")
with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=selu, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=selu, name="hidden2")
    # Output layer stays linear: softmax is folded into the loss below.
    logits = tf.layers.dense(hidden2, n_outputs, name="outputs")
with tf.name_scope("loss"):
    # Takes integer class labels directly (no one-hot encoding needed).
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
learning_rate = 0.01
with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
    # Accuracy = fraction of samples whose top-1 logit matches the label.
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 40
batch_size = 50
# -
# Now let's train it. Do not forget to scale the inputs to mean 0 and standard deviation 1:
# +
# Per-pixel mean/std from the training set; SELU expects standardized inputs.
means = mnist.train.images.mean(axis=0, keepdims=True)
# 1e-10 avoids division by zero for constant (e.g. always-black border) pixels.
stds = mnist.train.images.std(axis=0, keepdims=True) + 1e-10
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for iteration in range(mnist.train.num_examples // batch_size):
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            # Standardize each batch with the *training-set* statistics.
            X_batch_scaled = (X_batch - means) / stds
            sess.run(training_op, feed_dict={X: X_batch_scaled, y: y_batch})
        if epoch % 5 == 0:
            # Evaluate on the last batch and the (identically scaled) validation set.
            acc_train = accuracy.eval(feed_dict={X: X_batch_scaled, y: y_batch})
            X_val_scaled = (mnist.validation.images - means) / stds
            acc_test = accuracy.eval(feed_dict={X: X_val_scaled, y: mnist.validation.labels})
            print(epoch, "Batch accuracy:", acc_train, "Validation accuracy:", acc_test)
    save_path = saver.save(sess, "./my_model_final_selu.ckpt")
# -
# ## Batch Normalization
# Although using He initialization along with ELU (or any variant of ReLU) can significantly reduce the vanishing/exploding gradients problems at the beginning of training, it doesn’t guarantee that they won’t come back during training.
#
# In a 2015 paper, <NAME> and <NAME> proposed a technique called
# **Batch Normalization (BN)** to address the vanishing/exploding gradients problems, and more generally the problem that the distribution of each layer’s inputs changes during training, as the parameters of the previous layers change (which they call the Internal Covariate Shift problem).
#
#
# The technique consists of adding an operation in the model just before the activation function of each layer, simply zero-centering and normalizing the inputs, then scaling and shifting the result using two new parameters per layer (one for scaling, the other for shifting). In other words, this operation lets the model learn the optimal scale and mean of the inputs for each layer.
#
#
# In order to zero-center and normalize the inputs, the algorithm needs to estimate the inputs’ mean and standard deviation. It does so by evaluating the mean and standard deviation of the inputs over the current mini-batch (hence the name “Batch Normalization”).
#
# At test time, there is no mini-batch to compute the empirical mean and standard
# deviation, so instead you simply use the whole training set’s mean and standard deviation.
# These are typically efficiently computed during training using a moving average.
#
#
# ** Batch Normalization does, however, add some complexity to the model (although it removes the need for normalizing the input data since the first hidden layer will take care of that, provided it is batch-normalized).**
# +
# Batch Normalization: insert a BN op after each dense layer, *before* the
# ELU activation. `training` switches between batch stats and moving averages.
reset_graph()
import tensorflow as tf
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
# Defaults to False (inference mode) unless explicitly fed True during training.
training = tf.placeholder_with_default(False, shape=(), name='training')
hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1")
bn1 = tf.layers.batch_normalization(hidden1, training=training, momentum=0.9)
bn1_act = tf.nn.elu(bn1)
hidden2 = tf.layers.dense(bn1_act, n_hidden2, name="hidden2")
bn2 = tf.layers.batch_normalization(hidden2, training=training, momentum=0.9)
bn2_act = tf.nn.elu(bn2)
logits_before_bn = tf.layers.dense(bn2_act, n_outputs, name="outputs")
# BN is also applied to the output layer here (no activation afterwards).
logits = tf.layers.batch_normalization(logits_before_bn, training=training,
                                       momentum=0.9)
# +
# Start over with a clean graph; same input and training-mode placeholders.
reset_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
training = tf.placeholder_with_default(False, shape=(), name='training')
# -
# To avoid repeating the same parameters over and over again, we can use Python's `partial()` function:
# +
from functools import partial
# Bind the repeated BN arguments once so each call site stays short.
my_batch_norm_layer = partial(tf.layers.batch_normalization,
                              training=training, momentum=0.9)
hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1")
bn1 = my_batch_norm_layer(hidden1)
bn1_act = tf.nn.elu(bn1)
hidden2 = tf.layers.dense(bn1_act, n_hidden2, name="hidden2")
bn2 = my_batch_norm_layer(hidden2)
bn2_act = tf.nn.elu(bn2)
logits_before_bn = tf.layers.dense(bn2_act, n_outputs, name="outputs")
logits = my_batch_norm_layer(logits_before_bn)
# +
# Full BN network: He initialization + batch-normalized dense layers + ELU.
reset_graph()
batch_norm_momentum = 0.9
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int64, shape=(None), name="y")
training = tf.placeholder_with_default(False, shape=(), name='training')
with tf.name_scope("dnn"):
    he_init = tf.contrib.layers.variance_scaling_initializer()
    # Pre-bind the shared BN and dense-layer arguments.
    my_batch_norm_layer = partial(
        tf.layers.batch_normalization,
        training=training,
        momentum=batch_norm_momentum)
    my_dense_layer = partial(
        tf.layers.dense,
        kernel_initializer=he_init)
    hidden1 = my_dense_layer(X, n_hidden1, name="hidden1")
    bn1 = tf.nn.elu(my_batch_norm_layer(hidden1))
    hidden2 = my_dense_layer(bn1, n_hidden2, name="hidden2")
    bn2 = tf.nn.elu(my_batch_norm_layer(hidden2))
    logits_before_bn = my_dense_layer(bn2, n_outputs, name="outputs")
    logits = my_batch_norm_layer(logits_before_bn)
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
with tf.name_scope("train"):
    # NOTE: minimize() alone does NOT run the BN moving-average updates; the
    # training loop below runs the UPDATE_OPS collection explicitly.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# +
n_epochs = 20
batch_size = 200
# BN's moving mean/variance updates live in the UPDATE_OPS collection and
# must be run alongside the training op.
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for iteration in range(mnist.train.num_examples // batch_size):
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            # training=True -> BN uses batch statistics and updates the averages.
            sess.run([training_op, extra_update_ops],
                     feed_dict={training: True, X: X_batch, y: y_batch})
        # Evaluation leaves `training` at its default False (moving averages).
        accuracy_val = accuracy.eval(feed_dict={X: mnist.test.images,
                                                y: mnist.test.labels})
        print(epoch, "Test accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_model_final.ckpt")
# -
# Names of the trainable variables (dense kernels/biases, BN gamma/beta).
[v.name for v in tf.trainable_variables()]
# Global variables additionally include non-trainable state, e.g. the BN
# moving averages.
[v.name for v in tf.global_variables()]
# ## Gradient Clipping
# A popular technique to lessen the exploding gradients problem is to simply clip the gradients during backpropagation so that they never exceed some threshold.
#
# This is called Gradient Clipping. In general people now prefer Batch Normalization, but it’s still useful to know about Gradient Clipping and how to implement it.
# Let's create a simple neural net for MNIST and add gradient clipping. The first part is the same as earlier (except we added a few more layers to demonstrate reusing pretrained models, see below):
# +
# 5-hidden-layer ReLU network for MNIST (extra layers for the reuse demo below).
reset_graph()
n_inputs = 28 * 28 # MNIST
n_hidden1 = 300
n_hidden2 = 50
n_hidden3 = 50
n_hidden4 = 50
n_hidden5 = 50
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int64, shape=(None), name="y")
with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name="hidden2")
    hidden3 = tf.layers.dense(hidden2, n_hidden3, activation=tf.nn.relu, name="hidden3")
    hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.relu, name="hidden4")
    hidden5 = tf.layers.dense(hidden4, n_hidden5, activation=tf.nn.relu, name="hidden5")
    logits = tf.layers.dense(hidden5, n_outputs, name="outputs")
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
# +
learning_rate = 0.01
# Each gradient element is clipped into [-threshold, threshold].
threshold = 1.0
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
# Instead of minimize(), split the step into compute -> clip -> apply.
grads_and_vars = optimizer.compute_gradients(loss) # <--- Compute the gradients
capped_gvs = [(tf.clip_by_value(grad, -threshold, threshold), var)
              for grad, var in grads_and_vars] # <-- Clipping the gradients.
training_op = optimizer.apply_gradients(capped_gvs)
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# +
n_epochs = 20
batch_size = 200
# Standard mini-batch training loop; test accuracy is printed every epoch.
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for iteration in range(mnist.train.num_examples // batch_size):
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: mnist.test.images,
                                                y: mnist.test.labels})
        print(epoch, "Test accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_model_final.ckpt")
# -
# ** The End **
| 02.TensorFlow/TF1.x/TensorFlow_06_Training_Deep_Nets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/iammhk/Quant-101/blob/main/kalman_filters_crypto.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="zFUVGVWsXNSc"
# # Kalman Filters
# + [markdown] id="mBYzjGOVXNSh"
# In this lab you will:
#
# - Estimate Moving Average
# - Use Kalman Filters to calculate the mean and covariance of our time series
# - Modify a Pairs trading function to make use of Kalman Filters
# + [markdown] id="tlmm5jAgXNSj"
# ## What is a Kalman Filter?
#
# The Kalman filter is an algorithm that uses noisy observations of a system over time to estimate the parameters of the system (some of which are unobservable) and predict future observations. At each time step, it makes a prediction, takes in a measurement, and updates itself based on how the prediction and measurement compare.
#
# The algorithm is as follows:
# 1. Take as input a mathematical model of the system, i.e.
# * the transition matrix, which tells us how the system evolves from one state to another. For instance, if we are modeling the movement of a car, then the next values of position and velocity can be computed from the previous ones using kinematic equations. Alternatively, if we have a system which is fairly stable, we might model its evolution as a random walk. If you want to read up on Kalman filters, note that this matrix is usually called $A$.
# * the observation matrix, which tells us the next measurement we should expect given the predicted next state. If we are measuring the position of the car, we just extract the position values stored in the state. For a more complex example, consider estimating a linear regression model for the data. Then our state is the coefficients of the model, and we can predict the next measurement from the linear equation. This is denoted $H$.
# * any control factors that affect the state transitions but are not part of the measurements. For instance, if our car were falling, gravity would be a control factor. If the noise does not have mean 0, it should be shifted over and the offset put into the control factors. The control factors are summarized in a matrix $B$ with time-varying control vector $u_t$, which give the offset $Bu_t$.
# * covariance matrices of the transition noise (i.e. noise in the evolution of the system) and measurement noise, denoted $Q$ and $R$, respectively.
# 2. Take as input an initial estimate of the state of the system and the error of the estimate, $\mu_0$ and $\sigma_0$.
# 3. At each timestep:
# * estimate the current state of the system $x_t$ using the transition matrix
# * take as input new measurements $z_t$
# * use the conditional probability of the measurements given the state, taking into account the uncertainties of the measurement and the state estimate, to update the estimated current state of the system $x_t$ and the covariance matrix of the estimate $P_t$
#
# [This graphic](https://upload.wikimedia.org/wikipedia/commons/a/a5/Basic_concept_of_Kalman_filtering.svg) illustrates the procedure followed by the algorithm.
#
# It's very important for the algorithm to keep track of the covariances of its estimates. This way, it can give us a more nuanced result than simply a point value when we ask for it, and it can use its confidence to decide how much to be influenced by new measurements during the update process. The more certain it is of its estimate of the state, the more skeptical it will be of measurements that disagree with the state.
#
# By default, the errors are assumed to be normally distributed, and this assumption allows the algorithm to calculate precise confidence intervals. It can, however, be implemented for non-normal errors.
# + [markdown] id="T9mCzHX3XNSl"
# ## Install dependencies
# + id="wEU9jYcSXNSl"
#git clone https://github.com/GoogleCloudPlatform/training-data-analyst
# !pip install pykalman
# + id="ByNlAc8tXNSn"
# !pip install qq-training-wheels auquan_toolbox --upgrade
# + id="zxtiPt_WXNSo"
# Import a Kalman filter and other useful libraries
from pykalman import KalmanFilter
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import poly1d
from backtester.dataSource.yahoo_data_source import YahooStockDataSource
from datetime import datetime
# + [markdown] id="7Rq0fMQCXNSp"
# # Toy example: falling ball
#
# Imagine we have a falling ball whose motion we are tracking with a camera. The state of the ball consists of its position and velocity. We know that we have the relationship $x_t = x_{t-1} + v_{t-1}\tau - \frac{1}{2} g \tau^2$, where $\tau$ is the time (in seconds) elapsed between $t-1$ and $t$ and $g$ is gravitational acceleration. Meanwhile, our camera can tell us the position of the ball every second, but we know from the manufacturer that the camera accuracy, translated into the position of the ball, implies variance in the position estimate of about 3 meters.
#
# In order to use a Kalman filter, we need to give it transition and observation matrices, transition and observation covariance matrices, and the initial state. The state of the system is (position, velocity), so it follows the transition matrix
# $$ \left( \begin{array}{cc}
# 1 & \tau \\
# 0 & 1 \end{array} \right) $$
#
# with offset $(-\tau^2 \cdot g/2, -\tau\cdot g)$. The observation matrix just extracts the position coordinate, (1 0), since we are measuring position. We know that the observation variance is 1, and transition covariance is 0 since we will be simulating the data the same way we specified our model. For the initial state, let's feed our model something bogus like (30, 10) and see how our system evolves.
# + id="AtK4cC0jXNSq"
# Time step (seconds) between filter updates.
tau = 0.1
# Set up the filter: state = (position, velocity); we only observe position.
# The transition offset encodes constant gravitational acceleration
# (dx = -g*tau^2/2, dv = -g*tau with g = 9.8).
kf = KalmanFilter(n_dim_obs=1, n_dim_state=2, # position is 1-dimensional, (x,v) is 2-dimensional
                  initial_state_mean=[30,10], # deliberately bogus initial guess
                  initial_state_covariance=np.eye(2),
                  transition_matrices=[[1,tau], [0,1]], # kinematics: x += v*tau
                  observation_matrices=[[1,0]], # measurement extracts position
                  observation_covariance=3, # camera noise variance
                  transition_covariance=np.zeros((2,2)), # model matches the simulation exactly
                  transition_offsets=[-4.9*tau**2, -9.8*tau])
# + id="oSgekuuBXNSr"
# Create a simulation of a ball falling for 40 units of time (each of length tau)
times = np.arange(40)
# True trajectory x(t) = -g/2 * (tau*times)^2 with g = 9.8.
actual = -4.9*tau**2*times**2
# Simulate the noisy camera data (additive Gaussian noise, variance ~ 3^2... 
# NOTE(review): 3*randn gives variance 9, while the filter assumes 3 — confirm intended).
sim = actual + 3*np.random.randn(40)
# Run filter on camera data
state_means, state_covs = kf.filter(sim)
# + id="jjEotpmpXNSs"
# Compare the filtered position estimate against the raw and true trajectories.
plt.figure(figsize=(15,7))
plt.plot(times, state_means[:,0])
plt.plot(times, sim)
plt.plot(times, actual)
plt.legend(['Filter estimate', 'Camera data', 'Actual'])
plt.xlabel('Time')
plt.ylabel('Height');
# + [markdown] id="Z_iif-97XNSs"
# At each point in time we plot the state estimate <i>after</i> accounting for the most recent measurement, which is why we are not at position 30 at time 0. The filter's attentiveness to the measurements allows it to correct for the initial bogus state we gave it. Then, by weighing its model and knowledge of the physical laws against new measurements, it is able to filter out much of the noise in the camera data. Meanwhile the confidence in the estimate increases with time, as shown by the graph below:
# + id="czbgryVzXNSt"
# Plot variances of x and v, extracting the appropriate values from the covariance matrix
# (diagonal entries [i,0,0] and [i,1,1] of each per-step covariance).
plt.figure(figsize=(15,7))
plt.plot(times, state_covs[:,0,0])
plt.plot(times, state_covs[:,1,1])
plt.legend(['Var(x)', 'Var(v)'])
plt.ylabel('Variance')
plt.xlabel('Time');
# + [markdown] id="ZG0Yj32VXNSt"
# The Kalman filter can also do <i>smoothing</i>, which takes in all of the input data at once and then constructs its best guess for the state of the system in each period post factum. That is, it does not provide online, running estimates, but instead uses all of the data to estimate the historical state, which is useful if we only want to use the data after we have collected all of it.
# + id="RfTObl7xXNSu"
# Use smoothing to estimate what the state of the system has been.
# Unlike filter(), smooth() uses the *entire* series, so early estimates
# also benefit from later measurements.
smoothed_state_means, _ = kf.smooth(sim)
# Plot results
plt.figure(figsize=(15,7))
plt.plot(times, smoothed_state_means[:,0])
plt.plot(times, sim)
plt.plot(times, actual)
plt.legend(['Smoothed estimate', 'Camera data', 'Actual'])
plt.xlabel('Time')
plt.ylabel('Height');
# + [markdown] id="TzZ2XY9_XNSu"
# # Example: Estimating Moving Average
#
# Because the Kalman filter updates its estimates at every time step and tends to weigh recent observations more than older ones, it can be used to estimate rolling parameters of the data. When using a Kalman filter, there's no window length that we need to specify. This is useful for computing the moving average or for smoothing out estimates of other quantities.
#
# Below, we'll use both a Kalman filter and an n-day moving average to estimate the rolling mean of a dataset. We construct the inputs to the Kalman filter as follows:
#
# * The mean is the model's guess for the mean of the distribution from which measurements are drawn. This means our prediction of the next value is equal to our estimate of the mean.
# * Hopefully the mean describes our observations well, hence it shouldn't change significantly when we add an observation. This implies we can assume that it evolves as a random walk with a small error term. We set the transition matrix to 1 and transition covariance matrix is a small number.
# * We assume that the observations have variance 1 around the rolling mean (1 is chosen randomly).
# * Our initial guess for the mean is 0, but the filter realizes that that is incorrect and adjusts.
# + id="mEB_R7_NXNSv"
from pykalman import KalmanFilter
from backtester.dataSource.yahoo_data_source import YahooStockDataSource
# Load pricing data for a security
startDateStr = '2012/12/31'
endDateStr = '2017/12/31'
cachedFolderName = './yahooData/'
dataSetId = 'testPairsTrading'
instrumentIds = ['SPY','MSFT','ADBE']
ds = YahooStockDataSource(cachedFolderName=cachedFolderName,
                          dataSetId=dataSetId,
                          instrumentIds=instrumentIds,
                          startDateStr=startDateStr,
                          endDateStr=endDateStr,
                          event='history')
# Get adjusted closing price
data = ds.getBookDataByFeature()['adjClose']
# Data for Adobe
S1 = data['ADBE']
# Data for Microsoft
S2 = data['MSFT']
# Take ratio of the adjusted closing prices
x = S1/S2
# Construct a Kalman filter: 1-D random-walk state model for the ratio's mean.
kf = KalmanFilter(transition_matrices = [1],
                  observation_matrices = [1],
                  initial_state_mean = 0,
                  initial_state_covariance = 1,
                  observation_covariance=1,
                  transition_covariance=.01)
# Use the observed values of the price to get a rolling mean
state_means, _ = kf.filter(x.values)
state_means = pd.Series(state_means.flatten(), index=x.index)
# Compute the rolling mean with various lookback windows.
# NOTE(review): the windows used are 10/30/60 days, but the variable names
# and the legend below say 30/60/90 — confirm which was intended.
mean30 = x.rolling(window = 10).mean()
mean60 = x.rolling(window = 30).mean()
mean90 = x.rolling(window = 60).mean()
# Plot original data and estimated mean (skip the first 60 days so the
# longest rolling window has valid values).
plt.figure(figsize=(15,7))
plt.plot(state_means[60:], '-b', lw=2, )
plt.plot(x[60:],'-g',lw=1.5)
plt.plot(mean30[60:], 'm', lw=1)
plt.plot(mean60[60:], 'y', lw=1)
plt.plot(mean90[60:], 'c', lw=1)
plt.title('Kalman filter estimate of average')
plt.legend(['Kalman Estimate', 'X', '30-day Moving Average', '60-day Moving Average','90-day Moving Average'])
plt.xlabel('Day')
plt.ylabel('Price');
# + [markdown] id="TF8dd8YIXNSv"
# ### Observations
#
# As you can see, the estimate from Kalman Filter is usually somewhere between day 30 and day 60 moving average. This could be because the Filter updates its knowledge of the world based on the most recent data. The advantage of the Kalman filter is that we don't need to select a window length. It makes predictions based on the underlying model (that we set parameters for) and the data itself. We do open ourselves up to overfitting with some of the initialization parameters for the filter, but those are slightly easier to objectively define. There's no free lunch and we can't eliminate overfitting, but a Kalman Filter is more rigorous than a moving average and generally better.
# + [markdown] id="7OyDf4exXNSw"
# Another interesting application of Kalman Filters, Beta Estimation for Linear Regression can be found here [Dr. <NAME>'s blog.](http://www.thealgoengineer.com/2014/online_linear_regression_kalman_filter/)
#
# + [markdown] id="0oZsNIl_XNSw"
# We'll be using Kalman filters for Pairs trading the subsequent notebook. Make sure you try to run the examples given here with various hyperparameters for the underlying Kalman filter model to get comfortable with the same and developing a better understanding in the process. For example you can try out the following:
# 1. Use multi dimensional transition matrices so as to use more of past information for making predictions at each point
# 2. Try different values of observation and transition covariance
# + [markdown] id="RF5Ue2n2XNSw"
# ## Example: Pairs Trading
#
# In the previous notebook we made use of 60 day window for calculating mean and standard deviation of our time series. Now we'll be replacing that with Kalman filters
# + [markdown] id="ZJivAMn4XNSx"
# ### Let's get the same data that we used in the previous notebook
# + id="KWpYOJ3qXNSx"
# Download 10 years of ADBE/MSFT adjusted closes for the pairs-trading example.
startDateStr = '2007/12/01'
endDateStr = '2017/12/01'
cachedFolderName = 'yahooData/'
dataSetId = 'testPairsTrading2'
instrumentIds = ['ADBE','MSFT']
ds = YahooStockDataSource(cachedFolderName=cachedFolderName,
                          dataSetId=dataSetId,
                          instrumentIds=instrumentIds,
                          startDateStr=startDateStr,
                          endDateStr=endDateStr,
                          event='history')
data = ds.getBookDataByFeature()['adjClose']
# + [markdown] id="PQFgHGjJXNSy"
# ### A quick visualization of error and standard deviations
# + id="TlGSYYBFXNSy"
# First 1762 trading days of each series (the in-sample window used below).
S1, S2 = data['ADBE'].iloc[:1762], data['MSFT'].iloc[:1762]
ratios = S1/S2
kf = KalmanFilter(transition_matrices = [1],
                  observation_matrices = [1],
                  initial_state_mean = 0,
                  initial_state_covariance = 1,
                  observation_covariance=1,
                  transition_covariance=.0001)
state_means, state_cov = kf.filter(ratios.values)
# NOTE(review): state_std is the std *of the covariance series*, not of the
# residuals — presumably intentional to match trade() below, but verify.
state_means, state_std = state_means.squeeze(), np.std(state_cov.squeeze())
# Plot the residual (ratio minus filtered mean) against +/- one per-step
# filter standard deviation (sqrt of the state covariance).
plt.figure(figsize=(15,7))
plt.plot(ratios.values - state_means, 'm', lw=1)
plt.plot(np.sqrt(state_cov.squeeze()), 'y', lw=1)
plt.plot(-np.sqrt(state_cov.squeeze()), 'c', lw=1)
plt.title('Kalman filter estimate')
plt.legend(['Error: real_value - mean', 'std', '-std'])
plt.xlabel('Day')
plt.ylabel('Value');
# + [markdown] id="LbT3z88yXNSy"
# We'll be using the z score in the same way as before. Our strategy is to go long or short only in the areas where the |error| is greater than one standard deviation. Since 1 day price could be noisy, we'll be using 5 day average for a particular day's price
# + [markdown] id="eG9a-IsdXNSy"
# #### Let's modify our trading function to make use of Kalman Filter while keeping the same logic for carrying out trades
# + id="cBrYt1g4XNSz"
def trade(S1, S2):
    """Simulate a Kalman-filter pairs-trading strategy on the ratio S1/S2.

    A 1-D random-walk Kalman filter estimates the rolling mean of the price
    ratio. A z-score is formed from the gap between a 5-day moving average
    of the ratio and the filtered mean, scaled by the std of the filter's
    covariance series. The strategy shorts the spread above z = +1, goes
    long below z = -1, and flattens all positions when |z| < 0.5.

    Args:
        S1, S2: price series (pandas Series) of the two instruments.

    Returns:
        Final cash balance of the simulated trades (open positions at the
        end are not marked to market).
    """
    ratio = S1/S2
    kf = KalmanFilter(transition_matrices=[1],
                      observation_matrices=[1],
                      initial_state_mean=0,
                      initial_state_covariance=1,
                      observation_covariance=1,
                      transition_covariance=.001)
    kf_means, kf_covs = kf.filter(ratio.values)
    kf_means = kf_means.squeeze()
    spread_std = np.std(kf_covs.squeeze())
    # A 5-day moving average smooths out single-day price noise; its first
    # few entries are NaN, so those days trigger no trades (all comparisons
    # with NaN are False).
    ratio_ma5 = ratio.rolling(window=5,
                              center=False).mean()
    z = (ratio_ma5 - kf_means)/spread_std
    # Start with no money and no positions.
    cash = 0
    units_s1 = 0
    units_s2 = 0
    for day in range(len(ratio)):
        if z[day] > 1:
            # Short the spread: sell one S1, buy `ratio` units of S2.
            cash += S1[day] - S2[day] * ratio[day]
            units_s1 -= 1
            units_s2 += ratio[day]
        elif z[day] < -1:
            # Long the spread: buy one S1, sell `ratio` units of S2.
            cash -= S1[day] - S2[day] * ratio[day]
            units_s1 += 1
            units_s2 -= ratio[day]
        elif abs(z[day]) < 0.5:
            # Close out all open positions at today's prices.
            cash += units_s1*S1[day] + S2[day] * units_s2
            units_s1 = 0
            units_s2 = 0
    return cash
# + id="VG8HMh-yXNSz"
# Run the Kalman-filter strategy on the same 1762-day ADBE/MSFT window.
trade(data['ADBE'].iloc[:1762], data['MSFT'].iloc[:1762])
# + [markdown] id="YgXW8pavXNSz"
# The strategy is still profitable! You can try changing the hyperparameters of the Kalman Filter and see how it affects the PnL. The results might not be always better than the mean over moving window. You can try this with other instruments as well.
| kalman_filters_crypto.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
# Load the Combined Cycle Power Plant train/test CSVs (train has 4 features + target).
data_test = np.genfromtxt("Files/0000000000002419_test_ccpp_x_test.csv", delimiter = ',')
data_train = np.genfromtxt("Files/0000000000002419_training_ccpp_x_y_train.csv" , delimiter = ',')
data_test.shape , data_train.shape
# First four columns are features, last column is the target.
x_train = data_train[:,0:4]
y_train = data_train[:,4]
x_train.shape , y_train.shape
from sklearn.ensemble import GradientBoostingRegressor
# NOTE(review): 10000 estimators is very large and may overfit — verify via CV.
alg1 = GradientBoostingRegressor(learning_rate=0.1,n_estimators=10000)
alg1.fit(x_train,y_train)
y_pred = alg1.predict(data_test)
y_pred
np.savetxt("Files/predict_ccpp.csv", y_pred, fmt="%.5f")
# R^2 on the *training* data (optimistic; not a generalization estimate).
alg1.score(x_train,y_train)
| .ipynb_checkpoints/combined_cycle_power_plant-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:fastscape_py36]
# language: python
# name: conda-env-fastscape_py36-py
# ---
# # Brief Introduction to Pandas
# ### Limitations of using numpy for tabular data
#
# We have seen how to use numpy to import tabular data stored in a CSV file
# +
import numpy as np
# Load the CSV as a plain float array, skipping the two header rows.
data = np.loadtxt('data.csv', delimiter=',', skiprows=2)
data
# -
# However, there are two limitations in using numpy for tabular data:
#
# - numpy arrays just stores the data, not the metadata (columns names, row index)
# - a numpy array has a single data type (e.g., integer, float), while tables may have columns of data with different types
# ### Here comes Pandas
#
# - Pandas (http://pandas.pydata.org/) is a widely-used Python library to handle tabular data
# - read from / write to different formats (CSV...)
#     - analytics, statistics, transformations, plotting (on top of matplotlib).
#
#
# - Borrows many features from R’s dataframes.
#     - A 2-dimensional table whose columns have names and potentially have different data types.
# We first import the library
import pandas as pd
# ## A real example
#
# First, look at the real dataset of land-surface temperature (region averages) that we will use in the project:
#
# http://berkeleyearth.org/data/
#
# http://berkeleyearth.lbl.gov/auto/Regional/TAVG/Text/
#
# This is a good example of real dataset used in science: text format, good documentation, human readable but a bit harder to deal with progammatically (e.g., column names as comments instead of strict CSV).
# Start by importing the packages that we will need for the project.
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# Load the data (a single region/file ; we won't use all the columns available). Note that we can provide an URL to `pandas.read_csv` !
# +
# Read the Berkeley Earth text file straight from its URL.
# NOTE(review): `delim_whitespace=True` and list-of-lists `parse_dates` are
# deprecated in pandas 2.x — fine for the pandas version this course targets.
df = pd.read_csv(
    "http://berkeleyearth.lbl.gov/auto/Regional/TAVG/Text/united-states-TAVG-Trend.txt",
    delim_whitespace=True,          # columns are separated by runs of spaces
    comment='%',                    # header lines in the file start with %
    header=None,
    parse_dates=[[0,1]],            # combine year + month into one datetime column
    index_col=(0),                  # use that combined column as the index
    usecols=(0, 1, 2, 3, 8, 9),     # keep only the columns we name below
    names=("year", "month", "anomaly", "uncertainty", "10-year-anomaly", "10-year-uncertainty")
)
df.index.name = "date"
# ## First exercice: data inspection, basic statistics and plotting
#
# Objectives:
#
# 1. show the head and a sample of the data
# 2. plot the data
# * plot 'anomaly', playing around with `linewidth` and opacity (`alpha`)
# * plot '10-year-anomaly'
# * plot '10-year-uncertainty' around it (`plt.fill_between`)
# * try adjusting the size of the figure
# * statistics of 'anomaly'
# * print descriptive statistics, print the mean of each column
# * plot distribution of 'anomaly' using `hist`, playing around with `bins`
# * plot distribution of 'anomaly' separated by years but in one plot
# 1. 1850-1900
# 2. 1950-2000
#
# ### Solution
# ## Second exercise: more advanced pandas analytics features
#
# Try re-calculating the 10-year anomaly from the anomaly (rolling mean) using pandas. Assign the results to a new column '10-year-anomaly-pandas' in the dataframe. Compare in a plot these results with the '10-year-anomaly' column.
#
# Tip: look at `rolling` and `mean` in the pandas documentation.
#
# ### Solution
# ## Reuse the code for other data (countries)
#
# Create a function that takes a country name as input.
| notebooks/lectures_ready/pandas_intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import unittest
import import_ipynb
import pandas as pd
import pandas.testing as pd_testing
import tensorflow as tf
class TestExercise04_01(unittest.TestCase):
    """Smoke test: importing the exercise notebook executes it end-to-end,
    after which the final episode reward must exceed 150."""

    def setUp(self):
        # import_ipynb makes the notebook importable; importing it runs it.
        # NOTE(review): this file lives under Exercise04_04 but imports
        # Exercise04_01 — confirm which exercise module is intended.
        import Exercise04_01
        self.exercises = Exercise04_01

    def test_reward(self):
        # episode_rew is expected to be defined at the notebook's top level.
        self.assertEqual(self.exercises.episode_rew > 150, True)
# -
# Collect the test case into a suite and run it with verbose output
# (the notebook cannot rely on unittest.main()).
suite = unittest.TestLoader().loadTestsFromTestCase(TestExercise04_01)
unittest.TextTestRunner(verbosity=2).run(suite)
| Chapter04/Exercise04_04/TestExercise04_04.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Experiment Initialization
# Here, I define the terms of my experiment, among them the location of the files in S3 (bucket and folder name), and each of the video prefixes (everything before the file extension) that I want to track.
#
# Note that these videos should be similar-ish: while we can account for differences in mean intensities between videos, particle sizes should be approximately the same, and (slightly less important) particles should be moving at about the same order of magnitude speed. In this experiment, these videos were taken in 0.4% agarose gel at 100x magnification and 100.02 fps shutter speeds with nanoparticles of about 100nm in diameter.
# +
# Experiment bookkeeping: Cloudknot run id, S3 location, and the video set.
result_futures = {}
start_knot = 100  # must be a unique number for every run on Cloudknot
remote_folder = '10_05_18_coverage'  # folder in AWS S3 containing files to be analyzed
bucket = 'evanepst.data'
vids = 10
types = ['0_10xs', '0_15xs', '0_20xs', '0_25xs', '0_40xs', '0_50xs', '0_60xs', '0_75xs', '1xs', 'PSCOOH']
# One tracking prefix per (formulation, video index), e.g. '5mM_0_10xs_XY01'.
to_track = ['5mM_{}_XY{}'.format(typ, '%02d' % num)
            for typ in types
            for num in range(1, vids + 1)]
# -
to_track
# The videos used with this analysis are fairly large (2048 x 2048 pixels and 651 frames), and in cases like this, the tracking algorithm can quickly eat up RAM. In this case, we chose to crop the videos to 512 x 512 images such that we can run our jobs on smaller EC2 instances with 16GB of RAM.
#
# Note that larger jobs can be made with user-defined functions such that splitting isn't necessary-- or perhaps an intermediate amount of memory that contains splitting, tracking, and msd calculation functions all performed on a single EC2 instance.
#
# The compiled functions in the knotlets module require access to buckets on AWS. In this case, we will be using a public (read-only) bucket. Users who want to run this notebook on their own will have to transfer files from nancelab.publicfiles to their own bucket, as the workflow requires writing to S3 buckets.
import diff_classifier.knotlets as kn
for prefix in to_track:
kn.split(prefix, remote_folder=remote_folder, bucket=bucket)
# ## Tracking predictor
# Tracking normally requires user input in the form of tracking parameters e.g. particle radius, linking max distance, max frame gap etc. When large datasets aren't required, each video can be manageably manually tracked using the TrackMate GUI. However, when datasets get large e.g. >20 videos, this can become extremely arduous. For videos that are fairly similar, you can get away with using similar tracking parameters across all videos. However, one parameter that is a little noisier than the others is the quality filter value. Quality is a numerical value that approximates how likely a particle is to be "real."
#
# In this case, I built a predictor that estimates the quality filter value based on intensity distributions from the input images. Using a relatively small training dataset (5-20 videos), users can get fairly good estimates of quality filter values that can be used in parallelized tracking workflows.
#
# Note: in the current setup, the predictor should be run in Python 3. While the code will run in Python 3, there are differences between the random number generators in Python2 and Python3 that I was not able to control for.
import os
import diff_classifier.imagej as ij
import boto3
import os.path as op
import diff_classifier.aws as aws
import diff_classifier.knotlets as kn
import numpy as np
from sklearn.externals import joblib
# The regress_sys function should be run twice. When have_output is set to False, it generates a list of files that the user should manually track using Trackmate. Once the quality filter values are found, they can be used as input (y) to generate a regress object that can predict quality filter values for additional videos. Once y is assigned, set have_output to True and re-run the cell.
# +
tnum=15 #number of training datasets
# Enumerate every 4x4 sub-video name; regress_sys samples its training subset
# from this list when randselect=True.
pref = []
for num in to_track:
    for row in range(0, 4):
        for col in range(0, 4):
            pref.append("{}_{}_{}".format(num, row, col))
# NOTE(review): y is generated randomly here as a stand-in; per the text above
# it should contain the quality cutoffs found by manually tracking the training
# videos in TrackMate — confirm before relying on the fitted regressor.
y = 20+1.5*np.random.rand(tnum)
# Creates regression object based on a training dataset composed of input images
# and manually calculated quality cutoffs from tracking with the GUI interface.
regress = ij.regress_sys(remote_folder, pref, y, tnum, randselect=True,
                         have_output=True, bucket_name=bucket)
#Read up on how regress_sys works before running.
# +
#Pickle object
# Serialize the trained regression object and archive it in S3 so the
# Cloudknot workers can download it for per-video quality prediction.
filename = 'regress1.obj'
with open(filename,'wb') as fp:
    joblib.dump(regress,fp)
import boto3
# NOTE(review): this client appears unused — aws.upload_s3 manages its own
# boto3 session internally; confirm and remove if so.
s3 = boto3.client('s3')
aws.upload_s3(filename, remote_folder+'/'+filename, bucket_name=bucket)
# -
# Users should input all tracking parameters into the tparams object. Note that the quality value will be overwritten by values found using the quality predictor found above.
# +
# TrackMate tracking parameters.  The 'quality' entry is only a placeholder:
# it is overwritten per video by the regression-based quality predictor.
tparams1 = dict(radius=3.5, threshold=0.0, do_median_filtering=False,
                quality=5.0, xdims=(0, 511), ydims=(1, 511),
                median_intensity=300.0, snr=0.0, linking_max_distance=10.0,
                gap_closing_max_distance=15.0, max_frame_gap=7,
                track_duration=20.0)
# (An alternative parameter set with radius 4.0 / quality 10.0 was kept
# commented out in earlier revisions of this cell.)
# -
# ## Cloudknot setup
# Cloudknot requires the user to define a function that will be sent to multiple computers to run. In this case, the function knotlets.tracking will be used. We create a docker image that has the required installations (defined by the requirements.txt file from diff_classifier on Github, and the base Docker Image below that has Fiji pre-installed in the correct location).
#
# Note that I modify the Docker image below such that the correct version of boto3 is installed. For some reason, versions later than 1.5.28 error out, so I specified 5.28 as the correct version. Run my_image.build below to double-check that the Docker image is successfully built prior to submitting the job to Cloudknot.
# +
import cloudknot as ck
import os.path as op
# NOTE(review): the parentheses do NOT make a tuple here (no trailing comma),
# so github_installs is a plain string — confirm cloudknot accepts both forms.
github_installs=('https://github.com/ccurtis7/diff_classifier.git@Chad')
#my_image = ck.DockerImage(func=kn.tracking, base_image='arokem/python3-fiji:0.3', github_installs=github_installs)
# Docker image bundling Fiji (base image) plus diff_classifier, wrapping the
# MSD-assembly function for Cloudknot.
my_image = ck.DockerImage(func=kn.assemble_msds, base_image='arokem/python3-fiji:0.3', github_installs=github_installs)
docker_file = open(my_image.docker_path)
docker_string = docker_file.read()
docker_file.close()
# Patch the generated requirements.txt to pin boto3: versions newer than
# 1.5.28 error out (see the notebook text above).
req = open(op.join(op.split(my_image.docker_path)[0], 'requirements.txt'))
req_string = req.read()
req.close()
# NOTE(review): assumes the boto3 spec is the FIRST line of requirements.txt
# and replaces its last 4 characters before the newline with '5.28' — brittle;
# confirm the generated file layout before reusing.
new_req = req_string[0:req_string.find('\n')-4]+'5.28'+ req_string[req_string.find('\n'):]
req_overwrite = open(op.join(op.split(my_image.docker_path)[0], 'requirements.txt'), 'w')
req_overwrite.write(new_req)
req_overwrite.close()
# -
my_image.build("0.1", image_name="test_image")
# The object all_maps is an iterable containing all the inputs sent to Cloudknot. This is useful, because if the user needs to modify some of the tracking parameters for a single video, this can be done prior to submission to Cloudknot.
# +
# Build the Cloudknot inputs: one argument tuple per 4x4-split sub-video of
# every video prefix (the last elements are rows, cols, frame size, tparams).
names = []
all_maps = []
for prefix in to_track:
    for row in range(4):
        for col in range(4):
            subvideo = '{}_{}_{}'.format(prefix, row, col)
            names.append(subvideo)
            # NOTE(review): 'regress.obj' differs from the 'regress1.obj'
            # uploaded above — confirm which object the workers should load.
            all_maps.append((subvideo, remote_folder, bucket, 'regress.obj',
                             4, 4, (512, 512), tparams1))
all_maps
# -
# The Cloudknot knot object sets up the compute environment which will run the code. Note that the name must be unique. Every time you submit a new knot, you should change the name. I do this with the variable start_knot, which I vary for each run.
#
# If larger jobs are anticipated, users can adjust both RAM and storage with the memory and image_id variables. Memory specifies the amount of RAM to be used. Users can build a customized AMI with as much space as they need, and enter the ID into image_ID. Read the Cloudknot documentation for more details.
ck.aws.set_region('us-east-1')
# Spot-instance compute environment: 16 GB workers running the tracking
# function.  The knot name must be unique per submission (hence start_knot).
knot = ck.Knot(name='download_and_track_{}_b{}'.format('Evan', start_knot),
               docker_image = my_image,
               memory = 16000,
               resource_type = "SPOT",
               bid_percentage = 100,
               #image_id = 'ami-015a1b4cd3895860b', #May need to change this line
               pars_policies=('AmazonS3FullAccess',),
               )
# Fan the argument tuples out to AWS Batch (starmap unpacks each tuple).
result_futures = knot.map(all_maps, starmap=True)
# +
# Re-submission audit: find sub-videos whose Traj_*.csv output never appeared
# in S3 (failed or lost Batch jobs) and rebuild Cloudknot inputs for just those.
missing = []
all_maps3 = []
import boto3
import botocore
s3 = boto3.resource('s3')
for name in names:
    try:
        # HEAD the expected output object; raises ClientError (code 404) if absent.
        s3.Object(bucket, '{}/Traj_{}.csv'.format(remote_folder, name)).load()
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "404":
            missing.append(name)
            all_maps3.append((name, remote_folder, bucket, 'regress1.obj',
                              4, 4, (512, 512), tparams1))
        else:
            print('Something else has gone wrong')
# -
all_maps3
knot2 = ck.Knot(name='download_and_track_{}_b{}'.format('Evan3', start_knot),
docker_image = my_image,
memory = 16000,
resource_type = "SPOT",
bid_percentage = 20,
#image_id = 'ami-015a1b4cd3895860b', #May need to change this line
pars_policies=('AmazonS3FullAccess',),
)
result_futures2 = knot2.map(all_maps3[0:200], starmap=True)
knot2.clobber()
knot6 = ck.Knot(name='download_and_track_{}_b{}'.format('Evan6', start_knot),
docker_image = my_image,
memory = 16000,
resource_type = "SPOT",
bid_percentage = 50,
#image_id = 'ami-015a1b4cd3895860b', #May need to change this line
pars_policies=('AmazonS3FullAccess',),
)
result_futures6 = knot6.map(all_maps3[200:400], starmap=True)
knot6.clobber()
knot7 = ck.Knot(name='download_and_track_{}_b{}'.format('Evan7', start_knot),
docker_image = my_image,
memory = 16000,
resource_type = "SPOT",
bid_percentage = 80,
#image_id = 'ami-015a1b4cd3895860b', #May need to change this line
pars_policies=('AmazonS3FullAccess',),
)
result_futures7 = knot7.map(all_maps3[400:600], starmap=True)
knot7.clobber()
ck.aws.set_region('us-west-1')
knot8 = ck.Knot(name='download_and_track_{}_b{}'.format('Evan8', start_knot),
docker_image = my_image,
memory = 16000,
resource_type = "SPOT",
bid_percentage = 100,
#image_id = 'ami-015a1b4cd3895860b', #May need to change this line
pars_policies=('AmazonS3FullAccess',),
)
result_futures8 = knot8.map(all_maps3[600:], starmap=True)
result_futures9 = knot8.map(all_maps3[400:600], starmap=True)
ck.aws.set_region('us-east-2')
knot10 = ck.Knot(name='download_and_track_{}_b{}'.format('Evan10', start_knot),
docker_image = my_image,
memory = 16000,
resource_type = "SPOT",
bid_percentage = 100,
#image_id = 'ami-015a1b4cd3895860b', #May need to change this line
pars_policies=('AmazonS3FullAccess',),
)
result_futures10 = knot10.map(all_maps3[0:200], starmap=True)
ck.aws.set_region('us-east-1')
# +
# Inputs for the MSD-assembly jobs: one tuple per whole video
# (prefix, S3 location, frame size, frame count, split rows, split cols).
names = []
all_maps2 = []
for prefix in to_track:
    all_maps2.append((prefix, remote_folder, bucket, (512, 512), 651, 4, 4))
all_maps2
# -
knot11 = ck.Knot(name='download_and_track_{}_b{}'.format('Evan11', start_knot),
docker_image = my_image,
memory = 144000,
resource_type = "SPOT",
bid_percentage = 100,
#image_id = 'ami-015a1b4cd3895860b', #May need to change this line
pars_policies=('AmazonS3FullAccess',),
)
result_futures11 = knot11.map(all_maps2[0:10], starmap=True)
ck.aws.set_region('us-east-1')
knot11.clobber()
ck.aws.set_region('us-east-1')
result_futures19 = knot11.map(all_maps2[10:20], starmap=True)
ck.aws.set_region('us-east-1')
result_futures19 = knot11.map(all_maps2[30:50], starmap=True)
len(all_maps2)
ck.aws.set_region('us-east-2')
knot12 = ck.Knot(name='download_and_track_{}_b{}'.format('Evan12', start_knot),
docker_image = my_image,
memory = 32000,
resource_type = "SPOT",
bid_percentage = 100,
#image_id = 'ami-015a1b4cd3895860b', #May need to change this line
pars_policies=('AmazonS3FullAccess',),
)
result_futures12 = knot12.map(all_maps2[10:20], starmap=True)
ck.aws.set_region('us-east-2')
knot12.clobber()
ck.aws.set_region('us-west-1')
knot13 = ck.Knot(name='download_and_track_{}_b{}'.format('Evan13', start_knot),
docker_image = my_image,
memory = 32000,
resource_type = "SPOT",
bid_percentage = 100,
#image_id = 'ami-015a1b4cd3895860b', #May need to change this line
pars_policies=('AmazonS3FullAccess',),
)
result_futures13 = knot13.map(all_maps2[20:30], starmap=True)
knot13.clobber()
# +
ck.aws.set_region('us-east-1')
knot14 = ck.Knot(name='download_and_track_{}_b{}'.format('Evan14', start_knot),
docker_image = my_image,
memory = 16000,
resource_type = "SPOT",
bid_percentage = 100,
#image_id = 'ami-015a1b4cd3895860b', #May need to change this line
pars_policies=('AmazonS3FullAccess',),
)
result_futures14 = knot14.map(all_maps2[30:50], starmap=True)
# -
ck.aws.set_region('us-east-1')
knot14.clobber()
# +
ck.aws.set_region('us-east-2')
knot16 = ck.Knot(name='download_and_track_{}_b{}'.format('Evan16', start_knot),
docker_image = my_image,
memory = 16000,
resource_type = "SPOT",
bid_percentage = 100,
#image_id = 'ami-015a1b4cd3895860b', #May need to change this line
pars_policies=('AmazonS3FullAccess',),
)
result_futures16 = knot16.map(all_maps2[50:70], starmap=True)
# -
ck.aws.set_region('us-east-2')
result_futures30 = knot16.map(all_maps2[40:50], starmap=True)
result_futures31 = knot16.map(all_maps2[20:30], starmap=True)
ck.aws.set_region('us-east-2')
knot16.clobber()
# +
ck.aws.set_region('us-west-1')
knot17 = ck.Knot(name='download_and_track_{}_b{}'.format('Evan17', start_knot),
docker_image = my_image,
memory = 16000,
resource_type = "SPOT",
bid_percentage = 100,
#image_id = 'ami-015a1b4cd3895860b', #May need to change this line
pars_policies=('AmazonS3FullAccess',),
)
result_futures17 = knot17.map(all_maps2[70:100], starmap=True)
# -
ck.aws.set_region('us-west-1')
knot17.clobber()
# +
ck.aws.set_region('us-west-1')
knot18 = ck.Knot(name='download_and_track_{}_b{}'.format('Evan18', start_knot),
docker_image = my_image,
memory = 16000,
resource_type = "SPOT",
bid_percentage = 100,
#image_id = 'ami-015a1b4cd3895860b', #May need to change this line
pars_policies=('AmazonS3FullAccess',),
)
result_futures18 = knot18.map(all_maps2[20:30], starmap=True)
# -
ck.aws.set_region('us-west-1')
knot18.clobber()
# +
ck.aws.set_region('us-west-1')
knot20 = ck.Knot(name='download_and_track_{}_b{}'.format('Evan20', start_knot),
docker_image = my_image,
memory = 16000,
#resource_type = "SPOT",
#bid_percentage = 100,
#image_id = 'ami-015a1b4cd3895860b', #May need to change this line
pars_policies=('AmazonS3FullAccess',),
)
result_futures20 = knot20.map(all_maps2[72:76]+all_maps2[77:80]+all_maps2[86:88]+all_maps2[90:100], starmap=True)
# -
ck.aws.set_region('us-west-1')
result_futures32 = knot20.map(all_maps2[0:10], starmap=True)
result_futures33 = knot20.map(all_maps2[10:20], starmap=True)
ck.aws.set_region('us-west-1')
knot20.clobber()
knot.clobber()
knot2.clobber()
# +
names = []
all_maps2 = []
for prefix in to_track:
all_maps2.append((prefix, remote_folder, bucket, (512, 512), 651, 4, 4))
all_maps2
# -
knot3 = ck.Knot(name='download_and_track_{}_b{}'.format('Evan3', start_knot),
docker_image = my_image,
memory = 144000,
resource_type = "SPOT",
bid_percentage = 100,
#image_id = 'ami-015a1b4cd3895860b', #May need to change this line
pars_policies=('AmazonS3FullAccess',),
)
result_futures3 = knot3.map(all_maps2, starmap=True)
knot3.clobber()
import diff_classifier.heatmaps as hm
hm.plot_trajectories('5mM_0_15xs_XY01', upload=True, bucket=bucket, remote_folder=remote_folder)
ck.aws.set_region('us-west-1')
ck.aws.set_region('us-east-1')
result_futures3 = knot3.map(all_maps2, starmap=True)
knot3.clobber()
knot4 = ck.Knot(name='download_and_track_{}_b{}'.format('chadj', start_knot),
docker_image = my_image,
memory = 144000,
resource_type = "SPOT",
bid_percentage = 100,
#image_id = 'ami-015a1b4cd3895860b', #May need to change this line
pars_policies=('AmazonS3FullAccess',),
)
result_futures4 = knot4.map(all_maps2, starmap=True)
for i in range(0, 4):
for j in range(0, 4):
names.append('{}_{}_{}.tif'.format(prefix, i, j))
print(prefix)
for name in names:
row = int(name.split(prefix)[1].split('.')[0].split('_')[1])
all_maps2
ck.aws.get_region()
knot3 = ck.Knot(name='download_and_track_{}_b{}'.format('chad33', start_knot),
docker_image = my_image,
memory = 64000,
resource_type = "SPOT",
bid_percentage = 100,
#image_id = 'ami-015a1b4cd3895860b', #May need to change this line
pars_policies=('AmazonS3FullAccess',),
)
result_futures5 = knot3.map(all_maps2, starmap=True)
# +
# Second audit pass: record sub-videos whose Traj_*.csv output is still
# absent from S3 after the re-submissions above.
missing = []
all_maps2 = []
import boto3
import botocore
s3 = boto3.resource('s3')
for name in names:
    try:
        # HEAD the expected output; ClientError with code 404 means it is missing.
        s3.Object(bucket, '{}/Traj_{}.csv'.format(remote_folder, name)).load()
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "404":
            missing.append(name)
            #all_maps2.append((name, remote_folder, bucket, 'regress.obj',
            #                 4, 4, (512, 512), tparams2))
        else:
            print('Something else has gone wrong')
# -
knot3.clobber()
ck.aws.get_region()
ck.aws.set_region('us-east-1')
ck.aws.get_region()
knot2 = ck.Knot(name='download_and_track_{}_b{}'.format('chad2', start_knot),
docker_image = my_image,
memory = 144000,
resource_type = "SPOT",
bid_percentage = 100,
#image_id = 'ami-0e00afdf500081a0d', #May need to change this line
pars_policies=('AmazonS3FullAccess',))
result_futures2 = knot2.map(all_maps, starmap=True)
knot3 = ck.Knot(name='download_and_track_{}_b{}'.format('chad3', start_knot),
docker_image = my_image,
memory = 144000,
resource_type = "SPOT",
bid_percentage = 100,
#image_id = 'ami-0e00afdf500081a0d', #May need to change this line
pars_policies=('AmazonS3FullAccess',))
result_futures3 = knot3.map(all_maps, starmap=True)
knot4 = ck.Knot(name='download_and_track_{}_b{}'.format('chad4', start_knot),
docker_image = my_image,
memory = 144000,
resource_type = "SPOT",
bid_percentage = 100,
#image_id = 'ami-0e00afdf500081a0d', #May need to change this line
pars_policies=('AmazonS3FullAccess',))
result_futures4 = knot4.map(all_maps, starmap=True)
knot5 = ck.Knot(name='download_and_track_{}_b{}'.format('chad5', start_knot),
docker_image = my_image,
memory = 144000,
resource_type = "SPOT",
bid_percentage = 100,
#image_id = 'ami-0e00afdf500081a0d', #May need to change this line
pars_policies=('AmazonS3FullAccess',))
result_futures5 = knot5.map(all_maps, starmap=True)
ck.aws.set_region('eu-west-1')
knot5.clobber()
tparams2 = {'radius': 3.5, 'threshold': 0.0, 'do_median_filtering': False,
'quality': 10.0, 'xdims': (0, 511), 'ydims': (1, 511),
'median_intensity': 300.0, 'snr': 0.0, 'linking_max_distance': 15.0,
'gap_closing_max_distance': 22.0, 'max_frame_gap': 5,
'track_duration': 20.0}
all_maps3
import diff_classifier.aws as aws
# +
# Backfill the missing trajectory files by copying them from an earlier
# experiment's archived folder into the current remote folder.
old_folder = 'Gel_Studies/08_14_18_gel_validation/old_msds2'
for name in missing:
    filename = 'Traj_{}.csv'.format(name)
    aws.download_s3('{}/{}'.format(old_folder, filename), filename, bucket_name=bucket)
    aws.upload_s3(filename, '{}/{}'.format(remote_folder, filename), bucket_name=bucket)
# -
# Users can monitor the progress of their job in the Batch interface. Once the code is complete, users should clobber their knot to make sure that all AWS resources are removed.
knot.clobber()
# ## Downstream analysis and visualization
# The knotlet.assemble_msds function (which can also potentially be submitted to Cloudknot as well for large jobs) calculates the mean squared displacements and trajectory features from the raw trajectory csv files found from the Cloudknot submission. It accesses them from the S3 bucket to which they were saved.
# Assemble MSD and feature CSVs for every video from the per-sub-video
# trajectory files stored in S3.
for prefix in to_track:
    kn.assemble_msds(prefix, remote_folder, bucket=bucket)
    print('Successfully output msds for {}'.format(prefix))
# NOTE(review): this re-runs two prefixes against a different bucket
# ('ccurtis.data') — confirm which bucket actually holds their trajectories.
for prefix in to_track[5:7]:
    kn.assemble_msds(prefix, remote_folder, bucket='ccurtis.data')
    print('Successfully output msds for {}'.format(prefix))
# Rebuild the Cloudknot tracking inputs using the uploaded regression object.
all_maps2 = []
for prefix in to_track:
    # BUG FIX: the original referenced the undefined name `tparams`, which
    # raises NameError at runtime — this notebook defines only tparams1 and
    # tparams2, and tparams1 is the set used by every other submission cell.
    all_maps2.append((prefix, remote_folder, bucket, 'regress100.obj',
                      4, 4, (512, 512), tparams1))
knot = ck.Knot(name='download_and_track_{}_b{}'.format('chad', start_knot),
docker_image = my_image,
memory = 16000,
resource_type = "SPOT",
bid_percentage = 100,
#image_id = 'ami-0e00afdf500081a0d', #May need to change this line
pars_policies=('AmazonS3FullAccess',))
# Diff_classifier includes some useful imaging tools as well, including checking trajectories, plotting heatmaps of trajectory features, distributions of diffusion coefficients, and MSD plots.
import diff_classifier.heatmaps as hm
import diff_classifier.aws as aws
# +
# Pull the assembled MSD and feature tables for one example video locally
# so they can be plotted with the heatmap utilities below.
prefix = to_track[1]
msds = 'msd_{}.csv'.format(prefix)
feat = 'features_{}.csv'.format(prefix)
aws.download_s3('{}/{}'.format(remote_folder, msds), msds, bucket_name=bucket)
aws.download_s3('{}/{}'.format(remote_folder, feat), feat, bucket_name=bucket)
# -
hm.plot_trajectories(prefix, upload=False, figsize=(8, 8))
geomean, geoSEM = hm.plot_individual_msds(prefix, x_range=10, y_range=300, umppx=1, fps=1, upload=False)
hm.plot_heatmap(prefix, upload=False)
hm.plot_particles_in_frame(prefix, y_range=6000, upload=False)
missing = ['PS_COOH_2mM_XY05_1_1', 'PS_NH2_2mM_XY04_2_1']
all_maps
kn.tracking(missing[0], remote_folder, bucket=bucket, tparams=tparams1)
kn.tracking(missing[1], remote_folder, bucket=bucket, tparams=tparams1)
| notebooks/development/10_08_18_cloudknot_Evan_coverage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 1
# +
from collections import defaultdict
import os, sys
sys.path.insert(0, "/mnt/f/dev/git/miRExplore/python/")
import scispacy
import spacy
from spacy import displacy
#nlp = spacy.load('/mnt/d/spacy/models/en_core_sci_lg-0.2.4/en_core_sci_lg/en_core_sci_lg-0.2.4/')
# -
nlp_weblg = spacy.load('/mnt/d/spacy/models/en_core_web_lg-2.2.0/en_core_web_lg/en_core_web_lg-2.2.0/')
nlp = spacy.load('/mnt/f/spacy/en_core_sci_lg-0.2.4/en_core_sci_lg/en_core_sci_lg-0.2.4/')
nlp_ent = spacy.load("/mnt/f/spacy/en_ner_bionlp13cg_md-0.2.4/en_ner_bionlp13cg_md/en_ner_bionlp13cg_md-0.2.4")
nlp = spacy.load('/mnt/f/spacy/en_core_sci_lg-0.2.5/en_core_sci_lg/en_core_sci_lg-0.2.5/')
nlp_ent = spacy.load("/mnt/f/spacy/en_ner_bionlp13cg_md-0.2.5/en_ner_bionlp13cg_md/en_ner_bionlp13cg_md-0.2.5")
# !ls /mnt/d/spacy/models/en_core_web_lg-2.2.0/en_core_web_lg/en_core_web_lg-2.2.0
import textmining.MirGeneRelCheck
# +
def analyse(testCase, dist=80):
    """Run MirGeneRelCheck on a single test case and render its parse.

    testCase is a (sentence, left_token_index, right_token_index) triple;
    the miRNA token is moved into the left slot before the relation check.
    dist sets the displacy arc distance for the dependency rendering.
    """
    checker = textmining.MirGeneRelCheck.MirGeneRelCheck()
    doc = nlp(testCase[0])
    lWord = doc[testCase[1]]
    rWord = doc[testCase[2]]
    # Dump the token-level parse for debugging.
    for t in doc:
        print(t.idx, t.i, t, t.pos_, t.dep_, t.head, t.head.idx)
    print(lWord, rWord)
    # Ensure the miRNA-like token ("mir"/"let") ends up as the left word.
    left_text = str(lWord).lower()
    if "mir" not in left_text and "let" not in left_text:
        lWord, rWord = rWord, lWord
    print(lWord, rWord)
    checkResults = checker.checkRelation(doc, lWord, rWord, verbose=True)
    print(checkResults)
    displacy.render(doc, style="dep", options={"compact": True, "distance": dist})
# +
# %aimport textmining.MirGeneRelCheck
testcase = (u"Functional association between miR 124a and CDK6 in medulloblastoma was established using luciferase assays.", 4, 6)
analyse(testcase)
# +
# %aimport textmining.MirGeneRelCheck
testcase = (u"Overexpression of mir-7 and mir-153 significantly reduces endogenous alpha-synuclein levels, whereas inhibition of mir-7 and mir-153 enhances translation of a luciferase construct bearing the alpha-s", 6, 10)
analyse(testcase)
# -
analyse((u"The clinical relevance of our findings was evaluated in HIV-encephalitis (HIVE) brain samples in which decreased levels of MCP-2 and increased levels of mir-146a were observed, suggesting a role for mir-146a in the maintenance of HIV-mediated chronic inflammation of the brain.", 20, 25)
)
# +
doc = nlp(testcase[0])
allTokens = {t.i: t for t in doc}
# -
[x for x in allTokens[23].children]
allRels[:5]
# +
# %aimport textmining.MirGeneRelCheck
testCase = (u"We show here that miR-29a, -29b-1, and -9 can regulate BACE1 expression in vitro.", 4, 12)
testCase = (u"It has been predicted that miR-146a may target Tata Binding Protein (TBP).",5, 12)
testCase = (u"Our results showed that the expression of miR-106b was inversely correlated with TβRII protein levels and miR-106b can directly inhibit the TβRII translation in vitro.", 7, 21)
relCheck = textmining.MirGeneRelCheck.MirGeneRelCheck()
doc = nlp(testCase[0])
mirword = doc[testCase[1]]
geneword = doc[testCase[2]]
verbose=False
# -
conjResult, conjs = relCheck.checkCommonConj(doc, mirword, geneword, verbose)
sdpPass, passive, negated = relCheck.checkSDP(doc, mirword, geneword, verbose)
compPass = relCheck.checkCompartments(doc, mirword, geneword, verbose)
sigPathway = relCheck.checkSurContext(doc, mirword, geneword, verbose)
sdpRes = relCheck._get_sdp_path(doc, mirword.i, geneword.i)
sdpRes
# +
# %aimport textmining.MirGeneRelCheck
testCase = (u"We show here that miR-29a, -29b-1, and -9 can regulate BACE1 expression in vitro.", 4, 12)
testCase = (u"It has been predicted that miR-146a may target Tata Binding Protein (TBP).",5, 12)
testCase = (u"Our results showed that the expression of miR-106b was inversely correlated with TβRII protein levels and miR-106b can directly inhibit the TβRII translation in vitro.", 7, 21)
testCase = (u"The reduction of the affinity of miR-433 to the 3' UTR would result in increased FGF20 expression and upregulation of alpha-synuclein, which could in turn promote dopaminergic neurons degeneration.", 6, 16)
testCase = (u"These findings identify miR-29b as a novel posttranscriptional regulator of PGRN expression, raising the possibility that miR-29b or other miRNAs might be targeted therapeutically to increase hPGRN levels in some FTD patients.", 3,10)
relCheck = textmining.MirGeneRelCheck.MirGeneRelCheck()
doc = nlp(testCase[0])
mirword = doc[testCase[1]]
geneword = doc[testCase[2]]
verbose=False
from collections import Counter
# +
def loadStems():
    """Load regulation stems and synonyms from allrels.csv.

    Returns (allStems, allRels, stem2class): allRels is a list of
    (class, stem) pairs, stem2class maps every stem/synonym to its
    (possibly renamed) regulation class, allStems lists its keys.
    """
    print("Loading stems")
    # Collapse the rarely-used classes into the three canonical ones.
    remap = {
        'INT': 'NEU',
        "BOOST": "POS",
        "COMBINE": "NEU",
        "CHANGE": "NEU",
    }
    relations = []
    class_by_stem = {}
    with open("/mnt/d/dev/git/miRExplore/python/allrels.csv") as handle:
        for raw in handle:
            fields = raw.strip().split(",", 1)
            pieces = fields[1].split(":")
            # Optional ':'-separated synonym list, '|'-delimited.
            synonyms = pieces[1].split("|") if len(pieces) > 1 else []
            stem = pieces[0]
            cls = remap.get(fields[0], fields[0])
            class_by_stem[stem] = cls
            for synonym in synonyms:
                # A synonym never overrides a class assigned earlier.
                class_by_stem.setdefault(synonym, cls)
            relations.append((cls, stem))
    return list(class_by_stem), relations, class_by_stem
allStems, allRels, stem2class = loadStems()
# -
from textmining.MirGeneRelCheck import SentenceRelationChecker, SentenceRelationClassifier
from utils.tmutils import normalize_gene_names
from collections import Counter
from lxml import etree
from sklearn.metrics import multilabel_confusion_matrix, classification_report
from itertools import chain, combinations
# +
scaiBase = "/mnt/d/owncloud/data/miRExplore/scai_corpus/"
scaiBase = "/mnt/f/dev/git/miRExplore/python/scai_corpus/"
base = "TRAIN"
if base == "TRAIN":
scaiFile = "miRNA_train_fixed.xml"
relsFile = "scai_train_rels.tsv"
elif base == "TEST":
scaiFile = "miRNA_test_fixed.xml"
relsFile = "scai_test_rels.tsv"
normGeneSymbols= normalize_gene_names(path="/mnt/d/owncloud/data/miRExplore/obodir/" + "/hgnc_no_withdrawn.syn")
relChecker = SentenceRelationChecker(nlp)
correctIdentified = 0
incorrectIdentified = 0
totalChecks = 0
incorrectClass = Counter()
# -
relClasses = loadRelations()
# +
nlp = spacy.load('/mnt/f/spacy/en_core_sci_lg-0.2.4/en_core_sci_lg/en_core_sci_lg-0.2.4/')
nlp_ent = spacy.load("/mnt/f/spacy/en_ner_bionlp13cg_md-0.2.4/en_ner_bionlp13cg_md/en_ner_bionlp13cg_md-0.2.4")
# +
# %aimport textmining.MirGeneRelCheck
scaiBase = "/mnt/d/owncloud/data/miRExplore/scai_corpus/"
normGeneSymbols= normalize_gene_names(path=scaiBase + "/../obodir/" + "/hgnc_no_withdrawn.syn")
def loadRelations(relsFile):
    """Parse a SCAI relation-annotation TSV into {interaction_id: tuple}.

    Each value is (relType, relDir, relIndirect, relPassive); the direction
    labels TARGET, INVCORR and REGULATE are all collapsed to NEU.
    The file is read from the module-level scaiBase directory.
    """
    dir_remap = {"TARGET": "NEU", "INVCORR": "NEU", "REGULATE": "NEU"}
    relClasses = {}
    with open(os.path.join(scaiBase, relsFile)) as fin:
        print("Loading relations", relsFile)
        for lidx, rawline in enumerate(fin):
            if lidx == 0:  # skip the header row
                continue
            cols = rawline.strip().split("\t")
            interactionID = cols[0].strip()
            relType = cols[1].strip()
            relRDir = dir_remap.get(cols[2].strip(), cols[2].strip())
            relIndirect = cols[3].strip()
            relPassive = cols[4].strip()
            relClasses[interactionID] = (relType, relRDir, relIndirect, relPassive)
    print("Interactions", len(relClasses))
    print("Non NA Interactions", len([x for x in relClasses if not relClasses[x][0] == "NA"]))
    return relClasses
def all_subsets(ss):
    """Yield every subset of *ss* as a tuple, from the empty set up to ss itself."""
    return chain.from_iterable(combinations(ss, size) for size in range(len(ss) + 1))
def runCheck(numelems, base):
relChecker = SentenceRelationChecker(nlp, nlp_ent=nlp_ent)
relClassifier = SentenceRelationClassifier()
if base == "TRAIN":
scaiFile = "miRNA_train_fixed.xml"
relsFile = "scai_train_rels.tsv"
elif base == "TEST":
scaiFile = "miRNA_test_fixed.xml"
relsFile = "scai_test_rels.tsv"
relClasses = loadRelations(relsFile)
check2results = {}
checkSubsets = [x for x in all_subsets(relClassifier.major_checks)]
checkSubsets = [checkSubsets[-1]]
print("Will test {} subsets.".format(len(checkSubsets)))
for subset_checks in checkSubsets:
print("Testing checks", subset_checks)
#relClassifier.active_checks = subset_checks
#allStems, allRels, stem2class = loadStems()
with open(os.path.join(scaiBase, scaiFile), 'r') as fin:
tree = etree.parse(fin)
root = tree.getroot()
scaiPairs = []
totalChecks = 0
correctIdentified = 0
incorrectIdentified = 0
incorrectClassified = 0
correctClassified = 0
totalClassified = 0
correctInteractClassified = 0
totalValidClassified = 0
totalClassifiable = 0
totalMirGeneDownClassified = 0
errorByDetect = Counter()
classifiedByDetect = Counter()
elemCaseCounter = Counter()
incorrectClass = Counter()
classifyTrue = []
classifyPred = []
for elem in root.findall(".//document"):
pmid = elem.attrib['origId']
for sentElem in elem:
allEntities = sentElem.findall(".//entity")
allPairs = sentElem.findall(".//pair")
sentText = sentElem.attrib["text"]
entId2elem = {}
for entity in allEntities:
entId = entity.attrib['id']
entText = entity.attrib['text']
entType = entity.attrib['type']
entOffset = tuple([int(x) for x in entity.attrib['charOffset'].split("-")])
if entType in ["Specific_miRNAs", "Genes/Proteins"]:
if "Genes" in entType:
if entText in normGeneSymbols:
entText = normGeneSymbols[entText]
elif entText.upper() in normGeneSymbols:
gene = normGeneSymbols[entText.upper()]
else:
try:
entText = miRNA(entText).getStringFromParts([miRNAPART.MATURE, miRNAPART.ID, miRNAPART.PRECURSOR])
except:
pass
entTuple = (entText, entType, (entOffset[0], entOffset[1]+1))
entId2elem[entId] = entTuple
sentEntText = sentText[entTuple[2][0]:entTuple[2][1]]
for pair in allPairs:
validInteraction = pair.attrib['interaction'].lower() == "true"
pairE1 = pair.attrib['e1']
pairE2 = pair.attrib['e2']
#if pairInt == 'true':
if pairE1 in entId2elem and pairE2 in entId2elem:
totalChecks += 1
e1 = entId2elem[pairE1]
e2 = entId2elem[pairE2]
if not e1[1] in ["Specific_miRNAs"]:
tmp=e1
e1=e2
e2=tmp
relRes = relChecker.check_sentence(sentText
, {"entity_type": "mirna", "entity_type_token": "e1", "entity_location": e1[2]}
, {"entity_type": "gene", "entity_type_token": "e2", "entity_location": e2[2]}
, fix_special_chars=False
, relClassifier=relClassifier.classify, verbose=False
)
fullsentence = relRes['full_sentence']
acceptInteraction = relRes['accept_relation']
if not acceptInteraction == validInteraction:
incorrectIdentified += 1
"""
relResV=relChecker.check_sentence(sentText
, {"entity_type": "mirna", "entity_type_token": "e1", "entity_location": e1[2]}
, {"entity_type": "gene", "entity_type_token": "e2", "entity_location": e2[2]}
, fix_special_chars=False
, relClassifier=relClassifier.classify, verbose=True
)
print(relResV)
"""
else:
correctIdentified += 1
elemCaseCounter[(validInteraction, acceptInteraction)]+=1
totalClassified += 1
if validInteraction:# and acceptInteraction:
#print(relRes["check_results"])
totalValidClassified += 1
foundClasses = relRes["check_results"]["classification"] #{'regulation_dir': 'NEU', 'interaction_dir': 'MIR_GENE', 'regulation_keys': set()}
foundTuple = (foundClasses["interaction_dir"], foundClasses["regulation_dir"])
relationID = pair.attrib["id"]
testTuple = relClasses[relationID]
classifyTrue.append((testTuple[0], testTuple[1]))
classifyPred.append((foundTuple[0], foundTuple[1]))
if testTuple[0] == "MIR_GENE" and testTuple[1] == "DOWN":
totalMirGeneDownClassified += 1
classifiedByDetect[foundClasses["reg_detect"]] += 1
if testTuple[0] != foundTuple[0] or testTuple[1] != foundTuple[1]:
#print(sentText)
#print(e1, e2)
#print(foundTuple, foundClasses["reg_detect"], "passive?", foundClasses["passive"])
#print(testTuple)
print()
print()
print()
print(relationID)
print("IS: ", foundTuple)
print("SB: ", testTuple)
print(relRes)
relResV=relChecker.check_sentence(sentText
, {"entity_type": "mirna", "entity_type_token": "e1", "entity_location": e1[2]}
, {"entity_type": "gene", "entity_type_token": "e2", "entity_location": e2[2]}
, fix_special_chars=False
, relClassifier=relClassifier.classify, verbose=True
)
print(relResV)
print()
print()
print()
errorByDetect[foundClasses["reg_detect"]] += 1
incorrectClassified += 1
#numelems -= 1
#if numelems == 0:
# return
else:
if foundClasses["reg_detect_major"] == "return":
print(foundClasses)
print(sentText)
print(e1, e2)
print(foundTuple, foundClasses["reg_detect"], "passive?", foundClasses["passive"])
print(testTuple)
print()
print()
print()
correctClassified += 1
if acceptInteraction:
correctInteractClassified += 1
def printStats(outfile):
    """Print detection/classification statistics to *outfile* and return the
    sklearn classification report as a dict.

    Reads the counters accumulated in the enclosing evaluation loop (closure
    variables: totalChecks, elemCaseCounter, classifyTrue/Pred, ...).
    """
    print("Total: ", totalChecks, file=outfile)
    print("Correct: ", correctIdentified, correctIdentified/totalChecks, file=outfile)
    print("Incorrect: ", incorrectIdentified, incorrectIdentified/totalChecks, file=outfile)
    print("classes", incorrectClass, file=outfile)
    # elemCaseCounter is keyed by (validInteraction, acceptInteraction).
    # NOTE(review): (True, False) counts valid-but-rejected pairs (false
    # negatives), so this "precision" formula computes TP/(TP+FN), i.e. looks
    # like recall, and "recall" TP/(TP+FP) like precision — verify the key
    # order before relying on these names.
    precision = elemCaseCounter[(True, True)] / (elemCaseCounter[(True, True)]+elemCaseCounter[(True, False)])
    recall = elemCaseCounter[(True, True)] / (elemCaseCounter[(True, True)]+elemCaseCounter[(False, True)])
    f1 = 2* precision * recall / (precision+recall)
    # NOTE(review): TN/(TN+FN) is negative predictive value rather than
    # specificity (TN/(TN+FP)) under the usual key interpretation — confirm.
    specificity = elemCaseCounter[(False, False)] / (elemCaseCounter[(True, False)] + elemCaseCounter[(False, False)])
    print("precision", precision, file=outfile)
    print("recall", recall, file=outfile)
    print("specificity", specificity, file=outfile)
    print("f1", f1, file=outfile)
    print("Correct classified: ", correctClassified, correctClassified/totalClassified, correctClassified/totalValidClassified,file=outfile)
    print("Incorrect classified: ", incorrectClassified, incorrectClassified/totalClassified, incorrectClassified/totalValidClassified,file=outfile)
    print("Random classified: ", totalMirGeneDownClassified, totalMirGeneDownClassified / totalClassified, totalMirGeneDownClassified/totalValidClassified, file=outfile)
    print(errorByDetect)
    print("Correct interaction&classified: ", correctInteractClassified, correctInteractClassified/totalClassified, correctInteractClassified/totalValidClassified,file=outfile)
    print("Classified by: ", classifiedByDetect)
    # Flatten the (interaction_dir, regulation_dir) tuples into single labels
    # for the sklearn report.
    lClassifyTrue = ["_".join(x) for x in classifyTrue]
    lClassifyPred = ["_".join(x) for x in classifyPred]
    allLabels = sorted(set(lClassifyTrue+lClassifyPred))
    matrix = multilabel_confusion_matrix(lClassifyTrue, lClassifyPred, labels=allLabels)
    print(matrix)
    print(classification_report(lClassifyTrue,lClassifyPred))
    return classification_report(lClassifyTrue, lClassifyPred, output_dict=True)
rep = printStats(sys.stdout)
check2results[subset_checks] = rep
#printStats(sys.stderr)
return check2results
# +
# %aimport textmining.MirGeneRelCheck
# Rebuild the classifier and evaluate on the SCAI TEST split. The cell is
# repeated so it can be re-run after edits to the autoreloaded module.
relClassifier = SentenceRelationClassifier()
test_results = runCheck(-1, "TEST")
# -
# +
# %aimport textmining.MirGeneRelCheck
relClassifier = SentenceRelationClassifier()
test_results = runCheck(-1, "TEST")
# -
# +
# %aimport textmining.MirGeneRelCheck
relClassifier = SentenceRelationClassifier()
test_results = runCheck(-1, "TEST")
# -
# Inspect one example with the pipeline: each tuple is
# (sentence text, miRNA token index, gene token index); earlier examples are
# kept commented for quick re-testing.
analyse(
    #("We also found that miR-326 promoted T(H)-17 differentiation by targeting Ets-1, a negative regulator of T(H)-17 differentiation.", 4, 10)
    #("In CGNPs, the Shh effector N-myc, but not Gli1, induced miR-17/92 expression.", 4, 10),
    #("miR-23a directly bound the 3' UTR of XIAP, and miR-23a inhibition led to an increase in XIAP mRNA in vitro, demonstrating that XIAP is a previously uncharacterized target for miR-23a.", 11, 18),
    #("Downregulation of Pdcd4 by mir-21 facilitates glioblastoma proliferation in vivo.", 4,2),
    ("We show that SRF and myocardin regulate a cardiovascular-specific microRNA (miRNA) cluster encoding miR-143 and miR-145. ", 3, 15),
    100
)
# +
import pickle
# Persist the TEST evaluation report for later comparison across runs.
with open("scai_test_f1.pickle", "wb") as fout:
    pickle.dump(test_results, fout)
# -
# +
# %aimport textmining.MirGeneRelCheck
# Rebuild the classifier and evaluate on the TRAIN split.
relClassifier = SentenceRelationClassifier()
runCheck(-1, "TRAIN")
# +
def getTokenDist(tk1, tk2):
    """Absolute distance between two tokens' positions in their document."""
    delta = tk1.i - tk2.i
    return delta if delta >= 0 else -delta
def countStems(tokens, geneword, mirword, keepNEU=False):
    """Count regulation-stem classes among *tokens* and attribute stems that
    sit close to the gene or miRNA mention to that side.

    Returns (stemTypes2Count, mirgene2stem): overall Counter of stem classes,
    and a per-side ("gene"/"mir") Counter of classes found within 3 tokens of
    the respective mention. NEU-class stems are skipped unless keepNEU.
    """
    stemTypes2Count = Counter()
    mirgene2stem = defaultdict(lambda: Counter())
    for tidx,x in enumerate(tokens):
        tknStem, matchStem, wasFound = getstem(x)
        if wasFound and (tknStem != "NEU" or keepNEU):
            # Skip a trailing "... to <stem>" infinitive near the end of the span.
            # NOTE(review): for tidx == 0 this reads tokens[-1] (wraps to the
            # last token) — confirm that is intended.
            if tidx >= len(tokens)-2 and str(tokens[tidx-1]) == "to":
                continue
            geneDist = getTokenDist(x, geneword)
            mirDist = getTokenDist(x, mirword)
            # Attribute the stem to whichever entity is strictly closer,
            # within a 3-token window.
            if geneDist < mirDist and geneDist < 3:
                mirgene2stem["gene"][tknStem] += 1
            elif mirDist < geneDist and mirDist < 3:
                mirgene2stem["mir"][tknStem] += 1
        if wasFound and (tknStem != "NEU" or keepNEU):
            stemTypes2Count[tknStem] += 1
    if len(stemTypes2Count) == 0:
        # Fallback: a "...-regulated" gene word counts as neutral evidence.
        if "regulated" in str(geneword):
            stemTypes2Count["NEU"] += 1
    return stemTypes2Count,mirgene2stem
def getRelsForTokens(tokens, desc, verbose):
    """Map each token to a (stem, None, stem-class, POS-tag, token) tuple.

    If any of the tokens is tagged VERB, only the verb entries are returned;
    otherwise all entries are kept. *desc* is only used for verbose tracing.
    """
    entries = []
    for tok in tokens:
        stem_class, stem, _ = getstem(tok)
        entries.append((stem, None, stem_class, tok.pos_, tok))
        if verbose:
            print(desc, tok, stem, stem_class, tok.pos_, tok)
    # Verbs dominate: when present, restrict the relation list to them.
    if any(entry[3] == "VERB" for entry in entries):
        entries = [entry for entry in entries if entry[3] == "VERB"]
    return entries
def evalCounts(dirReg, mirPos, mirNeg, genePos, geneNeg):
    """Derive a regulation direction from per-side stem polarity counts.

    Args:
        dirReg: kept for interface compatibility; not used by the decision.
        mirPos/mirNeg: positive/negative stem counts near the miRNA mention.
        genePos/geneNeg: positive/negative stem counts near the gene mention.

    Returns:
        (regdir, invcor): ("DOWN", True) when the two sides carry opposing
        signals (inverse correlation), ("UP", False) when both sides carry
        exclusively same-signed signals, (None, None) when undecidable.
    """
    # Opposing polarity on the two sides implies an inverse (down) relation.
    if (mirPos > 0 and geneNeg > 0) or (genePos > 0 and mirNeg > 0):
        return "DOWN", True
    # Consistent polarity on both sides — and none of the opposite sign —
    # implies a positive (up) relation.
    if ((mirPos > 0 and genePos > 0) and (mirNeg == 0 and geneNeg == 0)) or \
       ((mirNeg > 0 and geneNeg > 0) and (mirPos == 0 and genePos == 0)):
        return "UP", False
    return None, None
def scoreWord(tkn):
    """Score a token as regulation evidence.

    Returns (pos, neg) with each component 0 or 1: the stem lexicon's class
    when the token matches a known stem, otherwise a prefix heuristic on the
    token text.
    """
    jtkstem, s, _ = getstem(tkn)
    if jtkstem == None:
        # BUG FIX: the original referenced an undefined name `jtk` in this
        # branch (NameError whenever getstem returned None); score the token's
        # own text instead.
        tknstr = str(tkn).lower()
        if tknstr.startswith(("loss", "decreas", "low")):
            return 0, 1
        elif tknstr.startswith(("increas", "high", "posit")):
            return 1, 0
    else:
        if jtkstem == "POS":
            return 1, 0
        elif jtkstem == "NEG":
            return 0, 1
    # Neutral / unrecognized token.
    return 0, 0
def getTextBetween(tks, lword, rword):
    """Collect tokens around and between two anchor tokens and render context.

    Returns (tokenBetween, tokenBetweenStrict, textBetween, textBefore,
    textAfter): tokens within a +/-3 window spanning the anchors (plus tokens
    reached by walking each anchor's head chain), tokens strictly between the
    anchors, and whitespace-normalized texts between/before/after them.
    """
    tokenBetween = [x for x in tks if lword.i-3 < x.i < rword.i+3]
    # Follow the anchors' heads while the label is nmod/conj so coordinated or
    # modifier chains are included.
    # NOTE(review): pos_ usually holds coarse POS tags (NOUN, VERB, ...);
    # "nmod"/"conj" are dependency labels (dep_) — confirm these walks can fire.
    lit = lword
    while lit.pos_ in ["nmod", "conj"]:
        if not lit in tokenBetween:
            tokenBetween.append(lit)
        lit = lit.head
    rit = rword
    while rit.pos_ in ["nmod", "conj"]:
        if not rit in tokenBetween:
            tokenBetween.append(rit)
        rit = rit.head
    tokenBetween = list(sorted(tokenBetween, key=lambda x: x.i))
    tokenBetweenStrict = list(sorted([x for x in tks if lword.i < x.i < rword.i], key=lambda x: x.i))
    # Join token texts and collapse runs of whitespace to single spaces.
    textBetween = ' '.join(" ".join([str(x) for x in tokenBetween]).split())
    textAfter = ' '.join(" ".join([str(x) for x in [y for y in rword.doc if rword.i <= y.i < rword.i+5]]).split())
    textBefore = ' '.join(" ".join([str(x) for x in [y for y in lword.doc if lword.i-5 <= y.i < lword.i]]).split())
    return tokenBetween, tokenBetweenStrict, textBetween, textBefore, textAfter
# Lookup tables used to flip a decision when passive voice or word order
# inverts the relation.
dir2opp = {"MIR_GENE": "GENE_MIR", "GENE_MIR": "MIR_GENE"}  # interaction direction
idir2opp = {"POS": "NEG", "NEG": "POS"}  # stem polarity
rdir2opp = {"DOWN": "UP", "UP": "DOWN"}  # regulation direction
def testRelClassifier2(doc, mirword, geneword, relchecker, verbose):
    """Rule-based classification of a miRNA--gene pair within one sentence.

    Returns a dict with keys regulation_dir (UP/DOWN/NEU), interaction_dir
    (MIR_GENE/GENE_MIR), passive, and reg_detect (which rule produced the
    result). Rules are tried in order: phrase cues inside a sentence
    compartment, phrase cues between the two mentions, stem counting between
    the mentions, then positional fallbacks.
    """
    isPassive = relchecker.checkPassive(doc, mirword, geneword, verbose=False)
    compartment = relchecker.getCompartment(doc, mirword, geneword, verbose=False)
    if verbose:
        print("passive?", isPassive)
    # Count non-neutral regulation stems along the shortest dependency path.
    sdpRes = relchecker._get_sdp_path(doc, mirword.i, geneword.i)
    stemTypes2Count = Counter()
    for x in sdpRes:
        tknStem, matchStem, _ = getstem(x[0])
        if tknStem != "NEU":
            stemTypes2Count[tknStem] += 1
    print(stemTypes2Count)
    if len(stemTypes2Count) > 0:
        # Majority stem class (ties resolved to DOWN); the early return below
        # is commented out, so these values only pre-seed later fallbacks.
        regStem = stemTypes2Count.most_common(1)[0][0]
        if regStem == "NEG":
            regStem = "DOWN"
        elif regStem == "POS":
            regStem = "UP"
        if stemTypes2Count["POS"] == stemTypes2Count["NEG"]:
            regStem = "DOWN"
        idir = "MIR_GENE"
        if isPassive and mirword.i < geneword.i:
            idir = "GENE_MIR"
        #return {"regulation_dir": regStem, "interaction_dir": idir, "passive": isPassive, "reg_detect": "sdp counts"}
    # Phrases that indicate a plain miRNA->gene targeting relation.
    targetWords = ["targets of", "target of", "regulator of", "targeting", "direct regulator"]
    if compartment != None:
        # Try explicit correlation/targeting phrases within the compartment.
        tokenBetween = compartment
        textBetween = ' '.join(" ".join([str(x) for x in tokenBetween]).split())
        if geneword.i < mirword.i:
            if any([x in textBetween for x in ["positive correlation"]]):
                return {"regulation_dir": "UP", "interaction_dir": "GENE_MIR", "passive": isPassive, "reg_detect": "compartment gene mir pos corr"}
            if any([x in textBetween for x in ["inverse correlation", "inversely correlated", "inversely regulated"]]):
                return {"regulation_dir": "DOWN", "interaction_dir": "GENE_MIR", "passive": isPassive, "reg_detect": "compartment gene mir neg corr"}
            if any([x in textBetween for x in targetWords]):
                return {"regulation_dir": "NEU", "interaction_dir": "MIR_GENE", "passive": isPassive, "reg_detect": "compartment gene mir"}
        elif mirword.i < geneword.i:
            if any([x in textBetween for x in ["positive correlation"]]):
                return {"regulation_dir": "UP", "interaction_dir": "MIR_GENE", "passive": isPassive, "reg_detect": "compartment mir gene pos corr"}
            if any([x in textBetween for x in ["inverse correlation", "inversely correlated", "inversely regulated"]]):
                return {"regulation_dir": "DOWN", "interaction_dir": "MIR_GENE", "passive": isPassive, "reg_detect": "compartment mir gene neg corr"}
            if any([x in textBetween for x in targetWords]):
                return {"regulation_dir": "NEU", "interaction_dir": "MIR_GENE", "passive": isPassive, "reg_detect": "compartment mir gene"}
    if True:
        # Fall back to the token span between the two mentions (within the
        # compartment when one was found, otherwise the whole doc).
        tokSrc = compartment if compartment != None else doc
        if geneword.i < mirword.i:
            tokenBetween, tokenBetweenStrict, textBetween, textBefore, textAfter = getTextBetween(tokSrc, geneword, mirword)
            print(textBetween)
            if any([x in textBetween for x in ["positive correlation"]]):
                return {"regulation_dir": "UP", "interaction_dir": "GENE_MIR", "passive": isPassive, "reg_detect": "between gene mir pos corr"}
            if any([x in textBetween for x in ["inverse correlation", "inversely correlated", "inversely regulated"]]):
                return {"regulation_dir": "DOWN", "interaction_dir": "GENE_MIR", "passive": isPassive, "reg_detect": "between gene mir neg corr"}
            if any([x in textBetween for x in ["negative regulation of"]]):
                return {"regulation_dir": "UP", "interaction_dir": "GENE_MIR", "passive": isPassive, "reg_detect": "between gene mir reg corr"}
            if any([x in textBefore for x in ["binding site", "binding sites", "binding to", "binding directly to"]]):
                return {"regulation_dir": "NEU", "interaction_dir": "MIR_GENE", "passive": isPassive, "reg_detect": "between gene mir sites"}
            if any([x in textBetween for x in targetWords]) or any([x in textBetween for x in ["target", "targeted by", "binding efficiency", "binding site for", "recognizing"]]):
                return {"regulation_dir": "NEU", "interaction_dir": "MIR_GENE", "passive": isPassive, "reg_detect": "between gene mir"}
            #stemTypes2Count = Counter()
            #for x in tokenBetween:
            # tknStem, matchStem = getstem(x)
            # if tknStem != "NEU":
            # stemTypes2Count[tknStem] += 1
            # Disabled count-based branch for the gene-before-miRNA order.
            if False and len(stemTypes2Count) > 0:
                regStem = stemTypes2Count.most_common(1)[0][0]
                if regStem == "NEG":
                    regStem = "DOWN"
                elif regStem == "POS":
                    regStem = "UP"
                if stemTypes2Count["POS"] == stemTypes2Count["NEG"]:
                    regStem = "DOWN"
                idir = "MIR_GENE"
                if isPassive and geneword.i < mirword.i:
                    idir = "GENE_MIR"
                return {"regulation_dir": regStem, "interaction_dir": idir, "passive": isPassive, "reg_detect": "counts between gene mir"}
        else:
            #mirword.i < geneword.i
            tokenBetween, tokenBetweenStrict, textBetween, textBefore, textAfter = getTextBetween(tokSrc, mirword, geneword)
            print(textBetween)
            if any([x in textBetween for x in ["positive correlation"]]) and stemTypes2Count.get("NEG", 0) == 0:
                return {"regulation_dir": "UP", "interaction_dir": "MIR_GENE", "passive": isPassive, "reg_detect": "between mir gene pos corr"}
            if any([x in textBetween for x in ["negative regulation of"]]):
                return {"regulation_dir": "UP", "interaction_dir": "MIR_GENE", "passive": isPassive, "reg_detect": "between mir gene reg corr"}
            if any([x in textBetween for x in ["inverse correlation", "inversely correlated", "inversely regulated"]]):
                return {"regulation_dir": "DOWN", "interaction_dir": "MIR_GENE", "passive": isPassive, "reg_detect": "between mir gene neg corr"}
            if any([x in textBetween for x in ["directly inhibits", "directly represses", "translational inhibition" ]]):
                return {"regulation_dir": "DOWN", "interaction_dir": "MIR_GENE", "passive": isPassive, "reg_detect": "between mir gene"}
            if any([x in textBetween for x in targetWords + ["directly regulated", "its target gene", "binding site"]]):
                return {"regulation_dir": "NEU", "interaction_dir": "MIR_GENE", "passive": isPassive, "reg_detect": "between mir gene"}
            if any([x in textBetween for x in ["negatively regulated by", "negative regulation of"]]):
                return {"regulation_dir": "DOWN", "interaction_dir": "GENE_MIR", "passive": isPassive, "reg_detect": "between mir gene nrb"}
            if len(tokenBetween) > 0:
                # Count stems between the mentions and decide from the counts.
                stemTypes2Count,mirgene2stem = countStems(tokenBetween, geneword, mirword)
                if len(stemTypes2Count) > 0:
                    regStem = stemTypes2Count.most_common(1)[0][0]
                    # Opposing per-side polarities -> inverse regulation.
                    if len(stemTypes2Count) >= 2 and len(mirgene2stem["gene"]) >0 and len(mirgene2stem["mir"])>0 and mirgene2stem["gene"].most_common(1)[0][0] == idir2opp[mirgene2stem["mir"].most_common(1)[0][0]]:
                        print(mirgene2stem)
                        # NOTE(review): all four branches below assign the same
                        # (MIR_GENE, DOWN) — presumably intentional collapse of
                        # an earlier finer-grained rule; verify.
                        if geneword.i < mirword.i:
                            if mirgene2stem["gene"].most_common(1)[0][0] == "POS":
                                regDir = "MIR_GENE"
                                regStem = "DOWN"
                            else:
                                regDir = "MIR_GENE"
                                regStem = "DOWN"
                        else:
                            if mirgene2stem["mir"].most_common(1)[0][0] == "NEG":
                                regDir = "MIR_GENE"
                                regStem = "DOWN"
                            else:
                                regDir = "MIR_GENE"
                                regStem = "DOWN"
                        if "after" in textBetween:
                            regDir = dir2opp[regDir]
                        if isPassive:
                            regDir = dir2opp[regDir]
                            regStem = rdir2opp[regStem]
                        return {"regulation_dir": regStem, "interaction_dir": regDir, "passive": isPassive, "reg_detect": "counts opp"}
                    else:
                        # Re-count including NEU stems and use fixed patterns
                        # (one POS + one NEG, alternating, ...).
                        stemTypes2Count,mirgene2stem = countStems(tokenBetween, geneword, mirword, keepNEU=True)
                        print("counts between 1", stemTypes2Count)
                        regStem = stemTypes2Count.most_common(1)[0][0]
                        if stemTypes2Count["POS"] == stemTypes2Count["NEG"] == 1 and stemTypes2Count["NEU"] <= 3:
                            regDir = "MIR_GENE"
                            regStem = "DOWN"
                            if any(x in textBetween for x in ["after"]):
                                regDir = dir2opp[regDir]
                            return {"regulation_dir": regStem, "interaction_dir": regDir, "passive": isPassive, "reg_detect": "counts between equal"}
                        if stemTypes2Count["POS"] == 2 and stemTypes2Count["NEG"] == 1 and stemTypes2Count["NEU"] <= 2:
                            regDir = "MIR_GENE"
                            regStem = "DOWN"
                            return {"regulation_dir": regStem, "interaction_dir": regDir, "passive": isPassive, "reg_detect": "counts between alternating"}
                        if regStem == "NEU":
                            if stemTypes2Count["POS"] > 0 and stemTypes2Count["POS"] > stemTypes2Count["NEG"]:
                                regStem = "POS"
                            elif stemTypes2Count["NEG"] > 0 and stemTypes2Count["NEG"] > stemTypes2Count["POS"]:
                                regStem = "NEG"
                            #regStem = "NEU"
                # Direction defaults to surface order, then corrected by
                # by/with markers, passive voice and a few lexical cues.
                if mirword.i < geneword.i:
                    regDir = "MIR_GENE"
                else:
                    regDir = "GENE_MIR"
                if mirword.i < geneword.i and "-stressed" in textBetween:
                    regDir = "GENE_MIR"
                if mirword.i < geneword.i and str(doc[geneword.i-1]) in ["by", "with"]:
                    regDir = "GENE_MIR"
                    stemTypes2Count,mirgene2stem = countStems(tokenBetweenStrict, geneword, mirword)
                    if len(stemTypes2Count) > 0:
                        regStem = stemTypes2Count.most_common(1)[0][0]
                    #else:
                    # regStem = "NEU"
                    print("counts between 2", stemTypes2Count, regStem)
                elif geneword.i < mirword.i and any([str(doc[mirword.i-j]) in ["by", "with"] for j in [1,2]]):
                    regDir = "MIR_GENE"
                    stemTypes2Count,mirgene2stem = countStems(tokenBetweenStrict, geneword, mirword)
                    if len(stemTypes2Count) > 0:
                        regStem = stemTypes2Count.most_common(1)[0][0]
                    #else:
                    # regStem = "NEU"
                    print("counts between 2", stemTypes2Count, regStem)
                elif isPassive and geneword.i < mirword.i:
                    regDir = dir2opp[regDir]
                    if stemTypes2Count["POS"] > 0 and stemTypes2Count["POS"] == stemTypes2Count["NEG"]:
                        regStem = "NEU"
                    #regStem = idir2opp[regStem]
                if geneword.i < mirword.i:
                    if str(geneword).endswith("-regulating"):
                        regStem = "NEU"
                if mirword.i < geneword.i and "after the addition" in textBetween:
                    regStem = "DOWN"
                    regDir = "GENE_MIR"
                if regStem == "NEG":
                    regStem = "DOWN"
                elif regStem == "POS":
                    regStem = "UP"
                return {"regulation_dir": regStem, "interaction_dir": regDir, "passive": isPassive, "reg_detect": "counts between"}
    # Final positional fallbacks when no rule above returned.
    regDir = "MIR_GENE"
    regStem = "NEU"
    regeluateWords = ["mediated",]
    if mirword.i < geneword.i and "mediated" in str(mirword):
        regStem = "DOWN"
    if geneword.i < mirword.i and ("mediated" in str(mirword) or any([str(doc[mirword.i-j]) in ["mediated",] for j in [1]])):
        regStem = "NEU"
        regDir = "MIR_GENE"
        return {"regulation_dir": regStem, "interaction_dir": regDir, "passive": isPassive, "reg_detect": "return mediated"}
    if mirword.i<geneword.i and any([str(doc[geneword.i-j]) in ["by", "with"] for j in [1,2,3]]):
        regStem = "NEU"
        regDir = "MIR_GENE"
        return {"regulation_dir": regStem, "interaction_dir": regDir, "passive": isPassive, "reg_detect": "return m g by"}
    if geneword.i < mirword.i:
        regStem = "NEU"
        regDir = "GENE_MIR"
    if geneword.i < mirword.i and any([str(doc[mirword.i-j]) in ["by", "with"] for j in [1,2,3]]):
        regStem = "NEU"
        regDir = "MIR_GENE"
    if mirword.i < geneword.i and str(doc[mirword.i+1]) == "target":
        regStem = "NEU"
        regDir = "MIR_GENE"
    #if mirword.i < geneword.i:
    # regDir = "MIR_GENE"
    #else:
    # regDir = "GENE_MIR"
    #if isPassive:
    # if mirword.i < geneword.i:
    # regDir = "GENE_MIR"
    # else:
    # regDir = "MIR_GENE"
    return {"regulation_dir": regStem, "interaction_dir": regDir, "passive": isPassive, "reg_detect": "return"}
# -
def getstem(tkn):
    """Classify a token against the stem2class prefix lexicon.

    Returns (stem_class, matched_stem_or_text, was_found): the stem's class
    ("POS"/"NEG"/"NEU") with the matched stem when the lowercased token starts
    with a known stem, otherwise ("NEU", token text, False).
    """
    tknstr = str(tkn).lower()
    for sidx, s in enumerate(stem2class):
        if tknstr.startswith(s):
            # Agent nouns are not regulation events; keep scanning other stems.
            if tknstr in ["enhancer","inhibitor"]:
                continue
            tokenStemClass = stem2class[s]
            if tokenStemClass == "NEU":
                # NOTE(review): this loop is effectively a no-op — every line
                # that could change tokenStemClass is commented out; presumably
                # leftover from an experiment promoting NEU stems based on the
                # preceding words.
                for i in range(1,2):
                    if tkn.i-i<0:
                        continue
                    #tknPre = str(tkn.doc[tkn.i-i])
                    #if tknPre.startswith(("high", "elevat", "increas")):
                    # tokenStemClass = "POS"
                    #if tknPre.startswith(("low", "decreas")):
                    # tokenStemClass = "NEG"
            return tokenStemClass, s, True
        elif tknstr.endswith(("-stressed")):
            # NOTE(review): this check sits inside the stem loop, so it fires
            # on the first non-matching stem and returns that unrelated stem
            # `s` as the match — verify this is intended.
            return "POS", s, True
    return "NEU", str(tkn), False
# +
# Sanity checks for the stem lexicon and the stem-counting helpers on one
# example sentence.
allStems, allRels, stem2class = loadStems()
print(stem2class["underexpress"])
print("underexpression".startswith("underexpress"))
doct = nlp("expressed , miR-34a , was studied further because sequence analysis suggested a likely interaction with the 3'-untranslated region of bcl2 mRNA")
toks = [x for x in doct]
for tok in toks:
    print(tok, getstem(tok))
countStems(toks, toks[0], toks[-1])
# +
# %aimport textmining.MirGeneRelCheck
# Re-run the evaluation on TRAIN and TEST after module changes.
relClassifier = SentenceRelationClassifier()
runCheck(-1, "TRAIN")
# +
# %aimport textmining.MirGeneRelCheck
relClassifier = SentenceRelationClassifier()
runCheck(-1, "TEST")
# -
# +
def testRelClassifier(doc, mirword, geneword, relchecker, verbose):
    """Heuristic relation classifier for a miRNA--gene pair in one sentence.

    Returns {"regulation_dir", "interaction_dir", "regulation_keys"} derived
    from regulation stems found on the mentions' dependency ancestors, then
    (as fallbacks) on nearby words, the span between the mentions, and the
    shortest dependency path.
    """
    if verbose:
        print()
        print()
        print()
        print("Rel Classifier")
        print(doc.text_with_ws)
    foundStems = set()
    # Shortest dependency path between the two mentions.
    sdpRes = relchecker._get_sdp_path(doc, mirword.i, geneword.i)
    if verbose:
        for x in sdpRes:
            print(x)
    sdpTokens = [x[0] for x in sdpRes]
    #print(sdpRes)
    # Stems found on verbs along the SDP.
    sdpVerbRels = set()
    for sidx, x in enumerate(sdpRes):
        if x[1] == "VERB":
            for s in allStems:
                if s in str(x[0]):
                    sdpVerbRels.add((s, sidx, stem2class[s]))
                    if verbose:
                        print("sdpVerbRels", x[0], s, sidx, stem2class[s])
    # Stems on tokens outside the SDP.
    docRels = set()
    for tkn in doc:
        if tkn in sdpTokens:
            continue
        for s in allStems:
            if str(tkn).lower().startswith(s):
                # NOTE(review): sidx here is a stale leftover from the
                # enumerate loop above, so every docRels entry reuses the last
                # SDP index — verify intended.
                docRels.add((s, sidx, stem2class[s]))
                if verbose:
                    print("docRels", tkn, s, sidx, stem2class[s])
    # Use at most the first dependency ancestor of each mention.
    mirAncests = [x for x in mirword.ancestors]
    geneAncests = [x for x in geneword.ancestors if not x in mirAncests]
    if len(mirAncests) > 1:
        useMirAncests = mirAncests[0:1]
    else:
        useMirAncests = mirAncests
    if len(geneAncests) > 1:
        useGeneAncests = geneAncests[0:1]
    else:
        useGeneAncests = geneAncests
    useGeneAncests = [x for x in useGeneAncests if not x in useMirAncests]
    if verbose:
        print("mirc", mirAncests)
        print("genec", geneAncests)
    # Defaults when no rule fires.
    invCor = False
    regdir = "NEU"
    intdir = "MIR_GENE"
    isPassive = relchecker.checkPassive(doc, mirword, geneword, verbose=False)
    if verbose:
        print("passive?", isPassive)
    ancestHandled = False
    #if len(mirAncests) > 0 and len(geneAncests) > 0:
    if True:
        # Stage 1: stems on the ancestors of each mention.
        mirRels = []
        geneRels = []
        mirRels = getRelsForTokens(useMirAncests, "ancestRels mir", verbose)
        neuMirs = [x for x in mirRels if x[2] in ["NEU"]]
        #if len(neuMirs) > 0 or len(mirRels) == 0:
        # mirRels = getRelsForTokens(mirAncests, "ancestRels all mir", verbose)
        geneRels = getRelsForTokens(useGeneAncests, "ancestRels gene", verbose)
        neuGenes = [x for x in geneRels if x[2] in ["NEU"]]
        if False and ((len(neuGenes) == len(geneRels)) or len(geneRels) == 0):
            geneRels = getRelsForTokens([x for x in geneAncests if not x in useMirAncests], "ancestRels all gene", verbose)
            neuGenes = [x for x in geneRels if x[2] in ["NEU"]]
        if (len(neuGenes) == len(geneRels)) or len(geneRels) == 0:
            # No informative gene-side stems: widen to a window around the gene.
            if mirword.i < geneword.i:
                tks = [x for x in doc if geneword.i -8 < x.i < geneword.i if not x in useMirAncests]
            else:
                tks = [x for x in doc if geneword.i < x.i < geneword.i+5 if not x in useMirAncests]
            geneRels = getRelsForTokens(tks, "ancestRels tks gene", verbose)
        posRegs = [x for x in mirRels+geneRels if x[2] in ["POS"]]
        negRegs = [x for x in mirRels+geneRels if x[2] in ["NEG"]]
        mirPos = len([x for x in mirRels if x[2] in ["POS"]])
        mirNeu = [x for x in mirRels if x[2] in ["NEU"]]
        mirNeg = len([x for x in mirRels if x[2] in ["NEG"]])
        genePos = len([x for x in geneRels if x[2] in ["POS"]])
        geneNeu = [x for x in geneRels if x[2] in ["NEU"]]
        geneNeg = len([x for x in geneRels if x[2] in ["NEG"]])
        dirReg = (mirPos - geneNeg) + (genePos-mirNeg)
        invCor = False
        if verbose:
            print(mirword, geneword)
            print("ancestRel counts", mirPos, mirNeg, genePos, geneNeg)
        nregdir, ninvcor = evalCounts(dirReg, mirPos, mirNeg, genePos, geneNeg)
        if verbose:
            print("evalcount")
            print(nregdir, ninvcor)
        if nregdir != None:
            regdir = nregdir
            invcor = ninvcor
            ancestHandled = True
            if verbose:
                print("accepted by evalcount")
        else:
            # Stage 2: resolve NEU stems ("correlat...", "affect...") using the
            # words immediately before them, then re-evaluate the counts.
            lookBehind = 8
            if len(mirNeu) > 0 or len(geneNeu) > 0:
                if verbose:
                    print("ancestRel neu part")
                if mirword.i < geneword.i:
                    for x in mirNeu:
                        if x[0].startswith(("correlat", "affect")):
                            for j in range(1,lookBehind):
                                if str(doc[x[4].i-j]).startswith(("posit", "high")):
                                    mirPos += 1
                                elif str(doc[x[4].i-j]).startswith(("negat", "low")):
                                    mirNeg += 1
                else:
                    for x in geneNeu:
                        if x[0].startswith(("correlat")):
                            for j in range(1,lookBehind):
                                if str(doc[x[4].i-j]).startswith(("posit", "high")):
                                    genePos += 1
                                elif str(doc[x[4].i-j]).startswith(("negat", "low")):
                                    geneNeg += 1
                if not ancestHandled:
                    if verbose:
                        print("ancestRel counts stem check", mirPos, mirNeg, genePos, geneNeg)
                    nregdir, ninvcor = evalCounts(dirReg, mirPos, mirNeg, genePos, geneNeg)
                    if nregdir != None:
                        regdir = nregdir
                        invcor = ninvcor
                        ancestHandled = True
                        if verbose:
                            print("accept after before stem check")
            if not ancestHandled:
                # Stage 3: score the words directly preceding each mention.
                for j in range(1,lookBehind):
                    if mirword.i-j < 0:
                        continue
                    jtk = doc[mirword.i-j]
                    if verbose:
                        print("word check mir", j, jtk)
                    pos, neg = scoreWord(jtk)
                    mirPos += pos
                    mirNeg += neg
                for j in range(1,lookBehind):
                    if geneword.i-j < 0:
                        continue
                    jtk = doc[geneword.i-j]
                    if verbose:
                        print("word check gene", j, jtk)
                    pos, neg = scoreWord(jtk)
                    genePos += pos
                    geneNeg += neg
                if verbose:
                    print("ancestRel counts word check", mirPos, mirNeg, genePos, geneNeg)
                nregdir, ninvcor = evalCounts(dirReg, mirPos, mirNeg, genePos, geneNeg)
                if nregdir != None:
                    regdir = nregdir
                    invcor = ninvcor
                    ancestHandled = True
                    if verbose:
                        print("accept after before word check")
        if verbose:
            print("ancestRels", dirReg, invCor)
        if ancestHandled:
            if isPassive:
                intdir = "GENE_MIR"
    if not ancestHandled:
        # Stage 4: look at stems strictly between the two mentions.
        if verbose:
            print("ancestHandled part")
        if geneword.i < mirword.i:
            lword = geneword
            rword = mirword
            useTks = [x for x in doc if geneword.i < x.i < mirword.i]
        else:
            lword = mirword
            rword = geneword
            useTks = [x for x in doc if mirword.i < x.i < geneword.i]
        if verbose:
            print(useTks)
        betRels = getRelsForTokens(useTks, "ancestRels useTks", verbose)
        if verbose:
            print(betRels)
        betPos = len([x for x in betRels if x[2] in ["POS"]])
        betNeg = len([x for x in betRels if x[2] in ["NEG"]])
        if betPos + betNeg == 1:
            # Exactly one polar stem between the mentions decides both
            # directions (adjusted for word order and passive voice).
            ancestHandled = True
            if verbose:
                print("ancest bet handled", mirword.i, geneword.i, isPassive)
            if geneword.i < mirword.i:
                if isPassive:
                    intdir = "MIR_GENE"
                    regdir = "DOWN" if betNeg > 0 else "UP"
                else:
                    intdir = "GENE_MIR"
                    regdir = "UP" if betPos > 0 else "DOWN"
            else:
                if isPassive:
                    intdir = "GENE_MIR"
                    regdir = "UP" if betPos > 0 else "DOWN"
                else:
                    intdir = "MIR_GENE"
                    regdir = "UP" if betPos > 0 else "DOWN"
        else:
            # "X-mediated" constructions flip the direction to follow word order.
            isMediated = False
            for x in [lword] + useTks[0:2]:
                for y in ["mediated", "-mediated"]:
                    if y in str(x):
                        isMediated = True
                        break
                if isMediated:
                    break
            if isMediated:
                ancestHandled = True
                if verbose:
                    print("ancest mediated handled")
                if geneword.i < mirword.i:
                    intdir = "GENE_MIR"
                else:
                    intdir = "MIR_GENE"
                if betPos > betNeg:
                    regdir = "UP"
                elif betNeg > betPos:
                    regdir = "DOWN"
    #classCounter = Counter()
    #if len(classCounter) > 0:
    # regdir = classCounter.most_common(1)[0][0]
    #else:
    # regdir = "NEU"
    #if invCor:
    # regdir = "DOWN"
    if not ancestHandled:
        # Stage 5: final fallback driven by "by"/targeting phrases and SDP stems.
        if verbose:
            print("in final resolution")
        if geneword.i < mirword.i:
            textBetween = " ".join([str(x) for x in doc if geneword.i < x.i < mirword.i])
            textBetween = ' '.join(textBetween.split())
            if verbose:
                print(textBetween)
            if len(textBetween) < 10 and any([x in textBetween for x in ["by"]]):
                sdpRels = set()
                for sidx,x in enumerate(sdpRes):
                    for s in allStems:
                        if str(x[0]).lower().startswith(s):
                            sdpRels.add((s, sidx, stem2class[s]))
                            if verbose:
                                print("sdpRels", x[0], s, sidx, stem2class[s])
                posRegs = [x for x in sdpRels if x[2] in ["POS"]]
                negRegs = [x for x in sdpRels if x[2] in ["NEG"]]
                dirReg = len(posRegs)-len(negRegs)
                if dirReg > 0:
                    regdir = "UP"
                elif dirReg < 0:
                    regdir = "DOWN"
                else:
                    regdir = "NEU"
                intdir = "MIR_GENE"
            elif any([x in textBetween for x in ["target of", "target for", "targeting"]]):
                intdir = "MIR_GENE"
                regdir = "NEU"
            else:
                intdir = "GENE_MIR"
        elif mirword.i < geneword.i:
            textBetween = " ".join([str(x) for x in doc if geneword.i-5 < x.i < geneword.i])
            if any([x in textBetween for x in ["by"]]):
                intdir = "GENE_MIR"
                regdir = "UP"
    regstems = foundStems
    #displacy.render(doc, style="dep")
    if verbose:
        print((intdir, regdir, "NA", isPassive))
    return {"regulation_dir": regdir, "interaction_dir": intdir, "regulation_keys": regstems}
| python/nbs/TestClassifications.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Deep Learning Final Assignment – <NAME>
#
# To run this notebook, download the data from https://drive.google.com/file/d/1G_Exgw9WXI6swzGQEyzueOQvB_01mUJt/view and place it in the expected folder structure.
# +
## Preamble
import numpy as np
from scipy.io import loadmat
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import h5py
from sklearn.preprocessing import MinMaxScaler
from sklearn.utils import shuffle
from scipy.signal import decimate
import os
from os import listdir
from os.path import isfile, join
# Fix random seeds for reproducible weight initialization and shuffling.
tf.random.set_seed(123)
np.random.seed(123)
def get_dataset_name(file_name_with_dir):  # Function used to load files
    """Derive the HDF5 dataset name from a file path.

    Drops the directory part and the trailing '_<chunk-index>.h5' segment,
    e.g. 'Intra/train/rest_105923_1.h5' -> 'rest_105923'.
    """
    base_name = file_name_with_dir.split('/')[-1]
    return "_".join(base_name.split('_')[:-1])
def load_intra():
    """Load the intra-subject MEG data (subject 105923).

    Returns (train_x, train_y, test_x, test_y): the x arrays stack one matrix
    per file, each y row is [chunk_index, class_id] with class ids
    0=rest, 1=motor, 2=math, 3=memory. Chunks 1-8 form the train split,
    chunks 9-10 the test split.

    Improvements over the original: the near-identical train/test loading
    loops are deduplicated into one helper, and the if/elif class-code chain
    is replaced by a dict (the original could reuse a stale `Class` value for
    an unrecognized task name).
    """
    # Mapping from the task keyword in the dataset name to a class id.
    class_codes = {'rest': 0, 'motor': 1, 'math': 2, 'memory': 3}

    def _load_split(split, chunk_range):
        # Load all task files of one split; filenames encode task, subject
        # and chunk index.
        templates = [
            "Final_Project_DL/Intra/%s/rest_105923_%%.0f.h5" % split,
            "Final_Project_DL/Intra/%s/task_motor_105923_%%.0f.h5" % split,
            "Final_Project_DL/Intra/%s/task_story_math_105923_%%.0f.h5" % split,
            "Final_Project_DL/Intra/%s/task_working_memory_105923_%%.0f.h5" % split,
        ]
        x = []
        y = []
        for template in templates:
            for k in chunk_range:
                filename_path = template % k
                with h5py.File(filename_path, 'r') as f:
                    dataset_name = get_dataset_name(filename_path)
                    x.append(f.get(dataset_name)[()])  # raw channel x time matrix
                    task_key = dataset_name.split("_")[-2]
                    if task_key not in class_codes:
                        print('Error in adding classes')
                    y.append([int(dataset_name.split("_")[-1]), class_codes.get(task_key)])
        return np.array(x), np.array(y)

    intra_train_x, intra_train_y = _load_split("train", range(1, 9))
    intra_test_x, intra_test_y = _load_split("test", range(9, 11))
    print("Intra: ", intra_train_x.shape, intra_train_y.shape, intra_test_x.shape, intra_test_y.shape)
    return intra_train_x, intra_train_y, intra_test_x, intra_test_y
def path_list(file_path):
    """Return the full paths of all regular files directly inside *file_path*
    (subdirectories are skipped; order follows os.listdir)."""
    return [
        os.path.join(file_path, entry)
        for entry in os.listdir(file_path)
        if os.path.isfile(os.path.join(file_path, entry))
    ]
def concatenate_data(data_path):
    """Load every HDF5 file in *data_path* and return (data, labels) arrays.

    Each label row is [subject_id, class_id] with class ids
    0=rest, 1=motor, 2=math, 3=memory, parsed from the dataset name.

    BUG FIX: the original if/elif chain had no fallback, so an unrecognized
    task name raised NameError on the first file or silently reused the
    previous file's `Class`; now an explicit error is raised instead.

    Raises:
        ValueError: if a dataset name does not contain a known task keyword.
    """
    class_codes = {'rest': 0, 'motor': 1, 'math': 2, 'memory': 3}
    data = []
    labels = []
    for path in data_path:
        with h5py.File(path, 'r') as f:
            dataset_name = get_dataset_name(path)
            data.append(f.get(dataset_name)[()])
            task_key = dataset_name.split("_")[-2]
            if task_key not in class_codes:
                raise ValueError("Unrecognized task in dataset name: %s" % dataset_name)
            labels.append([int(dataset_name.split("_")[-1]), class_codes[task_key]])
    return np.array(data), np.array(labels)
def load_cross():
    """Load the cross-subject train split and its three test splits.

    The files of each test directory are sorted and then re-ordered with a
    fixed permutation so the file order matches the expected class order.

    Returns:
        train_data, train_data_labels, test1_data, test1_data_labels,
        test2_data, test2_data_labels, test3_data, test3_data_labels
    """
    def _load_test(directory):
        # Load one test directory with the fixed file re-ordering applied.
        order = [0, 2, 3, 1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        sorted_paths = sorted(path_list(directory))
        return concatenate_data([sorted_paths[i] for i in order])

    train_path = path_list('Final_Project_DL/Cross/train/')
    train_data, train_data_labels = concatenate_data(train_path)
    test1_data, test1_data_labels = _load_test('Final_Project_DL/Cross/test1/')
    test2_data, test2_data_labels = _load_test('Final_Project_DL/Cross/test2/')
    test3_data, test3_data_labels = _load_test('Final_Project_DL/Cross/test3/')
    print(train_data.shape,train_data_labels.shape,test1_data.shape,test1_data_labels.shape,
          test2_data.shape,test2_data_labels.shape,test3_data.shape,test3_data_labels.shape)
    return train_data,train_data_labels,test1_data,test1_data_labels,test2_data,test2_data_labels,test3_data,test3_data_labels
def data_wrangle(train_x, train_y, DOWNSAMPLE_RATE):
    """Downsample, one-hot-encode, min-max scale, shuffle and reshape a split.

    Args:
        train_x: array of shape (chunks, channels, timesteps).
        train_y: (chunks, 2) array of [subject_id, class_code] rows.
        DOWNSAMPLE_RATE: decimation factor for the time axis.

    Returns:
        (train_x, train_labels): scaled data with shape
        (chunks, timesteps, channels) and one-hot labels.
    """
    train_x = decimate(train_x, DOWNSAMPLE_RATE)  # decrease the sampling rate
    train_labels = tf.keras.utils.to_categorical(train_y[:, 1])  # labels -> one-hot
    # Scale each chunk to [0, 1] with that chunk's global min/max, so all
    # channels of a chunk share the same scaling.  This is the vectorized
    # equivalent of the original per-chunk Python loop.
    minval = np.amin(train_x, axis=(1, 2), keepdims=True)
    maxval = np.amax(train_x, axis=(1, 2), keepdims=True)
    train_x_scaled = (train_x - minval) / (maxval - minval)
    train_x, train_labels = shuffle(train_x_scaled, train_labels, random_state=0)  # shuffle samples and labels
    train_x = np.swapaxes(train_x, 1, 2)  # move features to the last axis
    print("Wrangle: ", train_x.shape, train_labels.shape)
    return train_x, train_labels
def data_windows(x, y, window_size):
    """Cut each chunk of *x* into non-overlapping windows of *window_size*.

    The trailing part of a chunk that does not fill a whole window is dropped.
    Fixes the original implementation, which ignored the *window_size*
    parameter (it read the global WINDOW_SIZE instead), grew the arrays with
    O(n^2) np.append calls, and hard-coded 248 features / 4 classes.

    Args:
        x: array of shape (chunks, timesteps, features).
        y: per-chunk one-hot labels, shape (chunks, n_classes).
        window_size: number of timesteps per window.

    Returns:
        (X, Y): shuffled windows (n, window_size, features) and labels.
    """
    data = []
    labels = []
    for chunk in range(x.shape[0]):
        n_windows = x.shape[1] // window_size
        for w in range(n_windows):
            data.append(x[chunk, w * window_size:(w + 1) * window_size, :])
            labels.append(y[chunk])
    data = np.array(data)
    labels = np.array(labels)
    print("WINDOW: ", data.shape, labels.shape)
    X, Y = shuffle(data, labels, random_state=0)
    return X, Y
#### MODEL
def train_lstm(train_x,train_labels,epochs,batch_size):
    """Train an LSTM classifier on windowed data and return the best model.

    Args:
        train_x: array of shape (samples, timesteps, features).
        train_labels: one-hot labels, shape (samples, n_classes).
        epochs: number of training epochs.
        batch_size: mini-batch size.

    Returns:
        The model restored from the checkpoint with the best val_accuracy.

    Side effects: writes 'best_model_lstm.h5' and 'loss_lstm.pdf', shows a
    training-curve plot, prints the model summary.
    """
    # Keep only the epoch with the best validation accuracy on disk.
    mc = tf.keras.callbacks.ModelCheckpoint('best_model_lstm.h5', monitor='val_accuracy', mode='max',verbose = 1, save_best_only=True)
    model = tf.keras.models.Sequential([
        #tf.keras.layers.BatchNormalization(),
        tf.keras.layers.LSTM(200,input_shape=train_x.shape[-2:]),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(100, activation='relu'),
        tf.keras.layers.Dense(100, activation='relu'),
        tf.keras.layers.Dense(train_labels.shape[1], activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # fit network; 20% of the training data is held out for validation
    history = model.fit(train_x, train_labels, epochs=epochs, batch_size=batch_size,
                       validation_split = 0.2,callbacks = [mc],verbose = 0)
    # Reload the best checkpoint so the returned model is not the last epoch.
    model = tf.keras.models.load_model('best_model_lstm.h5')
    print("---------------------------------------- \n")
    print(model.summary())
    # Plot training loss, training accuracy and validation loss per epoch.
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    accuracy_plot = history.history['accuracy']
    plt.figure(figsize= (20,10))
    plt.plot(range(len(loss)), loss, 'b', label='Training loss')
    plt.plot(range(len(loss)), accuracy_plot, 'r', label='Accuracy')
    plt.plot(range(len(loss)), val_loss, 'orange', label='Validation loss')
    plt.legend()
    plt.title("Loss for LSTM Model")
    plt.savefig('loss_lstm.pdf',bbox_inches = 'tight')
    plt.show()
    return model
def evaluate_lstm(model, test_x, test_labels, batch_size):
    """Evaluate a trained LSTM; return (accuracy, confusion_matrix) on the test set."""
    _, accuracy = model.evaluate(test_x, test_labels, batch_size=batch_size, verbose=0)
    print("---------------------------------------- \n")
    # Compare predicted class indices with the true one-hot labels.
    predicted_classes = tf.argmax(model.predict(test_x), axis=1)
    true_classes = tf.argmax(test_labels, axis=1)
    confusion_matrix = tf.math.confusion_matrix(true_classes, predicted_classes, dtype=tf.dtypes.int32)
    return accuracy, confusion_matrix
def train_cnn(train_x,train_labels,epochs,batch_size,filters,kernels):
    """Train a 1-D CNN classifier on windowed data and return the best model.

    Args:
        train_x: array of shape (samples, timesteps, features).
        train_labels: one-hot labels, shape (samples, n_classes).
        epochs: number of training epochs.
        batch_size: mini-batch size.
        filters: number of filters per Conv1D layer.
        kernels: kernel size of each Conv1D layer.

    Returns:
        The model restored from the checkpoint with the best val_accuracy.

    Side effects: writes 'best_model_cnn.h5' and 'loss_cnn.pdf', shows a
    training-curve plot, prints the model summary.
    """
    # define model; checkpoint keeps the epoch with the best val_accuracy
    mc = tf.keras.callbacks.ModelCheckpoint('best_model_cnn.h5', monitor='val_accuracy', mode='max',verbose = 1, save_best_only=True)
    model = tf.keras.models.Sequential([
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv1D(filters=filters, kernel_size=kernels, activation='relu', input_shape=train_x.shape[-2:]),
        tf.keras.layers.Conv1D(filters=filters, kernel_size=kernels, activation='relu'),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.MaxPooling1D(pool_size=2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(100, activation='relu'),
        tf.keras.layers.Dense(train_labels.shape[1], activation='softmax'),
    ])
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # 20% of the training data is held out for validation
    history = model.fit(train_x, train_labels, epochs=epochs, batch_size=batch_size,
                        validation_split = 0.2,callbacks = [mc],verbose = 0)
    # Reload the best checkpoint so the returned model is not the last epoch.
    model = tf.keras.models.load_model('best_model_cnn.h5')
    print("---------------------------------------- \n")
    print(model.summary())
    # Plot training loss, training accuracy and validation loss per epoch.
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    accuracy_plot = history.history['accuracy']
    plt.figure(figsize= (20,10))
    plt.plot(range(len(loss)), loss, 'b', label='Training loss')
    plt.plot(range(len(loss)), accuracy_plot, 'r', label='Accuracy')
    plt.plot(range(len(loss)), val_loss, 'orange', label='Validation loss')
    plt.legend()
    plt.title("Loss for CNN Model")
    plt.savefig('loss_cnn.pdf',bbox_inches = 'tight')
    plt.show()
    return model
def evaluate_cnn(model, test_x, test_labels, batch_size):
    """Evaluate a trained CNN; return (accuracy, confusion_matrix) on the test set."""
    # evaluate model
    _, accuracy = model.evaluate(test_x, test_labels, batch_size=batch_size, verbose=0)
    print("---------------------------------------- \n")
    # Compare predicted class indices with the true one-hot labels.
    predicted_classes = tf.argmax(model.predict(test_x), axis=1)
    true_classes = tf.argmax(test_labels, axis=1)
    confusion_matrix = tf.math.confusion_matrix(true_classes, predicted_classes, dtype=tf.dtypes.int32)
    return accuracy, confusion_matrix
# Result placeholders: the reporting cells below only print a section when
# the corresponding run actually produced an accuracy.
accuracy_lstm1 = None
accuracy_cnn1 = None
accuracy_cnn = None
accuracy_lstm = None
# -
##### RUN
### HYPERPARAMETER COLLECTION
DOWNSAMPLE_RATE = 10  # decimation factor applied to the raw time axis
EPOCHS_LSTM = 50  # training epochs for the LSTM model
EPOCHS_CNN = 100  # training epochs for the CNN model
BATCH_SIZE = 68  # mini-batch size for training and evaluation
WINDOW_SIZE = 200  # timesteps per sample window (after downsampling)
# +
# Intra-subject data: load, then downsample/scale/shuffle, then cut into windows.
intra_train_x, intra_train_y, intra_test_x, intra_test_y = load_intra()
train_x, train_labels = data_wrangle(intra_train_x, intra_train_y,DOWNSAMPLE_RATE = DOWNSAMPLE_RATE)
test_x, test_labels = data_wrangle(intra_test_x, intra_test_y,DOWNSAMPLE_RATE = DOWNSAMPLE_RATE)
intra_train_x, intra_train_labels = data_windows(train_x,train_labels,window_size = WINDOW_SIZE)
intra_test_x, intra_test_labels = data_windows(test_x, test_labels,window_size = WINDOW_SIZE)
del intra_train_y,intra_test_y
# -
# Cross-subject data: the same pipeline for the train split ...
cross_train_x, cross_train_y, cross_test1_x, cross_test1_y, cross_test2_x, cross_test2_y, cross_test3_x, cross_test3_y = load_cross()
train_x, train_labels = data_wrangle(cross_train_x, cross_train_y,DOWNSAMPLE_RATE = DOWNSAMPLE_RATE)
train_x, train_labels = data_windows(train_x,train_labels,window_size = WINDOW_SIZE)
del cross_train_x, cross_train_y
# +
# ... and for each of the three cross-subject test splits.
test_x, test_labels = data_wrangle(cross_test1_x, cross_test1_y,DOWNSAMPLE_RATE = DOWNSAMPLE_RATE)
test1_x, test1_labels = data_windows(test_x, test_labels,window_size = WINDOW_SIZE)
test_x, test_labels = data_wrangle(cross_test2_x, cross_test2_y,DOWNSAMPLE_RATE = DOWNSAMPLE_RATE)
test2_x, test2_labels = data_windows(test_x, test_labels,window_size = WINDOW_SIZE)
test_x, test_labels = data_wrangle(cross_test3_x, cross_test3_y,DOWNSAMPLE_RATE = DOWNSAMPLE_RATE)
test3_x, test3_labels = data_windows(test_x,test_labels,window_size = WINDOW_SIZE)
del test_x, test_labels
# +
# Intra-subject experiment: train both models, then report accuracy and
# confusion matrices.
print("-------------------------------------------------------------------------------- \n")
model_lstm = train_lstm(intra_train_x,intra_train_labels,epochs = EPOCHS_LSTM,batch_size = BATCH_SIZE)
accuracy_lstm,confusion_matrix_lstm = evaluate_lstm(model_lstm,intra_test_x,intra_test_labels,batch_size = BATCH_SIZE)
model_cnn = train_cnn(intra_train_x,intra_train_labels,epochs = EPOCHS_CNN,batch_size = BATCH_SIZE,filters = 64,kernels = 3)
accuracy_cnn,confusion_matrix_cnn = evaluate_cnn(model_cnn,intra_test_x,intra_test_labels,batch_size = BATCH_SIZE)
print("---------------------------------------- \n")
# `is not None` (identity) instead of `!= None` (equality): equality can be
# overridden by the compared object, identity cannot.
if accuracy_lstm is not None:
    print("RESULTS LSTM: \n")
    print("epochs: %.0f" %EPOCHS_LSTM)
    print("Accuracy on the test set for LSTM %.4f \n" %accuracy_lstm)
    print("\n Confusion Matrix LSTM:")
    print(confusion_matrix_lstm)
if accuracy_cnn is not None:
    print("RESULTS CNN: \n")
    print("epochs: %.0f \n" %EPOCHS_CNN)
    print("Accuracy on the test set for CNN %.4f" %accuracy_cnn)
    print("\n Confusion Matrix CNN:")
    print(confusion_matrix_cnn)
# +
# Cross-subject experiment: train on the cross train split and evaluate on
# all three held-out test splits.
model_lstm = train_lstm(train_x,train_labels,epochs = EPOCHS_LSTM,batch_size = BATCH_SIZE)
model_cnn = train_cnn(train_x,train_labels,epochs = EPOCHS_CNN,batch_size = BATCH_SIZE, filters = 64,kernels = 3)
accuracy_lstm1,confusion_matrix_lstm1 = evaluate_lstm(model_lstm,test1_x,test1_labels,batch_size = BATCH_SIZE)
accuracy_cnn1,confusion_matrix_cnn1 = evaluate_cnn(model_cnn,test1_x,test1_labels,batch_size = BATCH_SIZE)
accuracy_lstm2,confusion_matrix_lstm2 = evaluate_lstm(model_lstm,test2_x,test2_labels,batch_size = BATCH_SIZE)
accuracy_cnn2,confusion_matrix_cnn2 = evaluate_cnn(model_cnn,test2_x,test2_labels,batch_size = BATCH_SIZE)
accuracy_lstm3,confusion_matrix_lstm3 = evaluate_lstm(model_lstm,test3_x,test3_labels,batch_size = BATCH_SIZE)
accuracy_cnn3,confusion_matrix_cnn3 = evaluate_cnn(model_cnn,test3_x,test3_labels,batch_size = BATCH_SIZE)
print("---------------------------------------- \n")
# `is not None` (identity) instead of `!= None` (equality): equality can be
# overridden by the compared object, identity cannot.
if accuracy_lstm1 is not None:
    print("RESULTS LSTM: \n")
    print("epochs: %.0f" %EPOCHS_LSTM)
    print("Accuracy on test set 1 for LSTM %.4f \n" %accuracy_lstm1)
    print("Accuracy on test set 2 for LSTM %.4f \n" %accuracy_lstm2)
    print("Accuracy on test set 3 for LSTM %.4f \n" %accuracy_lstm3)
    print("\n Confusion Matrix LSTM 1:")
    print(confusion_matrix_lstm1)
    print("\n Confusion Matrix LSTM 2:")
    print(confusion_matrix_lstm2)
    print("\n Confusion Matrix LSTM 3:")
    print(confusion_matrix_lstm3)
if accuracy_cnn1 is not None:
    print("RESULTS CNN: \n")
    print("epochs: %.0f \n" %EPOCHS_CNN)
    print("Accuracy on test set 1 for CNN %.4f \n" %accuracy_cnn1)
    print("Accuracy on test set 2 for CNN %.4f \n" %accuracy_cnn2)
    print("Accuracy on test set 3 for CNN %.4f \n" %accuracy_cnn3)
    print("\n Confusion Matrix CNN 1:")
    print(confusion_matrix_cnn1)
    print("\n Confusion Matrix CNN 2:")
    print(confusion_matrix_cnn2)
    print("\n Confusion Matrix CNN 3:")
    print(confusion_matrix_cnn3)
| Assignment final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Question 113 - Students in a class
# Statistics Probability Theory Set Theory
#
# In a class of 120 students numbered 1 to 120, all even numbered students opt for Physics, those whose numbers are divisible by 5 opt for Chemistry and those whose numbers are divisible by 7 opt for Math.
#
# Given this information, how many opt for none of the three subjects?
#
# You can read over set theory [here](https://en.wikipedia.org/wiki/Set_theory).
# We know:
# - probability of not opting into Physics: 1/2
# - probability of not opting into Chemistry: 4/5
# - probability of not opting into Math: 6/7
# - treating these three events as independent is a good approximation here
#
# Therefore:
# The probability of opting for none of the three subjects is 6/7 * 4/5 * 1/2 = 12/35 ≈ 0.343,
# i.e. roughly 120 * 12/35 ≈ 41 students.
# Exact count by inclusion–exclusion (multiples of 2, 5, 7 up to 120):
# 120 - (60 + 24 + 17 - 12 - 8 - 3 + 1) = 120 - 79 = 41 students.
| interviewq_exercises/q113_stats_set_theory_probabilities_students_enroll.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:animal] *
# language: python
# name: conda-env-animal-py
# ---
# +
from glob import glob
from time import sleep
from baselines.bench import load_results
from matplotlib import pylab as plt
import numpy as np
import argparse
import os
import pandas as pd

# Plot the training progress of one Atari (Breakout) run recorded by the
# baselines Monitor wrapper: reward curve and demonstration-free performance.
num_good_traj = 0
my_dir = '/workspace8/gabriele/RUNS/exp_breakout_27'
exps = glob(my_dir+'*')
d = exps[0]
df = load_results(d)
df2 = pd.DataFrame()
# Cumulative frame count in millions, used as the x-axis.
df['f']= df['l'].cumsum()/1000000
# Clip negative episode rewards to zero.  Assign the result back rather than
# using `df['perf'].where(..., inplace=True)`, which mutates through an
# intermediate and is the deprecated chained-assignment pattern in pandas.
df['perf'] = df['ereward'].where(df['ereward'] > 0, 0)
df['goal'] = df['perf']>0.9 #guess a threshold
# Drop episodes with ereward != 0 but reward_woD == 0 — presumably episodes
# whose reward came only from demonstrations (TODO confirm).
df2['len_real'] = df['l'][~((df['ereward'] != 0) & (df['reward_woD'] == 0))]
df2['f_real']= df2['len_real'].cumsum()/1000000
df2['reward_woD'] = df['reward_woD'][~((df['ereward'] != 0) & (df['reward_woD'] == 0))]
df2['real_perf']= df2['reward_woD']
# Use .loc instead of chained indexing (df2['reward_woD'][mask] = 0), which
# can silently write to a copy (SettingWithCopyWarning).
df2.loc[df2['reward_woD'] < 8, 'reward_woD'] = 0
time_limit = 0
# One figure for all subplots (the original created an extra, unused figure).
fig = plt.figure(clear=True, figsize=(15,9))
num_good_traj = df2['real_perf'][df2['real_perf'] > 0].count()
roll = 500  # rolling-mean window (in episodes) used to smooth the curves
total_time = df['t'].iloc[-1]
total_steps = df['l'].sum()
total_episodes = df['r'].size
ax = plt.subplot(2, 2, 1)
ax.set_title(' {} total time: {:.1f} h FPS {:.1f}'.format(d.upper(),total_time/3600, total_steps/total_time))
df[['f','rr']].rolling(roll).mean().iloc[0:-1:40].plot('f','rr', ax=ax,legend=False)
#df[['f','ereward']].rolling(roll).mean().iloc[0:-1:40].plot('f','ereward', ax=ax,legend=False)
ax.set_xlabel('N. steps (M)')
ax.set_ylabel('Reward')
#plt.xlim((0, xlim_))
ax.grid(True)
# ax = plt.subplot(2, 2, 2)
# df[['f','min_value']].rolling(roll).mean().iloc[0:-1:40].plot('f','min_value', ax=ax,legend=False)
# df[['f','max_value']].rolling(roll).mean().iloc[0:-1:40].plot('f','max_value', ax=ax,legend=False)
# df[['f','mean_value']].rolling(roll).mean().iloc[0:-1:40].plot('f','mean_value', ax=ax,legend=False)
# ax.set_xlabel('N. steps (M)')
# ax.set_ylabel('Min Value')
# #plt.xlim((0, xlim_))
# ax.grid(True)
ax = plt.subplot(2, 2, 3)
df2[['f_real','reward_woD']].rolling(roll).mean().iloc[0:-1:40].plot('f_real','reward_woD', ax=ax,legend=False)
ax.set_xlabel('N. steps (M)')
ax.set_ylabel('Performance without Demonstrations')
#plt.xlim((0, xlim_))
ax.grid(True)
# ax = plt.subplot(2, 2, 4)
# df[['l']].rolling(roll).mean().iloc[0:-1:40].plot(y='l', ax=ax,legend=False)
# ax.set_xlabel('N. episodes')
# ax.set_ylabel('Episode length')
# #plt.xlim((0, xlim_))
# ax.grid(True)
# -
df2['reward_woD'].sort_values( ascending=False)
df2['reward_woD'].shape
| main/.ipynb_checkpoints/Atari mnitor-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="GltiE234FEdP"
# ## 관련 함수, 패키지 호출
# + id="l2t5h3NRnb3C"
# Install the Nanum Korean fonts so matplotlib can render Hangul labels.
# !sudo apt-get install -y fonts-nanum
# !sudo fc-cache -fv
# !rm ~/.cache/matplotlib -rf
# The Colab runtime must be restarted after the installation.
# + colab={"base_uri": "https://localhost:8080/"} id="jPIqpw8CEcOV" executionInfo={"status": "ok", "timestamp": 1642214345561, "user_tz": -540, "elapsed": 25419, "user": {"displayName": "\ud55c\ud658\uae30", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhouE8RMT6PlvcJ-7mr_clqAORabr2bBgeicarWgzw=s64", "userId": "08351897761321616592"}} outputId="2edbb497-7d98-4b09-cdf6-e9b94bc012f0"
from google.colab import drive
drive.mount('/content/drive')
# PyTorch / torchvision
import torch
import torch.nn.functional as F
import torchvision
import torch
import torch.nn as nn
import torch.optim as optim
import argparse
import torchvision
from torchvision import datasets, models, transforms
import pandas as pd
import numpy as np
import time
import os
from tqdm.notebook import tqdm
import re
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # device object: GPU when available, else CPU
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
# Register the NanumBarunGothic font so Korean text renders in plots.
fontpath = '/usr/share/fonts/truetype/nanum/NanumBarunGothic.ttf'
font = fm.FontProperties(fname=fontpath, size=10)
plt.rc('font', family='NanumBarunGothic')
matplotlib.font_manager._rebuild()
# + id="Ao0Mh9LqEqtY" executionInfo={"status": "ok", "timestamp": 1642214345562, "user_tz": -540, "elapsed": 6, "user": {"displayName": "\ud55c\ud658\uae30", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhouE8RMT6PlvcJ-7mr_clqAORabr2bBgeicarWgzw=s64", "userId": "08351897761321616592"}}
# Test-time preprocessing: resize to the ResNet input size and apply the
# ImageNet normalization statistics.
transforms_test = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# class_names = ['airc', 'airpods', 'bike', 'camera', 'car', 'coat', 'desktop', 'dressshose', 'electro', 'galaxy', 'glass', 'hat', 'iphone',
#                'jewelry', 'jumper', 'keyboard', 'laptop', 'mouse', 'onepiece', 'pants', 'shirt', 'skirt', 'sneaker', 'top', 'tv', 'wallet', 'watch']
# Korean category names; the list is indexed by the model's predicted class id.
class_names = ['에어컨', '에어팟', '오토바이', '카메라', '자동차', '코트', '데스크탑', '구두', '전자제품', '갤럭시', '안경', '모자', '아이폰',
               '쥬얼리', '점퍼', '키보드', '노트북', '마우스', '원피스', '바지', '셔츠', '치마', '스니커즈', '상의', 'tv', '지갑', '시계']
def imshow(input, title):
    """Undo the ImageNet normalization of a CHW tensor and display it with *title*."""
    # Tensor layout (C, H, W) -> numpy (H, W, C) for matplotlib.
    image = input.numpy().transpose((1, 2, 0))
    # Reverse Normalize(mean, std), then clamp into the valid [0, 1] range.
    image = np.array([0.229, 0.224, 0.225]) * image + np.array([0.485, 0.456, 0.406])
    plt.imshow(np.clip(image, 0, 1))
    plt.title(title)
    plt.show()
## Preprocess the entered title and return recommended tags.
def find_tag(title, cat_matrix):
    """Recommend the top-5 tags for a listing title.

    Splits the title into words, strips non-word characters, keeps the words
    that appear as columns of *cat_matrix*, averages their per-tag scores,
    and returns the 5 best-scoring tags.

    Args:
        title: one-element list containing the listing title string.
        cat_matrix: DataFrame with one column per keyword plus a 'label' column.

    Returns:
        DataFrame with columns ['label', 'target'] — the top-5 tags by score.
    """
    # Clean each word.  The original loop rebound its loop variable
    # (`i = re.sub(...)`) and discarded the result, so the cleaning never
    # took effect; actually keep the substituted words here.
    words = [re.sub(r'\W+', ' ', w).strip() for w in title[0].split(' ')]
    # Keep only the words that exist as keyword columns of cat_matrix.
    keywords = [w for w in words if w in cat_matrix.columns]
    # .copy() avoids pandas' SettingWithCopy pitfall when adding 'target'.
    test_matrix = cat_matrix[keywords + ['label']].copy()
    # Average the keyword scores per tag.
    test_matrix['target'] = 0
    for kw in keywords:
        test_matrix['target'] += test_matrix[kw]
    test_matrix['target'] = test_matrix['target'] / len(keywords)
    # Top-5 tags by score.
    test_matrix = test_matrix[['label', 'target']].sort_values(by='target', ascending=False)[:5]
    return test_matrix
# + [markdown] id="VLtplRneJvLz"
# ## 모델 호출
# + id="WJ1K5vHSJxx2" executionInfo={"status": "ok", "timestamp": 1642214581543, "user_tz": -540, "elapsed": 10692, "user": {"displayName": "\ud55c\ud658\uae30", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhouE8RMT6PlvcJ-7mr_clqAORabr2bBgeicarWgzw=s64", "userId": "08351897761321616592"}}
## Load the image-classification model (ResNet-50 with a 27-class head)
model = models.resnet50(pretrained=True)
num_features = model.fc.in_features
model.fc = nn.Linear(num_features, 27)  # replace the head: 27 product categories
model = model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.0001, momentum=0.9)
# Restore the fine-tuned weights and optimizer state from the Drive checkpoint.
checkpoint = torch.load('/content/drive/MyDrive/Colab Notebooks/1조/4.Image_model/모델백업/ResNet50_Pre_T/rn50b96l31_49.pth')
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
check = checkpoint['epoch']
loss = checkpoint['loss']
model.eval()  # inference mode
## Load the tag-recommendation matrices (one CSV per product category)
tag_list = ['노트북','에어팟','아이폰','키보드','마우스','갤럭시']
total_df = []
for i in range(len(tag_list)):
    total_df.append(pd.read_csv(f'/content/drive/MyDrive/Colab Notebooks/1조/3.Recommen_model/tag_model/{tag_list[i]}.csv'))
tag_dict={'노트북':total_df[0],'에어팟':total_df[1],'아이폰':total_df[2],'키보드':total_df[3],'마우스':total_df[4],'갤럭시':total_df[5]}
# + [markdown] id="bwQpUObrFqht"
# ## 구현
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="Jy-iesuzFBBC" executionInfo={"status": "ok", "timestamp": 1642215099259, "user_tz": -540, "elapsed": 1651, "user": {"displayName": "\ud55c\ud658\uae30", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhouE8RMT6PlvcJ-7mr_clqAORabr2bBgeicarWgzw=s64", "userId": "08351897761321616592"}} outputId="312ded16-899d-4e8a-fc03-bf0257b92142"
from PIL import Image
# Load a test image and run it through the test-time preprocessing.
image = Image.open('/content/drive/MyDrive/final_test/key.jpg')
image = transforms_test(image).unsqueeze(0).to(device)
# Show the picture and print the predicted category.
with torch.no_grad():
    outputs = model(image)
    _, preds = torch.max(outputs, 1)
    imshow(image.cpu().data[0], title='예측 결과: ' + class_names[preds[0]])
print(class_names[preds[0]])
# + colab={"base_uri": "https://localhost:8080/", "height": 484} id="qMst3t6QLBPn" executionInfo={"status": "ok", "timestamp": 1642215115182, "user_tz": -540, "elapsed": 14187, "user": {"displayName": "\ud55c\ud658\uae30", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhouE8RMT6PlvcJ-7mr_clqAORabr2bBgeicarWgzw=s64", "userId": "08351897761321616592"}} outputId="1d2c8054-8184-413c-b150-457b615872ab"
# Read the listing title from the user, then recommend tags from the matrix
# that belongs to the predicted product category.
title = [input('제목을 입력해주세요 : ')]
select_matrix = tag_dict[class_names[preds[0]]]
find_tag(title, select_matrix)
# + id="9QryN9dunn9H"
| Final_project_bunjang_image_tag.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: julia-nteract-1.5
# kernelspec:
# argv:
# - C:\Users\snikula\AppData\Local\JuliaPro-1.5.4-1\Julia-1.5.4\bin\julia.exe
# - -i
# - --color=yes
# - C:\Users\snikula\.julia\packages\IJulia\e8kqU\src\kernel.jl
# - '{connection_file}'
# display_name: Julia nteract 1.5.4
# env: {}
# interrupt_mode: message
# language: julia
# name: julia-nteract-1.5
# ---
# + [markdown] nteract={"transient": {"deleting": false}}
# # Atomin rakenne
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
using Printf
using Dates
b=2.897768e-3 # Wien's displacement-law constant [m*K]
c=2.99792e8 # speed of light [m/s]
h=6.6260693e-34 # Planck constant [J*s]
me=9.10938e-31 # electron mass [kg]
eV=1.6021766e-19 # one electronvolt [J]
sigma=5.670374419e-8 # Stefan-Boltzmann constant [W/(m^2*K^4)]
@printf "%s\n" Dates.now();
versioninfo(verbose=false);
# + [markdown] nteract={"transient": {"deleting": false}}
# 5-6
#
# Book's answer: a) 10.0 GHz b) 2.99 cm
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Photon frequency and wavelength for an energy difference dE.
# NOTE(review): with dE = 41.4 eV this prints ~1e16 Hz, not the book's
# 10 GHz — the exercise may intend 41.4 ueV (dE=41.4e-6*eV); confirm.
dE=41.4*eV
lambda=h*c/dE
f=c/lambda
@printf("Taajuus on %.1e Hz ja aallonpituus %.2e m.\n",
f,lambda)
# + [markdown] nteract={"transient": {"deleting": false}}
# 5-7
#
# Book's answer: 2.9 PHz
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Transition energy from -13.6 eV to -1.5 eV, then frequency and wavelength.
dE=(-1.5-(-13.6))*eV
lambda=h*c/dE
f=c/lambda
@printf("Taajuus on %.1e Hz ja aallonpituus %.2e m.\n",
f,lambda)
# + [markdown] nteract={"transient": {"deleting": false}}
# 5-8
#
# Book's answer: a) 5.88 ueV b) radio wave (1.52 GHz)
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Photon energy (in eV) and frequency of the 21.1 cm line.
lambda=21.1e-2
dE=h*c/lambda
f=c/lambda
@printf("dE on %.2e eV ja taajuus on %.2e Hz.\n",dE/eV,f)
# + [markdown] nteract={"transient": {"deleting": false}}
# 5-9
#
# Book's answer: 162 nm
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Wavelength of the transition whose energy is the difference between the
# photon energies of the 25.6 nm and 30.4 nm lines.
dE1=h*c/25.6e-9
dE2=h*c/30.4e-9
dE=dE1-dE2
lambda=h*c/dE
@printf("Aallonpituus on %.2e m.\n",
lambda)
# + [markdown] nteract={"transient": {"deleting": false}}
# 5-10
#
# Book's answer: b) <br>
# a 3400 nm (not visible light) <br>
# b 650 nm (red light) <br>
# c 1100 nm (not visible light)<br>
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Print the photon wavelength (in nm) for an energy gap of `dE` electronvolts,
# labelled with `otsikko`.  Uses the globals h, c and eV defined above.
function ap(otsikko, dE)
    lambda_nm = h * c / (dE * eV * 1e-9)
    @printf("Aallonpituus %s on noin %.0f nm.\n", otsikko, lambda_nm)
end
ap("a", 0.37)
ap("b", 1.9)
ap("c", 1.1)
| 7/5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ___
#
# <a href='http://www.pieriandata.com'><img src='../Pierian_Data_Logo.png'/></a>
# ___
# <center><em>Copyright by Pierian Data Inc.</em></center>
# <center><em>For more information, visit us at <a href='http://www.pieriandata.com'>www.pieriandata.com</a></em></center>
# # Grid Search
#
# We can search through a variety of combinations of hyperparameters with a grid search. While many linear models are quite simple and even come with their own specialized versions that do a search for you, this method of a grid search will can be applied to *any* model from sklearn, and we will need to use it later on for more complex models, such as Support Vector Machines.
# ## Imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# ## Data
# Advertising dataset: TV/radio/newspaper spend per market plus 'sales'.
df = pd.read_csv("../DATA/Advertising.csv")
df.head()
# ### Formatting Data
# +
## CREATE X and y
X = df.drop('sales',axis=1)
y = df['sales']
# TRAIN TEST SPLIT
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)
# SCALE DATA
# The scaler is fit on the training set only and then applied to both
# splits, so no information from the test set leaks into the features.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# -
# ## Model
from sklearn.linear_model import ElasticNet
help(ElasticNet)
# Base estimator whose hyperparameters the grid search will tune.
base_elastic_model = ElasticNet()
# ## Grid Search
#
# A search consists of:
#
# * an estimator (regressor or classifier such as sklearn.svm.SVC());
# * a parameter space;
# * a method for searching or sampling candidates;
# * a cross-validation scheme
# * a score function.
# Candidate grid: regularization strength x L1/L2 mixing ratio (42 combos).
param_grid = {'alpha':[0.1,1,5,10,50,100],
              'l1_ratio':[.1, .5, .7, .9, .95, .99, 1]}
from sklearn.model_selection import GridSearchCV
# verbose number a personal preference
grid_model = GridSearchCV(estimator=base_elastic_model,
                          param_grid=param_grid,
                          scoring='neg_mean_squared_error',
                          cv=5,
                          verbose=2)
# Fits every parameter combination with 5-fold CV on the training data.
grid_model.fit(X_train,y_train)
grid_model.best_estimator_
grid_model.best_params_
# +
# pd.DataFrame(grid_model.cv_results_)
# -
# ## Using Best Model From Grid Search
# GridSearchCV refits the best estimator on the full training set, so
# predict() here uses the best-found hyperparameters.
y_pred = grid_model.predict(X_test)
from sklearn.metrics import mean_squared_error
mean_squared_error(y_test,y_pred)
| Data Science Resources/Jose portila - ML/10-Cross-Val-and-LinReg-Project/01-Grid-Search.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Tree
# +
class treeNode:
    """n-ary tree node: a data payload plus a list of child nodes."""

    def __init__(self, data, children=None):
        # A mutable default argument ([]) would be shared by every node
        # created without an explicit children list; build a fresh list.
        self.data = data
        self.children = [] if children is None else children

    def __str__(self, level=0):
        """Render the subtree, one node per line, indented by depth."""
        ret = " " * level + str(self.data) + "\n"
        for child in self.children:
            ret += child.__str__(level+1)
        return ret

    def addChild(self, treeNode):
        """Append *treeNode* to this node's children."""
        self.children.append(treeNode)
# Build a small demo tree:
#   Drinks -> Cold -> {Cola, Fanta}
#          -> Hot  -> {Tea, Coffee}
tree = treeNode('Drinks', [])
cold = treeNode('Cold', [])
hot = treeNode('Hot', [])
tree.addChild(cold)
tree.addChild(hot)
tea = treeNode('Tea', [])
coffee = treeNode('Coffee', [])
cola = treeNode('Cola', [])
fanta = treeNode('Fanta', [])
cold.addChild(cola)
cold.addChild(fanta)
hot.addChild(tea)
hot.addChild(coffee)
# Print the whole tree via treeNode.__str__ (indentation shows depth).
print(tree)
# -
# ## [1] Binary Tree using Linked List
# +
from Module.classCollection import Queue
class treeNode:
    """Binary-tree node: a data payload plus left/right child references."""

    def __init__(self, data):
        self.data, self.leftChild, self.rightChild = data, None, None
def preorderTraversal(rootNode):
    """Print the tree depth-first: root, then left subtree, then right subtree."""
    if rootNode is None:
        return
    print(rootNode.data)
    preorderTraversal(rootNode.leftChild)
    preorderTraversal(rootNode.rightChild)
def inorderTraversal(rootNode):
    """Print the tree in-order: left subtree, root, right subtree.

    Fixes the original, which recursed into both subtrees with
    preorderTraversal, so the printed order was not actually in-order.
    """
    if rootNode is None:
        return
    inorderTraversal(rootNode.leftChild)
    print(rootNode.data)
    inorderTraversal(rootNode.rightChild)
def postorderTraversal(rootNode):
    """Print the tree post-order: left subtree, right subtree, root.

    Fixes the original, which recursed into both subtrees with
    preorderTraversal, so the printed order was not actually post-order.
    """
    if rootNode is None:
        return
    postorderTraversal(rootNode.leftChild)
    postorderTraversal(rootNode.rightChild)
    print(rootNode.data)
def levelorderTraversal(rootNode):
    """Print the tree breadth-first: level by level, left to right."""
    if not rootNode:
        return
    pending = Queue()
    pending.enqueue(rootNode)
    while not pending.isEmpty():
        current = pending.dequeue()
        # The custom Queue wraps payloads, hence the .value indirection.
        print(current.value.data)
        for child in (current.value.leftChild, current.value.rightChild):
            if child:
                pending.enqueue(child)
def searchBT(rootNode, nodeValue):
    """Level-order search: "Success" if *nodeValue* is found, "Unsuccess" if not."""
    if not rootNode:
        return
    pending = Queue()
    pending.enqueue(rootNode)
    while not pending.isEmpty():
        current = pending.dequeue()
        if current.value.data == nodeValue:
            return "Success"
        for child in (current.value.leftChild, current.value.rightChild):
            if child:
                pending.enqueue(child)
    return "Unsuccess"
def insertBT(rootNode, newNode):
    """Attach *newNode* at the first vacant child slot found in level order."""
    if not rootNode:
        return
    pending = Queue()
    pending.enqueue(rootNode)
    while not pending.isEmpty():
        current = pending.dequeue()
        print(current.value.data)  # the probed path is echoed, as in the original
        if not current.value.leftChild:
            current.value.leftChild = newNode
            return "Successfully inserted"
        pending.enqueue(current.value.leftChild)
        if not current.value.rightChild:
            current.value.rightChild = newNode
            return "Successfully inserted"
        pending.enqueue(current.value.rightChild)
def getDeepestnode(rootNode):
    """Return the last node visited in level order (the deepest, right-most one)."""
    if not rootNode:
        return
    pending = Queue()
    pending.enqueue(rootNode)
    while not pending.isEmpty():
        current = pending.dequeue()
        print(current.value.data)  # the traversal is echoed, as in the original
        for child in (current.value.leftChild, current.value.rightChild):
            if child:
                pending.enqueue(child)
        deepestNode = current.value
    return deepestNode
def deleteDeepestNode(rootNode, dNode):
    """Detach *dNode* (as found by getDeepestnode) from the tree.

    Fixes the original traversal, which only examined the left child when
    the right child was absent — skipping whole subtrees — and in that
    branch enqueued a possibly-None left child, crashing on dequeue.
    Here both children of every node are checked.
    """
    if not rootNode:
        return
    customQueue = Queue()
    customQueue.enqueue(rootNode)
    while not(customQueue.isEmpty()):
        root = customQueue.dequeue()
        if root.value is dNode:
            root.value = None
            return
        if root.value.leftChild:
            if root.value.leftChild is dNode:
                root.value.leftChild = None
                return
            customQueue.enqueue(root.value.leftChild)
        if root.value.rightChild:
            if root.value.rightChild is dNode:
                root.value.rightChild = None
                return
            customQueue.enqueue(root.value.rightChild)
def deleteNodeBT(rootNode, node):
    """Delete the first node (in level order) whose data equals *node*.

    The matching node's data is overwritten with the deepest node's data and
    the deepest node is then removed, keeping the tree complete.  Also fixes
    the typos in the status messages ("successully", "detete").
    """
    if not rootNode:
        return "The BT does not exist"
    customQueue = Queue()
    customQueue.enqueue(rootNode)
    while not(customQueue.isEmpty()):
        root = customQueue.dequeue()
        if root.value.data == node:
            dNode = getDeepestnode(rootNode)
            root.value.data = dNode.data
            deleteDeepestNode(rootNode, dNode)
            return "The node has been successfully deleted"
        if root.value.leftChild:
            customQueue.enqueue(root.value.leftChild)
        if root.value.rightChild:
            customQueue.enqueue(root.value.rightChild)
    return "Failed to delete"
def deleteBT(rootNode):
    """Clear the root's payload and child links, discarding the whole tree."""
    rootNode.data = rootNode.leftChild = rootNode.rightChild = None
    return "The BT has been successfully deleted"
# Build a small demo tree by wiring child links directly:
#   Drinks -> Hot -> Tea, Drinks -> Cold -> Coffee
newBT = treeNode("Drinks")
leftChild = treeNode("Hot")
rightChild = treeNode("Cold")
tea = treeNode("Tea")
coffee = treeNode("Coffee")
leftChild.leftChild = tea
rightChild.leftChild = coffee
newBT.leftChild = leftChild
newBT.rightChild = rightChild
# Exercise each traversal, then search / insert / delete operations.
print("--Preorder--")
preorderTraversal(newBT)
print('\n')
print("--Inorder--")
inorderTraversal(newBT)
print('\n')
print("--Postorder--")
postorderTraversal(newBT)
print('\n')
print("--Levelorder--")
levelorderTraversal(newBT)
print("--Search in level order--")
searchBT(newBT, "Tea")
print("--insert Cola--")
newNode = treeNode("Cola")
print(insertBT(newBT, newNode))
levelorderTraversal(newBT)
print("----delete----")
deleteNodeBT(newBT, 'Hot')
levelorderTraversal(newBT)
print("----delete entire binary tree----")
deleteBT(newBT)
levelorderTraversal(newBT)
# -
# ## [2] Binary Tree using Python List
# +
class BinaryTree:
    """Binary tree in a Python list, 1-based: root at index 1, children of i at 2i and 2i+1.

    Fixes the spelling of the status messages ("Sucess", "no any node") and
    limits the search to the occupied slots.
    """

    def __init__(self, size):
        # Index 0 is unused, so a backing list of `size` cells holds size-1 values.
        self.customList = size*[None]
        self.lastUsedIndex = 0
        self.maxSize = size

    def insertNode(self, value):
        """Insert *value* at the next free slot (level order)."""
        if self.lastUsedIndex + 1 == self.maxSize:
            return "The Binary Tree is full"
        self.customList[self.lastUsedIndex+1] = value
        self.lastUsedIndex += 1
        return "The value has been successfully inserted"

    def searchNode(self, value):
        """Linear scan over the occupied slots for *value*."""
        for i in range(1, self.lastUsedIndex + 1):
            if self.customList[i] == value:
                return "Success"
        return "Not found"

    def preorderTraversal(self, index):
        """Print root, left subtree, right subtree, starting at *index* (1 = root)."""
        if index > self.lastUsedIndex:
            return
        print(self.customList[index])
        self.preorderTraversal(index*2)
        self.preorderTraversal(index*2+1)

    def inorderTraversal(self, index):
        """Print left subtree, root, right subtree, starting at *index* (1 = root)."""
        if index > self.lastUsedIndex:
            return
        self.inorderTraversal(index*2)
        print(self.customList[index])
        self.inorderTraversal(index*2+1)

    def postorderTraversal(self, index):
        """Print left subtree, right subtree, root, starting at *index* (1 = root)."""
        if index > self.lastUsedIndex:
            return
        self.postorderTraversal(index*2)
        self.postorderTraversal(index*2+1)
        print(self.customList[index])

    def levelorderTraversal(self, index):
        """Print the occupied slots in level order.

        NOTE: as in the original, *index* only acts as an emptiness guard;
        the traversal always starts from the root (kept for API parity).
        """
        if index > self.lastUsedIndex:
            return
        for i in range(1,self.lastUsedIndex+1):
            print(f"{self.customList[i]} ")

    def deleteNode(self, value):
        """Replace the first slot holding *value* with the last value, then shrink."""
        if self.lastUsedIndex == 0:
            return "There is no node to delete"
        for i in range(1, self.lastUsedIndex+1):
            if self.customList[i]==value:
                self.customList[i] = self.customList[self.lastUsedIndex]
                self.customList[self.lastUsedIndex] = None
                self.lastUsedIndex -= 1
                return "Successfully deleted"
        return "Failed to find the node"

    def deleteBT(self):
        """Discard the backing list, destroying the whole tree."""
        self.customList = None
        return "BT is successfully removed."
# Demo: a list-backed binary tree with capacity for 7 values (index 0 unused).
newBT = BinaryTree(8)
print("----1. insert----")
print(newBT.insertNode("Drinks"))
print(newBT.insertNode("Hot"))
print(newBT.insertNode("Cold"))
print(newBT.insertNode("Tea"))
print(newBT.insertNode("Coffee"))
print("----2. search----")
print(newBT.searchNode("Hot"))
print("----3. preOrder----")
newBT.preorderTraversal(1)
print("----4. inOrder----")
newBT.inorderTraversal(1)
print("----5. postOrder----")
newBT.postorderTraversal(1)
print("----6. levelOrder----")
newBT.levelorderTraversal(1)
print("----7. delete node----")
newBT.deleteNode('Hot')
newBT.levelorderTraversal(1)
# -
| 12_Tree and Binary Tree.ipynb |