markdown
stringlengths
0
1.02M
code
stringlengths
0
832k
output
stringlengths
0
1.02M
license
stringlengths
3
36
path
stringlengths
6
265
repo_name
stringlengths
6
127
Each variable plotted against loss:
# Scatter each hyperparameter column of df_res against validation loss.
# One spec per original cell: (column, x-axis label, y-limits, optional x-limits).
_plot_specs = [
    ('latent_dims', "latent dimensions", (16000, 70000), None),
    ('first_channel', "First channel", (16000, 80000), None),
    ('batch_norm', "Batch Norm", (16000, 80000), (-0.1, 1.1)),
    ('activation', "Activation", (16000, 70000), None),
    ('model', "Model", (16000, 80000), None),
    ('num_layers', "Number of layers in Decoder/Encoder", (16000, 80000), None),
    ('total_channels', "Total Channels", (16000, 80000), None),
    ('channels/layer', "Channels/Layer", (16000, 80000), None),
    ('first_channel', "First_channel", (16000, 80000), None),
    ('conv_changeover',
     "Input size decrease at which to change to start downsampling (via transposed convolution)",
     (16000, 80000), None),
]
for _col, _xlabel, _ylim, _xlim in _plot_specs:
    plt.scatter(_col, "valid_loss", data=df_res, marker="+", color='r')
    plt.ylabel("Loss")
    plt.xlabel(_xlabel)
    if _xlim is not None:
        plt.xlim(*_xlim)
    plt.ylim(*_ylim)
_____no_output_____
MIT
notebooks/.ipynb_checkpoints/CAE_zoo_analysis-checkpoint.ipynb
scheng1992/Data_Assimilation
Investigate "band" in loss-model plot Extract the different bands and inspect
# Slice the results into the loss "bands" visible in the loss-vs-model plot.
edge_a, edge_b, edge_c = 20000, 23000, 26000
band1 = df_res[df_res.valid_loss < edge_a]
band2 = df_res[(df_res.valid_loss > edge_a) & (df_res.valid_loss < edge_b)]
band3 = df_res[(df_res.valid_loss > edge_b) & (df_res.valid_loss < edge_c)]
band1.head()
band3.head()
_____no_output_____
MIT
notebooks/.ipynb_checkpoints/CAE_zoo_analysis-checkpoint.ipynb
scheng1992/Data_Assimilation
Investigate Duplicates
#eg1: /data/home/jfm1118/DA/experiments/CAE_zoo2/32 and /data/home/jfm1118/DA/experiments/CAE_zoo2/12
#eg2: /data/home/jfm1118/DA/experiments/CAE_zoo2/31 and /data/home/jfm1118/DA/experiments/CAE_zoo2/27

def get_data_from_path(path):
    """Return the entry of the module-level `results` list whose 'path' matches.

    Raises:
        ValueError: when no entry in `results` has the requested path.
    """
    for entry in results:
        if entry["path"] == path:
            return entry
    raise ValueError("No path = {} in 'results' list".format(path))

def print_model(settings):
    """Instantiate the AE model described by `settings` and print a short summary."""
    model = settings.AE_MODEL_TYPE(**settings.get_kwargs())
    print(settings.__class__.__name__)
    print(model.layers)
    print(settings.CHANNELS)

# Compare the first suspected duplicate pair (runs 32 and 12).
base_exp = "/data/home/jfm1118/DA/experiments/CAE_zoo2/"
exp_32 = get_data_from_path(base_exp + "32")["settings"]
exp_12 = get_data_from_path(base_exp + "12")["settings"]
print_model(exp_32)
print()
print_model(exp_12)

# Compare the second suspected duplicate pair (runs 31 and 27).
base_exp = "/data/home/jfm1118/DA/experiments/CAE_zoo2/"
exp_1 = get_data_from_path(base_exp + "31")["settings"]
exp_2 = get_data_from_path(base_exp + "27")["settings"]
print_model(exp_1)
print()
print_model(exp_2)

print(list(range(1, 2 * (exp_1.get_num_layers_decode() + 1) + 1, 2)))
CAE2 ModuleList( (0): Conv3d(1, 8, kernel_size=(3, 3, 3), stride=(1, 1, 1)) (1): Conv3d(8, 8, kernel_size=(3, 3, 2), stride=(2, 2, 2), padding=(1, 0, 0)) (2): Conv3d(8, 8, kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 0)) (3): Conv3d(8, 8, kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(0, 1, 0)) (4): Conv3d(8, 8, kernel_size=(3, 3, 2), stride=(2, 2, 1)) (5): Conv3d(8, 4, kernel_size=(3, 3, 2), stride=(2, 2, 1), padding=(1, 1, 0)) (6): ConvTranspose3d(4, 8, kernel_size=(3, 3, 2), stride=(2, 2, 1), padding=(1, 1, 0)) (7): ConvTranspose3d(8, 8, kernel_size=(3, 3, 2), stride=(2, 2, 1)) (8): ConvTranspose3d(8, 8, kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(0, 1, 0)) (9): ConvTranspose3d(8, 8, kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 0)) (10): ConvTranspose3d(8, 8, kernel_size=(3, 3, 2), stride=(2, 2, 2), padding=(1, 0, 0)) (11): ConvTranspose3d(8, 1, kernel_size=(3, 3, 3), stride=(1, 1, 1)) ) [1, 8, 8, 8, 8, 8, 4] CAE3 ModuleList( (0): Conv3d(1, 8, kernel_size=(3, 3, 3), stride=(1, 1, 1)) (1): Conv3d(8, 8, kernel_size=(3, 3, 2), stride=(2, 2, 2), padding=(1, 0, 0)) (2): Conv3d(8, 8, kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 0)) (3): Conv3d(8, 8, kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(0, 1, 0)) (4): Conv3d(8, 8, kernel_size=(3, 3, 2), stride=(2, 2, 1)) (5): Conv3d(8, 4, kernel_size=(3, 3, 2), stride=(2, 2, 1), padding=(1, 1, 0)) (6): ConvTranspose3d(4, 8, kernel_size=(3, 3, 2), stride=(2, 2, 1), padding=(1, 1, 0)) (7): ConvTranspose3d(8, 8, kernel_size=(3, 3, 2), stride=(2, 2, 1)) (8): ConvTranspose3d(8, 8, kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(0, 1, 0)) (9): ConvTranspose3d(8, 8, kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 0)) (10): ConvTranspose3d(8, 8, kernel_size=(3, 3, 2), stride=(2, 2, 2), padding=(1, 0, 0)) (11): ConvTranspose3d(8, 1, kernel_size=(3, 3, 3), stride=(1, 1, 1)) ) [1, 8, 8, 8, 8, 8, 4] [1, 3, 5, 7, 9, 11, 13]
MIT
notebooks/.ipynb_checkpoints/CAE_zoo_analysis-checkpoint.ipynb
scheng1992/Data_Assimilation
Investigate Best
# Inspect the best-performing experiment (run 17).
path = "/data/home/jfm1118/DA/experiments/CAE_zoo2/17"
exp = get_data_from_path(base_exp + str(17))["settings"]
# BUG FIX: the original cell printed `exp_1` (a leftover variable from the
# duplicate investigation above) instead of the freshly loaded best model.
print_model(exp)
CAE2 ModuleList( (0): Conv3d(1, 8, kernel_size=(3, 3, 3), stride=(1, 1, 1)) (1): Conv3d(8, 8, kernel_size=(3, 3, 2), stride=(2, 2, 2), padding=(1, 0, 0)) (2): Conv3d(8, 8, kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 0)) (3): Conv3d(8, 8, kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(0, 1, 0)) (4): Conv3d(8, 8, kernel_size=(3, 3, 2), stride=(2, 2, 1)) (5): Conv3d(8, 4, kernel_size=(3, 3, 2), stride=(2, 2, 1), padding=(1, 1, 0)) (6): ConvTranspose3d(4, 8, kernel_size=(3, 3, 2), stride=(2, 2, 1), padding=(1, 1, 0)) (7): ConvTranspose3d(8, 8, kernel_size=(3, 3, 2), stride=(2, 2, 1)) (8): ConvTranspose3d(8, 8, kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(0, 1, 0)) (9): ConvTranspose3d(8, 8, kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 0)) (10): ConvTranspose3d(8, 8, kernel_size=(3, 3, 2), stride=(2, 2, 2), padding=(1, 0, 0)) (11): ConvTranspose3d(8, 1, kernel_size=(3, 3, 3), stride=(1, 1, 1)) ) [1, 8, 8, 8, 8, 8, 4]
MIT
notebooks/.ipynb_checkpoints/CAE_zoo_analysis-checkpoint.ipynb
scheng1992/Data_Assimilation
`np.tile` vs. `np.repeat`
# np.tile repeats the whole array end-to-end; np.repeat repeats each element.
tiled = np.tile([1, 2, 3], reps=2)      # -> [1 2 3 1 2 3]
repeated = np.repeat([1, 2, 3], 2)      # -> [1 1 2 2 3 3]
tiled
repeated
_____no_output_____
MIT
lectures/11_Numpy_extra_stuff.ipynb
juditacs/bprof-python
multidimensional
# Multidimensional tile/repeat, plus a dict-merge aside from the lecture.
np.tile(np.repeat([1, 2, 3, 4], 2), 3)

d = {'b': 12}
dict({'a': 2}, **d)   # keyword-expansion merge of two dicts

grid = np.arange(4).reshape(2, -1)
np.tile(grid, (2, 3))            # tile the 2x2 block into a 4x6 array

grid = np.arange(4).reshape(2, -1)
np.repeat(grid, (2, 5), axis=0)  # repeat row 0 twice, row 1 five times

grid = np.arange(4).reshape(2, -1)
np.repeat(grid, (2, 5), axis=1)  # same counts, along columns
_____no_output_____
MIT
lectures/11_Numpy_extra_stuff.ipynb
juditacs/bprof-python
Set operations
# Set operations on arrays: common elements, and elements of a missing from b.
a = np.array([1, 2, 3, 2, 3, 4, 3, 4, 5, 6])
b = np.array([7, 2, 10, 2, 7, 4, 9, 4, 9, 8])
common = np.intersect1d(a, b)
only_in_a = np.setdiff1d(a, b)
common, only_in_a
_____no_output_____
MIT
lectures/11_Numpy_extra_stuff.ipynb
juditacs/bprof-python
Matching positions and elements
# Positions where a and b agree, and the matching values themselves.
a = np.array([1, 2, 3, 2, 3, 4, 3, 4, 5, 6])
b = np.array([7, 2, 10, 2, 7, 4, 9, 4, 9, 8])
match_mask = a == b
np.where(match_mask), a[match_mask]
_____no_output_____
MIT
lectures/11_Numpy_extra_stuff.ipynb
juditacs/bprof-python
Boolean indexing
# Boolean indexing: keep only the elements of `a` (defined in an earlier cell)
# that are strictly greater than 4.
a[a > 4]
_____no_output_____
MIT
lectures/11_Numpy_extra_stuff.ipynb
juditacs/bprof-python
Swapping columns
# Reorder columns with fancy indexing (here: rotate the first four left by one).
a = np.arange(10).reshape(2, -1)
new_order = [1, 2, 3, 0, 4]
a[:, new_order]
_____no_output_____
MIT
lectures/11_Numpy_extra_stuff.ipynb
juditacs/bprof-python
Standardizing and normalizing. Standardizing: mean 0, std 1
# Standardize: shift and scale the sample so it has mean 0 and std 1.
a = np.random.uniform(size=(5, 4), low=-5, high=10)
a
(a - a.mean()) / a.std()
_____no_output_____
MIT
lectures/11_Numpy_extra_stuff.ipynb
juditacs/bprof-python
Normalizing: squash into range [0, 1)
# Normalize `a` (from the previous cell) into [0, 1]: subtract the minimum and
# divide by the peak-to-peak range (max - min).
(a - a.min()) / a.ptp()
_____no_output_____
MIT
lectures/11_Numpy_extra_stuff.ipynb
juditacs/bprof-python
`np.digitize`
# np.digitize returns, for each value, the index of the bin it falls into:
# 0 = below the first edge, len(bins) = at or above the last edge.
a = np.arange(1, 11).reshape(2, -1)  # immediately overwritten; kept from the lecture
a = np.array([20, -2, 3, 5, 8, 7])
np.digitize(a, bins=[1, 4, 8])
_____no_output_____
MIT
lectures/11_Numpy_extra_stuff.ipynb
juditacs/bprof-python
Local peaks
a = np.array([1, 3, 7, 1, 2, 6, 0, 1]) diff1 = a - np.hstack((a[1:], 0)) diff2 = a - np.hstack((0, a[:-1])) np.where((diff1>0) & (diff2>0)) a = np.array([[3,3,3],[4,4,4],[5,5,5]]) b = np.array([1,2,3]) a - b[:, None] x = np.array([1, 2, 1, 1, 3, 4, 3, 1, 1, 2, 1, 1, 2]) np.where(x == 1)[0][4]
_____no_output_____
MIT
lectures/11_Numpy_extra_stuff.ipynb
juditacs/bprof-python
Date range
# np.arange works on datetime64 values too: every 3rd day in [Jan 2, Jan 15).
start = np.datetime64("2018-01-02")
stop = np.datetime64("2018-01-15")
np.arange(start, stop, 3)
_____no_output_____
MIT
lectures/11_Numpy_extra_stuff.ipynb
juditacs/bprof-python
Strides
# Sliding windows of length `window`, advancing `stride` elements per step.
a = np.arange(15)
stride = 2
window = 4
starts = range(0, a.shape[0] - window + 1, stride)
np.array([a[s:s + window] for s in starts])
_____no_output_____
MIT
lectures/11_Numpy_extra_stuff.ipynb
juditacs/bprof-python
Trim digital signal. Trim each consecutive block of ones to `min(cut, len(block))`.
import itertools

# A digital signal: runs ("blocks") of consecutive 1s separated by 0s.
x = [0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1]
plt.step(np.arange(len(x)), x)

cut = 2  # maximum allowed length for a block of ones
# Pad with zeros so a leading/trailing block of ones still produces an edge.
x = np.array([0] + x + [0])
up = np.where(np.diff(x) == 1)[0] + 1     # indices where each block of ones starts
down = np.where(np.diff(x) == -1)[0] + 1  # indices just past where each block ends
delta = down - up                          # length of each block
delta[delta > cut] = cut                   # trim each block to min(cut, len(block))
x[:] = 0
# Rebuild the signal: set ones only for the first delta[i] samples of block i.
x[list(itertools.chain(*(list(range(up[i], up[i]+delta[i])) for i in range(delta.shape[0]))))] = 1
x = x[1:-1]  # drop the zero padding again
x
plt.step(np.arange(len(x)), x)
_____no_output_____
MIT
lectures/11_Numpy_extra_stuff.ipynb
juditacs/bprof-python
Permutations
# Sorting permutations: argsort of the negated array gives descending order;
# applying argsort(order) afterwards inverts the permutation.
a = np.array([4, 3, 0, 10, 1])
order = np.argsort(-a)
a[order]                              # a sorted descending
order, a[order][np.argsort(order)]    # inverse permutation recovers a

# argmax along the last axis, as a free function and as a method.
a = np.array([[1, -1, 2], [5, 0, 0]])
np.argmax(a, -1)
a.argmax(-1)
_____no_output_____
MIT
lectures/11_Numpy_extra_stuff.ipynb
juditacs/bprof-python
argsort
# Same argsort round-trip on a fresh array, plus a list-repetition aside.
values = np.array([3, -1, 2, 0, 5, 2])
descending = np.argsort(-values)
values[descending]
values[descending][np.argsort(descending)]  # inverse permutation recovers values
[1, 2] * -1  # a negative repetition count yields the empty list
_____no_output_____
MIT
lectures/11_Numpy_extra_stuff.ipynb
juditacs/bprof-python
Welcome!Below, we will learn to implement and train a policy to play atari-pong, using only the pixels as input. We will use convolutional neural nets, multiprocessing, and pytorch to implement and train our policy. Let's get started!
# install package for displaying animation !pip install JSAnimation # custom utilies for displaying animation, collecting rollouts and more import pong_utils %matplotlib inline # check which device is being used. # I recommend disabling gpu until you've made sure that the code runs device = pong_utils.device print("using device: ",device) # render ai gym environment import gym import time # PongDeterministic does not contain random frameskip # so is faster to train than the vanilla Pong-v4 environment env = gym.make('PongDeterministic-v4') print("List of available actions: ", env.unwrapped.get_action_meanings()) # we will only use the actions 'RIGHTFIRE' = 4 and 'LEFTFIRE" = 5 # the 'FIRE' part ensures that the game starts again after losing a life # the actions are hard-coded in pong_utils.py
List of available actions: ['NOOP', 'FIRE', 'RIGHT', 'LEFT', 'RIGHTFIRE', 'LEFTFIRE']
MIT
pong/pong-PPO.ipynb
thomasgui76/deep-reinforcement-learning
PreprocessingTo speed up training, we can simplify the input by cropping the images and use every other pixel
import matplotlib
import matplotlib.pyplot as plt

# show what a preprocessed image looks like
env.reset()
_, _, _, _ = env.step(0)

# get a frame after 20 steps
for _ in range(20):
    frame, _, _, _ = env.step(1)

# left panel: the raw Atari frame
plt.subplot(1,2,1)
plt.imshow(frame)
plt.title('original image')

# right panel: the cropped, down-sampled 80 x 80 black and white image
plt.subplot(1,2,2)
plt.title('preprocessed image')
plt.imshow(pong_utils.preprocess_single(frame), cmap='Greys')
plt.show()
_____no_output_____
MIT
pong/pong-PPO.ipynb
thomasgui76/deep-reinforcement-learning
Policy Exercise 1: Implement your policy Here, we define our policy. The input is the stack of two different frames (which captures the movement), and the output is a number $P_{\rm right}$, the probability of moving right. Note that $P_{\rm left}= 1-P_{\rm right}$
import torch
import torch.nn as nn
import torch.nn.functional as F

class Policy(nn.Module):
    """Convolutional policy for Pong.

    Input is a stack of two 80x80 frames (so motion is visible); the output
    is a single probability P(right), with P(left) = 1 - P(right).
    """

    def __init__(self):
        super(Policy, self).__init__()
        ########
        ##
        ## Modify your neural network
        ##
        ########
        # A single 4x4 / stride-4 convolution maps the 2x80x80 input to 1x20x20:
        # outputsize = (inputsize - kernel_size + stride) / stride (round up).
        self.conv = nn.Conv2d(2, 1, kernel_size=4, stride=4)
        self.size = 1 * 20 * 20
        # one fully connected layer, squashed through a sigmoid -> probability
        self.fc = nn.Linear(self.size, 1)
        self.sig = nn.Sigmoid()

    def forward(self, x):
        ########
        ##
        ## Modify your neural network
        ##
        ########
        features = F.relu(self.conv(x))
        flat = features.view(-1, self.size)  # flatten the conv output
        return self.sig(self.fc(flat))

# run your own policy!
# policy = Policy().to(device)
policy = pong_utils.Policy().to(device)

# Adam optimizer (optim.SGD is also possible)
import torch.optim as optim
optimizer = optim.Adam(policy.parameters(), lr=1e-4)
_____no_output_____
MIT
pong/pong-PPO.ipynb
thomasgui76/deep-reinforcement-learning
Game visualizationpong_utils contain a play function given the environment and a policy. An optional preprocess function can be supplied. Here we define a function that plays a game and shows learning progress
# Play one rendered game (200 steps) with the current policy.
pong_utils.play(env, policy, time=200)
# try to add the option "preprocess=pong_utils.preprocess_single"
# to see what the agent sees
_____no_output_____
MIT
pong/pong-PPO.ipynb
thomasgui76/deep-reinforcement-learning
Function DefinitionsHere you will define key functions for training. Exercise 2: write your own function for training(what I call scalar function is the same as policy_loss up to a negative sign) PPOLater on, you'll implement the PPO algorithm as well, and the scalar function is given by$\frac{1}{T}\sum^T_t \min\left\{R_{t}^{\rm future}\frac{\pi_{\theta'}(a_t|s_t)}{\pi_{\theta}(a_t|s_t)},R_{t}^{\rm future}{\rm clip}_{\epsilon}\!\left(\frac{\pi_{\theta'}(a_t|s_t)}{\pi_{\theta}(a_t|s_t)}\right)\right\}$the ${\rm clip}_\epsilon$ function is implemented in pytorch as ```torch.clamp(ratio, 1-epsilon, 1+epsilon)```
def clipped_surrogate(policy, old_probs, states, actions, rewards,
                      discount=0.995, epsilon=0.1, beta=0.01):
    """Placeholder for the PPO clipped-surrogate objective (Exercise 2).

    NOTE(review): only the entropy regularisation term is implemented so far;
    `rewards`, `discount` and `epsilon` are accepted but unused, and the
    returned scalar is just beta * entropy averaged over the batch.
    """
    ########
    ##
    ## WRITE YOUR OWN CODE HERE
    ##
    ########
    actions = torch.tensor(actions, dtype=torch.int8, device=device)

    # convert states to policy (or probability)
    new_probs = pong_utils.states_to_prob(policy, states)
    new_probs = torch.where(actions == pong_utils.RIGHT, new_probs, 1.0-new_probs)

    # include a regularization term
    # this steers new_policy towards 0.5
    # prevents policy to become exactly 0 or 1 helps exploration
    # add in 1.e-10 to avoid log(0) which gives nan
    # NOTE(review): the logs use `old_probs` (a cross-entropy between the new
    # and old policies) rather than `new_probs` — confirm this is intended.
    entropy = -(new_probs*torch.log(old_probs+1.e-10)+ \
        (1.0-new_probs)*torch.log(1.0-old_probs+1.e-10))

    return torch.mean(beta*entropy)
_____no_output_____
MIT
pong/pong-PPO.ipynb
thomasgui76/deep-reinforcement-learning
TrainingWe are now ready to train our policy!WARNING: make sure to turn on GPU, which also enables multicore processing. It may take up to 45 minutes even with GPU enabled, otherwise it will take much longer!
from parallelEnv import parallelEnv
import numpy as np

# keep track of how long training takes
# WARNING: running through all 800 episodes will take 30-45 minutes

# training loop max iterations
episode = 500

# widget bar to display progress
!pip install progressbar
import progressbar as pb
widget = ['training loop: ', pb.Percentage(), ' ',
          pb.Bar(), ' ', pb.ETA() ]
timer = pb.ProgressBar(widgets=widget, maxval=episode).start()

envs = parallelEnv('PongDeterministic-v4', n=8, seed=1234)

# PPO hyperparameters
discount_rate = .99
epsilon = 0.1
beta = .01
tmax = 320      # max trajectory length
SGD_epoch = 4   # gradient steps per batch of trajectories

# keep track of progress
mean_rewards = []

for e in range(episode):

    # collect trajectories
    old_probs, states, actions, rewards = \
        pong_utils.collect_trajectories(envs, policy, tmax=tmax)

    total_rewards = np.sum(rewards, axis=0)

    # gradient ascent step (negate the surrogate so Adam minimises it)
    for _ in range(SGD_epoch):

        # uncomment to utilize your own clipped function!
        # L = -clipped_surrogate(policy, old_probs, states, actions, rewards, epsilon=epsilon, beta=beta)

        L = -pong_utils.clipped_surrogate(policy, old_probs, states, actions, rewards,
                                          epsilon=epsilon, beta=beta)
        optimizer.zero_grad()
        L.backward()
        optimizer.step()
        del L

    # the clipping parameter reduces as time goes on
    epsilon*=.999

    # the regulation term also reduces
    # this reduces exploration in later runs
    beta*=.995

    # get the average reward of the parallel environments
    mean_rewards.append(np.mean(total_rewards))

    # display some progress every 20 iterations
    if (e+1)%20 ==0 :
        print("Episode: {0:d}, score: {1:f}".format(e+1,np.mean(total_rewards)))
        print(total_rewards)

    # update progress widget bar
    timer.update(e+1)

timer.finish()

pong_utils.play(env, policy, time=200)

# save your policy!
torch.save(policy, 'PPO.policy')

# load policy if needed
# policy = torch.load('PPO.policy')

# try and test out the solution
# make sure GPU is enabled, otherwise loading will fail
# (the PPO version can win more often than not)!
# # policy_solution = torch.load('PPO_solution.policy') # pong_utils.play(env, policy_solution, time=2000)
_____no_output_____
MIT
pong/pong-PPO.ipynb
thomasgui76/deep-reinforcement-learning
Optimization Opt 1 parameter
def run(Plot, Save):
    """PyMieSim example: optimize a detector's PhiOffset over a diameter sweep.

    NOTE(review): the bare `return` below appears first in the exported cell,
    which would make the entire body unreachable (the example is disabled).
    Confirm against the original PyMieSim example script.
    """
    return
    import numpy as np
    from PyMieSim import Material
    from PyMieSim.Scatterer import Sphere
    from PyMieSim.Detector import Photodiode, LPmode
    from PyMieSim.Source import PlaneWave
    from PyMieSim.Experiment import ScatSet, SourceSet, Setup, DetectorSet

    DiameterList = np.linspace(100e-9, 1000e-9, 200)

    Detector0 = Photodiode(NA = 0.1,
                           Sampling = 300,
                           GammaOffset = 20,
                           PhiOffset = 0,
                           CouplingMode = 'Centered')

    scatKwargs = { 'Diameter' : np.linspace(400e-9, 2000e-9, 200),
                   'Material' : Material('BK7'),
                   'nMedium' : [1] }

    sourceKwargs = { 'Wavelength' : 1e-6,
                     'Polarization' : [0]}

    # NOTE(review): Detector0 is defined twice; this NA=2.0 detector
    # overwrites the NA=0.1 one above.
    Detector0 = Photodiode(NA = 2.0,
                           Sampling = 300,
                           GammaOffset = 0,
                           PhiOffset = 0,
                           CouplingMode = 'Centered')

    detecSet = DetectorSet([Detector0])
    scatSet = ScatSet(Scatterer = Sphere, kwargs = scatKwargs )
    sourceSet = SourceSet(Source = PlaneWave, kwargs = sourceKwargs )

    Experiment = Setup(ScattererSet = scatSet,
                       SourceSet = sourceSet,
                       DetectorSet = detecSet)

    # Metric can be "max"
    #               "min"
    #               "mean"
    #               "std+RI"
    #               "std+Diameter"
    #               "std+Polarization"
    #               "std+Wavelength"
    #               "std+Detector"
    #               "monotonic+RI"
    #               "monotonic+Diameter"
    #               "monotonic+Polarization"
    #               "monotonic+Wavelength"
    #               "monotonic+Detector"
    Opt = Experiment.Optimize(Setup = Experiment,
                              Metric = 'mean',
                              Parameter = ['PhiOffset'],
                              Optimum = 'Maximum',
                              MinVal = [1e-5],
                              MaxVal = [180],
                              WhichDetector = 0,
                              X0 = [0.6],
                              MaxIter = 350,
                              Tol = 1e-4,
                              FirstStride = 30)

    print(Opt.Result)

    df = Experiment.Coupling(AsType='dataframe')

    if Plot:
        df.Plot(y='Coupling', x='Diameter') # can be "Coupling" or "STD"

if __name__ == '__main__':
    run(Plot=True, Save=False)
_____no_output_____
MIT
docs/source/auto_examples/ExperimentOptimization/Optimization:Opt-1-parameter.ipynb
ejetzer/PyMieSim
6. Hidden Markov Models with Theano and TensorFlowIn the last section we went over the training and prediction procedures of Hidden Markov Models. This was all done using only vanilla numpy the Expectation Maximization algorithm. I now want to introduce how both `Theano` and `Tensorflow` can be utilized to accomplish the same goal, albeit by a very different process. 1. Gradient DescentHopefully you are familiar with the gradient descent optimization algorithm, if not I recommend reviewing my posts on Deep Learning, which leverage gradient descent heavily (or this [video](https://www.youtube.com/watch?v=IHZwWFHWa-w). With that said, a simple overview is as follows:> Gradient descent is a first order optimization algorithm for finding the minimum of a function. To find a local minimum of a function using gradient descent, on takes steps proportional to the negative of the gradient of the function at its current point. Visually, this iterative process looks like: Where above we are looking at a contour plot of a three dimensional bowl, and the center of the bowl is a minimum. Now, the actual underlying mechanics of gradient descent work as follows: 1. Define a model/hypothesis that will be mapping inputs to outputs, or in other words making predictions:$$h_{\theta}(x) = \theta_0 + \theta_1x$$In this case $x$ is our input and $h(x)$, often thought of as $y$, is our output. We are stating that we believe the ground truth relationship between $x$ and $h(x)$ is captured by the linear combination of $\theta_0 + \theta_1x$. Now, what are $\theta_0$ and $\theta_1$ equal to? 2. Define a **cost** function for which you are trying to find the minimum. Generally, this cost function is defined as some form of **error**, and it will be parameterized by variables related to your model in some way. $$cost = J = (y - h_{\theta}(x))^2$$Above $y$ refers to the ground truth/actual value of the output, and $h_{\theta}(x)$ refers to that which our model predicted. 
The difference, squared, represents our cost. We can see that if our prediction is exactly equal to the ground truth value, our cost will be 0. If our prediction is very far off from our ground truth value then our cost will be very high. Our goal is to minimize the cost (error) of our model. 3. Take the [**gradient**](https://en.wikipedia.org/wiki/Gradient) (multi-variable generalization of the derivative) of the cost function with respect to the parameters that you have control over.$$\nabla J = \frac{\partial J}{\partial \theta}$$Simply put, we want to see how $J$ changes as we change our model parameters, $\theta_0$ and $\theta_1$. 4. Based on the gradient update our values for $\theta$ with a simple update rule:$$\theta_0 \rightarrow \theta_0 - \alpha \cdot \frac{\partial J}{\partial \theta_0}$$$$\theta_1 \rightarrow \theta_1 - \alpha \cdot \frac{\partial J}{\partial \theta_1}$$ 5. Repeat steps two and three for a set number of iterations/until convergence.After a set number of steps, the hope is that the model weights that were _learned_ are the most optimal weights to minimize prediction error. Now after everything we discussed in the past two posts you may be wondering, how exactly does this relate to Hidden Markov Models, which have been trained via Expectation Maximization? 1.1 Gradient Descent and Hidden Markov ModelsLet's say for a moment that our goal that we wish to accomplish is predict the probability of an observed sequence, $p(x)$. And let's say that we have 100 observed sequences at our disposal. It should be clear that if we have a trained HMM that predicts the majority of our sequences are very unlikely, the HMM was probably not trained very well. Ideally, our HMM parameters would be learned in a way that maximizes the probability of observing what we did (this was the goal of expectation maximization).What may start to become apparent at this point is that we have a perfect cost function already created for us! 
The total probability of our observed sequences, based on our HMM parameters $A$, $B$, and $\pi$. We can define this mathematically as follows (for the scaled version); in the previous post we proved that:$$p(x) = \prod_{t=1}^T c(t)$$Which states that the probability of an observed sequence is equal to the product of the scales at each time step. Also recall that the scale is just defined as:$$c(t) = \sum_{i=1}^M \alpha'(t,i)$$With that all said, we can define the cost of a single observed training sequence as:$$cost = \sum_{t}^{T} log\big(c(t)\big)$$Where we are using the log to avoid the underflow problem, just as we did in the last notebook. So, we have a cost function which intuitively makes sense, but can we find its gradient with respect to our HMM parameters $A$, $B$, and $\pi$? We absolutely can! The wonderful thing about Theano is that it links variables together via a [computational graph](http://deeplearning.net/software/theano/extending/graphstructures.html). So, cost is depedent on $A$, $B$ and $\pi$ via the following link:$$cost \rightarrow c(t) \rightarrow alpha \rightarrow A, B, \pi$$We can take the gradient of this cost function in theano as well, allowing us to then easily update our values of $A$, $B$, and $\pi$! Done iteratively, we hopefully will converge to a nice minimum. 1.2 HMM Theano specificsI would be lying if I said that Theano wasn't a little bit hard to follow at first. For those unfamiliar, representing symbolic mathematical computations as graphs may feel very strange. I have a few walk throughs of Theano in my Deep Learning section, as well as `.py` files in the source repo. Additionally, the theano [documentation](http://deeplearning.net/software/theano/index.html) is also very good. With that said, I do want to discuss a few details of the upcoming code block. 
Recurrence Block $\rightarrow$ Calculating the Forward Variable, $\alpha$First, I want to discuss the `recurrence` and `scan` functions that you will be seeing:```def recurrence_to_find_alpha(t, old_alpha, x): """Scaled version of updates for HMM. This is used to find the forward variable alpha. Args: t: Current time step, from pass in from scan: sequences=T.arange(1, thx.shape[0]) old_alpha: Previously returned alpha, or on the first time step the initial value, outputs_info=[self.pi * self.B[:, thx[0]], None] x: thx, non_sequences (our actual set of observations) """ alpha = old_alpha.dot(self.A) * self.B[:, x[t]] s = alpha.sum() return (alpha / s), s alpha and scale, once returned, are both matrices with values at each time step[alpha, scale], _ = theano.scan( fn=recurrence_to_find_alpha, sequences=T.arange(1, thx.shape[0]), outputs_info=[self.pi * self.B[:, thx[0]], None], Initial value of alpha n_steps=thx.shape[0] - 1, non_sequences=thx,) scale is an array, and scale.prod() = p(x) The property log(A) + log(B) = log(AB) can be used here to prevent underflow problemp_of_x = -T.log(scale).sum() Negative log likelihoodcost = p_of_xself.cost_op = theano.function( inputs=[thx], outputs=cost, allow_input_downcast=True,)```The above block is where our forward variable $\alpha$ and subsequently the probability of the observed sequence $p(x)$ is found. The process works as follows:1. The `theano.scan` function (logically similar to a for loop) is defined with the following parameters: * `fn`: The recurrence function that the array being iterated over will be passed into. * `sequences`: An array of indexes, $[1,2,3,...,T]$ * `outputs_info`: The initial value of $\alpha$ * `non_sequences`: Our observation sequence, $X$. This passed in it's entirety to the recurrence function at each iteration.2. Our recurrence function, `recurrence_to_find_alpha`, is meant to calculate $\alpha$ at each time step. $\alpha$ at $t=1$ was defined by `outputs_info` in `scan`. 
This recurrence function essentially is performing the forward algorithm (additionally it incorporates scaling):$$\alpha(1,i) = \pi_iB\big(i, x(1)\big)$$$$\alpha(t+1, j) = \sum_{i=1}^M \alpha(t,i) A(i,j)B(j, x(t+1))$$3. We calculate $p(x)$ to be the sum of the log likelihood. This is set to be our `cost`.4. We define a `cost_op`, which is a theano function that takes in a symbolic variable `thx` and determines the output `cost`. Remember, `cost` is linked to `thx` via:```cost -> scale -> theano.scan(non_sequences=thx)``` Update block $\rightarrow$ Updating HMM parameters $A$, $B$, and $\pi$The other block that I want to touch on is the update block:```pi_update = self.pi - learning_rate * T.grad(cost, self.pi)pi_update = pi_update / pi_update.sum()A_update = self.A - learning_rate*T.grad(cost, self.A)A_update = A_update / A_update.sum(axis=1).dimshuffle(0, 'x')B_update = self.B - learning_rate*T.grad(cost, self.B)B_update = B_update / B_update.sum(axis=1).dimshuffle(0, 'x')updates = [ (self.pi, pi_update), (self.A, A_update), (self.B, B_update),]train_op = theano.function( inputs=[thx], updates=updates, allow_input_downcast=True)costs = []for it in range(max_iter): for n in range(N): Looping through all N training examples c = self.get_cost_multi(X, p_cost).sum() costs.append(c) train_op(X[n])```The update block functions as follows:1. We have `cost` that was defined symbolically and linked to `thx`. We can define `pi_update` as `pi_update = self.pi - learning_rate * T.grad(cost, self.pi)`. 2. This same approach is performed for $A$ and $B$. 3. We then create a theano function, `train_op` which takes in `thx`, our symbolic input, and with perform updates via the `updates=updates` kwarg. Specifically, updates takes in a list of tuples, with the first value in the tuple being the variable that should be updated, and the second being the expression with which it should be updated to be. 4. 
We loop through all training examples (sequences of observations), and call `train_up`, passing in `X[n]` (a unique sequene of observations) as `thx`.5. `train_op` then performs the `updates`, utilizing `thx = X[n]` wherever `updates` depends on `thx`.This is clearly stochastic gradient descent, because we are performing updates to our parameters $A$, $B$, and $\pi$ for each training sequence. Full batch gradient descent would be if we defined a cost function that was based on all of the training sequences, not only an individual sequence. 2. HMM's with TheanoIn code, our HMM can be implemented with Theano as follows:
import numpy as np
import theano
import theano.tensor as T
import seaborn as sns
import matplotlib.pyplot as plt
from hmm.utils import get_obj_s3, random_normalized

%matplotlib inline
%config InlineBackend.figure_format = 'retina'
sns.set(style="white", palette="husl")
sns.set_context("talk")
sns.set_style("ticks")


class HMM:
    """Discrete-observation Hidden Markov Model trained by (stochastic) gradient
    descent on the negative log-likelihood, built with Theano.

    Parameters pi, A, B are kept as probabilities and renormalized by hand
    after every gradient step (see the update block in ``fit``).
    """

    def __init__(self, M):
        # M: number of hidden states
        self.M = M

    def fit(self, X, learning_rate=0.001, max_iter=10, V=None, p_cost=1.0, print_period=10):
        """Train HMM model using stochastic gradient descent.

        Args:
            X: list of observation sequences (each a list of ints in [0, V)).
            learning_rate: SGD step size.
            max_iter: number of passes over the training set.
            V: vocabulary size; inferred from X when None.
            p_cost: probability of including each sequence when evaluating cost.
            print_period: unused here; kept for interface compatibility.
        """
        # Determine V, the vocabulary size
        if V is None:
            V = max(max(x) for x in X) + 1
        N = len(X)

        # Initialize HMM variables
        pi0 = np.ones(self.M) / self.M  # Initial state distribution (uniform)
        A0 = random_normalized(self.M, self.M)  # State transition matrix
        B0 = random_normalized(self.M, V)  # Output distribution

        thx, cost = self.set(pi0, A0, B0)

        # This is a beauty of theano and it's computational graph.
        # By defining a cost function, which is representing p(x),
        # the probability of a sequence, we can then find the gradient
        # of the cost with respect to our parameters (pi, A, B). The
        # gradient updated rules are applied as usual. Note, the
        # reason that this is stochastic gradient descent is because
        # we are only looking at a single training example at a time.
        pi_update = self.pi - learning_rate * T.grad(cost, self.pi)
        pi_update = pi_update / pi_update.sum()  # renormalize: pi must remain a probability vector

        A_update = self.A - learning_rate*T.grad(cost, self.A)
        A_update = A_update / A_update.sum(axis=1).dimshuffle(0, 'x')  # row-normalize

        B_update = self.B - learning_rate*T.grad(cost, self.B)
        B_update = B_update / B_update.sum(axis=1).dimshuffle(0, 'x')  # row-normalize

        updates = [
            (self.pi, pi_update),
            (self.A, A_update),
            (self.B, B_update),
        ]

        train_op = theano.function(
            inputs=[thx],
            updates=updates,
            allow_input_downcast=True
        )

        costs = []
        for it in range(max_iter):
            for n in range(N):
                # Looping through all N training examples
                c = self.get_cost_multi(X, p_cost).sum()
                costs.append(c)
                train_op(X[n])

        print("A learned from training: \n", self.A.get_value())
        print("B learned from training: \n", self.B.get_value())
        print("pi learned from training: \n", self.pi.get_value())

        plt.figure(figsize=(8,5))
        plt.plot(costs, color="blue")
        plt.xlabel("Iteration Number")
        plt.ylabel("Cost")
        plt.show()

    def get_cost(self, x):
        # Negative log-likelihood of a single sequence under current parameters
        return self.cost_op(x)

    def get_cost_multi(self, X, p_cost=1.0):
        # Cost over a random subsample: each sequence kept with probability p_cost
        P = np.random.random(len(X))
        return np.array([self.get_cost(x) for x, p in zip(X, P) if p < p_cost])

    def log_likelihood(self, x):
        # cost_op returns the negative log-likelihood, so negate it back
        return - self.cost_op(x)

    def set(self, pi, A, B):
        """Build the Theano graph for the scaled forward algorithm.

        Returns:
            (thx, cost): the symbolic input vector and the symbolic
            negative log-likelihood linked to it.
        """
        # Create theano shared variables
        self.pi = theano.shared(pi)
        self.A = theano.shared(A)
        self.B = theano.shared(B)

        # Define input, a vector
        thx = T.ivector("thx")

        def recurrence_to_find_alpha(t, old_alpha, x):
            """
            Scaled version of updates for HMM. This is used to find the
            forward variable alpha.

            Args:
                t: Current time step, from pass in from scan:
                   sequences=T.arange(1, thx.shape[0])
                old_alpha: Previously returned alpha, or on the first time
                   step the initial value,
                   outputs_info=[self.pi * self.B[:, thx[0]], None]
                x: thx, non_sequences (our actual set of observations)
            """
            alpha = old_alpha.dot(self.A) * self.B[:, x[t]]
            s = alpha.sum()
            # Return the rescaled alpha and the scale factor; the product of
            # all scale factors equals p(x)
            return (alpha / s), s

        # alpha and scale, once returned, are both matrices with values at each time step
        [alpha, scale], _ = theano.scan(
            fn=recurrence_to_find_alpha,
            sequences=T.arange(1, thx.shape[0]),
            outputs_info=[self.pi * self.B[:, thx[0]], None],  # Initial value of alpha
            n_steps=thx.shape[0] - 1,
            non_sequences=thx,
        )

        # scale is an array, and scale.prod() = p(x)
        # The property log(A) + log(B) = log(AB) can be used
        # here to prevent underflow problem
        p_of_x = -T.log(scale).sum()  # Negative log likelihood
        cost = p_of_x

        self.cost_op = theano.function(
            inputs=[thx],
            outputs=cost,
            allow_input_downcast=True,
        )
        return thx, cost


def fit_coin(file_key):
    """Loads data and trains HMM."""
    X = []
    for line in get_obj_s3(file_key).read().decode("utf-8").strip().split(sep="\n"):
        # Encode observations: 1 for heads, 0 for tails
        x = [1 if e == "H" else 0 for e in line.rstrip()]
        X.append(x)

    # Instantiate object of class HMM with 2 hidden states (heads and tails)
    hmm = HMM(2)
    hmm.fit(X)
    L = hmm.get_cost_multi(X).sum()
    print("Log likelihood with fitted params: ", round(L, 3))

    # Try the true values
    pi = np.array([0.5, 0.5])
    A = np.array([
        [0.1, 0.9],
        [0.8, 0.2]
    ])
    B = np.array([
        [0.6, 0.4],
        [0.3, 0.7]
    ])
    hmm.set(pi, A, B)
    L = hmm.get_cost_multi(X).sum()
    print("Log Likelihood with true params: ", round(L, 3))


if __name__ == "__main__":
    key = "coin_data.txt"
    fit_coin(key)
A learned from training: [[0.50000007 0.49999993] [0.50000005 0.49999995]] B learned from training: [[0.52666344 0.47333656] [0.52666383 0.47333617]] pi learned from training: [0.50007189 0.49992811]
MIT
Machine_Learning/05-Hidden_Markov_Models-06-Hidden-Markov-Models-Hidden-Markov-Models-Discrete-Observations-Deep-Learning-Libraries.ipynb
NathanielDake/NathanielDake.github.io
3. HMM's with Theano $\rightarrow$ Optimization via SoftmaxOne of the challenges of the approach we took is that gradient descent is _unconstrained_; it simply goes in the direction of the gradient. This presents a problem for us in the case of HMM's. Remember, the parameters of an HMM are $\pi$, $A$, and $B$, and each is a probability matrix/vector. This means that they must be between 0 and 1, and must sum to 1 (along the rows if 2-D). We accomplished this in the previous section by performing a "hack". Specifically, we renormalized after each gradient descent step. However, this means that we weren't performing _real_ gradient descent, because by renormalizing we are not exactly moving in the direction of the gradient anymore. For reference, the pseudocode looked like this:```pi_update = self.pi - learning_rate * T.grad(cost, self.pi)pi_update = pi_update / pi_update.sum() Normalizing to ensure it stays a probabilityA_update = self.A - learning_rate*T.grad(cost, self.A)A_update = A_update / A_update.sum(axis=1).dimshuffle(0, 'x') Normalize for prob B_update = self.B - learning_rate*T.grad(cost, self.B)B_update = B_update / B_update.sum(axis=1).dimshuffle(0, 'x') Normalize for prob Passing in normalized updates for pi, A, B. No longer moving in dir of gradientupdates = [ (self.pi, pi_update), (self.A, A_update), (self.B, B_update),]```This leads us to the question: is it possible to use true gradient descent, while still conforming to the constraints that each parameter much be a true probability. The answer is of course yes! 3.1 Softmax If you are unfamiliar with Deep Learning then you may want to jump over this section, or go through my deep learning posts that dig into the subject. If you are familiar, recall the softmax function:$$softmax(x)_i = \frac{exp(x_i)}{\sum_{k=1}^K exp(x_k)}$$Where $x$ is an array of size $K$, and $K$ is the number of classes that we have. The result of the softmax is that all outputs are positive and sum to 1. 
What exactly does this mean in our scenario? Softmax for $\pi$Consider $\pi$, an array of size $M$. Suppose we want to parameterize $\pi$, using the symbol $\theta$. We can then assign $\pi$ to be:$$\pi = softmax(\theta)$$In this way, $\pi$ is like an intermediate variable and $\theta$ is the actual parameter that we will be updating. This ensures that $\pi$ is always between 0 and 1, and sums to 1. At the same time, the values in $\theta$ can be anything; this means that we can freely use gradient descent on $\theta$ without having to worry about any constraints! No matter what we do to $\theta$, $\pi$ will always be between 0 and 1 and sum to 1. Softmax for $A$ and $B$Now, what about $A$ and $B$? Unlike $\pi$, which was a 1-d vector, $A$ and $B$ are matrices. Luckily for us, softmax works well for us here too! Recall that when dealing with data in deep learning (and most ML) that we are often dealing with multiple samples at the same time. Typically an $NxD$ matrix, where $N$ is the number of samples, and $D$ is the dimensionality. We know that the output of our model is usually an $NxK$ matrix, where $K$ is the number of classes. Naturally, because the classes go along the rows, each row must represent a separate probability distribution. Why is this helpful? Well, the softmax was actually written with this specifically in mind! When you use the softmax it automatically exponentiates every element of the matrix and divides by the row sum. That is exactly what we want to do with $A$ and $B$! Each row of $A$ is the probability of the next state to transition to, and each row of $B$ is the probability of the next symbol to emit. The rows must sum to 1, just like the output predictions of a neural network! In pseudocode, softmax looks like:```def softmax(A): expA = np.exp(A) return expA / expA.sum(axis=1, keepdims=True)```We can see this clearly below:
np.set_printoptions(suppress=True) A = np.array([ [1,2], [4,5], [9,5] ]) expA = np.exp(A) print("A exponentiated element wise: \n", np.round_(expA, decimals=3), "\n") # Keep dims ensures a column vector (vs. row) output output = expA / expA.sum(axis=1, keepdims=True) print("Exponentiated A divided row sum: \n", np.round_(output, decimals=3))
A exponentiated element wise: [[ 2.718 7.389] [ 54.598 148.413] [8103.084 148.413]] Exponentiated A divided row sum: [[0.269 0.731] [0.269 0.731] [0.982 0.018]]
MIT
Machine_Learning/05-Hidden_Markov_Models-06-Hidden-Markov-Models-Hidden-Markov-Models-Discrete-Observations-Deep-Learning-Libraries.ipynb
NathanielDake/NathanielDake.github.io
Now you may be wondering: Why can't we just perform standard normalization? Why does the exponetial need to be used? For an answer to that I recommend reading up [here](https://stackoverflow.com/questions/17187507/why-use-softmax-as-opposed-to-standard-normalization), [here](https://stats.stackexchange.com/questions/162988/why-sigmoid-function-instead-of-anything-else/318209318209), and [here](http://cs231n.github.io/linear-classify/softmax). 3.2 Update Discrete HMM Code $\rightarrow$ with Softmax
class HMM:
    """Discrete-observation HMM whose parameters are stored in *pre-softmax* form.

    The probability parameters pi, A and B are obtained by applying a softmax
    to unconstrained shared variables, so plain (unconstrained) gradient
    descent can be used without renormalizing after every step.
    """

    def __init__(self, M):
        # M: number of hidden states
        self.M = M

    def fit(self, X, learning_rate=0.001, max_iter=10, V=None, p_cost=1.0, print_period=10):
        """Train HMM model using stochastic gradient descent."""
        # Determine V, the vocabulary size
        if V is None:
            V = max(max(x) for x in X) + 1
        N = len(X)

        preSoftmaxPi0 = np.zeros(self.M)  # initial state distribution
        preSoftmaxA0 = np.random.randn(self.M, self.M)  # state transition matrix
        preSoftmaxB0 = np.random.randn(self.M, V)  # output distribution

        thx, cost = self.set(preSoftmaxPi0, preSoftmaxA0, preSoftmaxB0)

        # This is a beauty of theano and it's computational graph. By defining a cost function,
        # which is representing p(x), the probability of a sequence, we can then find the gradient
        # of the cost with respect to our parameters (pi, A, B). The gradient updated rules are
        # applied as usual. Note, the reason that this is stochastic gradient descent is because
        # we are only looking at a single training example at a time. No renormalization is
        # needed here: the softmax in set() keeps pi, A and B valid probabilities.
        pi_update = self.preSoftmaxPi - learning_rate * T.grad(cost, self.preSoftmaxPi)
        A_update = self.preSoftmaxA - learning_rate * T.grad(cost, self.preSoftmaxA)
        B_update = self.preSoftmaxB - learning_rate * T.grad(cost, self.preSoftmaxB)

        updates = [
            (self.preSoftmaxPi, pi_update),
            (self.preSoftmaxA, A_update),
            (self.preSoftmaxB, B_update),
        ]

        train_op = theano.function(
            inputs=[thx],
            updates=updates,
            allow_input_downcast=True
        )

        costs = []
        for it in range(max_iter):
            for n in range(N):
                # Looping through all N training examples
                c = self.get_cost_multi(X, p_cost).sum()
                costs.append(c)
                train_op(X[n])

        plt.figure(figsize=(8,5))
        plt.plot(costs, color="blue")
        plt.xlabel("Iteration Number")
        plt.ylabel("Cost")
        plt.show()

    def get_cost(self, x):
        # Negative log-likelihood of one sequence under the current parameters
        return self.cost_op(x)

    def get_cost_multi(self, X, p_cost=1.0):
        # Cost over a random subsample: each sequence kept with probability p_cost
        P = np.random.random(len(X))
        return np.array([self.get_cost(x) for x, p in zip(X, P) if p < p_cost])

    def log_likelihood(self, x):
        return - self.cost_op(x)

    def set(self, preSoftmaxPi, preSoftmaxA, preSoftmaxB):
        """Build the Theano graph from *pre-softmax* parameter values.

        NOTE: the arguments are unconstrained; the probabilities the model
        actually uses are softmax(preSoftmaxPi) and the row-wise softmax of
        preSoftmaxA / preSoftmaxB.
        """
        # Create theano shared variables
        self.preSoftmaxPi = theano.shared(preSoftmaxPi)
        self.preSoftmaxA = theano.shared(preSoftmaxA)
        self.preSoftmaxB = theano.shared(preSoftmaxB)

        # softmax returns 1xD if input is a 1-D array of size D, hence flatten()
        pi = T.nnet.softmax(self.preSoftmaxPi).flatten()
        A = T.nnet.softmax(self.preSoftmaxA)
        B = T.nnet.softmax(self.preSoftmaxB)

        # Define input, a vector
        thx = T.ivector("thx")

        def recurrence_to_find_alpha(t, old_alpha, x):
            """Scaled version of updates for HMM. This is used to find the
            forward variable alpha.

            Args:
                t: Current time step, from pass in from scan:
                   sequences=T.arange(1, thx.shape[0])
                old_alpha: Previously returned alpha, or on the first time
                   step the initial value,
                   outputs_info=[pi * B[:, thx[0]], None]
                x: thx, non_sequences (our actual set of observations)
            """
            alpha = old_alpha.dot(A) * B[:, x[t]]
            s = alpha.sum()
            return (alpha / s), s

        # alpha and scale, once returned, are both matrices with values at each time step
        [alpha, scale], _ = theano.scan(
            fn=recurrence_to_find_alpha,
            sequences=T.arange(1, thx.shape[0]),
            outputs_info=[pi * B[:, thx[0]], None],  # Initial value of alpha
            n_steps=thx.shape[0] - 1,
            non_sequences=thx,
        )

        # scale is an array, and scale.prod() = p(x)
        # The property log(A) + log(B) = log(AB) can be used here to prevent underflow problem
        p_of_x = -T.log(scale).sum()  # Negative log likelihood
        cost = p_of_x

        self.cost_op = theano.function(
            inputs=[thx],
            outputs=cost,
            allow_input_downcast=True,
        )
        return thx, cost


def fit_coin(file_key):
    """Loads data and trains HMM."""
    X = []
    for line in get_obj_s3(file_key).read().decode("utf-8").strip().split(sep="\n"):
        # Encode observations: 1 for heads, 0 for tails
        x = [1 if e == "H" else 0 for e in line.rstrip()]
        X.append(x)

    # Instantiate object of class HMM with 2 hidden states (heads and tails)
    hmm = HMM(2)
    hmm.fit(X)
    L = hmm.get_cost_multi(X).sum()
    print("Log likelihood with fitted params: ", round(L, 3))

    # Try the true values.
    # BUG FIX: set() expects *pre-softmax* parameters, so the true probabilities
    # must be passed through log first (softmax(log(p)) == p when each row of p
    # sums to 1). Passing the raw probabilities, as before, evaluated the
    # likelihood of a different model than intended.
    pi = np.log(np.array([0.5, 0.5]))
    A = np.log(np.array([
        [0.1, 0.9],
        [0.8, 0.2]
    ]))
    B = np.log(np.array([
        [0.6, 0.4],
        [0.3, 0.7]
    ]))
    hmm.set(pi, A, B)
    L = hmm.get_cost_multi(X).sum()
    print("Log Likelihood with true params: ", round(L, 3))


if __name__ == "__main__":
    key = "coin_data.txt"
    fit_coin(key)
_____no_output_____
MIT
Machine_Learning/05-Hidden_Markov_Models-06-Hidden-Markov-Models-Hidden-Markov-Models-Discrete-Observations-Deep-Learning-Libraries.ipynb
NathanielDake/NathanielDake.github.io
4. Hidden Markov Models with TensorFlowI now want to expose everyone to an HMM implementation in TensorFlow. In order to do so, we will need to first go over the `scan` function in Tensorflow. Just like when dealing with Theano, we need to ask "What is the equivalent of a for loop in TensorFlow?". And why should we care? 4.1 TensorFlow ScanIn order to understand the importance of `scan`, we need to be sure that we have a good idea of how TensorFlow works, even if only from a high level. In general, with both TensorFlow and Theano, you have to create variables and link them together functionally, but they do not have values until you actually run the functions. So, when you create your $X$ matrix you don't give it a shape; you just say here is a place holder I am going to call $X$ and this is a possible shape for it:```X = tf.placeholder(tf.float32, shape=(None, D))```However, remember that the `shape` argument is _optional_, and hence for all intents and purposes we can assume that we do not know the shape of $X$. So, what happens if you want to loop through all the elements of $X$? Well you can't, because we do not know the number of elements in $X$!```for i in range(X.shape[0]): <------- Not possible! We don't know num elements in X ....```In order to write a for loop we must specify the number of times the loop will run. But in order to specify the number of times the loop will run we must know the number of elements in $X$. Generally speaking, we cannot guarantee the length of our training sequences. This is why we need the tensorflow `scan` function! It will allow us to loop through a tensorflow array without knowing its size. This is similar to how everything else in Tensorflow and Theano works. Using `scan` we can tell Tensorflow "how to run the for loop", without actually running it. There is another big reason that the `scan` function is so important; it allows us to perform **automatic differentiation** when we have sequential data. 
Tensorflow keeps track of how all the variables in your graph link together, so that it can automatically calculate the gradient for you when you do gradient descent:$$W(t) \leftarrow W(t-1) - \eta \nabla J\big(W(t-1)\big)$$The `scan` function keeps track of this when it performs the loop. The anatomy of the `scan` function is shown in pseudocode below:```outputs = tf.scan( fn=some_function, Function applied to every element in sequence elems=thing_to_loop_over Actual sequence that is passed in)```Above, `some_function` is applied to every element in `thing_to_loop_over`. Now, the way that we define `some_function` is very specific and much more strict than that for theano. In particular, it must always take in two arguments. The first element is the last output of the function, and the second element is the next element of the sequence:```def some_function(last_output, element): return do_something_to(last_output, element)```The tensorflow scan function returns `outputs`, which is all of the return values of `some_function` concatenated together. For example, we can look at the following block:```outputs = tf.scan( fn=some_function, elems=thing_to_loop_over )def square(last, current): return current * current sequence = [1, 2, 3] outputs = [1, 4, 9]```If we pass in `[1, 2, 3]`, then our outputs will be `[1, 4, 9]`. Now, of course the outputs is still a tensorflow graph node. So, in order to get an actual value out of it we need to run it in an actual session.
import tensorflow as tf x = tf.placeholder(tf.int32, shape=(None,), name="x") def square(last, current): """Last is never used, but must be included based on interface requirements of tf.scan""" return current*current # Essentially doing what a for loop would normally do # It applies the square function to every element of x square_op = tf.scan( fn=square, elems=x ) # Run it! with tf.Session() as session: o_val = session.run( square_op, feed_dict={x: [1, 2, 3, 4, 5]} ) print("Output: ", o_val)
WARNING:tensorflow:From /Users/natedake/.virtualenvs/intuitiveml/lib/python3.6/site-packages/tensorflow/python/ops/tensor_array_ops.py:162: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version. Instructions for updating: Colocations handled automatically by placer. Output: [ 1 4 9 16 25]
MIT
Machine_Learning/05-Hidden_Markov_Models-06-Hidden-Markov-Models-Hidden-Markov-Models-Discrete-Observations-Deep-Learning-Libraries.ipynb
NathanielDake/NathanielDake.github.io
Now, of course `scan` can do more complex things than this. We can implement another argument, `initializer`, that allows us to compute recurrence relationships. ```outputs = tf.scan( fn=some_function, Function applied to every element in sequence elems=thing_to_loop_over, Actual sequence that is passed in initializer=initial_input )```Why exactly do we need this? Well, we can see that the recurrence function takes in two things: the last element that it returned, and the current element of the sequence that we are iterating over. What is the last output during the first iteration? There isn't one yet! And that is exactly why we need `initializer`. One thing to keep in mind when using `initializer` is that it is very strict. In particular, it must be the exact same type as the output of `recurrence`. For example, if you need to return multiple things from `recurrence` it is going to be returned as a tuple. That means that the argument to `initializer` cannot be a list, it must be a tuple. This also means that a tuple containing `(5 , 5)` is not the same a tuple containing `(5.0, 5.0)`. Let's try to compute the fibonacci sequence to get a feel for how this works:
# N is the number fibonacci numbers that we want N = tf.placeholder(tf.int32, shape=(), name="N") def fibonacci(last, current): # last[0] is the last value, last[1] is the second last value return (last[1], last[0] + last[1]) fib_op = tf.scan( fn=fibonacci, elems=tf.range(N), initializer=(0, 1), ) with tf.Session() as session: o_val = session.run( fib_op, feed_dict={N: 8} ) print("Output: \n", o_val)
Output: (array([ 1, 1, 2, 3, 5, 8, 13, 21], dtype=int32), array([ 1, 2, 3, 5, 8, 13, 21, 34], dtype=int32))
MIT
Machine_Learning/05-Hidden_Markov_Models-06-Hidden-Markov-Models-Hidden-Markov-Models-Discrete-Observations-Deep-Learning-Libraries.ipynb
NathanielDake/NathanielDake.github.io
Another example of what we can do with the TensorFlow `scan` is to create a **low pass filter** (also known as a **moving average**). In this case, our recurrence relation is given by:$$s(t) = \text{decay_rate} \cdot s(t-1) + (1 - \text{decay_rate}) \cdot x(t)$$Where $x(t)$ is the input and $s(t)$ is the output. The goal here is to return a clean version of a noisy signal. To do this we can create a sine wave, add some random gaussian noise to it, and finally try to retrieve the sine wave. In code this looks like:
original = np.sin(np.linspace(0, 3*np.pi, 300)) X = 2*np.random.randn(300) + original fig = plt.figure(figsize=(15,5)) plt.subplot(1, 2, 1) ax = plt.plot(X, c="g", lw=1.5) plt.title("Original") # Setup placeholders decay = tf.placeholder(tf.float32, shape=(), name="decay") sequence = tf.placeholder(tf.float32, shape=(None, ), name="sequence") # The recurrence function and loop def recurrence(last, x): return (1.0 - decay)*x + decay*last low_pass_filter = tf.scan( fn=recurrence, elems=sequence, initializer=0.0 # sequence[0] to use first value of the sequence ) # Run it! with tf.Session() as session: Y = session.run(low_pass_filter, feed_dict={sequence: X, decay: 0.97}) plt.subplot(1, 2, 2) ax2 = plt.plot(original, c="b") ax = plt.plot(Y, c="r") plt.title("Low pass filter") plt.show()
_____no_output_____
MIT
Machine_Learning/05-Hidden_Markov_Models-06-Hidden-Markov-Models-Hidden-Markov-Models-Discrete-Observations-Deep-Learning-Libraries.ipynb
NathanielDake/NathanielDake.github.io
4.2 Discrete HMM With TensorflowLet's now take a moment to walk through the creation of a discrete HMM class utilizing Tensorflow.
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from hmm.utils import get_obj_s3


class HMM:
    """Discrete-observation HMM in TensorFlow (1.x graph mode).

    Parameters are held in pre-softmax form so the Adam optimizer can run
    unconstrained; tf.nn.softmax maps them back to probabilities.
    """

    def __init__(self, M):
        self.M = M  # number of hidden states

    def set_session(self, session):
        # The session must be set before fit/get_cost/set can be used
        self.session = session

    def fit(self, X, max_iter=10, print_period=1):
        # train the HMM model using stochastic gradient descent
        N = len(X)
        print("Number of train samples:", N)

        costs = []
        for it in range(max_iter):
            for n in range(N):
                # this would of course be much faster if we didn't do this on
                # every iteration of the loop
                c = self.get_cost_multi(X).sum()
                costs.append(c)
                self.session.run(self.train_op, feed_dict={self.tfx: X[n]})

        plt.figure(figsize=(8,5))
        plt.plot(costs, c="b")
        plt.xlabel("Iteration Number")
        plt.ylabel("Cost")
        plt.show()

    def get_cost(self, x):
        # returns the cost, i.e. -log P(x | model),
        # using the forward part of the forward-backward algorithm
        # print "getting cost for:", x
        return self.session.run(self.cost, feed_dict={self.tfx: x})

    def log_likelihood(self, x):
        # cost is the negative log-likelihood, so negate it back
        return -self.session.run(self.cost, feed_dict={self.tfx: x})

    def get_cost_multi(self, X):
        return np.array([self.get_cost(x) for x in X])

    def build(self, preSoftmaxPi, preSoftmaxA, preSoftmaxB):
        """Construct the graph: softmax parameters, scaled forward pass, cost, train op."""
        M, V = preSoftmaxB.shape

        self.preSoftmaxPi = tf.Variable(preSoftmaxPi)
        self.preSoftmaxA = tf.Variable(preSoftmaxA)
        self.preSoftmaxB = tf.Variable(preSoftmaxB)

        pi = tf.nn.softmax(self.preSoftmaxPi)
        A = tf.nn.softmax(self.preSoftmaxA)
        B = tf.nn.softmax(self.preSoftmaxB)

        # define cost
        self.tfx = tf.placeholder(tf.int32, shape=(None,), name='x')

        def recurrence(old_a_old_s, x_t):
            # old_a_old_s is the carried (alpha, scale) tuple; x_t is the
            # current observation symbol
            old_a = tf.reshape(old_a_old_s[0], (1, M))
            a = tf.matmul(old_a, A) * B[:, x_t]
            a = tf.reshape(a, (M,))
            s = tf.reduce_sum(a)
            return (a / s), s

        # remember, tensorflow scan is going to loop through
        # all the values!
        # we treat the first value differently than the rest
        # so we only want to loop through tfx[1:]
        # the first scale being 1 doesn't affect the log-likelihood
        # because log(1) = 0
        alpha, scale = tf.scan(
            fn=recurrence,
            elems=self.tfx[1:],
            initializer=(pi * B[:, self.tfx[0]], np.float32(1.0)),
        )

        self.cost = -tf.reduce_sum(tf.log(scale))
        self.train_op = tf.train.AdamOptimizer(1e-2).minimize(self.cost)

    def init_random(self, V):
        # Random pre-softmax starting point for training
        preSoftmaxPi0 = np.zeros(self.M).astype(np.float32)  # initial state distribution
        preSoftmaxA0 = np.random.randn(self.M, self.M).astype(np.float32)  # state transition matrix
        preSoftmaxB0 = np.random.randn(self.M, V).astype(np.float32)  # output distribution
        self.build(preSoftmaxPi0, preSoftmaxA0, preSoftmaxB0)

    def set(self, preSoftmaxPi, preSoftmaxA, preSoftmaxB):
        # Overwrite the variables with explicit pre-softmax values
        op1 = self.preSoftmaxPi.assign(preSoftmaxPi)
        op2 = self.preSoftmaxA.assign(preSoftmaxA)
        op3 = self.preSoftmaxB.assign(preSoftmaxB)
        self.session.run([op1, op2, op3])


def fit_coin(file_key):
    """Loads the coin-toss data, trains the HMM, then scores the true parameters."""
    X = []
    for line in get_obj_s3(file_key).read().decode("utf-8").strip().split(sep="\n"):
        # Encode observations: 1 for heads, 0 for tails
        x = [1 if e == "H" else 0 for e in line.rstrip()]
        X.append(x)

    hmm = HMM(2)
    # the entire graph (including optimizer's variables) must be built
    # before calling global variables initializer!
    hmm.init_random(2)
    init = tf.global_variables_initializer()
    with tf.Session() as session:
        session.run(init)
        hmm.set_session(session)
        hmm.fit(X, max_iter=5)
        L = hmm.get_cost_multi(X).sum()
        print("Log Likelihood with fitted params: ", round(L, 3))

        # try true values
        # remember these must be in their "pre-softmax" forms
        pi = np.log(np.array([0.5, 0.5])).astype(np.float32)
        A = np.log(np.array([[0.1, 0.9], [0.8, 0.2]])).astype(np.float32)
        B = np.log(np.array([[0.6, 0.4], [0.3, 0.7]])).astype(np.float32)
        hmm.set(pi, A, B)
        L = hmm.get_cost_multi(X).sum()
        print("Log Likelihood with true params: ", round(L, 3))


if __name__ == '__main__':
    key = "coin_data.txt"
    fit_coin(key)
Number of train samples: 50
MIT
Machine_Learning/05-Hidden_Markov_Models-06-Hidden-Markov-Models-Hidden-Markov-Models-Discrete-Observations-Deep-Learning-Libraries.ipynb
NathanielDake/NathanielDake.github.io
Automagically making a table of all protein-protein interactions for two structuresIf two structures use the same or essentially the same chain designations, you can use Python to make a table of all the pairs of the protein-protein interactions by the two structures that can be used as input for the pipeline described in an earlier notebook in this series, [Using snakemake to highlight changes in multiple protein-protein interactions via PDBsum data](Using%20snakemake%20to%20highlight%20changes%20in%20multiple%20protein-protein%20interactions%20via%20PDBsum%20data.ipynb). This notebook will step through this process.It is important to note this won't work straight away if the protein chain designations for the same or closely related proteins differ between the two structures. Elements of the process to be used in this notebook could be adapted to do that; however, that would require some programming knowledge beyond what will be covered here. I assume the number of times this would be needed would be limited and a table could more easily be done by hand following along with this notebook as well as [Using snakemake to highlight changes in multiple protein-protein interactions via PDBsum data](Using%20snakemake%20to%20highlight%20changes%20in%20multiple%20protein-protein%20interactions%20via%20PDBsum%20data.ipynb). The process relies on the fact that PDBsum shares under the 'Prot-prot' tab for every structure, the interacting pairs of protein chains in an 'Interface summary' on the left side of the browser page. For example, look on the left of http://www.ebi.ac.uk/thornton-srv/databases/cgi-bin/pdbsum/GetPage.pl?pdbcode=6kiv&template=interfaces.html&c=999 . That link is what the PDBsum entry for the PDB identifier 6kiv leads to if you click on the 'Prot-prot' tab page from [the main PDBsum page for 6kiv](http://www.ebi.ac.uk/thornton-srv/databases/cgi-bin/pdbsum/GetPage.pl?pdbcode=6kiv&template=main.html).
A utility script [pdb_code_to_prot_prot_interactions_via_PDBsum.py](https://github.com/fomightez/structurework/tree/master/pdbsum-utilities) is used to collect the designations listed there for each individual structure involved. Then in this notebook a little Python is used to generate the table file that can be used as described in [Using snakemake to highlight changes in multiple protein-protein interactions via PDBsum data](Using%20snakemake%20to%20highlight%20changes%20in%20multiple%20protein-protein%20interactions%20via%20PDBsum%20data.ipynb).An example follows. It is meant to be adaptable to use the PDB codes of structures that interest you. You may wish to work through the demonstration first so you know what to expect.---- The next cell is used to define the structures of interest. The PDB code identifiers are supplied.
structure1 = "6kiz" structure2 = "6kix"
_____no_output_____
MIT
notebooks/Automagically making a table of all protein-protein interactions for two structures.ipynb
fomightez/pdbsum-binder
The next cell gets the script `pdb_code_to_prot_prot_interactions_via_PDBsum.py` (see [here](https://github.com/fomightez/structurework/tree/master/pdbsum-utilities)) that will get the 'Interface Summary' information for each individual structure. This is the equivalent to the Summary on the left side of the 'Prot-prot' tab.
import os file_needed = "pdb_code_to_prot_prot_interactions_via_PDBsum.py" if not os.path.isfile(file_needed): !curl -OL https://raw.githubusercontent.com/fomightez/structurework/master/pdbsum-utilities/{file_needed}
_____no_output_____
MIT
notebooks/Automagically making a table of all protein-protein interactions for two structures.ipynb
fomightez/pdbsum-binder
Import the main function of that script by running the next cell.
from pdb_code_to_prot_prot_interactions_via_PDBsum import pdb_code_to_prot_prot_interactions_via_PDBsum
_____no_output_____
MIT
notebooks/Automagically making a table of all protein-protein interactions for two structures.ipynb
fomightez/pdbsum-binder
The next cell gets the interaction summary for each structure and to get the pairs need to build the table described at the top of [Using snakemake to highlight changes in multiple protein-protein interactions via PDBsum data](Using%20snakemake%20to%20highlight%20changes%20in%20multiple%20protein-protein%20interactions%20via%20PDBsum%20data.ipynb).
# Fetch the interacting chain-pair list for each structure from PDBsum,
# then take the union so a pair present in either structure is kept.
structure1_il = pdb_code_to_prot_prot_interactions_via_PDBsum(structure1)
structure2_il = pdb_code_to_prot_prot_interactions_via_PDBsum(structure2)
i_union = set(structure1_il) | set(structure2_il)
_____no_output_____
MIT
notebooks/Automagically making a table of all protein-protein interactions for two structures.ipynb
fomightez/pdbsum-binder
In this case the pairs of both are the same; however, the script is written to not fail if there was extra proteins present in the other. Specficially, the interacting pairs of proteins for both are checked because if one had additional chain, by getting the listing of both structures and making the union, the combinations for all would be in the list of pairs `i_union`. Next the union of all the pairs is used to make a table like constructed at the top of [Using snakemake to highlight changes in multiple protein-protein interactions via PDBsum data](Using%20snakemake%20to%20highlight%20changes%20in%20multiple%20protein-protein%20interactions%20via%20PDBsum%20data.ipynb).
# Build one table row per interacting chain pair:
# "structure1 chainA chainB structure2 chainA chainB" — the format expected
# by the snakemake pipeline's int_matrix.txt input.
s = ""
for pair in list(i_union):
    s+= f"{structure1} {pair[0]} {pair[1]} {structure2} {pair[0]} {pair[1]}\n"
# IPython %store magic: write the string `s` to the file int_matrix.txt.
%store s >int_matrix.txt
Writing 's' (str) to file 'int_matrix.txt'.
MIT
notebooks/Automagically making a table of all protein-protein interactions for two structures.ipynb
fomightez/pdbsum-binder
The table has now been stored as `int_matrix.txt`. Open the file from the Jupyter dashboard to verify. Or just run the next cell to see the contents of the file.
# IPython shell magic: show the generated interaction table for verification.
!cat int_matrix.txt
6kiz K R 6kix K R 6kiz B H 6kix B H 6kiz C E 6kix C E 6kiz B D 6kix B D 6kiz G H 6kix G H 6kiz F H 6kix F H 6kiz D F 6kix D F 6kiz A N 6kix A N 6kiz N T 6kix N T 6kiz C D 6kix C D 6kiz A B 6kix A B 6kiz C F 6kix C F 6kiz K T 6kix K T 6kiz A E 6kix A E 6kiz A G 6kix A G 6kiz C K 6kix C K 6kiz E F 6kix E F 6kiz C G 6kix C G 6kiz B G 6kix B G 6kiz N R 6kix N R 6kiz D N 6kix D N 6kiz B N 6kix B N 6kiz K N 6kix K N
MIT
notebooks/Automagically making a table of all protein-protein interactions for two structures.ipynb
fomightez/pdbsum-binder
That's the table in the file that needed to be made. The rest of the process pickes up with 'Step 3' of [Using snakemake to highlight changes in multiple protein-protein interactions via PDBsum data](Using%20snakemake%20to%20highlight%20changes%20in%20multiple%20protein-protein%20interactions%20via%20PDBsum%20data.ipynb).To make that clear, this following cell will run the snakemake pipeline. Consult the subsequent steps of [Using snakemake to highlight changes in multiple protein-protein interactions via PDBsum data](Using%20snakemake%20to%20highlight%20changes%20in%20multiple%20protein-protein%20interactions%20via%20PDBsum%20data.ipynb) to see what to do after it completes all the possible pairs.
# IPython shell magic: run the snakemake pipeline (single core) to generate
# one interactions-report notebook per chain pair in int_matrix.txt.
!snakemake --cores 1
Building DAG of jobs... Using shell: /bin/bash Provided cores: 1 (use --cores to define parallelism) Rules claiming more threads will be scaled down. Job counts: count jobs 1 all 23 convert_scripts_to_nb_and_run_using_jupytext 1 make_archive 1 read_table_and_create_py 26  [Mon Feb 8 22:11:09 2021] rule read_table_and_create_py: input: int_matrix.txt output: interactions_report_for_6kiz_K_R_6kix_K_R.py, interactions_report_for_6kiz_B_H_6kix_B_H.py, interactions_report_for_6kiz_C_E_6kix_C_E.py, interactions_report_for_6kiz_B_D_6kix_B_D.py, interactions_report_for_6kiz_G_H_6kix_G_H.py, interactions_report_for_6kiz_F_H_6kix_F_H.py, interactions_report_for_6kiz_D_F_6kix_D_F.py, interactions_report_for_6kiz_A_N_6kix_A_N.py, interactions_report_for_6kiz_N_T_6kix_N_T.py, interactions_report_for_6kiz_C_D_6kix_C_D.py, interactions_report_for_6kiz_A_B_6kix_A_B.py, interactions_report_for_6kiz_C_F_6kix_C_F.py, interactions_report_for_6kiz_K_T_6kix_K_T.py, interactions_report_for_6kiz_A_E_6kix_A_E.py, interactions_report_for_6kiz_A_G_6kix_A_G.py, interactions_report_for_6kiz_C_K_6kix_C_K.py, interactions_report_for_6kiz_E_F_6kix_E_F.py, interactions_report_for_6kiz_C_G_6kix_C_G.py, interactions_report_for_6kiz_B_G_6kix_B_G.py, interactions_report_for_6kiz_N_R_6kix_N_R.py, interactions_report_for_6kiz_D_N_6kix_D_N.py, interactions_report_for_6kiz_B_N_6kix_B_N.py, interactions_report_for_6kiz_K_N_6kix_K_N.py jobid: 3  Job counts: count jobs 1 read_table_and_create_py 1 [Mon Feb 8 22:11:10 2021] Finished job 3. 
1 of 26 steps (4%) done  [Mon Feb 8 22:11:10 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_B_N_6kix_B_N.py output: interactions_report_for_6kiz_B_N_6kix_B_N.ipynb jobid: 24 wildcards: details=6kiz_B_N_6kix_B_N  [jupytext] Reading interactions_report_for_6kiz_B_N_6kix_B_N.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_B_N_6kix_B_N.ipynb [Mon Feb 8 22:11:25 2021] Finished job 24. 2 of 26 steps (8%) done  [Mon Feb 8 22:11:25 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_A_N_6kix_A_N.py output: interactions_report_for_6kiz_A_N_6kix_A_N.ipynb jobid: 10 wildcards: details=6kiz_A_N_6kix_A_N  [jupytext] Reading interactions_report_for_6kiz_A_N_6kix_A_N.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_A_N_6kix_A_N.ipynb [Mon Feb 8 22:11:32 2021] Finished job 10. 3 of 26 steps (12%) done  [Mon Feb 8 22:11:32 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_B_H_6kix_B_H.py output: interactions_report_for_6kiz_B_H_6kix_B_H.ipynb jobid: 4 wildcards: details=6kiz_B_H_6kix_B_H  [jupytext] Reading interactions_report_for_6kiz_B_H_6kix_B_H.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_B_H_6kix_B_H.ipynb [Mon Feb 8 22:11:40 2021] Finished job 4. 4 of 26 steps (15%) done  [Mon Feb 8 22:11:40 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_C_K_6kix_C_K.py output: interactions_report_for_6kiz_C_K_6kix_C_K.ipynb jobid: 18 wildcards: details=6kiz_C_K_6kix_C_K  [jupytext] Reading interactions_report_for_6kiz_C_K_6kix_C_K.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_C_K_6kix_C_K.ipynb [Mon Feb 8 22:11:48 2021] Finished job 18. 
5 of 26 steps (19%) done  [Mon Feb 8 22:11:48 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_N_T_6kix_N_T.py output: interactions_report_for_6kiz_N_T_6kix_N_T.ipynb jobid: 11 wildcards: details=6kiz_N_T_6kix_N_T  [jupytext] Reading interactions_report_for_6kiz_N_T_6kix_N_T.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_N_T_6kix_N_T.ipynb [Mon Feb 8 22:11:56 2021] Finished job 11. 6 of 26 steps (23%) done  [Mon Feb 8 22:11:56 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_G_H_6kix_G_H.py output: interactions_report_for_6kiz_G_H_6kix_G_H.ipynb jobid: 7 wildcards: details=6kiz_G_H_6kix_G_H  [jupytext] Reading interactions_report_for_6kiz_G_H_6kix_G_H.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_G_H_6kix_G_H.ipynb [Mon Feb 8 22:12:04 2021] Finished job 7. 7 of 26 steps (27%) done  [Mon Feb 8 22:12:04 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_B_G_6kix_B_G.py output: interactions_report_for_6kiz_B_G_6kix_B_G.ipynb jobid: 21 wildcards: details=6kiz_B_G_6kix_B_G  [jupytext] Reading interactions_report_for_6kiz_B_G_6kix_B_G.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_B_G_6kix_B_G.ipynb [Mon Feb 8 22:12:12 2021] Finished job 21. 8 of 26 steps (31%) done  [Mon Feb 8 22:12:12 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_D_F_6kix_D_F.py output: interactions_report_for_6kiz_D_F_6kix_D_F.ipynb jobid: 9 wildcards: details=6kiz_D_F_6kix_D_F  [jupytext] Reading interactions_report_for_6kiz_D_F_6kix_D_F.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_D_F_6kix_D_F.ipynb [Mon Feb 8 22:12:19 2021] Finished job 9. 
9 of 26 steps (35%) done  [Mon Feb 8 22:12:19 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_D_N_6kix_D_N.py output: interactions_report_for_6kiz_D_N_6kix_D_N.ipynb jobid: 23 wildcards: details=6kiz_D_N_6kix_D_N  [jupytext] Reading interactions_report_for_6kiz_D_N_6kix_D_N.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_D_N_6kix_D_N.ipynb [Mon Feb 8 22:12:27 2021] Finished job 23. 10 of 26 steps (38%) done  [Mon Feb 8 22:12:27 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_C_D_6kix_C_D.py output: interactions_report_for_6kiz_C_D_6kix_C_D.ipynb jobid: 12 wildcards: details=6kiz_C_D_6kix_C_D  [jupytext] Reading interactions_report_for_6kiz_C_D_6kix_C_D.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_C_D_6kix_C_D.ipynb [Mon Feb 8 22:12:35 2021] Finished job 12. 11 of 26 steps (42%) done  [Mon Feb 8 22:12:35 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_C_F_6kix_C_F.py output: interactions_report_for_6kiz_C_F_6kix_C_F.ipynb jobid: 14 wildcards: details=6kiz_C_F_6kix_C_F  [jupytext] Reading interactions_report_for_6kiz_C_F_6kix_C_F.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_C_F_6kix_C_F.ipynb [Mon Feb 8 22:12:44 2021] Finished job 14. 12 of 26 steps (46%) done  [Mon Feb 8 22:12:44 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_E_F_6kix_E_F.py output: interactions_report_for_6kiz_E_F_6kix_E_F.ipynb jobid: 19 wildcards: details=6kiz_E_F_6kix_E_F  [jupytext] Reading interactions_report_for_6kiz_E_F_6kix_E_F.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_E_F_6kix_E_F.ipynb [Mon Feb 8 22:12:52 2021] Finished job 19. 
13 of 26 steps (50%) done  [Mon Feb 8 22:12:52 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_K_R_6kix_K_R.py output: interactions_report_for_6kiz_K_R_6kix_K_R.ipynb jobid: 2 wildcards: details=6kiz_K_R_6kix_K_R  [jupytext] Reading interactions_report_for_6kiz_K_R_6kix_K_R.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_K_R_6kix_K_R.ipynb [Mon Feb 8 22:13:01 2021] Finished job 2. 14 of 26 steps (54%) done  [Mon Feb 8 22:13:01 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_A_E_6kix_A_E.py output: interactions_report_for_6kiz_A_E_6kix_A_E.ipynb jobid: 16 wildcards: details=6kiz_A_E_6kix_A_E  [jupytext] Reading interactions_report_for_6kiz_A_E_6kix_A_E.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_A_E_6kix_A_E.ipynb [Mon Feb 8 22:13:10 2021] Finished job 16. 15 of 26 steps (58%) done  [Mon Feb 8 22:13:10 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_C_G_6kix_C_G.py output: interactions_report_for_6kiz_C_G_6kix_C_G.ipynb jobid: 20 wildcards: details=6kiz_C_G_6kix_C_G  [jupytext] Reading interactions_report_for_6kiz_C_G_6kix_C_G.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_C_G_6kix_C_G.ipynb [Mon Feb 8 22:13:19 2021] Finished job 20. 16 of 26 steps (62%) done  [Mon Feb 8 22:13:19 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_B_D_6kix_B_D.py output: interactions_report_for_6kiz_B_D_6kix_B_D.ipynb jobid: 6 wildcards: details=6kiz_B_D_6kix_B_D  [jupytext] Reading interactions_report_for_6kiz_B_D_6kix_B_D.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_B_D_6kix_B_D.ipynb [Mon Feb 8 22:13:27 2021] Finished job 6. 
17 of 26 steps (65%) done  [Mon Feb 8 22:13:27 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_F_H_6kix_F_H.py output: interactions_report_for_6kiz_F_H_6kix_F_H.ipynb jobid: 8 wildcards: details=6kiz_F_H_6kix_F_H  [jupytext] Reading interactions_report_for_6kiz_F_H_6kix_F_H.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_F_H_6kix_F_H.ipynb [Mon Feb 8 22:13:35 2021] Finished job 8. 18 of 26 steps (69%) done  [Mon Feb 8 22:13:35 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_N_R_6kix_N_R.py output: interactions_report_for_6kiz_N_R_6kix_N_R.ipynb jobid: 22 wildcards: details=6kiz_N_R_6kix_N_R  [jupytext] Reading interactions_report_for_6kiz_N_R_6kix_N_R.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_N_R_6kix_N_R.ipynb [Mon Feb 8 22:13:43 2021] Finished job 22. 19 of 26 steps (73%) done  [Mon Feb 8 22:13:43 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_K_N_6kix_K_N.py output: interactions_report_for_6kiz_K_N_6kix_K_N.ipynb jobid: 25 wildcards: details=6kiz_K_N_6kix_K_N  [jupytext] Reading interactions_report_for_6kiz_K_N_6kix_K_N.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_K_N_6kix_K_N.ipynb [Mon Feb 8 22:13:53 2021] Finished job 25. 20 of 26 steps (77%) done  [Mon Feb 8 22:13:53 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_A_B_6kix_A_B.py output: interactions_report_for_6kiz_A_B_6kix_A_B.ipynb jobid: 13 wildcards: details=6kiz_A_B_6kix_A_B  [jupytext] Reading interactions_report_for_6kiz_A_B_6kix_A_B.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_A_B_6kix_A_B.ipynb [Mon Feb 8 22:14:01 2021] Finished job 13. 
21 of 26 steps (81%) done  [Mon Feb 8 22:14:01 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_K_T_6kix_K_T.py output: interactions_report_for_6kiz_K_T_6kix_K_T.ipynb jobid: 15 wildcards: details=6kiz_K_T_6kix_K_T  [jupytext] Reading interactions_report_for_6kiz_K_T_6kix_K_T.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_K_T_6kix_K_T.ipynb [Mon Feb 8 22:14:11 2021] Finished job 15. 22 of 26 steps (85%) done  [Mon Feb 8 22:14:11 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_A_G_6kix_A_G.py output: interactions_report_for_6kiz_A_G_6kix_A_G.ipynb jobid: 17 wildcards: details=6kiz_A_G_6kix_A_G  [jupytext] Reading interactions_report_for_6kiz_A_G_6kix_A_G.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_A_G_6kix_A_G.ipynb [Mon Feb 8 22:14:19 2021] Finished job 17. 23 of 26 steps (88%) done  [Mon Feb 8 22:14:19 2021] rule convert_scripts_to_nb_and_run_using_jupytext: input: interactions_report_for_6kiz_C_E_6kix_C_E.py output: interactions_report_for_6kiz_C_E_6kix_C_E.ipynb jobid: 5 wildcards: details=6kiz_C_E_6kix_C_E  [jupytext] Reading interactions_report_for_6kiz_C_E_6kix_C_E.py in format py [jupytext] Executing notebook with kernel python3 [jupytext] Writing interactions_report_for_6kiz_C_E_6kix_C_E.ipynb [Mon Feb 8 22:14:28 2021] Finished job 5. 
24 of 26 steps (92%) done  [Mon Feb 8 22:14:28 2021] rule make_archive: input: interactions_report_for_6kiz_K_R_6kix_K_R.ipynb, interactions_report_for_6kiz_B_H_6kix_B_H.ipynb, interactions_report_for_6kiz_C_E_6kix_C_E.ipynb, interactions_report_for_6kiz_B_D_6kix_B_D.ipynb, interactions_report_for_6kiz_G_H_6kix_G_H.ipynb, interactions_report_for_6kiz_F_H_6kix_F_H.ipynb, interactions_report_for_6kiz_D_F_6kix_D_F.ipynb, interactions_report_for_6kiz_A_N_6kix_A_N.ipynb, interactions_report_for_6kiz_N_T_6kix_N_T.ipynb, interactions_report_for_6kiz_C_D_6kix_C_D.ipynb, interactions_report_for_6kiz_A_B_6kix_A_B.ipynb, interactions_report_for_6kiz_C_F_6kix_C_F.ipynb, interactions_report_for_6kiz_K_T_6kix_K_T.ipynb, interactions_report_for_6kiz_A_E_6kix_A_E.ipynb, interactions_report_for_6kiz_A_G_6kix_A_G.ipynb, interactions_report_for_6kiz_C_K_6kix_C_K.ipynb, interactions_report_for_6kiz_E_F_6kix_E_F.ipynb, interactions_report_for_6kiz_C_G_6kix_C_G.ipynb, interactions_report_for_6kiz_B_G_6kix_B_G.ipynb, interactions_report_for_6kiz_N_R_6kix_N_R.ipynb, interactions_report_for_6kiz_D_N_6kix_D_N.ipynb, interactions_report_for_6kiz_B_N_6kix_B_N.ipynb, interactions_report_for_6kiz_K_N_6kix_K_N.ipynb output: interactions_report_nbsFeb0820212211.tar.gz jobid: 1  Be sure to download interactions_report_nbsFeb0820212211.tar.gz. [Mon Feb 8 22:14:28 2021] Finished job 1. 25 of 26 steps (96%) done  [Mon Feb 8 22:14:28 2021] localrule all: input: interactions_report_nbsFeb0820212211.tar.gz jobid: 0  [Mon Feb 8 22:14:28 2021] Finished job 0. 26 of 26 steps (100%) done Complete log: /home/jovyan/notebooks/.snakemake/log/2021-02-08T221108.464378.snakemake.log
MIT
notebooks/Automagically making a table of all protein-protein interactions for two structures.ipynb
fomightez/pdbsum-binder
Car Decor Sales Forecasting - Perfumes
Summary of the Code below : 1. Establish MySQL Connection and load data 2. Data Preprocessing (Typecasting and Resampling daily data to monthly) 3. Visualizing Rolling statistics to observe variation in mean and standard deviation for selected Feature. 4. Checking for Data Stationarity using Augmented Dickey-Fuller Test for the feature 5. Hyper-parameter Tuning using ACF and PACF plots for building SARIMA Model (this process takes little time) 6. Models (a) SARIMA (b) HoltWinters Exponential Smoothing with Additive Seasonality & Additive Trend (c) FB Prophet (d) Auto Time Series 7. Evaluation of the Models 8. Saving the model with least MAPE 9. Loading saved model (.pkl) to predict sales for 12 months. 10. Closing MySQL Connection
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
Importing Libraries
import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline from sklearn.metrics import mean_squared_error from math import sqrt # Connecting Python to MySQL for fetching data import mysql.connector import warnings from statsmodels.tools.sm_exceptions import ConvergenceWarning warnings.simplefilter('ignore', ConvergenceWarning)
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
MySQL Connection to fetch data
# Fetch every row of the `decorsales` table into `records`, capturing the
# column names from the cursor description for later DataFrame construction.
try:
    # NOTE(review): credentials are hard-coded in the notebook; move them to
    # environment variables or a config file before sharing/production use.
    connection = mysql.connector.connect(host='localhost',
                                         database='car_decors',
                                         user='root',
                                         password='***********')
    sql_select_Query = "SELECT * FROM decorsales"
    cursor = connection.cursor()
    cursor.execute(sql_select_Query)
    # Column names come from cursor.description (first element of each entry).
    # Fix: the original assigned len(cursor.description) to `columns` and then
    # immediately overwrote it — that dead assignment is removed.
    columns = [i[0] for i in cursor.description]
    print(columns)
    # get all records
    records = cursor.fetchall()
    print("Total number of rows in table: ", cursor.rowcount)
except mysql.connector.Error as e:
    print("Error reading data from MySQL table", e)
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
Data Cleaning and Exploratory Data Analysis Converting fetched records to Pandas dataframe
# Convert fetched rows to an array, keep the first 25 columns, and build the
# working DataFrame using the column names captured from the cursor.
records = np.array(records)
records = records[:,0:25]
decor_sales=pd.DataFrame(records,columns=columns)
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
Type Casting Date and other features
decor_sales.dtypes  # inspect dtypes before casting (display-only in a notebook)
# Parse the Date column to datetime and cast all sales-count columns to int32.
decor_sales.Date = pd.to_datetime(decor_sales.Date)
decor_sales.iloc[:,1:] = decor_sales.iloc[:,1:].astype("int32")
decor_sales.dtypes  # confirm the casts took effect
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
Creating Subset of Decor Sales Dataset and resampling Monthly Time Series
# Build the monthly series: index by Date and sum daily sales into
# month-start ("MS") buckets.
df = decor_sales
df = df.set_index('Date')
df = df.resample("MS").sum()
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
Note : Time period options when resampling a time series MS - Monthly ; W - Weekly ; QS - Quarterly ; YS - Yearly
###### Data Visualization
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
# Global plot defaults: 16x8-inch figures with seaborn's dark-grid theme.
plt.rc("figure", figsize=(16,8))
sns.set_style('darkgrid')
###### Rolling statistics to observe variation in mean and standard deviation.
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
# Overlay the 12-month rolling mean and std on the raw Perfumes series to
# eyeball whether mean/variance are stable over time (stationarity check).
timeseries = df ['Perfumes']
timeseries.rolling(12).mean().plot(label='12 Month Rolling Mean', marker='.')
timeseries.rolling(12).std().plot(label='12 Month Rolling Std', marker='.')
timeseries.plot(marker='.')
plt.title('Rolling Statistics to observe variation in Mean and Standard Deviation', fontsize = 18, fontweight = 'bold')
plt.xlabel('Year', fontsize = 14)
plt.ylabel('Sales (Number of Units)', fontsize = 14)
plt.legend()
# The plot shows, there is nearly a constant mean and standard deviation except noise in Qtr 2 - 2020 (Lockdown period)
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
Checking Seasonalty and Trend components for the feature
from statsmodels.tsa.seasonal import seasonal_decompose

# Additive decomposition (trend + seasonal + residual) with a 12-month period.
add = seasonal_decompose(df["Perfumes"],model="additive",period=12)
add.plot();
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
Decomposition plot shows constant trend with noise in Qtr 2 - 2020 and seasonality is additive in nature. The data is seasonal and follows constant trend. Also, the average value or the mean of the residuals seem to be zero which holds our assumption.
##### Checking for Data Stationarity using Augmented Dickey-Fuller Test
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
from statsmodels.tsa.stattools import adfuller

def check_adf(time_series):
    """Run the Augmented Dickey-Fuller test on *time_series* and print the
    statistics plus a stationarity verdict at the 0.05 significance level."""
    # Bug fix: the original called adfuller(df['Perfumes']) regardless of the
    # argument, silently ignoring the series passed in. Use the parameter.
    test_result = adfuller(time_series)
    print('ADF Test:')
    labels = ['ADF Statistic','p-value','No. of Lags Used','Number of Observations Used']
    for value,label in zip(test_result,labels):
        print(label+': '+str(value)+str("\n"))
    # test_result[1] is the p-value; p <= 0.05 rejects the unit-root null.
    if test_result[1] <= 0.05:
        print("Reject null hypothesis; Data is stationary")
    else:
        print("Fail to reject H0; Data is non-stationary")
If the data is non-stationary, we need to apply differencing to make it stationary: `df['Perfumes'] = df['Perfumes'] - df['Perfumes'].shift(1)`, then re-run `check_adf(df['Perfumes'].dropna())`. If the data is still non-stationary, difference again with subsequent shifts.
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
# Run the ADF stationarity test on the monthly Perfumes series.
check_adf(df['Perfumes'])
# Adfuller test Results for all variables
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
from statsmodels.tsa.stattools import adfuller

def adfuller_parameter(x):
    """Run the ADF test on every column of DataFrame *x*; return a DataFrame
    of column name, p-value, and the number of lags adfuller used."""
    P = []
    columns = []
    used_lag = []
    for i in x.columns:
        # adfuller returns (stat, p-value, usedlag, nobs, critical values, icbest)
        test_stats,p,used_lags,nobs,critical_value,ic_best = adfuller(x[i])
        columns.append(i)
        P.append(p)
        used_lag.append(used_lags)
    return pd.DataFrame({"COLUMNS":columns,"P_VALUE":P,"MAX_USED_LAG":used_lag})

adfuller_parameter(df)
Looking at the adfuller test results, we conclude that no differencing (0 shifts) is needed to make the Perfumes data stationary.
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
Hyper-parameter Tuning Autocorrelation Function (ACF) and Partial Autocorrelation Function (PACF) plots
# By looking at ACF pot and PACF plot we decide the value p(Auto regressive) and q(Moving average) # p = sudden shuts off in pacf plot. # q = Exponential drop in acf plot. # d = degree of differencing/shift by adfuller test #Auto Regressive (p) # Identification of an AR model is often best done with the PACF. # For an AR model, the theoretical PACF “shuts off” past the order of the model. # The phrase “shuts off” means that in theory the partial autocorrelations are equal to 0 beyond that point. # Put another way, the number of non-zero partial autocorrelations gives the order of the AR model. # By the “order of the model” we mean the most extreme lag of x that is used as a predictor. # Integration (d) # Integration paramter is choosen through how much value you have differentiated from original # For a stationary data its either be 0 or 1 # Moving Average (q) # the theoretical PACF does not shut off, but instead tapers or exponetially decrease toward 0 in some manner. # A clearer pattern for an MA model is in the ACF. # The ACF will have non-zero autocorrelations only at lags involved in the model.
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import statsmodels.api as sm

# ACF (hints q) and PACF (hints p) side by side, up to 12 lags.
fig, ax = plt.subplots(1,2, figsize=(15,5))
sm.graphics.tsa.plot_acf(df["Perfumes"], lags=12, title = 'ACF Plot', ax=ax[0])
sm.graphics.tsa.plot_pacf(df["Perfumes"], lags=12, title = 'PACF Plot',ax=ax[1])
plt.show()
### Model Building - SARIMA Model ( Seasonal ARIMA Model ) ###### Train Test Split
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
# Chronological split: no shuffling, the last ~5% of months form the test set.
train_df = df["Perfumes"].iloc[0:int(len(df)*.95)]  # train model with approx 95% data
test_df = df["Perfumes"].iloc[int(len(train_df)):]  # test model with 5% data
print("Train_df : ",len(train_df))
print("Test_df : ",len(test_df))
###### User Defined Function to calculate the MAPE value
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
def mape(y_true, y_pred):
    """Mean Absolute Percentage Error of *y_pred* against *y_true*, in percent.

    NOTE(review): divides by y_true element-wise, so a zero in the actuals
    produces inf/nan.
    """
    actual = np.asarray(y_true)
    forecast = np.asarray(y_pred)
    pct_errors = np.abs((actual - forecast) / actual)
    return pct_errors.mean() * 100
###### Automated Hyperparameter tuning
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
import itertools as i

# Candidate (p, d, q) ranges for the grid search.
p = range(0,3)
d = range(0,2)
q = range(0,3)
pdq_combo = list(i.product(p,d,q))  # every (p,d,q) combination as a tuple

error = []
aic_sarima = []
order_arima = []
order_sarima = []
seasonality = 12  # monthly data -> 12-period seasonal cycle

# Fit SARIMAX for every (p,d,q) x (P,D,Q,12) combination, recording the
# test-set MAPE and AIC of each successful fit.
for pdq in pdq_combo:
    for PDQ in pdq_combo:
        try:
            SEASONAL_ORDER = list(PDQ)
            SEASONAL_ORDER.append(seasonality)
            model = sm.tsa.SARIMAX(train_df, order=(pdq), seasonal_order=tuple(SEASONAL_ORDER))
            result = model.fit(disp=0)
            pred = result.predict(start=len(train_df), end=len(df)-1)
            eror = mape(test_df, pred)
            aic_sarima.append(result.aic)
            order_arima.append(pdq)
            order_sarima.append(tuple(SEASONAL_ORDER))
            error.append(eror)
        except Exception:
            # Fix: narrowed from a bare `except:` (which also swallows
            # KeyboardInterrupt/SystemExit). Skip parameter sets that fail
            # to converge or raise during fitting.
            continue

# Creating a dataframe of seasonality orders and errors, best (lowest MAPE) first.
df_error = pd.DataFrame({"arima_order":order_arima, "sarima_order": order_sarima, "error":error, "aic":aic_sarima})
df_error = df_error.sort_values(by="error", ascending=True)
df_error.reset_index(inplace=True, drop=True)

# Best parameter selection: first row holds the lowest-error combination.
p_d_q = df_error.iloc[0,0]  # best arima order
P_D_Q = df_error.iloc[0,1]  # best seasonal order
print("Best p_d_q parameter : ", p_d_q)
print("Best P_D_Q parameter : ", P_D_Q)
###### Model with best parameter
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
# Refit SARIMAX on the training split using the best orders from the grid search.
sarima_model = sm.tsa.SARIMAX(train_df, order=(p_d_q), seasonal_order=(P_D_Q))
sarima_results = sarima_model.fit(disp = 0)
# In-sample forecast over the test index range.
sarima_pred = sarima_results.predict(start=test_df.index[0],end=test_df.index[-1])
# Dynamic forecast for positions 75-86 — presumably 12 months beyond the
# training data; TODO confirm against len(train_df).
sarima_pred_large = sarima_results.predict(start=75,end=86,dynamic=True)
print(sarima_results.summary())
sarima_diagnostics = sarima_results.plot_diagnostics(figsize=(16,8))
# Insights from these diagnostic plot : # 1.The top left plot shows the residuals over time. # The plot shows our residuals are fluctuating around mean 0 there is uniform deviation over time # except some noise in second quarter of 2021 due to lockdown imposed by government with effect of COVID-19 pandemic. # 2.In the top-right plot, # We see that the KDE follows closely with the N(0,1) line to indicate that the residuals are normally distributed. # This line is the standard notation for a normal distribution with a mean of 0 and a standard deviation of 1. # In our plot residuals are normally distributed. # 3.In the bottom left qq-plot, # We see the ordered distribution of residuals(blue dots) following the linear trend(red line) # of the samples taken from a standard normal distribution with N(0, 1). # 4.The autocorrelation visual (called a “correlogram”) on the bottom right shows that- # The time series residuals have a low correlation with the lagged versions of itself # (that is, the majority of dots fall into the blue shaded area).
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
# Predicted values - Point estimation over the test index range.
sarima_prediction = sarima_results.get_prediction(start = test_df.index[0], end = test_df.index[-1], dynamic = True, full_results = True)
sarima_point_estimation = sarima_prediction.predicted_mean
sarima_point_estimation

# Checking MAPE of the point forecasts against the test set.
mape(test_df, sarima_point_estimation)

# At 95% confidence interval
sarima_pred_range = sarima_prediction.conf_int(alpha = 0.05)
sarima_pred_range

# Plotting Sarima Prediction: train/test series, forecast, and shaded CI band.
plt.plot(train_df,color="g",label="Train Data", marker='.')
plt.plot(test_df,color="b",label="Test Data", marker='.')
plt.plot(sarima_point_estimation,color="r",label="Forecast (Test Data)", marker='.')
# NOTE(review): `fontsize = 11` is passed to str.format (where extra kwargs
# are silently ignored), not to plt.figtext — the closing parenthesis looks
# misplaced; confirm intended placement.
plt.figtext(0.13, 0.15, '\nMAPE : {} \nSARIMA : {},{} \nAIC : {}'.format(mape(test_df, sarima_point_estimation), p_d_q, P_D_Q, sarima_results.aic, fontsize = 11))
plt.fill_between(sarima_pred_range.index,sarima_pred_range.iloc[:,0],sarima_pred_range.iloc[:,1],color='b',alpha=.2)
plt.legend(loc="upper right")
############################################################################################################################
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
Holt Winters Exponential Smoothing with Additive Seasonality and Additive Trend
# Fit a Holt-Winters exponential smoothing model with an additive trend and
# additive seasonality (12-month seasonal period) on the training series,
# then forecast over the hold-out window.
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.holtwinters import ExponentialSmoothing
#
hwe_model_add_add = ExponentialSmoothing(train_df, seasonal ="add", trend = "add", seasonal_periods = 12).fit()
# Forecast over exactly the same index span as the test set so the two can
# be compared point-for-point.
pred_hwe_add_add = hwe_model_add_add.predict(start = test_df.index[0], end = test_df.index[-1])
pred_hwe_add_add
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
Plotting Holt Winters Model
# Plot train/test series against the Holt-Winters forecast and annotate the
# chart with the model's MAPE and AIC.
plt.plot(train_df,color="g",label="Train Data")
plt.plot(test_df,color="b",label="Test Data")
plt.plot(pred_hwe_add_add,color="r",label="Forecast (Test Data)")
plt.suptitle('Model : Holt Winters', fontsize = 12, fontweight = 'bold')
# NOTE(review): the title says "ANDROID HEAD UNITS" while this notebook models
# Perfumes -- looks copy-pasted from a sibling notebook; confirm intended name.
plt.title('Car Decors - ANDROID HEAD UNITS', fontsize = 18, fontweight = 'bold')
plt.figtext(0.13, 0.14, '\nMAPE : {} \nAIC : {}'.format(mape(test_df, pred_hwe_add_add), hwe_model_add_add.aic))
plt.xlabel('Year', fontsize = 14)
plt.ylabel('Sales (Number of Units)', fontsize = 14)
plt.legend(loc="best")

# Report the mean absolute percentage error on the hold-out set.
mape(test_df, pred_hwe_add_add)
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
### FB Prophet Model
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
Loading Librariesfrom fbprophet import Prophetfrom fbprophet.plot import plot_plotlydf1 = decor_salesdf1 = df1.set_index('Date')df1 = df1.resample("MS").sum()df1.reset_index(inplace=True) train_df1 = df1[["Date","Perfumes"]].iloc[0:int(len(df1)*.95)] train model with approx 95% datatest_df1 = df1[["Date","Perfumes"]].iloc[int(len(train_df1)):] test model with 5% dataprint("Train : ",len(train_df1))print("Test : ",len(test_df1)) train_df1.columns = ["ds","y"]test_df1.columns = ["ds","y"] Fitting the Modelprophet_model = Prophet().fit(train_df1) Define the period for which we want a predictionfuture = list()for i in range(1, 5): date = '2021-%02d' % i future.append([date])future = pd.DataFrame(future)future.columns = ['ds']future['ds']= pd.to_datetime(future['ds'])future forecast = prophet_model.predict(future)print(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']]) test_df1=test_df1.set_index("ds")train_df1 = train_df1.set_index("ds")forecast=forecast.set_index("ds") plt.style.use("ggplot")plt.plot(train_df1['y'],color="r",label="Train Data")plt.plot(test_df1['y'],color="b",label="Test Data")plt.plot(forecast["yhat"],color="g",label="Forecast (Test Data)")plt.grid( linestyle='-', linewidth=2)plt.legend(loc="best") MAPEmape(test_df1['y'], forecast['yhat']) RMSEsqrt(mean_squared_error(test_df1['y'], forecast['yhat'].tail(4)))
############################################################################################################################
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
Auto Time Series Model
# Fit an auto_timeseries model (auto model selection across SARIMA, ML, etc.)
# scored by RMSE on monthly ("MS") data, then forecast the test window.
from auto_ts import auto_timeseries

train_df2 = train_df1
test_df2 = test_df1

# non_seasonal_pdq gives the max (p,d,q) search ranges; seasonal period is 12
# months.  verbose=2 prints per-model progress.
ts_model = auto_timeseries( score_type='rmse', time_interval='MS', non_seasonal_pdq=(12,12,12), seasonality=True, seasonal_period=12, model_type="best", verbose=2)
ts_model.fit(traindata= train_df2, ts_column="ds", target="y")

# Cross-validation leaderboard and score plot for the candidate models.
ts_model.get_leaderboard()
ts_model.plot_cv_scores()

# Forecast over the hold-out rows using the best-scoring model.
future_predictions = ts_model.predict(test_df2, model='best')
future_predictions

# define the period for which we want a prediction
ts_future = list()
for i in range(1, 5):
    date = '2021-%02d' % i
    ts_future.append([date])
ts_future = pd.DataFrame(ts_future)
ts_future.columns = ['ds']
ts_future['ds']= pd.to_datetime(ts_future['ds'])
ts_model.predict(ts_future)

# Hold-out MAPE of the auto_ts forecast.
mape(test_df2["y"],future_predictions["yhat"])
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
### Models Evaluation
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
from sklearn.metrics import mean_squared_error as mseprint("\nSARIMA Trend : ", p_d_q)print("SARIMA Seasonal Order : ", P_D_Q)print("SARIMA AIC : ", sarima_results.aic)print("SARIMA RMSE : ", np.sqrt(mse(test_df,sarima_point_estimation)))print("SARIMA MAPE : ", mape(test_df, sarima_point_estimation))print("\nHolt Winters AIC : ", hwe_model_add_add.aic)print("Holt Winters RMSE : ", np.sqrt(mse(test_df,pred_hwe_add_add)))print("Holt Winters MAPE : ", mape(test_df, pred_hwe_add_add))print("\nFB Prophet RMSE : ", sqrt(mean_squared_error(test_df1['y'], forecast['yhat'])))print("FB Prophet MAPE : ", mape(test_df1['y'], forecast['yhat']))print("\nAuto Time Series: \n ", ts_model.get_leaderboard())print("Auto Time Series MAPE : ", mape(test_df2["y"],future_predictions["yhat"])) sarima = mape(test_df, sarima_point_estimation)hwinters = mape(test_df, pred_hwe_add_add)fbprophet = mape(test_df1['y'], forecast['yhat'])autots = mape(test_df2["y"],future_predictions["yhat"])mape_data = {'models':['SARIMA','HOLTWINTERS','FB_PROPHET','AUTO_TS'], 'name':['sarima_model', 'hwe_model_add_add','prophet_model','ts_model'],'mape':[sarima, hwinters, fbprophet, autots]}mape_error = pd.DataFrame(mape_data)mape_error = mape_error.sort_values(by="mape",ascending = True)mape_error.reset_index(inplace=True,drop=True)best_model = mape_error.iloc[0,0]print('\033[1m'+"Best Model with lowest MAPE : ", mape_error.iloc[0,0] + " ( " + mape_error.iloc[0,1] + " ) " + '\033[0m')print("\nMAPE ERRORS :\n\n", mape_error)
############################################################################################################################
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
Saving Model
import pickle

# Persist the SARIMA model so it can be reloaded and re-fitted later without
# re-running the notebook (the loader cell below calls .fit() after loading).
filename = 'sarima_model_perfumes.pkl'
# Use a context manager so the file handle is always closed; the original
# pickle.dump(..., open(filename, 'wb')) leaked the handle on error.
with open(filename, 'wb') as model_file:
    pickle.dump(sarima_model, model_file)
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
Testing saved Model for prediction
####### Model summary and diagnstics plot ####### with open(filename, "rb") as file: load_model = pickle.load(file) result = load_model.fit() #print(result.summary()) #diagnostics = result.plot_diagnostics(figsize=(16,8)) pred = result.get_prediction(start = 76, end = 87, dynamic = False) # Point estimation prediction = pred.predicted_mean prediction = round(prediction) prediction # Ploting final Sarima Prediction plt.plot(df['Perfumes'],color="g",label="Actual", marker='.') plt.plot(prediction,color="r",label="Forecast", marker='.') plt.suptitle('Model : SARIMA', fontsize = 12, fontweight = 'bold') plt.title('Car Decors - Perfumes', fontsize = 18, fontweight = 'bold') plt.figtext(0.13, 0.14, '\nMAPE : {} \nAIC : {}'.format(mape(test_df, sarima_point_estimation), sarima_results.aic)) plt.xlabel('Year', fontsize = 14) plt.ylabel('Sales (Number of Units)', fontsize = 14) plt.legend(loc="best")
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
Closing connection to MySQL and clearing variables from memory.
# Housekeeping (currently disabled): close the MySQL connection/cursor and
# clear all notebook globals.  Uncomment to run.
#if connection.is_connected():
#    connection.close()
#    cursor.close()
#    print("MySQL connection is closed")

# Clear all variables from memory
#globals().clear()

#####################################################################
_____no_output_____
MIT
Model Buidling/Perfumes.ipynb
sangram6n/inventory_management_car-decors_sales_forecasting
Battle of the Neighbourhoods - Toronto Author: Ganesh ChunneThis notebook contains Questions 1, 2 & 3 of the Assignment. They have been segregated by Section headers
import pandas as pd
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
Question 1 Importing Data
# Fetch the Wikipedia page listing Toronto postal codes (FSAs starting with M).
import requests
url = "https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M"
wiki_url = requests.get(url)
# Displaying the response object: <Response [200]> indicates success.
wiki_url
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
Response 200 means that we are able to make the connection to the page
# Parse every HTML table on the page into a list of DataFrames.
wiki_data = pd.read_html(wiki_url.text)
wiki_data
# Inspect how many tables were found and confirm the container type (list).
len(wiki_data), type(wiki_data)
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
We need the first table alone, so dropping the other tables
# Keep only the first table (the postal-code table); discard the rest.
wiki_data = wiki_data[0]
wiki_data
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
Dropping Borough which are not assigned
# Drop rows whose Borough was never assigned a value.
df = wiki_data[wiki_data["Borough"] != "Not assigned"]
df
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
Grouping the records based on Postal Code
# NOTE(review): groupby(...).head() returns the first 5 rows per group; since
# each postal code appears once in this table, this is likely a no-op rather
# than a true aggregation -- confirm the intent was not .agg/.join.
df = df.groupby(['Postal Code']).head()
df
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
Checking for number of records where Neighbourhood is "Not assigned"
# Count how many Neighbourhood values are still "Not assigned" (expect 0).
df.Neighbourhood.str.count("Not assigned").sum()

# Rebuild a clean 0..n-1 index, then drop the old index column it creates.
df = df.reset_index()
df
df.drop(['index'], axis = 'columns', inplace = True)
df
# Final shape check for the answer to Question 1.
df.shape
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
Answer to Question 1: We have 103 rows and 3 columns Question 2 Installing geocoder
pip install geocoder import geocoder # import geocoder
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
Tried the below approach, ran for 20 mins, then killed it. Changing the code cell to Text for now so that the run all execution doesn't stop. ```python initialize your variable to Nonelat_lng_coords = Nonepostal_code = 'M3A' loop until you get the coordinateswhile(lat_lng_coords is None): g = geocoder.google('{}, Toronto, Ontario'.format(postal_code)) lat_lng_coords = g.latlnglatitude = lat_lng_coords[0]longitude = lat_lng_coords[1]``` Alternatively, as suggested in the assignment, Importing the CSV file from the URL
# Load the postal-code -> latitude/longitude lookup provided by the course.
data = pd.read_csv("https://cocl.us/Geospatial_data")
data
# Both frames should have 103 rows so they can be joined 1:1 on Postal Code.
print("The shape of our wiki data is: ", df.shape)
print("the shape of our csv data is: ", data.shape)
The shape of our wiki data is: (103, 3) the shape of our csv data is: (103, 3)
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
Since the dimensions are the same, we can try to join on the postal codes to get the required data.Checking the column types of both the dataframes, especially Postal Code column since we are trying to join on it
# Confirm the Postal Code columns have compatible dtypes before joining.
df.dtypes
data.dtypes

# Inner join on Postal Code adds Latitude/Longitude to each neighbourhood row;
# 103 rows out confirms every postal code matched.
combined_data = df.join(data.set_index('Postal Code'), on='Postal Code', how='inner')
combined_data
combined_data.shape
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
**Solution:** We get 103 rows as expected when we do an inner join, so we have good data. Question 3 Drawing inspiration from the previous lab where we clustered the neighbourhoods of NYC, we cluster Toronto based on the similarity of their venue categories using K-means clustering and the Foursquare API.
# Geocode "Toronto, Ontario" with Nominatim to get the city-centre coordinates
# used as the base map location.  (Network call; requires a user_agent.)
from geopy.geocoders import Nominatim
address = 'Toronto, Ontario'

geolocator = Nominatim(user_agent="toronto_explorer")
location = geolocator.geocode(address)
latitude = location.latitude
longitude = location.longitude
print('The coordinates of Toronto are {}, {}.'.format(latitude, longitude))
The coordinates of Toronto are 43.6534817, -79.3839347.
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
Let's visualize the map of Toronto
import folium

# Creating the map of Toronto, centred on the geocoded city coordinates.
map_Toronto = folium.Map(location=[latitude, longitude], zoom_start=11)

# Add one circle marker per neighbourhood.  Use distinct loop-variable names
# (lat/lng): the original loop reused `latitude`/`longitude`, clobbering the
# city-centre coordinates that later cells rely on for map centring.
for lat, lng, borough, neighbourhood in zip(combined_data['Latitude'],
                                            combined_data['Longitude'],
                                            combined_data['Borough'],
                                            combined_data['Neighbourhood']):
    label = '{}, {}'.format(neighbourhood, borough)
    label = folium.Popup(label, parse_html=True)
    folium.CircleMarker(
        [lat, lng],
        radius=5,
        popup=label,
        color='red',
        fill=True
    ).add_to(map_Toronto)

map_Toronto
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
Initializing Foursquare API credentials
# Foursquare API configuration used by getNearbyVenues below.
# SECURITY NOTE(review): credentials are hard-coded and printed to the
# notebook output -- they should be rotated and moved to environment
# variables before sharing this notebook.
CLIENT_ID = '2GQBW5PR0QFXTOGCHKTRFWJBTGOFOHXW1TRTNRAFURQ5FE1X'
CLIENT_SECRET = '3QH40WMZIIDSQN1RFAVAEQHUIMOQUJPKYPABQVNTSDQJN2YD'
VERSION = 20202808  # API version date; presumably meant as YYYYMMDD -- verify
radius = 500        # search radius in metres around each neighbourhood
LIMIT = 100         # intended max venues per query (see note in getNearbyVenues)

print('Your credentails:')
print('CLIENT_ID: ' + CLIENT_ID)
print('CLIENT_SECRET:' + CLIENT_SECRET)
Your credentails: CLIENT_ID: 2GQBW5PR0QFXTOGCHKTRFWJBTGOFOHXW1TRTNRAFURQ5FE1X CLIENT_SECRET:3QH40WMZIIDSQN1RFAVAEQHUIMOQUJPKYPABQVNTSDQJN2YD
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
Next, we create a function to get all the venue categories in Toronto
def getNearbyVenues(names, latitudes, longitudes):
    """Query the Foursquare "explore" endpoint for each neighbourhood.

    Parameters
    ----------
    names, latitudes, longitudes : equal-length iterables
        Neighbourhood labels and their coordinates, iterated in lockstep.

    Returns
    -------
    pandas.DataFrame
        One row per venue with columns: Neighbourhood, Neighbourhood
        Latitude, Neighbourhood Longitude, Venue, Venue Category.

    Relies on module-level CLIENT_ID, CLIENT_SECRET, VERSION and radius.
    NOTE(review): LIMIT is defined globally but never placed in the URL,
    so the API's default result count applies per neighbourhood.
    """
    venues_list=[]
    for name, lat, lng in zip(names, latitudes, longitudes):
        # Print progress, one neighbourhood at a time.
        print(name)

        # create the API request URL
        url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}'.format(
            CLIENT_ID,
            CLIENT_SECRET,
            VERSION,
            lat,
            lng,
            radius
        )

        # make the GET request; drill into the first "group" of recommendations
        results = requests.get(url).json()["response"]['groups'][0]['items']

        # return only relevant information for each nearby venue
        venues_list.append([(
            name,
            lat,
            lng,
            v['venue']['name'],
            v['venue']['categories'][0]['name']) for v in results])

    # Flatten the per-neighbourhood lists into one DataFrame.
    nearby_venues = pd.DataFrame([item for venue_list in venues_list for item in venue_list])
    nearby_venues.columns = ['Neighbourhood',
                             'Neighbourhood Latitude',
                             'Neighbourhood Longitude',
                             'Venue',
                             'Venue Category']

    return(nearby_venues)
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
Collecting the venues in Toronto for each Neighbourhood
# Query Foursquare once per neighbourhood and collect all venues found.
venues_in_toronto = getNearbyVenues(combined_data['Neighbourhood'], combined_data['Latitude'], combined_data['Longitude'])
# Shape check: rows = total venues, columns = 5.
venues_in_toronto.shape
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
So we have 1317 records and 5 columns. Checking sample data
# Peek at the first rows of the collected venue data.
venues_in_toronto.head()
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
Checking the Venues based on Neighbourhood
# Show the first rows of each neighbourhood group (head() defaults to 5/group).
venues_in_toronto.groupby('Neighbourhood').head()
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
So there are up to 405 records shown across the neighbourhood groups. Checking for the maximum venue categories
# Group by venue category; the number of groups is the number of distinct
# categories present in the data.
venues_in_toronto.groupby('Venue Category').max()
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
There are around 232 different types of Venue Categories. Interesting! One Hot encoding the venue Categories
# One-hot encode the venue categories (empty prefix keeps the raw category
# names as column headers).
toronto_venue_cat = pd.get_dummies(venues_in_toronto[['Venue Category']], prefix="", prefix_sep="")
toronto_venue_cat
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
Adding the neighbourhood to the encoded dataframe
toronto_venue_cat['Neighbourhood'] = venues_in_toronto['Neighbourhood'] # moving neighborhood column to the first column fixed_columns = [toronto_venue_cat.columns[-1]] + list(toronto_venue_cat.columns[:-1]) toronto_venue_cat = toronto_venue_cat[fixed_columns] toronto_venue_cat.head()
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
We will group the Neighbourhoods, calculate the mean venue categories in each Neighbourhood
# Per-neighbourhood mean of each one-hot column = the frequency of each venue
# category in that neighbourhood.  This is the feature matrix for clustering.
toronto_grouped = toronto_venue_cat.groupby('Neighbourhood').mean().reset_index()
toronto_grouped.head()
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
Let's make a function to get the top most common venue categories
def return_most_common_venues(row, num_top_venues):
    """Return the labels of the `num_top_venues` largest entries in `row`.

    The first element of `row` (the neighbourhood name) is skipped; the
    remaining venue-category frequencies are ranked in descending order and
    the top category names are returned as an array of labels.
    """
    category_freqs = row.iloc[1:]
    ranked = category_freqs.sort_values(ascending=False)
    return ranked.index.values[:num_top_venues]

import numpy as np
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
There are way too many venue categories, we can take the top 10 to cluster the neighbourhoods
# Build a table with one row per neighbourhood and one column per rank
# ("1st Most Common Venue" ... "10th Most Common Venue").
num_top_venues = 10

indicators = ['st', 'nd', 'rd']

# create columns according to number of top venues; ordinal suffixes beyond
# index 2 fall back to "th".  Catch only IndexError -- the bare `except:` in
# the original would have masked unrelated bugs.
columns = ['Neighbourhood']
for ind in np.arange(num_top_venues):
    try:
        columns.append('{}{} Most Common Venue'.format(ind+1, indicators[ind]))
    except IndexError:
        columns.append('{}th Most Common Venue'.format(ind+1))

# create a new dataframe and fill each row with that neighbourhood's top-10
# venue categories (ranked by frequency).
neighborhoods_venues_sorted = pd.DataFrame(columns=columns)
neighborhoods_venues_sorted['Neighbourhood'] = toronto_grouped['Neighbourhood']

for ind in np.arange(toronto_grouped.shape[0]):
    neighborhoods_venues_sorted.iloc[ind, 1:] = return_most_common_venues(toronto_grouped.iloc[ind, :], num_top_venues)

neighborhoods_venues_sorted.head()
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
Let's make the model to cluster our Neighbourhoods
# import k-means from clustering stage
from sklearn.cluster import KMeans

# set number of clusters
k_num_clusters = 5

# Drop the label column so only the numeric venue-frequency features remain.
# Use the explicit `axis=1` keyword: the positional form `drop('Neighbourhood', 1)`
# is deprecated and removed in pandas >= 2.0.
toronto_grouped_clustering = toronto_grouped.drop('Neighbourhood', axis=1)

# run k-means clustering (fixed random_state for reproducible labels)
kmeans = KMeans(n_clusters=k_num_clusters, random_state=0).fit(toronto_grouped_clustering)
kmeans
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
Checking the labelling of our model
# Inspect the first 100 cluster assignments (one label 0..4 per neighbourhood).
kmeans.labels_[0:100]
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
Let's add the clustering Label column to the top 10 common venue categories
# Prepend the k-means cluster label as the first column of the top-10 table.
neighborhoods_venues_sorted.insert(0, 'Cluster Labels', kmeans.labels_)
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
Join toronto_grouped with combined_data on neighbourhood to add latitude & longitude for each neighborhood to prepare it for plotting
# Join cluster labels + top venues back onto the postal-code/coordinate frame
# so each neighbourhood row carries everything needed for plotting.
toronto_merged = combined_data

toronto_merged = toronto_merged.join(neighborhoods_venues_sorted.set_index('Neighbourhood'), on='Neighbourhood')

toronto_merged.head()
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
Drop all the NaN values to prevent data skew
# Drop neighbourhoods that got no cluster label (no venues returned) so the
# plotting loop below doesn't choke on NaN.
toronto_merged_nonan = toronto_merged.dropna(subset=['Cluster Labels'])
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider
Plotting the clusters on the map
import matplotlib.cm as cm
import matplotlib.colors as colors

# Render each neighbourhood as a circle marker coloured by its cluster.
map_clusters = folium.Map(location=[latitude, longitude], zoom_start=11)

# set color scheme for the clusters: k evenly spaced samples of the rainbow
# colormap, converted to hex for folium.
x = np.arange(k_num_clusters)
ys = [i + x + (i*x)**2 for i in range(k_num_clusters)]
colors_array = cm.rainbow(np.linspace(0, 1, len(ys)))
rainbow = [colors.rgb2hex(i) for i in colors_array]

# add markers to the map
# NOTE(review): markers_colors is never used; and rainbow[int(cluster-1)]
# maps cluster 0 to index -1 (the last colour) -- colours still come out
# distinct per cluster, but the indexing looks unintentional.
markers_colors = []
for lat, lon, poi, cluster in zip(toronto_merged_nonan['Latitude'], toronto_merged_nonan['Longitude'], toronto_merged_nonan['Neighbourhood'], toronto_merged_nonan['Cluster Labels']):
    label = folium.Popup('Cluster ' + str(int(cluster) +1) + '\n' + str(poi) , parse_html=True)
    folium.CircleMarker(
        [lat, lon],
        radius=5,
        popup=label,
        color=rainbow[int(cluster-1)],
        fill=True,
        fill_color=rainbow[int(cluster-1)]
    ).add_to(map_clusters)

map_clusters
_____no_output_____
Apache-2.0
appliedDataScienceCapstoneWeek3.ipynb
Margchunne28/Location-data-provider