markdown
stringlengths
0
1.02M
code
stringlengths
0
832k
output
stringlengths
0
1.02M
license
stringlengths
3
36
path
stringlengths
6
265
repo_name
stringlengths
6
127
Bootstrap: estimate the standard error of $\hat{p}$ using the bootstrap.
def bootstrap_se_est(y, stat_function, B=1000):
    """Bootstrap estimate of the standard error of a statistic.

    Parameters
    ----------
    y : array-like
        Observed (or simulated) sample.
    stat_function : callable
        Statistic to evaluate on each bootstrap resample (e.g. the estimator).
    B : int, default 1000
        Number of bootstrap resamples.

    Returns
    -------
    float
        Standard deviation of the B bootstrap statistics, i.e. the bootstrap
        estimate of the standard error.
    """
    # 1. Resample y with replacement B times.
    # 2. Evaluate the statistic on each resample.
    # 3. The std dev of those statistics estimates the standard error.
    t_boot_list = []
    for _ in range(B):
        resample = np.random.choice(y, len(y), replace=True)
        t_boot_list.append(stat_function(resample))
    return np.std(t_boot_list)


print("Standard error of p_hat computed by bootstrap:")
print(bootstrap_se_est(y, estimator))
Standard error of p_hat computed by bootstrap: 0.04889048066853097
MIT
colab/lab_1_statistical_inference_cmunoz.ipynb
cmunozcortes/ds-fundamentals
Validate the estimated standard error by computing it analytically.
def estimator_analytical_se(p, n):
    """Closed-form standard error of the sample proportion.

    For n Bernoulli(p) trials the estimator's variance is p(1-p)/n, so the
    standard error is sqrt(p(1-p)/n).
    """
    variance = p * (1 - p) / n
    return np.sqrt(variance)


print("Analytical standard error for the estimator: ", estimator_analytical_se(p, n))
Analytical standard error for the estimator: 0.04898979485566356
MIT
colab/lab_1_statistical_inference_cmunoz.ipynb
cmunozcortes/ds-fundamentals
Estimate the 95% confidence interval for $p$.
def confidence_interval_95_for_p(y):
    """95% normal-approximation confidence interval for p.

    Fix: the original called bootstrap_se_est twice, so the lower and upper
    bounds were based on two *different* bootstrap runs (asymmetric interval,
    double the work).  Compute the SE once and reuse it for both bounds.
    """
    p_hat = estimator(y)
    se = bootstrap_se_est(y, estimator)
    ci_lower = p_hat - 1.96 * se
    ci_higher = p_hat + 1.96 * se
    return (ci_lower, ci_higher)


lower, higher = confidence_interval_95_for_p(y)
print("95% confidence interval for p: ({},{})".format(lower, higher))
95% confidence interval for p: (0.5254445619916019,0.717033857596202)
MIT
colab/lab_1_statistical_inference_cmunoz.ipynb
cmunozcortes/ds-fundamentals
Validate the 95% confidence interval for $p$.
# Validate the CI by simulation: regenerate the data 1000 times, rebuild the
# 95% interval each time, and record whether it covers the true p.  The mean
# of the flags (the empirical coverage) should be close to 0.95.
ci_contains_p_flags = []
for sim in range(1000):
    y = np.random.binomial(n=1, p=p, size=n)
    ci_lower, ci_higher = confidence_interval_95_for_p(y)
    ci_contains_p_flags.append(1 if ci_lower < p < ci_higher else 0)
coverage = np.mean(ci_contains_p_flags)
print("Coverage of 95% confidence interval for p: ", coverage)
Coverage of 95% confidence interval for p: 0.93
MIT
colab/lab_1_statistical_inference_cmunoz.ipynb
cmunozcortes/ds-fundamentals
Bayesian Inference **[Optional]**Estimate $p$ using Bayesian inference. As the prior for $p$ use Normal(0.5, 0.1).
# Bayesian estimate of p with PyStan: Normal(0.5, 0.1) prior on p (truncated
# to [0, 1] by the parameter's declared bounds) and a Bernoulli likelihood
# over the n observed tosses; 4 chains x 2000 iterations.
# NOTE: "!pip install pystan" is an IPython shell magic -- it only runs
# inside a notebook cell, not in plain Python.
!pip install pystan import pystan model_code = """ data { int<lower=0> n; int<lower=0,upper=1> y[n]; } parameters { real<lower=0,upper=1> p; } model { p ~ normal(0.5, 0.1); for (i in 1:n) y[i] ~ bernoulli(p); } """ model = pystan.StanModel(model_code=model_code) fit = model.sampling(data={"n": n, "y": y}, iter=2000, chains=4, n_jobs=4) print(fit.stansummary())
_____no_output_____
MIT
colab/lab_1_statistical_inference_cmunoz.ipynb
cmunozcortes/ds-fundamentals
Compute the Bayesian inference results if our data contains 20 coin tosses instead.
# Re-run the same Stan model on a much smaller sample (20 tosses instead of
# the earlier n).  With so little data the Normal(0.5, 0.1) prior presumably
# pulls the posterior toward 0.5 -- compare the summaries to confirm.
n = 20 p = 0.6 y = np.random.binomial(1, p, n) model = pystan.StanModel(model_code=model_code) fit = model.sampling(data={"n": n, "y": y}, iter=2000, chains=4, n_jobs=4) print(fit.stansummary())
_____no_output_____
MIT
colab/lab_1_statistical_inference_cmunoz.ipynb
cmunozcortes/ds-fundamentals
XResNet baseline
# https://github.com/fastai/fastai_docs/blob/master/dev_course/dl2/11_train_imagenette.ipynb
# Reconstructed: the original cell was collapsed onto one line and split
# mid-definition ("def " / "_make_layer"), so it was not runnable as-is.

def noop(x):
    """Identity function, used where an nn.Module placeholder is needed."""
    return x


class Flatten(nn.Module):
    """Flatten (N, C, H, W) activations to (N, C*H*W) for the final Linear."""
    def forward(self, x):
        return x.view(x.size(0), -1)


def conv(ni, nf, ks=3, stride=1, bias=False):
    """'Same'-padded 2d convolution (padding = ks // 2)."""
    return nn.Conv2d(ni, nf, kernel_size=ks, stride=stride,
                     padding=ks // 2, bias=bias)


act_fn = nn.ReLU(inplace=True)


def init_cnn(m):
    """Recursively apply Kaiming init to conv/linear weights, zero biases."""
    if getattr(m, 'bias', None) is not None:
        nn.init.constant_(m.bias, 0)
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        nn.init.kaiming_normal_(m.weight)
    for l in m.children():
        init_cnn(l)


def conv_layer(ni, nf, ks=3, stride=1, zero_bn=False, act=True):
    """conv -> BN (optionally zero-initialized) -> optional ReLU."""
    bn = nn.BatchNorm2d(nf)
    # zero_bn makes the residual branch start as the identity (bag-of-tricks).
    nn.init.constant_(bn.weight, 0. if zero_bn else 1.)
    layers = [conv(ni, nf, ks, stride=stride), bn]
    if act:
        layers.append(act_fn)
    return nn.Sequential(*layers)


class ResBlock(nn.Module):
    """Pre-expansion residual block (basic for expansion=1, bottleneck otherwise)."""
    def __init__(self, expansion, ni, nh, stride=1):
        super().__init__()
        nf, ni = nh * expansion, ni * expansion
        layers = [conv_layer(ni, nh, 3, stride=stride),
                  conv_layer(nh, nf, 3, zero_bn=True, act=False)
                  ] if expansion == 1 else [
                  conv_layer(ni, nh, 1),
                  conv_layer(nh, nh, 3, stride=stride),
                  conv_layer(nh, nf, 1, zero_bn=True, act=False)]
        self.convs = nn.Sequential(*layers)
        # Identity shortcut when shapes match; 1x1 conv (and avg-pool for
        # stride 2) otherwise.
        self.idconv = noop if ni == nf else conv_layer(ni, nf, 1, act=False)
        self.pool = noop if stride == 1 else nn.AvgPool2d(2, ceil_mode=True)

    def forward(self, x):
        return act_fn(self.convs(x) + self.idconv(self.pool(x)))


class XResNet(nn.Sequential):
    """XResNet: 3-conv stem + 4 residual stages + pooled linear head."""
    @classmethod
    def create(cls, expansion, layers, c_in=3, c_out=1000):
        """Build an XResNet with the given per-stage block counts.

        `expansion` is 1 for basic blocks (18/34) and 4 for bottlenecks
        (50/101/152); `layers` lists the number of blocks per stage.
        """
        nfs = [c_in, (c_in + 1) * 8, 64, 64]
        stem = [conv_layer(nfs[i], nfs[i + 1], stride=2 if i == 0 else 1)
                for i in range(3)]
        nfs = [64 // expansion, 64, 128, 256, 512]
        res_layers = [cls._make_layer(expansion, nfs[i], nfs[i + 1],
                                      n_blocks=l, stride=1 if i == 0 else 2)
                      for i, l in enumerate(layers)]
        res = cls(
            *stem,
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            *res_layers,
            nn.AdaptiveAvgPool2d(1), Flatten(),
            nn.Linear(nfs[-1] * expansion, c_out),
        )
        init_cnn(res)
        return res

    @staticmethod
    def _make_layer(expansion, ni, nf, n_blocks, stride):
        """Stack n_blocks ResBlocks; only the first may downsample."""
        return nn.Sequential(
            *[ResBlock(expansion, ni if i == 0 else nf, nf,
                       stride if i == 0 else 1)
              for i in range(n_blocks)])


def xresnet18 (**kwargs): return XResNet.create(1, [2, 2, 2, 2], **kwargs)
def xresnet34 (**kwargs): return XResNet.create(1, [3, 4, 6, 3], **kwargs)
def xresnet50 (**kwargs): return XResNet.create(4, [3, 4, 6, 3], **kwargs)
def xresnet101(**kwargs): return XResNet.create(4, [3, 4, 23, 3], **kwargs)
def xresnet152(**kwargs): return XResNet.create(4, [3, 8, 36, 3], **kwargs)
_____no_output_____
Apache-2.0
Imagenette Simple Self Attention.ipynb
RubensZimbres/SimpleSelfAttention
XResNet with Self Attention
# Unmodified from https://github.com/fastai/fastai/blob/5c51f9eabf76853a89a9bc5741804d2ed4407e49/fastai/layers.py
# Reconstructed: the original cell was collapsed onto one line and split
# mid-definition, so it was not runnable as-is.

def conv1d(ni:int, no:int, ks:int=1, stride:int=1, padding:int=0, bias:bool=False):
    "Create and initialize a `nn.Conv1d` layer with spectral normalization."
    conv = nn.Conv1d(ni, no, ks, stride=stride, padding=padding, bias=bias)
    nn.init.kaiming_normal_(conv.weight)
    if bias:
        conv.bias.data.zero_()
    return spectral_norm(conv)


# Adapted from SelfAttention layer at https://github.com/fastai/fastai/blob/5c51f9eabf76853a89a9bc5741804d2ed4407e49/fastai/layers.py
# Inspired by https://arxiv.org/pdf/1805.08318.pdf
class SimpleSelfAttention(nn.Module):
    """Self-attention over flattened spatial positions with a learned,
    zero-initialized gamma gate (starts as the identity)."""

    def __init__(self, n_in:int, ks=1):  # , n_out:int)
        super().__init__()
        self.conv = conv1d(n_in, n_in, ks, padding=ks // 2, bias=False)
        # gamma starts at 0, so the block initially passes x through unchanged.
        self.gamma = nn.Parameter(tensor([0.]))

    def forward(self, x):
        size = x.size()
        # Collapse all spatial dims: (N, C, H, W) -> (N, C, H*W).
        x = x.view(*size[:2], -1)
        o = torch.bmm(x.permute(0, 2, 1).contiguous(), self.conv(x))
        o = self.gamma * torch.bmm(x, o) + x
        return o.view(*size).contiguous()


# unmodified from https://github.com/fastai/fastai/blob/9b9014b8967186dc70c65ca7dcddca1a1232d99d/fastai/vision/models/xresnet.py
def conv(ni, nf, ks=3, stride=1, bias=False):
    """'Same'-padded 2d convolution (padding = ks // 2)."""
    return nn.Conv2d(ni, nf, kernel_size=ks, stride=stride,
                     padding=ks // 2, bias=bias)


def noop(x):
    """Identity function, used where an nn.Module placeholder is needed."""
    return x


def conv_layer(ni, nf, ks=3, stride=1, zero_bn=False, act=True):
    """conv -> BN (optionally zero-initialized) -> optional ReLU."""
    bn = nn.BatchNorm2d(nf)
    nn.init.constant_(bn.weight, 0. if zero_bn else 1.)
    layers = [conv(ni, nf, ks, stride=stride), bn]
    if act:
        layers.append(act_fn)
    return nn.Sequential(*layers)


# Modified from https://github.com/fastai/fastai/blob/9b9014b8967186dc70c65ca7dcddca1a1232d99d/fastai/vision/models/xresnet.py
# Added self attention
class ResBlock(nn.Module):
    """Residual block with an optional SimpleSelfAttention after the convs."""

    def __init__(self, expansion, ni, nh, stride=1, sa=False):
        super().__init__()
        nf, ni = nh * expansion, ni * expansion
        layers = [conv_layer(ni, nh, 3, stride=stride),
                  conv_layer(nh, nf, 3, zero_bn=True, act=False)
                  ] if expansion == 1 else [
                  conv_layer(ni, nh, 1),
                  conv_layer(nh, nh, 3, stride=stride),
                  conv_layer(nh, nf, 1, zero_bn=True, act=False)]
        self.sa = SimpleSelfAttention(nf, ks=1) if sa else noop
        self.convs = nn.Sequential(*layers)
        self.idconv = noop if ni == nf else conv_layer(ni, nf, 1, act=False)
        self.pool = noop if stride == 1 else nn.AvgPool2d(2, ceil_mode=True)

    def forward(self, x):
        return act_fn(self.sa(self.convs(x)) + self.idconv(self.pool(x)))


# Modified from https://github.com/fastai/fastai/blob/9b9014b8967186dc70c65ca7dcddca1a1232d99d/fastai/vision/models/xresnet.py
# Added self attention
class XResNet_sa(nn.Sequential):
    """XResNet whose fourth-to-last stage's final block gets self-attention."""

    @classmethod
    def create(cls, expansion, layers, c_in=3, c_out=1000):
        nfs = [c_in, (c_in + 1) * 8, 64, 64]
        stem = [conv_layer(nfs[i], nfs[i + 1], stride=2 if i == 0 else 1)
                for i in range(3)]
        nfs = [64 // expansion, 64, 128, 256, 512]
        # Self-attention only on stage index len(layers)-4 (stage 0 for the
        # usual 4-stage configs).
        res_layers = [cls._make_layer(expansion, nfs[i], nfs[i + 1],
                                      n_blocks=l, stride=1 if i == 0 else 2,
                                      sa=True if i in [len(layers) - 4] else False)
                      for i, l in enumerate(layers)]
        res = cls(
            *stem,
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            *res_layers,
            nn.AdaptiveAvgPool2d(1), Flatten(),
            nn.Linear(nfs[-1] * expansion, c_out),
        )
        init_cnn(res)
        return res

    @staticmethod
    def _make_layer(expansion, ni, nf, n_blocks, stride, sa=False):
        """Stack n_blocks ResBlocks; only the last may carry self-attention."""
        return nn.Sequential(
            *[ResBlock(expansion, ni if i == 0 else nf, nf,
                       stride if i == 0 else 1,
                       sa if i in [n_blocks - 1] else False)
              for i in range(n_blocks)])


def xresnet50_sa (**kwargs): return XResNet_sa.create(4, [3, 4, 6, 3], **kwargs)
_____no_output_____
Apache-2.0
Imagenette Simple Self Attention.ipynb
RubensZimbres/SimpleSelfAttention
Data loading
# https://github.com/fastai/fastai/blob/master/examples/train_imagenette.py
def get_data(size, woof, bs, workers=None):
    """Build an Imagenette/Imagewoof DataBunch for the given image size.

    Parameters
    ----------
    size : int
        Target image size; also selects the smallest dataset variant that is
        still larger than the target (160 / 320 / full).
    woof : bool
        True for Imagewoof, False for Imagenette.
    bs : int
        Batch size.
    workers : int, optional
        DataLoader workers; defaults to min(8, cpus per GPU).
    """
    if size <= 128:
        path = URLs.IMAGEWOOF_160 if woof else URLs.IMAGENETTE_160
    elif size <= 224:
        path = URLs.IMAGEWOOF_320 if woof else URLs.IMAGENETTE_320
    else:
        path = URLs.IMAGEWOOF if woof else URLs.IMAGENETTE
    path = untar_data(path)

    # Split available CPUs across the GPUs used for (possibly distributed)
    # training; cap at 8 workers.
    n_gpus = num_distrib() or 1
    if workers is None:
        workers = min(8, num_cpus() // n_gpus)

    return (ImageList.from_folder(path).split_by_folder(valid='val')
            .label_from_folder().transform(([flip_lr(p=0.5)], []), size=size)
            .databunch(bs=bs, num_workers=workers)
            .presize(size, scale=(0.35, 1))
            .normalize(imagenet_stats))
_____no_output_____
Apache-2.0
Imagenette Simple Self Attention.ipynb
RubensZimbres/SimpleSelfAttention
Train
# Optimizer factory shared by all training cells below: Adam with beta2=0.99
# and eps=1e-6 (instead of the PyTorch defaults 0.999 / 1e-8).
opt_func = partial(optim.Adam, betas=(0.9,0.99), eps=1e-6)
_____no_output_____
Apache-2.0
Imagenette Simple Self Attention.ipynb
RubensZimbres/SimpleSelfAttention
Imagewoof Image size = 256
# Build the Imagewoof databunch at 256x256 with batch size 64.
image_size = 256 data = get_data(image_size,woof =True,bs=64)
_____no_output_____
Apache-2.0
Imagenette Simple Self Attention.ipynb
RubensZimbres/SimpleSelfAttention
Epochs = 5
# Shared hyperparameters: 5-epoch one-cycle runs, lr 3e-3, batch size 64,
# mixup disabled (0).
# we use the same parameters for baseline and new model epochs = 5 lr = 3e-3 bs = 64 mixup = 0
_____no_output_____
Apache-2.0
Imagenette Simple Self Attention.ipynb
RubensZimbres/SimpleSelfAttention
Baseline
# Baseline: plain xresnet50 on 10 classes, label smoothing, mixed precision.
# The learner is fit with five consecutive one-cycle schedules (the original
# cell repeated the identical fit line five times; a loop is equivalent).
m = xresnet50(c_out=10)
learn = (Learner(data, m, wd=1e-2, opt_func=opt_func,
                 metrics=[accuracy, top_k_accuracy],
                 bn_wd=False, true_wd=True,
                 loss_func=LabelSmoothingCrossEntropy()))
if mixup:
    learn = learn.mixup(alpha=mixup)
learn = learn.to_fp16(dynamic=True)
for _ in range(5):
    learn.fit_one_cycle(epochs, lr, div_factor=10, pct_start=0.3)

# Final accuracies collected over runs.
results = [61.8,64.8,57.4,62.4,63,61.8, 57.6,63,62.6, 64.8] #included some from previous notebook iteration
np.mean(results), np.std(results), np.min(results), np.max(results)
_____no_output_____
Apache-2.0
Imagenette Simple Self Attention.ipynb
RubensZimbres/SimpleSelfAttention
New model
# Self-attention model, trained with the same protocol as the baseline.
m = xresnet50_sa(c_out=10)
learn = None
gc.collect()  # drop the previous learner before building a new one
learn = (Learner(data, m, wd=1e-2, opt_func=opt_func,
                 metrics=[accuracy, top_k_accuracy],
                 bn_wd=False, true_wd=True,
                 loss_func=LabelSmoothingCrossEntropy()))
if mixup:
    learn = learn.mixup(alpha=mixup)
learn = learn.to_fp16(dynamic=True)
# Consistency fix: use `epochs` (== 5, as in the baseline cell) instead of a
# hard-coded 5, so a change to the shared hyperparameter applies here too.
# NOTE(review): this cell runs 4 one-cycle fits where the baseline runs 5 --
# kept as-authored, but confirm whether that asymmetry is intentional.
for _ in range(4):
    learn.fit_one_cycle(epochs, lr, div_factor=10, pct_start=0.3)

results = [67.4,65.8,70.6,65.8,67.8,69,65.6,66.4, 67.8,70.2]
np.mean(results), np.std(results), np.min(results), np.max(results)
_____no_output_____
Apache-2.0
Imagenette Simple Self Attention.ipynb
RubensZimbres/SimpleSelfAttention
Software License Agreement (MIT License) Copyright (c) 2020, Amirhossein Pakdaman. Simple DFS, BFS **Problem**: Implement a search tree with the following characteristics: 1. The initial state contains value 10. 2. At each step two successors are created; the value of one of them is one unit smaller than its parent's and the other is one unit larger. 3. The search tree continues up to 3 levels of depth.
# Display the reference diagram of the search tree inline in the notebook.
import IPython IPython.core.display.Image("tree.png", embed=True)
_____no_output_____
MIT
BFS_DFS_simple_example/BFS_DFS_simple_example.ipynb
amirhpd/Python_Basics
BFS
import queue

class Node:
    """A search-tree node: a value, a link to its parent, and its depth."""
    def __init__(self, value, parent, depth):
        self.value = value
        self.parent = parent
        self.depth = depth

# Breadth-first expansion of the tree: root value 10 at depth 0; every node
# above the depth limit gets two children (value-1, value+1); nodes print in
# FIFO (level) order.
#
# Fix: the original looped on `while frontier:` -- a queue.Queue is *always*
# truthy, so that condition never ends the loop; it relied on a break after
# already creating 16 unused depth-4 nodes.  Here the loop terminates on
# emptiness and depth-4 nodes are never created; the printed sequence is
# identical.
parent = Node(10, None, 0)
frontier = queue.Queue()
frontier.put(parent)
while not frontier.empty():
    current_node = frontier.get()
    if current_node.depth < 3:
        frontier.put(Node(current_node.value - 1, current_node, current_node.depth + 1))
        frontier.put(Node(current_node.value + 1, current_node, current_node.depth + 1))
    print(current_node.value)
10 9 11 8 10 10 12 7 9 9 11 9 11 11 13
MIT
BFS_DFS_simple_example/BFS_DFS_simple_example.ipynb
amirhpd/Python_Basics
DFS
class Node:
    """A search-tree node: a value, a link to its parent, and its depth."""
    def __init__(self, value, parent, depth):
        self.value = value
        self.parent = parent
        self.depth = depth

# Depth-first traversal of the same 3-level tree using a list as a LIFO
# stack.  Pushing the (value+1) child first means the (value-1) child is
# popped -- and printed -- first, giving a left-to-right preorder.
#
# Fix: the original created depth-4 nodes and then discarded them with a
# convoluted double-pop backtracking sequence inside the depth check.  This
# version never creates nodes beyond depth 3 and prints the identical
# sequence (10 9 8 7 9 10 9 11 11 10 9 11 12 11 13).
parent = Node(10, None, 0)
frontier = [parent]
while frontier:
    current_node = frontier.pop()
    print(current_node.value)
    if current_node.depth < 3:
        frontier.append(Node(current_node.value + 1, current_node, current_node.depth + 1))
        frontier.append(Node(current_node.value - 1, current_node, current_node.depth + 1))
10 9 8 7 9 10 9 11 11 10 9 11 12 11 13
MIT
BFS_DFS_simple_example/BFS_DFS_simple_example.ipynb
amirhpd/Python_Basics
FunctionsLet's say that we have some code that does some task, but the code is 25 lines long, we need to run it over 1000 items and it doesn't work in a loop. How in the world will we handle this situation? That is where functions come in really handy. Functions are a generalized block of code that allow you to run code over and over while changing its parameters if you so choose. Functions may take **(arguments)** that you are allowed to change when you call the function. It may also **return** a value.A function must be defined before you can call it. To define a function, we use the following syntax: def (arg0, arg1, arg3,...): code here must be indented. you can use arg0,...,argn within the function you can also return things return 1 This code returns 1 no matter what you tell the function Functions can take as many arguments as you wish, but they may only return 1 thing. A simple example of a familiar function is any mathematical function. Take sin(x), it is a function that takes one argument x and returns one value based on the input. Let's get familiar with functions.
def add1(x):
    """Return x incremented by one."""
    return x + 1


print(add1(1))


def xsq(x):
    """Return x squared."""
    return x ** 2


print(xsq(5))

# The payoff of a function: call it as many times as we like -- here the
# squares of 0 through 9.
for i in range(10):
    print(xsq(i))
2 25 0 1 4 9 16 25 36 49 64 81
MIT
Python Workshop/Functions.ipynb
CalPolyPat/Python-Workshop
The true power of functions is being able to call it as many times as we would like. In the previous example, we called the square function, xsq in a loop 10 times. Let's check out some more complicated examples.
def removefs(data):
    """Return a copy of data with every 'f' and 'F' removed."""
    kept = [ch for ch in data if ch != "f" and ch != "F"]
    return ''.join(kept)


print(removefs('ffffffFFFFFg'))

intro='''##Functions Let's say that we have some code that does some task, but the code is 25 lines long, we need to run it over 1000 items and it doesn't work in a loop. How in the world will we handle this situation? That is where functions come in really handy. Functions are a generalized block of code that allow you to run code over and over while changing its parameters if you so choose. Functions may take **(arguments)** that you are allowed to change when you call the function. It may also **return** a value. A function must be defined before you can call it. To define a function, we use the following syntax: def <function name>(arg0, arg1, arg3,...): #code here must be indented. #you can use arg0,...,argn within the function #you can also return things return 1 #This code returns 1 no matter what you tell the function Functions can take as many arguments as you wish, but they may only return 1 thing. A simple example of a familiar function is any mathematical function. Take sin(x), it is a function that takes one argument x and returns one value based on the input. Let's get familiar with functions."'''

print(removefs(intro))


def removevowels(data):
    """Return a copy of data with lowercase vowels (and 'y') removed.

    Note: only lowercase 'aeiouy' are stripped -- uppercase vowels survive,
    which is why 'A' remains in the demo output.
    """
    kept = [ch for ch in data if ch not in 'aeiouy']
    return ''.join(kept)


print(removevowels(intro))
##Fnctns Lt's s tht w hv sm cd tht ds sm tsk, bt th cd s 25 lns lng, w nd t rn t vr 1000 tms nd t dsn't wrk n lp. Hw n th wrld wll w hndl ths sttn? Tht s whr fnctns cm n rll hnd. Fnctns r gnrlzd blck f cd tht llw t rn cd vr nd vr whl chngng ts prmtrs f s chs. Fnctns m tk **(rgmnts)** tht r llwd t chng whn cll th fnctn. It m ls **rtrn** vl. A fnctn mst b dfnd bfr cn cll t. T dfn fnctn, w s th fllwng sntx: df <fnctn nm>(rg0, rg1, rg3,...): #cd hr mst b ndntd. # cn s rg0,...,rgn wthn th fnctn # cn ls rtrn thngs rtrn 1 #Ths cd rtrns 1 n mttr wht tll th fnctn Fnctns cn tk s mn rgmnts s wsh, bt th m nl rtrn 1 thng. A smpl xmpl f fmlr fnctn s n mthmtcl fnctn. Tk sn(x), t s fnctn tht tks n rgmnt x nd rtrns n vl bsd n th npt. Lt's gt fmlr wth fnctns."
MIT
Python Workshop/Functions.ipynb
CalPolyPat/Python-Workshop
So clearly we can do some powerful things. Now let's see why these functions have significant power over loops.
def fib(n):
    """Return the n-th Fibonacci number, with fib(0) == fib(1) == fib(2) == 1.

    Iterative two-accumulator form: after each step `a` advances one term
    along the sequence.  For n <= 1 the loop body never runs and 1 is
    returned.
    """
    a, b = 1, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return a


def printfib(n):
    """Print the first n Fibonacci numbers, one per line."""
    for k in range(n):
        print(fib(k))


printfib(15)
1 1 1 2 3 5 8 13 21 34 55 89 144 233 377
MIT
Python Workshop/Functions.ipynb
CalPolyPat/Python-Workshop
Here, using loops within functions allows to generate the fibonacci sequence. We then write a function to print out the first n numbers. Exercises1. Write a function that takes two arguments and returns a value that uses the arguments.2. Write a power function. It should take two arguments and returns the first argument to the power of the second argument.3. is a semi-guided exercise. If you are stumped ask for help.3a. Write a function that takes the cost of a dinner as an argument and returns the cost after a .075% sales tax is added.3b. Write a function that takes the cost of a dinner and tax and adds a 20% tip to the total, then returns the total.3c. Write a function that takes a list of food names(choose them yourself) as an argument and returns the cost of purchasing all those items.3d. Write a function that takes a list of food names as an argument and returns the total cost of having a meal including tax and tip.4 . In the next cell is a 1000-digit number, write a function to solve Project Euler 8 https://projecteuler.net/problem=8
thoudigits = 7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450
_____no_output_____
MIT
Python Workshop/Functions.ipynb
CalPolyPat/Python-Workshop
LambdaNext we will look at a special type of function called a lambda. A lambda is a single line, single expression function. It is perfect for evaluating mathematical expressions like x^2 and e^sin(x^cos(x)). To write a lambda function, we use the following syntax: func = lambda : for example: xsq = lambda x:x**2 xsq(4) returns 16Lambdas will return the result of the expression. Let's check it out.
import numpy as np import matplotlib.pyplot as plt %matplotlib inline #^^^Some junk we will learn later^^^ func = lambda x:np.exp(np.sin(x**np.cos(x))) #^^^The important part^^^ plt.plot(np.linspace(0,10,1000), func(np.linspace(0,10,1000))) #^^^We will learn this next^^^
_____no_output_____
MIT
Python Workshop/Functions.ipynb
CalPolyPat/Python-Workshop
Exploring colour channels In this session, we'll be looking at how to explore the different colour channels that comprise an image.
# Session setup: OpenCV (cv2) and numpy for image processing, matplotlib for
# plotting, and the course repo's utils.imutils display helpers (jimshow,
# jimshow_channel) made importable by appending ../../CDS-VIS to sys.path.
# We need to include the home directory in our path, so we can read in our own module. import os # image processing tools import cv2 import numpy as np # utility functions for this course import sys sys.path.append(os.path.join("..", "..", "CDS-VIS")) from utils.imutils import jimshow from utils.imutils import jimshow_channel # plotting tool import matplotlib.pyplot as plt
_____no_output_____
MIT
notebooks/session2_inclass_rdkm.ipynb
Rysias/cds-visual
Rotation
# Load the sample image and display it; the bare `image.shape` expression is
# a notebook-style inspection of the (height, width, channels) tuple.
filename = os.path.join("..", "..", "CDS-VIS", "img", "terasse.jpeg") image = cv2.imread(filename) image.shape jimshow(image)
_____no_output_____
MIT
notebooks/session2_inclass_rdkm.ipynb
Rysias/cds-visual
Splitting channels
# Split the image into its three channels -- cv2 stores them in B, G, R
# order -- and display the red channel as a single-channel image.
(B, G, R) = cv2.split(image) jimshow_channel(R, "Red")
_____no_output_____
MIT
notebooks/session2_inclass_rdkm.ipynb
Rysias/cds-visual
__Empty numpy array__
# Visualize each channel in its own colour: merge it with zero-filled arrays
# (same height/width as the image, uint8) in the other two channel slots.
zeros = np.zeros(image.shape[:2], dtype = "uint8") jimshow(cv2.merge([zeros, zeros, R])) jimshow(cv2.merge([zeros, G, zeros])) jimshow(cv2.merge([B, zeros, zeros]))
_____no_output_____
MIT
notebooks/session2_inclass_rdkm.ipynb
Rysias/cds-visual
Histograms
# Convert BGR to single-channel greyscale and display it.
jimshow_channel(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), "Greyscale")
_____no_output_____
MIT
notebooks/session2_inclass_rdkm.ipynb
Rysias/cds-visual
__A note on ```COLOR_BGR2GRAY```__
# Keep the greyscale conversion in a variable for later comparison.
greyed_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
_____no_output_____
MIT
notebooks/session2_inclass_rdkm.ipynb
Rysias/cds-visual
```greyed_image.flatten() != image.flatten()``` A quick greyscale histogram using matplotlib
# Pixel-intensity histogram with 256 bins over [0, 256).
# NOTE(review): `image` here is still the 3-channel BGR image (only
# `greyed_image` was converted), so flatten() pools all three channels into
# one histogram despite the "Greyscale" title -- confirm which was intended.
# Create figure plt.figure() # Add histogram plt.hist(image.flatten(), 256, [0,256]) # Plot title plt.title("Greyscale histogram") plt.xlabel("Bins") plt.ylabel("# of Pixels") plt.show()
_____no_output_____
MIT
notebooks/session2_inclass_rdkm.ipynb
Rysias/cds-visual
Plotting color histograms ```cv2.calcHist(images, channels, mask, histSize, ranges[, hist[, accumulate]])```- images : it is the source image of type uint8 or float32 represented as โ€œ[img]โ€.- channels : it is the index of channel for which we calculate histogram. - For grayscale image, its value is [0] and - color image, you can pass [0], [1] or [2] to calculate histogram of blue, green or red channel respectively.- mask : mask image. To find histogram of full image, it is given as โ€œNoneโ€.- histSize : this represents our BIN count. For full scale, we pass [256].- ranges : this is our RANGE. Normally, it is [0,256].
# Per-channel colour histogram: one 256-bin cv2.calcHist curve per channel,
# drawn in matplotlib's "b"/"g"/"r" matching cv2's B, G, R split order.
# split channels channels = cv2.split(image) # names of colours colors = ("b", "g", "r") # create plot plt.figure() # add title plt.title("Histogram") # Add xlabel plt.xlabel("Bins") # Add ylabel plt.ylabel("# of Pixels") # for every tuple of channel, colour for (channel, color) in zip(channels, colors): # Create a histogram hist = cv2.calcHist([channel], [0], None, [256], [0, 256]) # Plot histogram plt.plot(hist, color=color) # Set limits of x-axis plt.xlim([0, 256]) # Show plot plt.show()
_____no_output_____
MIT
notebooks/session2_inclass_rdkm.ipynb
Rysias/cds-visual
[๋ชจ๋“ˆ 2.1] SageMaker ํด๋Ÿฌ์Šคํ„ฐ์—์„œ ํ›ˆ๋ จ (No VPC์—์„œ ์‹คํ–‰)์ด ๋…ธํŠธ๋ถ์€ ์•„๋ž˜์˜ ์ž‘์—…์„ ์‹คํ–‰ ํ•ฉ๋‹ˆ๋‹ค.- SageMaker Hosting Cluster ์—์„œ ํ›ˆ๋ จ์„ ์‹คํ–‰- ํ›ˆ๋ จํ•œ Job ์ด๋ฆ„์„ ์ €์žฅ - ๋‹ค์Œ ๋…ธํŠธ๋ถ์—์„œ ๋ชจ๋ธ ๋ฐฐํฌ ๋ฐ ์ถ”๋ก ์‹œ์— ์‚ฌ์šฉ ํ•ฉ๋‹ˆ๋‹ค.--- SageMaker์˜ ์„ธ์…˜์„ ์–ป๊ณ , role ์ •๋ณด๋ฅผ ๊ฐ€์ ธ์˜ต๋‹ˆ๋‹ค.- ์œ„์˜ ๋‘ ์ •๋ณด๋ฅผ ํ†ตํ•ด์„œ SageMaker Hosting Cluster์— ์—ฐ๊ฒฐํ•ฉ๋‹ˆ๋‹ค.
import os
import sagemaker
from sagemaker import get_execution_role

# Open a SageMaker session and fetch the notebook's IAM execution role;
# both are needed to talk to the SageMaker hosting cluster.
sagemaker_session = sagemaker.Session()
role = get_execution_role()
_____no_output_____
MIT
scratch/working/2.2.NoVPC-EFS-Train-Model.ipynb
gonsoomoon-ml/SageMaker-With-Secure-VPC
๋กœ์ปฌ์˜ ๋ฐ์ดํ„ฐ S3 ์—…๋กœ๋”ฉ๋กœ์ปฌ์˜ ๋ฐ์ดํ„ฐ๋ฅผ S3์— ์—…๋กœ๋”ฉํ•˜์—ฌ ํ›ˆ๋ จ์‹œ์— Input์œผ๋กœ ์‚ฌ์šฉ ํ•ฉ๋‹ˆ๋‹ค.
# S3 location of the CIFAR-10 dataset (already uploaded; the upload call
# is kept commented for reference).
# dataset_location = sagemaker_session.upload_data(path='data', key_prefix='data/DEMO-cifar10')
# display(dataset_location)
dataset_location = 's3://sagemaker-ap-northeast-2-057716757052/data/DEMO-cifar10'
dataset_location

# One-time copy of the dataset onto EFS (kept for reference).
# efs_dir = '/home/ec2-user/efs/data'
# ! ls {efs_dir} -al
# ! aws s3 cp {dataset_location} {efs_dir} --recursive

from sagemaker.inputs import FileSystemInput

# EFS file system id.
file_system_id = 'fs-38dc1558'  # 'fs-xxxxxxxx'
print(f"EFS file-system-id: {file_system_id}")

# Input-data directory paths on the file system.
# These must be normalized, absolute paths.
train_file_system_directory_path = '/data/train'
eval_file_system_directory_path = '/data/eval'
validation_file_system_directory_path = '/data/validation'
print(f'EFS file-system data input path: {train_file_system_directory_path}')
print(f'EFS file-system data input path: {eval_file_system_directory_path}')
print(f'EFS file-system data input path: {validation_file_system_directory_path}')

# The data directory must be mounted 'ro' (read-only).
file_system_access_mode = 'ro'
# File system type.
file_system_type = 'EFS'

# One input channel per dataset split, all served from the same EFS volume.
train = FileSystemInput(file_system_id=file_system_id,
                        file_system_type=file_system_type,
                        directory_path=train_file_system_directory_path,
                        file_system_access_mode=file_system_access_mode)

eval = FileSystemInput(file_system_id=file_system_id,
                       file_system_type=file_system_type,
                       directory_path=eval_file_system_directory_path,
                       file_system_access_mode=file_system_access_mode)

validation = FileSystemInput(file_system_id=file_system_id,
                             file_system_type=file_system_type,
                             directory_path=validation_file_system_directory_path,
                             file_system_access_mode=file_system_access_mode)

aws_region = 'ap-northeast-2'  # aws-region-code e.g. us-east-1
s3_bucket = 'sagemaker-ap-northeast-2-057716757052'  # your-s3-bucket-name
prefix = "cifar10/efs"  # prefix in your bucket
s3_output_location = f's3://{s3_bucket}/{prefix}/output'
print(f'S3 model output location: {s3_output_location}')

# VPC placement for the training job (required for the EFS mount).
security_group_ids = ['sg-0192524ef63ec6138']  # ['sg-xxxxxxxx']
# subnets = ['subnet-0a84bcfa36d3981e6','subnet-0304abaaefc2b1c34','subnet-0a2204b79f378b178'] # [ 'subnet-xxxxxxx', 'subnet-xxxxxxx', 'subnet-xxxxxxx']
subnets = ['subnet-0a84bcfa36d3981e6']  # [ 'subnet-xxxxxxx', 'subnet-xxxxxxx', 'subnet-xxxxxxx']

from sagemaker.tensorflow import TensorFlow

estimator = TensorFlow(base_job_name='cifar10',
                       entry_point='cifar10_keras_sm_tf2.py',
                       source_dir='training_script',
                       role=role,
                       framework_version='2.0.0',
                       py_version='py3',
                       script_mode=True,
                       hyperparameters={'epochs' : 1},
                       train_instance_count=1,
                       train_instance_type='ml.p3.2xlarge',
                       output_path=s3_output_location,
                       subnets=subnets,
                       security_group_ids=security_group_ids,
                       session = sagemaker.Session()
                       )

# Launch training with the three EFS-backed channels.
estimator.fit({'train': train,
               'validation': validation,
               'eval': eval,
               })

# Local-file alternative, kept for reference:
# estimator.fit({'train': 'file://data/train',
#                'validation': 'file://data/validation',
#                'eval': 'file://data/eval'})
train_instance_type has been renamed in sagemaker>=2. See: https://sagemaker.readthedocs.io/en/stable/v2.html for details. train_instance_count has been renamed in sagemaker>=2. See: https://sagemaker.readthedocs.io/en/stable/v2.html for details. train_instance_type has been renamed in sagemaker>=2. See: https://sagemaker.readthedocs.io/en/stable/v2.html for details.
MIT
scratch/working/2.2.NoVPC-EFS-Train-Model.ipynb
gonsoomoon-ml/SageMaker-With-Secure-VPC
VPC_Mode를 True, False 선택 **[중요] VPC_Mode에서 실행시에 True로 변경해주세요**
VPC_Mode = False

from sagemaker.tensorflow import TensorFlow

def retrieve_estimator(VPC_Mode):
    """Build the cifar10 TensorFlow estimator, optionally inside a VPC.

    Parameters
    ----------
    VPC_Mode : bool
        When True, attach the training job to the pre-created subnets and
        security group so it runs inside the VPC; when False, run without
        VPC placement.

    Returns
    -------
    sagemaker.tensorflow.TensorFlow
        The configured (not yet fitted) estimator.
    """
    # Everything except network placement is identical in both modes, so
    # build the shared settings once instead of duplicating the whole call.
    kwargs = dict(base_job_name='cifar10',
                  entry_point='cifar10_keras_sm_tf2.py',
                  source_dir='training_script',
                  role=role,
                  framework_version='2.0.0',
                  py_version='py3',
                  script_mode=True,
                  hyperparameters={'epochs': 2},
                  train_instance_count=1,
                  train_instance_type='ml.p3.8xlarge')
    if VPC_Mode:
        # VPC mode: pin the job to specific subnets and a security group.
        kwargs.update(subnets=['subnet-090c1fad32165b0fa', 'subnet-0bd7cff3909c55018'],
                      security_group_ids=['sg-0f45d634d80aef27e'])
    return TensorFlow(**kwargs)

estimator = retrieve_estimator(VPC_Mode)
train_instance_type has been renamed in sagemaker>=2. See: https://sagemaker.readthedocs.io/en/stable/v2.html for details. train_instance_count has been renamed in sagemaker>=2. See: https://sagemaker.readthedocs.io/en/stable/v2.html for details. train_instance_type has been renamed in sagemaker>=2. See: https://sagemaker.readthedocs.io/en/stable/v2.html for details.
MIT
scratch/working/2.2.NoVPC-EFS-Train-Model.ipynb
gonsoomoon-ml/SageMaker-With-Secure-VPC
ํ•™์Šต์„ ์ˆ˜ํ–‰ํ•ฉ๋‹ˆ๋‹ค. ์ด๋ฒˆ์—๋Š” ๊ฐ๊ฐ์˜ ์ฑ„๋„(`train, validation, eval`)์— S3์˜ ๋ฐ์ดํ„ฐ ์ €์žฅ ์œ„์น˜๋ฅผ ์ง€์ •ํ•ฉ๋‹ˆ๋‹ค.ํ•™์Šต ์™„๋ฃŒ ํ›„ Billable seconds๋„ ํ™•์ธํ•ด ๋ณด์„ธ์š”. Billable seconds๋Š” ์‹ค์ œ๋กœ ํ•™์Šต ์ˆ˜ํ–‰ ์‹œ ๊ณผ๊ธˆ๋˜๋Š” ์‹œ๊ฐ„์ž…๋‹ˆ๋‹ค.```Billable seconds: ```์ฐธ๊ณ ๋กœ, `ml.p2.xlarge` ์ธ์Šคํ„ด์Šค๋กœ 5 epoch ํ•™์Šต ์‹œ ์ „์ฒด 6๋ถ„-7๋ถ„์ด ์†Œ์š”๋˜๊ณ , ์‹ค์ œ ํ•™์Šต์— ์†Œ์š”๋˜๋Š” ์‹œ๊ฐ„์€ 3๋ถ„-4๋ถ„์ด ์†Œ์š”๋ฉ๋‹ˆ๋‹ค.
%%time estimator.fit({'train':'{}/train'.format(dataset_location), 'validation':'{}/validation'.format(dataset_location), 'eval':'{}/eval'.format(dataset_location)})
2021-01-27 04:02:44 Starting - Starting the training job... 2021-01-27 04:03:08 Starting - Launching requested ML instancesProfilerReport-1611720164: InProgress ......... 2021-01-27 04:04:29 Starting - Preparing the instances for training...... 2021-01-27 04:05:44 Downloading - Downloading input data 2021-01-27 04:05:44 Training - Downloading the training image... 2021-01-27 04:06:11 Training - Training image download completed. Training in progress..2021-01-27 04:06:06,541 sagemaker-containers INFO Imported framework sagemaker_tensorflow_container.training 2021-01-27 04:06:07,035 sagemaker-containers INFO Invoking user script  Training Env:  { "additional_framework_parameters": {}, "channel_input_dirs": { "eval": "/opt/ml/input/data/eval", "validation": "/opt/ml/input/data/validation", "train": "/opt/ml/input/data/train" }, "current_host": "algo-1", "framework_module": "sagemaker_tensorflow_container.training:main", "hosts": [ "algo-1" ], "hyperparameters": { "model_dir": "s3://sagemaker-ap-northeast-2-057716757052/cifar10-2021-01-27-04-02-44-183/model", "epochs": 2 }, "input_config_dir": "/opt/ml/input/config", "input_data_config": { "eval": { "TrainingInputMode": "File", "S3DistributionType": "FullyReplicated", "RecordWrapperType": "None" }, "validation": { "TrainingInputMode": "File", "S3DistributionType": "FullyReplicated", "RecordWrapperType": "None" }, "train": { "TrainingInputMode": "File", "S3DistributionType": "FullyReplicated", "RecordWrapperType": "None" } }, "input_dir": "/opt/ml/input", "is_master": true, "job_name": "cifar10-2021-01-27-04-02-44-183", "log_level": 20, "master_hostname": "algo-1", "model_dir": "/opt/ml/model", "module_dir": "s3://sagemaker-ap-northeast-2-057716757052/cifar10-2021-01-27-04-02-44-183/source/sourcedir.tar.gz", "module_name": "cifar10_keras_sm_tf2", "network_interface_name": "eth0", "num_cpus": 32, "num_gpus": 4, "output_data_dir": "/opt/ml/output/data", "output_dir": "/opt/ml/output", "output_intermediate_dir": 
"/opt/ml/output/intermediate", "resource_config": { "current_host": "algo-1", "hosts": [ "algo-1" ], "network_interface_name": "eth0" }, "user_entry_point": "cifar10_keras_sm_tf2.py" }  Environment variables:  SM_HOSTS=["algo-1"] SM_NETWORK_INTERFACE_NAME=eth0 SM_HPS={"epochs":2,"model_dir":"s3://sagemaker-ap-northeast-2-057716757052/cifar10-2021-01-27-04-02-44-183/model"} SM_USER_ENTRY_POINT=cifar10_keras_sm_tf2.py SM_FRAMEWORK_PARAMS={} SM_RESOURCE_CONFIG={"current_host":"algo-1","hosts":["algo-1"],"network_interface_name":"eth0"} SM_INPUT_DATA_CONFIG={"eval":{"RecordWrapperType":"None","S3DistributionType":"FullyReplicated","TrainingInputMode":"File"},"train":{"RecordWrapperType":"None","S3DistributionType":"FullyReplicated","TrainingInputMode":"File"},"validation":{"RecordWrapperType":"None","S3DistributionType":"FullyReplicated","TrainingInputMode":"File"}} SM_OUTPUT_DATA_DIR=/opt/ml/output/data SM_CHANNELS=["eval","train","validation"] SM_CURRENT_HOST=algo-1 SM_MODULE_NAME=cifar10_keras_sm_tf2 SM_LOG_LEVEL=20 SM_FRAMEWORK_MODULE=sagemaker_tensorflow_container.training:main SM_INPUT_DIR=/opt/ml/input SM_INPUT_CONFIG_DIR=/opt/ml/input/config SM_OUTPUT_DIR=/opt/ml/output SM_NUM_CPUS=32 SM_NUM_GPUS=4 SM_MODEL_DIR=/opt/ml/model SM_MODULE_DIR=s3://sagemaker-ap-northeast-2-057716757052/cifar10-2021-01-27-04-02-44-183/source/sourcedir.tar.gz 
SM_TRAINING_ENV={"additional_framework_parameters":{},"channel_input_dirs":{"eval":"/opt/ml/input/data/eval","train":"/opt/ml/input/data/train","validation":"/opt/ml/input/data/validation"},"current_host":"algo-1","framework_module":"sagemaker_tensorflow_container.training:main","hosts":["algo-1"],"hyperparameters":{"epochs":2,"model_dir":"s3://sagemaker-ap-northeast-2-057716757052/cifar10-2021-01-27-04-02-44-183/model"},"input_config_dir":"/opt/ml/input/config","input_data_config":{"eval":{"RecordWrapperType":"None","S3DistributionType":"FullyReplicated","TrainingInputMode":"File"},"train":{"RecordWrapperType":"None","S3DistributionType":"FullyReplicated","TrainingInputMode":"File"},"validation":{"RecordWrapperType":"None","S3DistributionType":"FullyReplicated","TrainingInputMode":"File"}},"input_dir":"/opt/ml/input","is_master":true,"job_name":"cifar10-2021-01-27-04-02-44-183","log_level":20,"master_hostname":"algo-1","model_dir":"/opt/ml/model","module_dir":"s3://sagemaker-ap-northeast-2-057716757052/cifar10-2021-01-27-04-02-44-183/source/sourcedir.tar.gz","module_name":"cifar10_keras_sm_tf2","network_interface_name":"eth0","num_cpus":32,"num_gpus":4,"output_data_dir":"/opt/ml/output/data","output_dir":"/opt/ml/output","output_intermediate_dir":"/opt/ml/output/intermediate","resource_config":{"current_host":"algo-1","hosts":["algo-1"],"network_interface_name":"eth0"},"user_entry_point":"cifar10_keras_sm_tf2.py"} SM_USER_ARGS=["--epochs","2","--model_dir","s3://sagemaker-ap-northeast-2-057716757052/cifar10-2021-01-27-04-02-44-183/model"] SM_OUTPUT_INTERMEDIATE_DIR=/opt/ml/output/intermediate SM_CHANNEL_EVAL=/opt/ml/input/data/eval SM_CHANNEL_VALIDATION=/opt/ml/input/data/validation SM_CHANNEL_TRAIN=/opt/ml/input/data/train SM_HP_MODEL_DIR=s3://sagemaker-ap-northeast-2-057716757052/cifar10-2021-01-27-04-02-44-183/model SM_HP_EPOCHS=2 
PYTHONPATH=/opt/ml/code:/usr/local/bin:/usr/lib/python36.zip:/usr/lib/python3.6:/usr/lib/python3.6/lib-dynload:/usr/local/lib/python3.6/dist-packages:/usr/lib/python3/dist-packages  Invoking script with the following command:  /usr/bin/python3 cifar10_keras_sm_tf2.py --epochs 2 --model_dir s3://sagemaker-ap-northeast-2-057716757052/cifar10-2021-01-27-04-02-44-183/model  Train for 312 steps, validate for 78 steps Epoch 1/2 #015 1/312 [..............................] - ETA: 34:31 - loss: 3.5045 - accuracy: 0.1094#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 7/312 [..............................] - ETA: 4:52 - loss: 3.1433 - accuracy: 0.1462 #010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 13/312 [>.............................] - ETA: 2:35 - loss: 2.9194 - accuracy: 0.1587#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 19/312 [>.............................] 
- ETA: 1:45 - loss: 2.7623 - accuracy: 0.1641#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 26/312 [=>............................] - ETA: 1:15 - loss: 2.6259 - accuracy: 0.1683#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 32/312 [==>...........................] - ETA: 1:00 - loss: 2.5445 - accuracy: 0.1753#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 39/312 [==>...........................] - ETA: 48s - loss: 2.4627 - accuracy: 0.1873 #010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 45/312 [===>..........................] 
- ETA: 41s - loss: 2.4148 - accuracy: 0.1951#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 51/312 [===>..........................] - ETA: 36s - loss: 2.3721 - accuracy: 0.2028#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 57/312 [====>.........................] - ETA: 31s - loss: 2.3383 - accuracy: 0.2057#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 64/312 [=====>........................] - ETA: 27s - loss: 2.2982 - accuracy: 0.2120#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 71/312 [=====>........................] 
- ETA: 24s - loss: 2.2635 - accuracy: 0.2171#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 78/312 [======>.......................] - ETA: 21s - loss: 2.2315 - accuracy: 0.2229#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 85/312 [=======>......................] - ETA: 19s - loss: 2.2051 - accuracy: 0.2268#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 92/312 [=======>......................] - ETA: 17s - loss: 2.1798 - accuracy: 0.2320#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 99/312 [========>.....................] 
- ETA: 16s - loss: 2.1550 - accuracy: 0.2371#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015106/312 [=========>....................] - ETA: 14s - loss: 2.1355 - accuracy: 0.2412#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015113/312 [=========>....................] - ETA: 13s - loss: 2.1166 - accuracy: 0.2458#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015120/312 [==========>...................] - ETA: 12s - loss: 2.0997 - accuracy: 0.2493#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015127/312 [===========>..................] 
- ETA: 11s - loss: 2.0852 - accuracy: 0.2542#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015134/312 [===========>..................] - ETA: 10s - loss: 2.0716 - accuracy: 0.2577#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015140/312 [============>.................] - ETA: 9s - loss: 2.0586 - accuracy: 0.2616 #010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015147/312 [=============>................] - ETA: 8s - loss: 2.0466 - accuracy: 0.2645#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015154/312 [=============>................] 
- ETA: 8s - loss: 2.0331 - accuracy: 0.2677#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015161/312 [==============>...............] - ETA: 7s - loss: 2.0210 - accuracy: 0.2723#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015168/312 [===============>..............] - ETA: 6s - loss: 2.0082 - accuracy: 0.2766#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015175/312 [===============>..............] - ETA: 6s - loss: 1.9988 - accuracy: 0.2790#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015181/312 [================>.............] 
- ETA: 5s - loss: 1.9901 - accuracy: 0.2804#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015188/312 [=================>............] - ETA: 5s - loss: 1.9790 - accuracy: 0.2836#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015195/312 [=================>............] - ETA: 4s - loss: 1.9695 - accuracy: 0.2856#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015202/312 [==================>...........] - ETA: 4s - loss: 1.9605 - accuracy: 0.2881#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015209/312 [===================>..........] 
- ETA: 4s - loss: 1.9531 - accuracy: 0.2906#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015216/312 [===================>..........] - ETA: 3s - loss: 1.9457 - accuracy: 0.2930#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015223/312 [====================>.........] - ETA: 3s - loss: 1.9350 - accuracy: 0.2959#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015230/312 [=====================>........] - ETA: 3s - loss: 1.9290 - accuracy: 0.2975#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015237/312 [=====================>........] 
- ETA: 2s - loss: 1.9219 - accuracy: 0.2991#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015244/312 [======================>.......] - ETA: 2s - loss: 1.9130 - accuracy: 0.3024#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015251/312 [=======================>......] - ETA: 2s - loss: 1.9066 - accuracy: 0.3046#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015258/312 [=======================>......] - ETA: 1s - loss: 1.9006 - accuracy: 0.3065#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015264/312 [========================>.....] 
- ETA: 1s - loss: 1.8959 - accuracy: 0.3079#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015271/312 [=========================>....] - ETA: 1s - loss: 1.8884 - accuracy: 0.3104#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015278/312 [=========================>....] - ETA: 1s - loss: 1.8834 - accuracy: 0.3122#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015285/312 [==========================>...] - ETA: 0s - loss: 1.8764 - accuracy: 0.3148#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015292/312 [===========================>..] 
- ETA: 0s - loss: 1.8714 - accuracy: 0.3172#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015299/312 [===========================>..] - ETA: 0s - loss: 1.8642 - accuracy: 0.3197#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015305/312 [============================>.] - ETA: 0s - loss: 1.8589 - accuracy: 0.3213#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015312/312 [==============================] - 10s 32ms/step - loss: 1.8530 - accuracy: 0.3232 - val_loss: 2.0282 - val_accuracy: 0.3226 Epoch 2/2 #015 1/312 [..............................] - ETA: 2s - loss: 1.4358 - accuracy: 0.4531#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 8/312 [..............................] 
- ETA: 2s - loss: 1.5428 - accuracy: 0.4131#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 15/312 [>.............................] - ETA: 2s - loss: 1.5658 - accuracy: 0.4026#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 22/312 [=>............................] - ETA: 2s - loss: 1.5621 - accuracy: 0.4116#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 29/312 [=>............................] - ETA: 2s - loss: 1.5536 - accuracy: 0.4181#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 36/312 [==>...........................] 
- ETA: 2s - loss: 1.5312 - accuracy: 0.4316#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 43/312 [===>..........................] - ETA: 2s - loss: 1.5190 - accuracy: 0.4391#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 50/312 [===>..........................] - ETA: 2s - loss: 1.5194 - accuracy: 0.4364#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 56/312 [====>.........................] - ETA: 2s - loss: 1.5234 - accuracy: 0.4351#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 63/312 [=====>........................] 
- ETA: 1s - loss: 1.5260 - accuracy: 0.4339#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 70/312 [=====>........................] - ETA: 1s - loss: 1.5249 - accuracy: 0.4376#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 77/312 [======>.......................] - ETA: 1s - loss: 1.5162 - accuracy: 0.4421#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 84/312 [=======>......................] - ETA: 1s - loss: 1.5111 - accuracy: 0.4443#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 91/312 [=======>......................] 
- ETA: 1s - loss: 1.5092 - accuracy: 0.4439#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 98/312 [========>.....................] - ETA: 1s - loss: 1.5105 - accuracy: 0.4430#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015105/312 [=========>....................] - ETA: 1s - loss: 1.5119 - accuracy: 0.4424#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015112/312 [=========>....................] - ETA: 1s - loss: 1.5089 - accuracy: 0.4440#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015119/312 [==========>...................] 
- ETA: 1s - loss: 1.5087 - accuracy: 0.4458#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015126/312 [===========>..................] - ETA: 1s - loss: 1.5124 - accuracy: 0.4441#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015132/312 [===========>..................] - ETA: 1s - loss: 1.5132 - accuracy: 0.4441#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015139/312 [============>.................] - ETA: 1s - loss: 1.5099 - accuracy: 0.4453#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015146/312 [=============>................] 
- ETA: 1s - loss: 1.5104 - accuracy: 0.4464#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015153/312 [=============>................] - ETA: 1s - loss: 1.5065 - accuracy: 0.4489#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015160/312 [==============>...............] - ETA: 1s - loss: 1.5054 - accuracy: 0.4499#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015166/312 [==============>...............] - ETA: 1s - loss: 1.5030 - accuracy: 0.4507#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015172/312 [===============>..............] 
- ETA: 1s - loss: 1.5006 - accuracy: 0.4514#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015179/312 [================>.............] - ETA: 1s - loss: 1.4972 - accuracy: 0.4527#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015186/312 [================>.............] - ETA: 0s - loss: 1.4946 - accuracy: 0.4536#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015193/312 [=================>............] - ETA: 0s - loss: 1.4922 - accuracy: 0.4547#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015200/312 [==================>...........] 
- ETA: 0s - loss: 1.4917 - accuracy: 0.4553#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015207/312 [==================>...........] - ETA: 0s - loss: 1.4904 - accuracy: 0.4556#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015214/312 [===================>..........] - ETA: 0s - loss: 1.4877 - accuracy: 0.4567#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015221/312 [====================>.........] - ETA: 0s - loss: 1.4865 - accuracy: 0.4576#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015228/312 [====================>.........] 
- ETA: 0s - loss: 1.4846 - accuracy: 0.4582#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015235/312 [=====================>........] - ETA: 0s - loss: 1.4813 - accuracy: 0.4593#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015242/312 [======================>.......] - ETA: 0s - loss: 1.4780 - accuracy: 0.4611#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015249/312 [======================>.......] - ETA: 0s - loss: 1.4757 - accuracy: 0.4621#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015255/312 [=======================>......] 
- ETA: 0s - loss: 1.4742 - accuracy: 0.4624#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015262/312 [========================>.....] - ETA: 0s - loss: 1.4709 - accuracy: 0.4642#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015268/312 [========================>.....] - ETA: 0s - loss: 1.4689 - accuracy: 0.4651#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015275/312 [=========================>....] - ETA: 0s - loss: 1.4664 - accuracy: 0.4662#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015282/312 [==========================>...] 
- ETA: 0s - loss: 1.4634 - accuracy: 0.4671#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015289/312 [==========================>...] - ETA: 0s - loss: 1.4600 - accuracy: 0.4679#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015296/312 [===========================>..] - ETA: 0s - loss: 1.4562 - accuracy: 0.4693#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015303/312 [============================>.] - ETA: 0s - loss: 1.4529 - accuracy: 0.4707#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015310/312 [============================>.] 
- ETA: 0s - loss: 1.4507 - accuracy: 0.4713#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015312/312 [==============================] - 3s 10ms/step - loss: 1.4498 - accuracy: 0.4717 - val_loss: 1.6843 - val_accuracy: 0.4161 2021-01-27 04:12:46 Uploading - Uploading generated training model2021-01-27 04:12:39.226548: W tensorflow/python/util/util.cc:299] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/resource_variable_ops.py:1781: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version. Instructions for updating: If using Keras pass *_constraint arguments to layers. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/resource_variable_ops.py:1781: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version. Instructions for updating: If using Keras pass *_constraint arguments to layers. INFO:tensorflow:Assets written to: /opt/ml/model/1/assets INFO:tensorflow:Assets written to: /opt/ml/model/1/assets 2021-01-27 04:12:42,835 sagemaker-containers INFO Reporting training SUCCESS 2021-01-27 04:13:16 Completed - Training job completed ProfilerReport-1611720164: NoIssuesFound Training seconds: 452 Billable seconds: 452 CPU times: user 1.59 s, sys: 1.44 ms, total: 1.59 s Wall time: 10min 46s
MIT
scratch/working/2.2.NoVPC-EFS-Train-Model.ipynb
gonsoomoon-ml/SageMaker-With-Secure-VPC
training_job_name ์ €์žฅ

ํ˜„์žฌ์˜ training_job_name์„ ์ €์žฅํ•ฉ๋‹ˆ๋‹ค.
- training_job_name์—๋Š” ํ›ˆ๋ จ ๊ด€๋ จ ๋‚ด์šฉ ๋ฐ ํ›ˆ๋ จ ๊ฒฐ๊ณผ์ธ **Model Artifact** ํŒŒ์ผ์˜ S3 ๊ฒฝ๋กœ๊ฐ€ ํฌํ•จ๋ฉ๋‹ˆ๋‹ค.
# Save the name of the SageMaker training job that was just run so that
# later notebooks can locate this run (and its Model Artifact on S3).
# NOTE(review): relies on `estimator` created in an earlier cell; `_current_job_name`
# is a private attribute of the SageMaker Estimator — confirm on SDK upgrades.
train_job_name = estimator._current_job_name
# Persist the variable across notebook sessions via IPython's %store magic.
%store train_job_name
Stored 'train_job_name' (str)
MIT
scratch/working/2.2.NoVPC-EFS-Train-Model.ipynb
gonsoomoon-ml/SageMaker-With-Secure-VPC
Running scripts with python shell
#!pip install tensorflow==1.14.0 #!pip install tensorflow-base==1.14.0 #!pip install tensorflow-gpu==1.14.0 %tensorflow_version 1.x ! python main_train.py --config config_default.json
_____no_output_____
MIT
notebooks/test_0_lstm_shell_colab.ipynb
SPRACE/track-ml
Plot Predicted Data
# Load the test inputs, ground truth and model predictions saved by the
# training run, stitch them into full tracks, and plot predicted vs. true.
import os
import json
import numpy as np
import pandas as pd

# Experiment configuration: coordinate system, normalisation flag, model name.
configs = json.load(open('config_default.json', 'r'))

cylindrical = configs['data']['cylindrical'] # set to polar or cartesian coordinates
normalise = configs['data']['normalise']

name = configs['model']['name']

# Coordinate tag embedded in the result-file names.
if cylindrical:
    coord = 'cylin'
else:
    coord = 'xyz'

# CSVs written by the evaluation step: input hits, ground truth, predictions.
path1 = 'results/x_true_%s_%s.csv' % (name, coord)
path2 = 'results/y_true_%s_%s.csv' % (name, coord)
path3 = 'results/y_pred_%s_%s.csv' % (name, coord)

print('loading from .. %s' % path1)
print('loading from .. %s' % path2)
print('loading from .. %s' % path3)

df_test = pd.read_csv(path1, header=None)
df_true = pd.read_csv(path2, header=None)
df_pred = pd.read_csv(path3, header=None)

print('shape df_test ', df_test.shape)
print('shape df_true ', df_true.shape)
print('shape df_pred ', df_pred.shape)

# concat
#y_true = pd.concat([df_test, df_true], axis = 1, ignore_index = True)
#y_pred = pd.concat([df_test, df_pred], axis = 1, ignore_index = True)

# Prepend the input hits to both truth and prediction so each row is a
# complete track (input hits followed by true/predicted continuation).
y_true = np.concatenate([df_test, df_true], axis = 1)
y_pred = np.concatenate([df_test, df_pred], axis = 1)

y_true = pd.DataFrame(y_true)
y_pred = pd.DataFrame(y_pred)

#y_true.name = 'real'
#y_pred.name = 'pred'

# The plotting helper reads columns.name to label the two series.
y_pred.columns.name = 'pred'
y_true.columns.name = 'real'

print('size y_true ', y_true.shape)
print('size y_pred ', y_pred.shape)

# NOTE(review): project-local helpers; `track_plot_xyz` comes from this import.
from core.utils.utils import *
import warnings

# Number of tracks to overlay in the comparison figure.
N_tracks = 30
path_html = ''
name = configs['model']['name']

fig = track_plot_xyz([y_true, y_pred], n_hits = 10, cylindrical = cylindrical,
                     n_tracks = N_tracks,
                     title='Track Prediction #10 Hit - Model %s (Nearest hits)' % name.upper())

fig.show()
_____no_output_____
MIT
notebooks/test_0_lstm_shell_colab.ipynb
SPRACE/track-ml
Matrix Profile IntroductionThe matrix profile (MP) is a data structure and associated algorithms that helps solve the dual problem of anomaly detection and motif discovery. It is robust, scalable and largely parameter-free.MP can be combined with other algorithms to accomplish:* Motif discovery* Time series chains* Anomaly discovery* Joins* Semantic segmentationmatrixprofile-ts offers 3 different algorithms to compute Matrix Profile:* STAMP (Scalable Time Series Anytime Matrix Profile) - Each distance profile is independent of other distance profiles, the order in which they are computed can be random. It is an anytime algorithm.* STOMP (Scalable Time Series Ordered Matrix Profile) - This algorithm is an exact ordered algorithm. It is significantly faster than STAMP.* SCRIMP++ (Scalable Column Independent Matrix Profile) - This algorithm combines the anytime component of STAMP with the speed of STOMP.See: https://towardsdatascience.com/introduction-to-matrix-profiles-5568f3375d90 Code Example
!pip install matrixprofile-ts import pandas as pd ## example data importing data = pd.read_csv('https://raw.githubusercontent.com/iotanalytics/IoTTutorial/main/data/SCG_data.csv').drop('Unnamed: 0',1).to_numpy()[0:20,:1000] import operator import numpy as np import matplotlib.pyplot as plt from matrixprofile import * import numpy as np from datetime import datetime import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap from sklearn import neighbors, datasets # Pull a portion of the data pattern = data[10,:] + max(abs(data[10,:])) # Compute Matrix Profile m = 10 mp = matrixProfile.stomp(pattern,m) #Append np.nan to Matrix profile to enable plotting against raw data mp_adj = np.append(mp[0],np.zeros(m-1)+np.nan) #Plot the signal data fig, (ax1, ax2) = plt.subplots(2,1,sharex=True,figsize=(20,10)) ax1.plot(np.arange(len(pattern)),pattern) ax1.set_ylabel('Signal', size=22) #Plot the Matrix Profile ax2.plot(np.arange(len(mp_adj)),mp_adj, label="Matrix Profile", color='red') ax2.set_ylabel('Matrix Profile', size=22) ax2.set_xlabel('Time', size=22);
_____no_output_____
MIT
code/preprocessing_and_decomposition/Matrix_Profile.ipynb
iotanalytics/IoTTutorial
Steps to build a Neural Network
1. Create an empty model (Sequential/Model)
2. Add layers
3. Compile the model
4. Fit the model on the training data
# Train a small dense ANN classifier on Fashion-MNIST.
import tensorflow.keras.datasets as kd

data = kd.fashion_mnist.load_data()
(xtrain, ytrain), (xtest, ytest) = data
xtrain.shape

import matplotlib.pyplot as plt
plt.imshow(xtrain[0, :, :], cmap='gray_r')
ytrain[0]

# Flatten each 28x28 image into a 784-vector for the dense layers.
xtrain1 = xtrain.reshape(-1, 28 * 28)
xtest1 = xtest.reshape(-1, 28 * 28)
xtrain1.shape

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# 1st layer = 128 ReLU units over the 784 flattened pixels, then another
# 128-unit hidden layer, then a 10-way softmax output.
# (FIX: the bare text "1st layer =" in the original cell was a SyntaxError;
#  it is now this comment.)
model_ann = Sequential()
model_ann.add(Dense(units=128, input_shape=(784,), activation='relu'))
model_ann.add(Dense(units=128, activation='relu'))
model_ann.add(Dense(units=10, activation='softmax'))

# sparse_categorical_crossentropy: labels are integer class ids 0-9.
model_ann.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model_ann.summary()

history = model_ann.fit(xtrain1, ytrain, epochs=10)

plt.plot(history.history['loss'])
plt.plot(history.history['accuracy'])
plt.grid()
# (FIX: removed a dangling "plt." line that was a SyntaxError.)
plt.xticks(range(1, 11))
plt.xlabel('Epochs-->')
plt.show()

ypred = model_ann.predict(xtest1)

# Human-readable names of the 10 Fashion-MNIST classes.
# (FIX: `labels` was used below but never defined in the original cell.)
labels = {
    0: 'T-shirt/top', 1: 'Trouser', 2: 'Pullover', 3: 'Dress', 4: 'Coat',
    5: 'Sandal', 6: 'Shirt', 7: 'Sneaker', 8: 'Bag', 9: 'Ankle boot',
}
labels.get(ytest[0])
ypred[0].argmax()

model_ann.evaluate(xtest1, ytest)
313/313 [==============================] - 1s 2ms/step - loss: 0.4793 - accuracy: 0.8335
MIT
day37_ML_ANN_RNN.ipynb
DynamicEngine2001/Programming-Codes
Churn Modelling
# Load the bank-churn dataset and one-hot encode its categorical columns.
import pandas as pd

df = pd.read_csv('Churn_Modelling.csv')
df
df.info()

# get_dummies converts every object/categorical column into indicator columns.
df1 = pd.get_dummies(df)
df1.head()
_____no_output_____
MIT
day37_ML_ANN_RNN.ipynb
DynamicEngine2001/Programming-Codes
Recurrent Neural Network
# Build sliding-window samples for an RNN: 20 past FB 'Open' prices -> next one.
import numpy as np

# NOTE(review): relies on `pd` imported in an earlier cell.
stock_data = pd.read_csv('stock_data.csv')

# Keep only the 'Open' column for Facebook rows.
fb = stock_data[['Open']] [stock_data['Stock']=='FB'].copy()
fb.head()

fb = fb.values  # numpy array from here on
fb.shape

x = []
y = []
# Each sample: the 20 opens before index i predict the open at index i.
# (FIX: original appended fb['Open'].valuesfb[i-20:1] — a garbled expression
#  that is both a SyntaxError and a wrong slice on the already-converted array.)
for i in range(20, len(fb)):
    x.append(fb[i - 20:i].tolist())
    y.append(fb[i].tolist())
_____no_output_____
MIT
day37_ML_ANN_RNN.ipynb
DynamicEngine2001/Programming-Codes
Python Modules
# %%writefile saves the cell body as weather.py in the working directory.
%%writefile weather.py

def prognosis():
    print("It will rain today")

# Import the module we just wrote and call its function.
import weather
weather.prognosis()
It will rain today
MIT
Python_Core/Python Modules and Imports.ipynb
ValRCS/RCS_Python_11
How does Python know from where to import packages/modules from?
# Python imports work by searching the directories listed in sys.path. import sys sys.path ## "__main__" usage # A module can discover whether or not it is running in the main scope by checking its own __name__, # which allows a common idiom for conditionally executing code in a module when it is run as a script or with python -m # but not when it is imported: %%writefile hw.py #!/usr/bin/env python def hw(): print("Running Main") def hw2(): print("Hello 2") if __name__ == "__main__": # execute only if run as a script print("Running as script") hw() hw2() import main import hw main.main() hw.hw2() # Running on all 3 OSes from command line: python main.py
_____no_output_____
MIT
Python_Core/Python Modules and Imports.ipynb
ValRCS/RCS_Python_11
Make main.py self running on Linux (also should work on MacOS): Add !/usr/bin/env python to first line of scriptmark it executable using need to change permissions too!$ chmod +x main.py Making Standalone .EXEs for Python in Windows * http://www.py2exe.org/ used to be for Python 2 , now supposedly Python 3 as well* http://www.pyinstaller.org/ Tutorial: https://medium.com/dreamcatcher-its-blog/making-an-stand-alone-executable-from-a-python-script-using-pyinstaller-d1df9170e263 Need to create exe on a similar system as target system!
# Exercise Write a function which returns a list of fibonacci numbers up to starting with 1, 1, 2, 3, 5 up to the nth. So Fib(4) would return [1,1,2,3]
_____no_output_____
MIT
Python_Core/Python Modules and Imports.ipynb
ValRCS/RCS_Python_11
![Fibo](https://upload.wikimedia.org/wikipedia/commons/thumb/d/db/34%2A21-FibonacciBlocks.png/450px-34%2A21-FibonacciBlocks.png) ![Fibonacci](https://upload.wikimedia.org/wikipedia/commons/thumb/8/8e/Leonardo_da_Pisa.jpg/330px-Leonardo_da_Pisa.jpg)
%%writefile fibo.py # Fibonacci numbers module def fib(n): # write Fibonacci series up to n a, b = 1 1 while b < n: print(b, end=' ') a, b = b, a+b print() def fib2(n): # return Fibonacci series up to n result = [] a, b = 1, 1 while b < n: result.append(b) a, b = b, a+b return result import fibo fibo.fib(100) fibo.fib2(100) fib=fibo.fib
_____no_output_____
MIT
Python_Core/Python Modules and Imports.ipynb
ValRCS/RCS_Python_11
If you intend to use a function often you can assign it to a local name:
# Call the locally-bound fib: prints the Fibonacci numbers below 300.
fib(300)
1 1 2 3 5 8 13 21 34 55 89 144 233
MIT
Python_Core/Python Modules and Imports.ipynb
ValRCS/RCS_Python_11
There is a variant of the import statement that imports names from a module directly into the importing module's symbol table.
from fibo import fib, fib2 # we overwrote fib=fibo.fib fib(100) fib2(200)
_____no_output_____
MIT
Python_Core/Python Modules and Imports.ipynb
ValRCS/RCS_Python_11
This does not introduce the module name from which the imports are taken in the local symbol table (so in the example, fibo is not defined). There is even a variant to import all names that a module defines: **NOT RECOMMENDED**
## DO not do this Namespace collission possible!! from fibo import * fib(400)
1 1 2 3 5 8 13 21 34 55 89 144 233 377
MIT
Python_Core/Python Modules and Imports.ipynb
ValRCS/RCS_Python_11
If the module name is followed by as, then the name following as is bound directly to the imported module.
# Bind the whole module to a shorter alias.
import fibo as fib
dir(fib)
fib.fib(50)

### It can also be used when utilising from with similar effects:
from fibo import fib as fibonacci
fibonacci(200)
1 1 2 3 5 8 13 21 34 55 89 144
MIT
Python_Core/Python Modules and Imports.ipynb
ValRCS/RCS_Python_11
Executing modules as scripts

When you run a Python module with `python fibo.py`, the code in the module will be executed, just as if you imported it, but with \_\_name\_\_ set to "\_\_main\_\_". That means that by adding this code at the end of your module:
# Write a Fibonacci module that also runs as a script:
# `python fibbo.py N` prints the series up to N.
%%writefile fibbo.py
# Fibonacci numbers module

def fib(n):    # write Fibonacci series up to n
    a, b = 0, 1
    while b < n:
        print(b, end=' ')
        a, b = b, a+b
    print()

def fib2(n):   # return Fibonacci series up to n
    result = []
    a, b = 0, 1
    while b < n:
        result.append(b)
        a, b = b, a+b
    return result

if __name__ == "__main__":
    # Only runs when executed as a script; argv[1] is parsed as a base-10 int.
    import sys
    fib(int(sys.argv[1], 10))

import fibbo as fi
fi.fib(200)
1 1 2 3 5 8 13 21 34 55 89 144
MIT
Python_Core/Python Modules and Imports.ipynb
ValRCS/RCS_Python_11
This is often used either to provide a convenient user interface to a module, or for testing purposes (running the module as a script executes a test suite). The Module Search Path: When a module named spam is imported, the interpreter first searches for a built-in module with that name. If not found, it then searches for a file named spam.py in a list of directories given by the variable sys.path. sys.path is initialized from these locations:* The directory containing the input script (or the current directory when no file is specified).* PYTHONPATH (a list of directory names, with the same syntax as the shell variable PATH).* The installation-dependent default. Packages are a way of structuring Python's module namespace by using "dotted module names". For example, the module name A.B designates a submodule named B in a package named A. Just like the use of modules saves the authors of different modules from having to worry about each other's global variable names, the use of dotted module names saves the authors of multi-module packages like NumPy or Pillow from having to worry about each other's module names.
sound/ Top-level package __init__.py Initialize the sound package formats/ Subpackage for file format conversions __init__.py wavread.py wavwrite.py aiffread.py aiffwrite.py auread.py auwrite.py ... effects/ Subpackage for sound effects __init__.py echo.py surround.py reverse.py ... filters/ Subpackage for filters __init__.py equalizer.py vocoder.py karaoke.py ...
_____no_output_____
MIT
Python_Core/Python Modules and Imports.ipynb
ValRCS/RCS_Python_11
Quick analysis
from phimal_utilities.analysis import Results import matplotlib.pyplot as plt import seaborn as sns sns.set(context='notebook', style='white') %config InlineBackend.figure_format = 'svg' data_mt = Results('runs/testing_multitask_unnormalized//') data_bl = Results('runs/testing_normal_unnormalized//') keys = data_mt.keys fig, axes = plt.subplots(figsize=(10, 3), constrained_layout=True, ncols=2) ax = axes[0] ax.semilogy(data_bl.df.index, data_bl.df[keys['mse']], label='Baseline') ax.semilogy(data_mt.df.index, data_mt.df[keys['mse']], label='Multitask') ax.set_title('MSE') ax.set_xlabel('Epoch', weight='bold') ax.set_ylabel('Cost', weight='bold') ax.legend() #ax.set_xlim([0, 8000]) ax = axes[1] ax.semilogy(data_bl.df.index, data_bl.df[keys['reg']], label='Baseline') ax.semilogy(data_mt.df.index, data_mt.df[keys['reg']], label='Multitask') ax.set_title('Regression') ax.set_xlabel('Epoch', weight='bold') ax.set_ylabel('Cost', weight='bold') ax.legend() #ax.set_xlim([0, 8000]) fig.show() fig, axes = plt.subplots(ncols=3, constrained_layout=True, figsize=(15, 4)) ax = axes[0] ax.plot(data_bl.df.index, data_bl.df[keys['coeffs']]) ax.plot(data_bl.df.index, data_bl.df[keys['coeffs'][2]], lw=3) ax.plot(data_bl.df.index, data_bl.df[keys['coeffs'][5]], lw=3) ax.set_ylim([-2, 2]) ax.set_title('Coefficients baseline') ax.set_xlabel('Epoch', weight='bold') ax.set_ylabel('Cost', weight='bold') #ax.set_xlim([0, 8000]) ax = axes[1] ax.plot(data_mt.df.index, data_mt.df[keys['coeffs']]) ax.plot(data_mt.df.index, data_mt.df[keys['coeffs'][2]], lw=3) ax.plot(data_mt.df.index, data_mt.df[keys['coeffs'][5]], lw=3) ax.set_ylim([-2, 2]) ax.set_title('Coefficients Multitask') ax.set_xlabel('Epoch', weight='bold') ax.set_ylabel('Cost', weight='bold') #ax.set_xlim([0, 8000]) ax = axes[2] true_coeffs = np.zeros(len(keys['unscaled_coeffs'])) true_coeffs[2] = 0.1 true_coeffs[5] = -1 ax.semilogy(data_bl.df.index, np.mean(np.abs(data_bl.df[keys['unscaled_coeffs']] - true_coeffs), axis=1), 
label='Baseline') ax.semilogy(data_mt.df.index, np.mean(np.abs(data_mt.df[keys['unscaled_coeffs']] - true_coeffs), axis=1), label='Baseline') ax.set_ylim([-5, 2]) ax.legend() fig.show()
_____no_output_____
MIT
notebooks/testing_multitask.ipynb
GJBoth/MultiTaskPINN
What is `torch.nn` *really*?============================by Jeremy Howard, `fast.ai `_. Thanks to Rachel Thomas and Francisco Ingham. We recommend running this tutorial as a notebook, not a script. To download the notebook (.ipynb) file,click `here `_ .PyTorch provides the elegantly designed modules and classes `torch.nn `_ ,`torch.optim `_ ,`Dataset `_ ,and `DataLoader `_to help you create and train neural networks.In order to fully utilize their power and customizethem for your problem, you need to really understand exactly what they'redoing. To develop this understanding, we will first train basic neural neton the MNIST data set without using any features from these models; we willinitially only use the most basic PyTorch tensor functionality. Then, we willincrementally add one feature from ``torch.nn``, ``torch.optim``, ``Dataset``, or``DataLoader`` at a time, showing exactly what each piece does, and how itworks to make the code either more concise, or more flexible.**This tutorial assumes you already have PyTorch installed, and are familiarwith the basics of tensor operations.** (If you're familiar with Numpy arrayoperations, you'll find the PyTorch tensor operations used here nearly identical).MNIST data setup----------------We will use the classic `MNIST `_ dataset,which consists of black-and-white images of hand-drawn digits (between 0 and 9).We will use `pathlib `_for dealing with paths (part of the Python 3 standard library), and willdownload the dataset using`requests `_. We will onlyimport modules when we use them, so you can see exactly what's beingused at each point.
from pathlib import Path
import requests

# Local cache location for the raw MNIST pickle.
DATA_PATH = Path("data")
PATH = DATA_PATH / "mnist"
PATH.mkdir(parents=True, exist_ok=True)

URL = "http://deeplearning.net/data/mnist/"
FILENAME = "mnist.pkl.gz"

# Download once; subsequent runs reuse the file already on disk.
target = PATH / FILENAME
if not target.exists():
    response = requests.get(URL + FILENAME)
    target.open("wb").write(response.content)
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
This dataset is in numpy array format, and has been stored using pickle,a python-specific format for serializing data.
import pickle
import gzip

# The pickle holds ((x_train, y_train), (x_valid, y_valid), test) as numpy
# arrays serialized with latin-1 encoding; the test split is discarded here.
with gzip.open((PATH / FILENAME).as_posix(), "rb") as f:
    train_pair, valid_pair, _ = pickle.load(f, encoding="latin-1")

(x_train, y_train) = train_pair
(x_valid, y_valid) = valid_pair
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Each image is 28 x 28, and is being stored as a flattened row of length784 (=28x28). Let's take a look at one; we need to reshape it to 2dfirst.
from matplotlib import pyplot
import numpy as np

# Each row of x_train is a flattened 28x28 grayscale digit; reshape to 2-D to display it.
pyplot.imshow(x_train[0].reshape((28, 28)), cmap="gray")
print(x_train.shape)
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
PyTorch uses ``torch.tensor``, rather than numpy arrays, so we need toconvert our data.
import torch

# Convert the four numpy arrays to torch tensors so autograd can track them.
x_train, y_train, x_valid, y_valid = (
    torch.tensor(arr) for arr in (x_train, y_train, x_valid, y_valid)
)

n, c = x_train.shape  # n = number of training images, c = 784 pixels each
x_train, x_train.shape, y_train.min(), y_train.max()  # no-op expression left from notebook exploration
print(x_train, y_train)
print(x_train.shape)
print(y_train.min(), y_train.max())
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Neural net from scratch (no torch.nn)---------------------------------------------Let's first create a model using nothing but PyTorch tensor operations. We're assumingyou're already familiar with the basics of neural networks. (If you're not, you canlearn them at `course.fast.ai `_).PyTorch provides methods to create random or zero-filled tensors, which we willuse to create our weights and bias for a simple linear model. These are just regulartensors, with one very special addition: we tell PyTorch that they require agradient. This causes PyTorch to record all of the operations done on the tensor,so that it can calculate the gradient during back-propagation *automatically*!For the weights, we set ``requires_grad`` **after** the initialization, since wedon't want that step included in the gradient. (Note that a trailling ``_`` inPyTorch signifies that the operation is performed in-place.)NoteWe are initializing the weights here with `Xavier initialisation `_ (by multiplying with 1/sqrt(n)).
import math

# Xavier-style initialisation: scale the random weights by 1/sqrt(fan_in).
# requires_grad is enabled only *after* the init so that the scaling itself
# is not recorded by autograd.
weights = (torch.randn(784, 10) * (1.0 / math.sqrt(784))).requires_grad_()
bias = torch.zeros(10, requires_grad=True)
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Thanks to PyTorch's ability to calculate gradients automatically, we canuse any standard Python function (or callable object) as a model! Solet's just write a plain matrix multiplication and broadcasted additionto create a simple linear model. We also need an activation function, sowe'll write `log_softmax` and use it. Remember: although PyTorchprovides lots of pre-written loss functions, activation functions, andso forth, you can easily write your own using plain python. PyTorch willeven create fast GPU or vectorized CPU code for your functionautomatically.
def log_softmax(x):
    """Log-softmax over the last dimension.

    Uses torch.logsumexp, which applies the log-sum-exp trick internally, so
    large logits do not overflow; the naive ``x.exp().sum(-1).log()`` form
    returns inf/nan once any logit exceeds ~88 in float32.
    """
    return x - torch.logsumexp(x, dim=-1, keepdim=True)


def model(xb):
    """Linear scores followed by log-softmax (weights/bias are module globals)."""
    return log_softmax(xb @ weights + bias)
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
In the above, the ``@`` stands for the dot product operation. We will callour function on one batch of data (in this case, 64 images). This isone *forward pass*. Note that our predictions won't be any better thanrandom at this stage, since we start with random weights.
bs = 64 # batch size xb = x_train[0:bs] # a mini-batch from x preds = model(xb) # predictions preds[0], preds.shape print(preds[0], preds.shape)
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
As you see, the ``preds`` tensor contains not only the tensor values, but also agradient function. We'll use this later to do backprop.Let's implement negative log-likelihood to use as the loss function(again, we can just use standard Python):
def nll(input, target):
    """Negative log-likelihood of `target` under log-probabilities `input`.

    `input` is (batch, classes) of log-probabilities; `target` holds the
    correct class index per row. Returns the mean of -log p(target).
    """
    rows = range(target.shape[0])
    picked = input[rows, target]  # log-prob of the true class, per row
    return -picked.mean()


loss_func = nll
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Let's check our loss with our random model, so we can see if we improveafter a backprop pass later.
# Loss of the untrained (random-weight) model on the first mini-batch, as a baseline.
yb = y_train[0:bs]
print(loss_func(preds, yb))
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Let's also implement a function to calculate the accuracy of our model.For each prediction, if the index with the largest value matches thetarget value, then the prediction was correct.
def accuracy(out, yb):
    """Fraction of rows in `out` whose arg-max class matches the label in `yb`."""
    predicted = out.argmax(dim=1)
    correct = (predicted == yb).float()
    return correct.mean()
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Let's check the accuracy of our random model, so we can see if ouraccuracy improves as our loss improves.
# Accuracy of the random model - expected to sit around chance level.
print(accuracy(preds, yb))
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
We can now run a training loop. For each iteration, we will:- select a mini-batch of data (of size ``bs``)- use the model to make predictions- calculate the loss- ``loss.backward()`` updates the gradients of the model, in this case, ``weights`` and ``bias``.We now use these gradients to update the weights and bias. We do thiswithin the ``torch.no_grad()`` context manager, because we do not want theseactions to be recorded for our next calculation of the gradient. You can readmore about how PyTorch's Autograd records operations`here `_.We then set thegradients to zero, so that we are ready for the next loop.Otherwise, our gradients would record a running tally of all the operationsthat had happened (i.e. ``loss.backward()`` *adds* the gradients to whatever isalready stored, rather than replacing them)... tip:: You can use the standard python debugger to step through PyTorch code, allowing you to check the various variable values at each step. Uncomment ``set_trace()`` below to try it out.
from IPython.core.debugger import set_trace

lr = 0.5  # learning rate
epochs = 2  # how many epochs to train for

for epoch in range(epochs):
    for i in range((n - 1) // bs + 1):
        #         set_trace()
        start_i = i * bs
        end_i = start_i + bs
        xb = x_train[start_i:end_i]
        yb = y_train[start_i:end_i]
        pred = model(xb)
        loss = loss_func(pred, yb)
        # backward() *accumulates* gradients into weights.grad / bias.grad.
        loss.backward()
        with torch.no_grad():
            # Manual SGD step; no_grad keeps the update itself out of autograd.
            weights -= weights.grad * lr
            bias -= bias.grad * lr
            # Zero the grads so the next backward() starts from a clean slate.
            weights.grad.zero_()
            bias.grad.zero_()
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
That's it: we've created and trained a minimal neural network (in this case, alogistic regression, since we have no hidden layers) entirely from scratch!Let's check the loss and accuracy and compare those to what we gotearlier. We expect that the loss will have decreased and accuracy tohave increased, and they have.
# After training: loss should have dropped and accuracy risen versus the baseline.
print(loss_func(model(xb), yb), accuracy(model(xb), yb))
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Using torch.nn.functional------------------------------We will now refactor our code, so that it does the same thing as before, onlywe'll start taking advantage of PyTorch's ``nn`` classes to make it more conciseand flexible. At each step from here, we should be making our code one or moreof: shorter, more understandable, and/or more flexible.The first and easiest step is to make our code shorter by replacing ourhand-written activation and loss functions with those from ``torch.nn.functional``(which is generally imported into the namespace ``F`` by convention). This modulecontains all the functions in the ``torch.nn`` library (whereas other parts of thelibrary contain classes). As well as a wide range of loss and activationfunctions, you'll also find here some convenient functions for creating neuralnets, such as pooling functions. (There are also functions for doing convolutions,linear layers, etc, but as we'll see, these are usually better handled usingother parts of the library.)If you're using negative log likelihood loss and log softmax activation,then Pytorch provides a single function ``F.cross_entropy`` that combinesthe two. So we can even remove the activation function from our model.
import torch.nn.functional as F

# F.cross_entropy fuses log_softmax and negative log-likelihood, so the
# model now returns raw scores (logits) with no activation of its own.
loss_func = F.cross_entropy


def model(xb):
    """Plain affine map; the softmax lives inside the loss function."""
    return xb.matmul(weights).add(bias)
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Note that we no longer call ``log_softmax`` in the ``model`` function. Let'sconfirm that our loss and accuracy are the same as before:
# Same numbers as before: cross_entropy(logits) == nll(log_softmax(logits)).
print(loss_func(model(xb), yb), accuracy(model(xb), yb))
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Refactor using nn.Module-----------------------------Next up, we'll use ``nn.Module`` and ``nn.Parameter``, for a clearer and moreconcise training loop. We subclass ``nn.Module`` (which itself is a class andable to keep track of state). In this case, we want to create a class thatholds our weights, bias, and method for the forward step. ``nn.Module`` has anumber of attributes and methods (such as ``.parameters()`` and ``.zero_grad()``)which we will be using.Note``nn.Module`` (uppercase M) is a PyTorch specific concept, and is a class we'll be using a lot. ``nn.Module`` is not to be confused with the Python concept of a (lowercase ``m``) `module `_, which is a file of Python code that can be imported.
from torch import nn


class Mnist_Logistic(nn.Module):
    """Logistic regression as a Module: one affine map from 784 pixels to 10 logits."""

    def __init__(self):
        super().__init__()
        # Xavier-style init, wrapped in nn.Parameter so the tensors are
        # picked up by .parameters() and .zero_grad().
        init = torch.randn(784, 10) / math.sqrt(784)
        self.weights = nn.Parameter(init)
        self.bias = nn.Parameter(torch.zeros(10))

    def forward(self, xb):
        """Return raw class scores for a batch of flattened images."""
        return xb.matmul(self.weights) + self.bias
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Since we're now using an object instead of just using a function, wefirst have to instantiate our model:
# Instantiate the module; its nn.Parameters are registered automatically.
model = Mnist_Logistic()
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Now we can calculate the loss in the same way as before. Note that``nn.Module`` objects are used as if they are functions (i.e they are*callable*), but behind the scenes Pytorch will call our ``forward``method automatically.
# Calling the module invokes forward() behind the scenes.
print(loss_func(model(xb), yb))
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Previously for our training loop we had to update the values for each parameterby name, and manually zero out the grads for each parameter separately, like this::: with torch.no_grad(): weights -= weights.grad * lr bias -= bias.grad * lr weights.grad.zero_() bias.grad.zero_()Now we can take advantage of model.parameters() and model.zero_grad() (whichare both defined by PyTorch for ``nn.Module``) to make those steps more conciseand less prone to the error of forgetting some of our parameters, particularlyif we had a more complicated model::: with torch.no_grad(): for p in model.parameters(): p -= p.grad * lr model.zero_grad()We'll wrap our little training loop in a ``fit`` function so we can run itagain later.
def fit():
    """Train the global `model` with manual SGD (reads globals: epochs, n, bs, lr, data)."""
    for epoch in range(epochs):
        for i in range((n - 1) // bs + 1):
            start_i = i * bs
            end_i = start_i + bs
            xb = x_train[start_i:end_i]
            yb = y_train[start_i:end_i]
            pred = model(xb)
            loss = loss_func(pred, yb)
            loss.backward()
            with torch.no_grad():
                # Update every registered parameter, then clear all grads at once.
                for p in model.parameters():
                    p -= p.grad * lr
                model.zero_grad()


fit()
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Let's double-check that our loss has gone down:
# Loss after training should be lower than the pre-training value.
print(loss_func(model(xb), yb))
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Refactor using nn.Linear-------------------------We continue to refactor our code. Instead of manually defining andinitializing ``self.weights`` and ``self.bias``, and calculating ``xb @self.weights + self.bias``, we will instead use the Pytorch class`nn.Linear `_ for alinear layer, which does all that for us. Pytorch has many types ofpredefined layers that can greatly simplify our code, and often makes itfaster too.
class Mnist_Logistic(nn.Module):
    """Same logistic model, but nn.Linear owns and initialises weight and bias."""

    def __init__(self):
        super().__init__()
        self.lin = nn.Linear(784, 10)

    def forward(self, xb):
        """Delegate the affine map (xb @ W.T + b) to the linear layer."""
        scores = self.lin(xb)
        return scores
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
We instantiate our model and calculate the loss in the same way as before:
# Fresh nn.Linear-based model; loss before training (random init).
model = Mnist_Logistic()
print(loss_func(model(xb), yb))
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
We are still able to use our same ``fit`` method as before.
fit()  # the same training loop works unchanged with the nn.Linear model

print(loss_func(model(xb), yb))
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Refactor using optim------------------------------Pytorch also has a package with various optimization algorithms, ``torch.optim``.We can use the ``step`` method from our optimizer to take a forward step, insteadof manually updating each parameter.This will let us replace our previous manually coded optimization step::: with torch.no_grad(): for p in model.parameters(): p -= p.grad * lr model.zero_grad()and instead use just::: opt.step() opt.zero_grad()(``optim.zero_grad()`` resets the gradient to 0 and we need to call it beforecomputing the gradient for the next minibatch.)
from torch import optim
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
We'll define a little function to create our model and optimizer so wecan reuse it in the future.
def get_model():
    """Return a fresh model and an SGD optimiser over its parameters (uses global lr)."""
    model = Mnist_Logistic()
    return model, optim.SGD(model.parameters(), lr=lr)


model, opt = get_model()
print(loss_func(model(xb), yb))

for epoch in range(epochs):
    for i in range((n - 1) // bs + 1):
        start_i = i * bs
        end_i = start_i + bs
        xb = x_train[start_i:end_i]
        yb = y_train[start_i:end_i]
        pred = model(xb)
        loss = loss_func(pred, yb)
        loss.backward()
        # optimiser replaces the manual parameter update and grad reset
        opt.step()
        opt.zero_grad()

print(loss_func(model(xb), yb))
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Refactor using Dataset------------------------------PyTorch has an abstract Dataset class. A Dataset can be anything that hasa ``__len__`` function (called by Python's standard ``len`` function) anda ``__getitem__`` function as a way of indexing into it.`This tutorial `_walks through a nice example of creating a custom ``FacialLandmarkDataset`` classas a subclass of ``Dataset``.PyTorch's `TensorDataset `_is a Dataset wrapping tensors. By defining a length and way of indexing,this also gives us a way to iterate, index, and slice along the firstdimension of a tensor. This will make it easier to access both theindependent and dependent variables in the same line as we train.
from torch.utils.data import TensorDataset
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Both ``x_train`` and ``y_train`` can be combined in a single ``TensorDataset``,which will be easier to iterate over and slice.
# Wrap images and labels in one dataset so a single slice yields both.
train_ds = TensorDataset(x_train, y_train)
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Previously, we had to iterate through minibatches of x and y values separately::: xb = x_train[start_i:end_i] yb = y_train[start_i:end_i]Now, we can do these two steps together::: xb,yb = train_ds[i*bs : i*bs+bs]
model, opt = get_model()

for epoch in range(epochs):
    for i in range((n - 1) // bs + 1):
        # One slice of the TensorDataset returns both tensors together.
        xb, yb = train_ds[i * bs: i * bs + bs]
        pred = model(xb)
        loss = loss_func(pred, yb)
        loss.backward()
        opt.step()
        opt.zero_grad()

print(loss_func(model(xb), yb))
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Refactor using DataLoader------------------------------Pytorch's ``DataLoader`` is responsible for managing batches. You cancreate a ``DataLoader`` from any ``Dataset``. ``DataLoader`` makes it easierto iterate over batches. Rather than having to use ``train_ds[i*bs : i*bs+bs]``,the DataLoader gives us each minibatch automatically.
from torch.utils.data import DataLoader

# DataLoader hands out (xb, yb) mini-batches of size bs automatically.
train_ds = TensorDataset(x_train, y_train)
train_dl = DataLoader(train_ds, batch_size=bs)
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Previously, our loop iterated over batches (xb, yb) like this::: for i in range((n-1)//bs + 1): xb,yb = train_ds[i*bs : i*bs+bs] pred = model(xb)Now, our loop is much cleaner, as (xb, yb) are loaded automatically from the data loader::: for xb,yb in train_dl: pred = model(xb)
model, opt = get_model()

for epoch in range(epochs):
    # The loader yields mini-batches; no manual index arithmetic needed.
    for xb, yb in train_dl:
        pred = model(xb)
        loss = loss_func(pred, yb)
        loss.backward()
        opt.step()
        opt.zero_grad()

print(loss_func(model(xb), yb))
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Thanks to Pytorch's ``nn.Module``, ``nn.Parameter``, ``Dataset``, and ``DataLoader``, our training loop is now dramatically smaller and easier to understand. Let's now try to add the basic features necessary to create effective models in practice. Add validation ----------------------- In section 1, we were just trying to get a reasonable training loop set up for use on our training data. In reality, you **always** should also have a `validation set `_, in order to identify if you are overfitting. Shuffling the training data is `important `_ to prevent correlation between batches and overfitting. On the other hand, the validation loss will be identical whether we shuffle the validation set or not. Since shuffling takes extra time, it makes no sense to shuffle the validation data. We'll use a batch size for the validation set that is twice as large as that for the training set. This is because the validation set does not need backpropagation and thus takes less memory (it doesn't need to store the gradients). We take advantage of this to use a larger batch size and compute the loss more quickly.
# Shuffle only the training data; validation loss is order-independent.
train_ds = TensorDataset(x_train, y_train)
train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)

# Validation needs no gradients, so a 2x batch size fits in memory.
valid_ds = TensorDataset(x_valid, y_valid)
valid_dl = DataLoader(valid_ds, batch_size=bs * 2)
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
We will calculate and print the validation loss at the end of each epoch.(Note that we always call ``model.train()`` before training, and ``model.eval()``before inference, because these are used by layers such as ``nn.BatchNorm2d``and ``nn.Dropout`` to ensure appropriate behaviour for these different phases.)
model, opt = get_model()

for epoch in range(epochs):
    model.train()  # enable training-mode behaviour (e.g. dropout/batchnorm layers)
    for xb, yb in train_dl:
        pred = model(xb)
        loss = loss_func(pred, yb)
        loss.backward()
        opt.step()
        opt.zero_grad()

    model.eval()  # switch to inference behaviour before validating
    with torch.no_grad():
        valid_loss = sum(loss_func(model(xb), yb) for xb, yb in valid_dl)

    # Mean over batches (equal-sized except possibly the last one).
    print(epoch, valid_loss / len(valid_dl))
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Create fit() and get_data()----------------------------------We'll now do a little refactoring of our own. Since we go through a similarprocess twice of calculating the loss for both the training set and thevalidation set, let's make that into its own function, ``loss_batch``, whichcomputes the loss for one batch.We pass an optimizer in for the training set, and use it to performbackprop. For the validation set, we don't pass an optimizer, so themethod doesn't perform backprop.
def loss_batch(model, loss_func, xb, yb, opt=None):
    """Compute the loss for one batch; if an optimiser is given, also take a step.

    Returns (loss value, batch size) so callers can form a weighted average.
    """
    loss = loss_func(model(xb), yb)

    training = opt is not None
    if training:
        loss.backward()
        opt.step()
        opt.zero_grad()

    return loss.item(), len(xb)
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
``fit`` runs the necessary operations to train our model and compute thetraining and validation losses for each epoch.
import numpy as np


def fit(epochs, model, loss_func, opt, train_dl, valid_dl):
    """Train for `epochs`, printing the weighted mean validation loss per epoch."""
    for epoch in range(epochs):
        model.train()
        for xb, yb in train_dl:
            loss_batch(model, loss_func, xb, yb, opt)

        model.eval()
        with torch.no_grad():
            # Collect (loss, batch-size) pairs so uneven last batches are weighted correctly.
            results = [loss_batch(model, loss_func, xb, yb) for xb, yb in valid_dl]
        losses = [l for l, _ in results]
        nums = [c for _, c in results]
        val_loss = np.sum(np.multiply(losses, nums)) / np.sum(nums)

        print(epoch, val_loss)
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
``get_data`` returns dataloaders for the training and validation sets.
def get_data(train_ds, valid_ds, bs):
    """Build the two loaders: shuffled training batches of `bs`, validation batches of 2*bs."""
    train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)
    valid_dl = DataLoader(valid_ds, batch_size=2 * bs)
    return train_dl, valid_dl
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Now, our whole process of obtaining the data loaders and fitting themodel can be run in 3 lines of code:
# The whole pipeline in three lines: loaders, model + optimiser, training.
train_dl, valid_dl = get_data(train_ds, valid_ds, bs)
model, opt = get_model()
fit(epochs, model, loss_func, opt, train_dl, valid_dl)
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
You can use these basic 3 lines of code to train a wide variety of models.Let's see if we can use them to train a convolutional neural network (CNN)!Switch to CNN-------------We are now going to build our neural network with three convolutional layers.Because none of the functions in the previous section assume anything aboutthe model form, we'll be able to use them to train a CNN without any modification.We will use Pytorch's predefined`Conv2d `_ classas our convolutional layer. We define a CNN with 3 convolutional layers.Each convolution is followed by a ReLU. At the end, we perform anaverage pooling. (Note that ``view`` is PyTorch's version of numpy's``reshape``)
class Mnist_CNN(nn.Module):
    """Three stride-2 conv layers (28->14->7->4 spatially), ReLU, then average pooling."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1)

    def forward(self, xb):
        """Reshape flat 784-vectors to NCHW, run the conv stack, pool, flatten to (N, 10)."""
        out = xb.view(-1, 1, 28, 28)
        for conv in (self.conv1, self.conv2, self.conv3):
            out = F.relu(conv(out))
        out = F.avg_pool2d(out, 4)  # 4x4 feature map -> 1x1
        return out.view(-1, out.size(1))


lr = 0.1
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
`Momentum `_ is a variation onstochastic gradient descent that takes previous updates into account as welland generally leads to faster training.
model = Mnist_CNN()
# momentum=0.9 folds previous updates into each step for faster convergence.
opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9)

fit(epochs, model, loss_func, opt, train_dl, valid_dl)
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
nn.Sequential ------------------------ ``torch.nn`` has another handy class we can use to simplify our code: `Sequential `_ . A ``Sequential`` object runs each of the modules contained within it, in a sequential manner. This is a simpler way of writing our neural network. To take advantage of this, we need to be able to easily define a **custom layer** from a given function. For instance, PyTorch doesn't have a `view` layer, and we need to create one for our network. ``Lambda`` will create a layer that we can then use when defining a network with ``Sequential``.
class Lambda(nn.Module):
    """Wrap an arbitrary function as an nn.Module so it can sit inside nn.Sequential."""

    def __init__(self, func):
        super().__init__()
        self.func = func

    def forward(self, x):
        """Apply the wrapped function to the input."""
        return self.func(x)


def preprocess(x):
    """Reshape a batch of flat 784-vectors into NCHW images of shape (N, 1, 28, 28)."""
    return x.view(-1, 1, 28, 28)
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
The model created with ``Sequential`` is simply:
# Sequential CNN: reshape -> 3 strided convs (28->14->7->4) -> avg-pool -> flatten to (N, 10).
model = nn.Sequential(
    Lambda(preprocess),
    nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1),
    nn.ReLU(),
    nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1),
    nn.ReLU(),
    nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1),
    nn.ReLU(),
    nn.AvgPool2d(4),
    Lambda(lambda x: x.view(x.size(0), -1)),  # flatten the 1x1 maps to class scores
)

opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9)

fit(epochs, model, loss_func, opt, train_dl, valid_dl)
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Wrapping DataLoader-----------------------------Our CNN is fairly concise, but it only works with MNIST, because: - It assumes the input is a 28\*28 long vector - It assumes that the final CNN grid size is 4\*4 (since that's the averagepooling kernel size we used)Let's get rid of these two assumptions, so our model works with any 2dsingle channel image. First, we can remove the initial Lambda layer butmoving the data preprocessing into a generator:
def preprocess(x, y):
    """Reshape image batches to NCHW; labels pass through untouched."""
    return x.view(-1, 1, 28, 28), y


class WrappedDataLoader:
    """Iterate an underlying DataLoader, applying `func` to every batch on the fly."""

    def __init__(self, dl, func):
        self.dl = dl
        self.func = func

    def __len__(self):
        return len(self.dl)

    def __iter__(self):
        for batch in self.dl:
            yield self.func(*batch)


train_dl, valid_dl = get_data(train_ds, valid_ds, bs)
train_dl = WrappedDataLoader(train_dl, preprocess)
valid_dl = WrappedDataLoader(valid_dl, preprocess)
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Next, we can replace ``nn.AvgPool2d`` with ``nn.AdaptiveAvgPool2d``, whichallows us to define the size of the *output* tensor we want, rather thanthe *input* tensor we have. As a result, our model will work with anysize input.
# Same CNN, but AdaptiveAvgPool2d(1) fixes the *output* spatial size to 1x1,
# so the model accepts any input resolution, not just 28x28.
model = nn.Sequential(
    nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1),
    nn.ReLU(),
    nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1),
    nn.ReLU(),
    nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1),
    nn.ReLU(),
    nn.AdaptiveAvgPool2d(1),
    Lambda(lambda x: x.view(x.size(0), -1)),
)

opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Let's try it out:
# Train the resolution-independent model with the same fit() helper.
fit(epochs, model, loss_func, opt, train_dl, valid_dl)
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Using your GPU---------------If you're lucky enough to have access to a CUDA-capable GPU (you canrent one for about $0.50/hour from most cloud providers) you canuse it to speed up your code. First check that your GPU is working inPytorch:
# True only when a CUDA-capable GPU and a CUDA build of PyTorch are present.
print(torch.cuda.is_available())
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
And then create a device object for it:
# Prefer the GPU when one is available; everything below moves data and model to `dev`.
dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Let's update ``preprocess`` to move batches to the GPU:
def preprocess(x, y):
    """Reshape to NCHW and move both tensors to the selected device."""
    xb = x.view(-1, 1, 28, 28)
    return xb.to(dev), y.to(dev)


train_dl, valid_dl = get_data(train_ds, valid_ds, bs)
train_dl = WrappedDataLoader(train_dl, preprocess)
valid_dl = WrappedDataLoader(valid_dl, preprocess)
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
Finally, we can move our model to the GPU.
# Move the model's parameters to `dev` before building the optimiser over them.
model.to(dev)
opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit
You should find it runs faster now:
# Same training call; batches and model now live on `dev`.
fit(epochs, model, loss_func, opt, train_dl, valid_dl)
_____no_output_____
MIT
notebook/pytorch/nn_tutorial.ipynb
mengwangk/myinvestor-toolkit