code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: lang101 # language: python # name: lang101 # --- # ## Introduction to spaCy # __Initializing spaCy__ import spacy # Load the language model for English # In order to do so we create a spacy object called "nlp" nlp = spacy.load("en_core_web_sm") # Now we have a spacy object type(nlp) # We can see that it is a spacy object # Now we can perform NLP tasks. We can use a spacy object to analyze text. # create a Doc object # We annotate a random string using the spacy object doc = nlp("Ferocious winter weather sweeping across large parts of the central and southern US has brought record-breaking cold temperatures, left millions without power and killed at least 21 people across multiple states.") type(doc) # It is a spacy object that comprises tokens # doc is a completely tokenized and annotated string, because the nlp() function has already been called on it - hence, the tokenization has already happened print(doc) # The doc has already been tokenized in the background. This is cool about spacy. # Now we can iterate over the string using spacy. # __Tokens__ # We can call many different methods/attributes on tokens (e.g. token.text is one function that can be called). SpaCy provides an overview of the different attributes/methods that can be called on a doc/token object: https://spacy.io/api/doc # We iterate over each token in the string for token in doc: print(token.text) # here we use the method .text # Now we can see that the string has been tokenized # Punctuations count as individual tokens for token in doc: print(token.text, token.lemma) # here we print both the token and the lemma # Now we get both the token and a number for each token. This is because spaCy converts every string into a number, because this makes opeartions make efficient. Each number is unique. 
for token in doc: print(token.text, token.lemma_) #_ means that we want the string of the lemma # Now instead of getting the number for each token, we get the lemma itself as a string. We can see what happens with each word when it is lemmatized. for token in doc: print(token.text, token.is_punct) # Here we use the .is_punct method. This is just another example of a method that can be called on a token object for token in doc: print(token.text, token.pos_, token.tag_) # here we use the .pos and .tag to get the part-of-speech tag and the tag for each token # ^<br> # UPOS = token.pos_ <br> # fine-grained tags = token.tag_
notebooks/session3_spaCy_au617836.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Anomalias magnéticas produzidas por fontes crustais com geometria simples: # # > #### Neste exercicio vamos calcular as componentes cartesianas do campo de Indução magnética produzida por 1 prisma vertical, cujas coordenadas dos vértices e a magnetização são totalmente conhecidas. Há ainda a consideração sobre a massa dessa fonte para que façamos o uso da relação de Poisson a fim de comparar o resultado obtido com o calculo analítico das referidas componentes. <br> # > #### Vamos simular um campo de Indução Regional e por fim obter a Anomalia de Campo Total produzida pelo prisma, que em aplicações práticas pode ser aludido a um modelo de dique vertical, ou até mesmo uma soleira. <br> # > #### Você vai perceber que os cálculos começam a ficar mais desafiadores do que para o caso da bolinha, e por este motivo, utilizar a Relação de Poisson pode ser uma ótima alternativa, uma vez que tratam-se de equações para as componentes cartesianas um pouco mais simples do que as equações envolvendo vetores e suas componentes. # Import das Bibliotecas: import numpy import sys import matplotlib.pyplot as plt a = sys.path.append('../modules/') # endereco das funcoes implementadas por voce! 
import prism, noise, plot_3D, auxiliars # ### Etapa 1: Definicão das coordenadas de Observação: # + nx = 100 # n de observacoes na direcao x ny = 100 # n de observacoes na direcao y size = (nx, ny) xmin = -5000.0 # metros xmax = +5000.0 # metros ymin = -5000.0 # metros ymax = +5000.0 # metros z = -100.0 # altura de voo, em metros x = numpy.linspace(xmin, xmax, nx, endpoint=True) y = numpy.linspace(ymin, ymax, ny, endpoint=True) delt = x[1] - x[0] print(delt) # fazendo o Mesh para gridar a regiao: X,Y = numpy.meshgrid(x,y) # - plt.figure(figsize=(8,8)) plt.plot(X,Y, '.r') plt.show() # ## Etapa 2: Simulação do campo Principal na região das observações: # + I = -30.0 # inclinacao do campo principal em graus D = -23.0 # declinacao do campo principal em graus Fi = 40000.0 # Intensidade do campo principal (nT) # Campo principal variando com as posicao F(X,Y): F = Fi + 0.013*X + 0.08*Y # nT # + # Calculo das componentes cartesianas do versor F: # conversao de graus para radiandos: incl = numpy.deg2rad(I) decl = numpy.deg2rad(D) #----------------------------------- Fx = numpy.cos(incl)*numpy.cos(decl) Fy = numpy.cos(incl)*numpy.sin(decl) Fz = numpy.sin(incl) # - Fx,Fy,Fz # ## Etapa 3: Definição das propriedades da fonte crustal (prisma vertical): # # coordenadas dos vertices (corners) do prisma, em metros: x1,x2 = (-500.0, 500.0) y1,y2 = (-500.0, 500.0) z1,z2 = (2000.0,2500.0) # z eh positivo para baixo! 
plt.figure(figsize=(12,6)) plt.plot(X,Y, '.r') xs = [x1, x1, x2, x2, x1] ys = [y1, y2, y2, y1, y1] plt.plot(xs,ys,'k-', label='contornos da fonte') plt.legend() plt.show() # + # Propriedades magneticas da fonte crustal: inc = I # magnetizacao puramente induzida dec = -10.0 Mi = 10.0 # intensidade da magnetizacao em A/m # conversao de graus para radianos: inc_rad = numpy.deg2rad(inc) dec_rad = numpy.deg2rad(dec) # versor magnetizacao da fonte crustal modelada: mx = numpy.cos(inc_rad)*numpy.cos(dec_rad) my = numpy.cos(inc_rad)*numpy.sin(dec_rad) mz = numpy.sin(inc_rad) # - # componentes cartesianas do versor magnetizacao: mx,my,mz # guardando na lista: fonte_crustal_mag = [x1,x2,y1,y2,z1,z2,Mi] # calculando as componentes cartesianas do campo B (caixa preta!): bx = prism.prism_bx(X, Y, z, fonte_crustal_mag, I,D, inc, dec) by = prism.prism_by(X, Y, z, fonte_crustal_mag, I,D, inc, dec) bz = prism.prism_bz(X, Y, z, fonte_crustal_mag, I,D, inc, dec) # + # Ploting all results plt.close('all') plt.figure(figsize=(19,7)) #****************************************************** plt.subplot(1,3,1) plt.contourf(Y, X, bx, 20, cmap = plt.cm.RdBu_r) plt.title('Bx (nT)', fontsize = 12) plt.xlabel('East (m)', fontsize = 10) plt.ylabel('North (m)', fontsize = 10) plt.plot(xs,ys,'k-') plt.colorbar() #****************************************************** plt.subplot(1,3,2) plt.contourf(Y, X, by, 20, cmap = plt.cm.RdBu_r) plt.title('By (nT)', fontsize = 12) plt.xlabel('East (m)', fontsize = 10) plt.ylabel('North (m)', fontsize = 10) plt.plot(xs,ys,'k-') plt.colorbar() #****************************************************** plt.subplot(1,3,3) plt.contourf(Y, X, bz, 20, cmap = plt.cm.RdBu_r) plt.title('Bz (nT)', fontsize = 12) plt.xlabel('East (m)', fontsize = 10) plt.ylabel('North (m)', fontsize = 10) plt.plot(xs,ys,'k-') plt.colorbar() plt.show() # + # Etapa 4: Calculo do Campo total: CT = numpy.sqrt((F*Fx + bx)**2 + (F*Fy + by)**2 + (F*Fz + bz)**2) # Anomalia de Campo Total: ACT = CT 
- F #----------------------------------------------------------------- #t= len(ACT) #mi = 0.0 #sigma = 4.5 #ACT = noise.noise_gaussiana(t, mi, sigma, ACT) # - # graficos plt.close('all') plt.figure(figsize=(17,6)) #****************************************************** plt.subplot(1,2,1) plt.contourf(Y, X, CT, 20, cmap = plt.cm.RdBu_r) plt.title('Campo Total (nT)', fontsize = 12) plt.xlabel('East (m)', fontsize = 10) plt.ylabel('North (m)', fontsize = 10) plt.plot(xs,ys,'k-') plt.colorbar() #****************************************************** plt.subplot(1,2,2) plt.contourf(Y, X, ACT, 20, cmap = plt.cm.RdBu_r) plt.title('Anomalia de Campo Total (nT)', fontsize = 12) plt.xlabel('East (m)', fontsize = 10) plt.ylabel('North (m)', fontsize = 10) plt.plot(xs,ys,'k-') plt.colorbar() #****************************************************** #plt.savefig('teste_100_40000_D10.png', format='png') plt.show() # + t= len(ACT) mi = 0.0 sigma = 0.05 #ACTn = noise.noise_gaussiana(t, mi, sigma, ACT) ACTn = auxiliars.noise_normal_dist(ACT, mi, sigma) # graficos plt.close('all') plt.figure(figsize=(17,6)) #****************************************************** plt.subplot(1,2,1) plt.contourf(Y, X, ACT, 20, cmap = plt.cm.RdBu_r) plt.title('Anomalia de Campo Total (nT)', fontsize = 12) plt.xlabel('East (m)', fontsize = 10) plt.ylabel('North (m)', fontsize = 10) plt.plot(xs,ys,'k-') plt.colorbar() #****************************************************** plt.subplot(1,2,2) plt.contourf(Y, X, ACTn, 20, cmap = plt.cm.RdBu_r) plt.title('Anomalia de Campo Total adiconando noise (nT)', fontsize = 12) plt.xlabel('East (m)', fontsize = 10) plt.ylabel('North (m)', fontsize = 10) plt.plot(xs,ys,'k-') plt.colorbar() #****************************************************** #plt.savefig('teste_100_40000_D10.png', format='png') plt.show() # + azim = 0.0 tfa = prism.prism_tf(X, Y,z, fonte_crustal_mag, I,D, inc, dec, azim) # graficos plt.close('all') plt.figure(figsize=(17,6)) 
#****************************************************** plt.subplot(1,2,1) plt.contourf(Y, X, ACT, 20, cmap = plt.cm.RdBu_r) plt.title('Anomalia de Campo Total (nT)', fontsize = 12) plt.xlabel('East (m)', fontsize = 10) plt.ylabel('North (m)', fontsize = 10) plt.plot(xs,ys,'k-') plt.colorbar() #****************************************************** plt.subplot(1,2,2) plt.contourf(Y, X, tfa, 20, cmap = plt.cm.RdBu_r) plt.title('Anomalia de Campo Total formula direta (nT)', fontsize = 12) plt.xlabel('East (m)', fontsize = 10) plt.ylabel('North (m)', fontsize = 10) plt.plot(xs,ys,'k-') plt.colorbar() #****************************************************** #plt.savefig('teste_100_40000_D10.png', format='png') plt.show() # - # # <center> ----- Fim -------- # # Graficos 3D: # Pacotes especificos paara graficos 3D: from mpl_toolkits.mplot3d import Axes3D from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection # + # Plot the main 3D source figure = plt.figure(figsize=(10,10)) ax = figure.gca(projection = '3d') #ax.set_title('Vertical dike model', size = 18) x1, x2, y1, y2, z1, z2 = fonte_crustal_mag[:6] v = numpy.array([[x1, y1, z2], [x1, y2, z2], [x2, y2, z2], [x2, y1, z2], [x1, y1, z1], [x1, y2, z1], [x2, y2, z1], [x2, y1, z1]]) vert = [[v[0],v[1],v[2],v[3]], [v[0],v[1],v[5],v[4]], [v[1],v[2],v[6],v[5]], [v[2],v[3],v[7],v[6]], [v[3],v[0],v[4],v[7]], [v[4],v[5],v[6],v[7]]] fig01 = Poly3DCollection(vert, alpha = 0.75, linewidths = 0.75, edgecolors = 'k') fig01.set_facecolor('chocolate') ax.add_collection3d(fig01) # Define the scale of the projection x_scale = 1.2 y_scale = 1.2 z_scale = 1. scale=numpy.diag([x_scale, y_scale, z_scale, 1.0]) scale=scale*(1.0/scale.max()) scale[3,3] = 1. 
def short_proj(): return numpy.dot(Axes3D.get_proj(ax), scale) ax.get_proj = short_proj # Labels ax.set_xlabel('North (m)', size = 25, labelpad = 30) ax.set_ylabel('East (m)', size = 25, labelpad = 30) ax.set_zlabel('Depth (m)', size = 25, labelpad = 30) ax.set_xlim(x.min(), x.max()) ax.set_ylim(y.min(), y.max()) ax.set_zlim(0., z2) ax.set_xticks(numpy.arange(x.min(), x.max(), 2500)) ax.set_yticks(numpy.linspace(y.min(), y.max(), 5)) ax.set_zticks(numpy.linspace(0., z2, 6)) ax.tick_params(labelsize = 20, pad = 10) # Visualization angle ax.view_init(210, 135) plt.tight_layout(True) #plt.savefig('figs/dikemodel-r1.png', dpi = 300, bbox_inches = 'tight', transparent = True) #plt.savefig('figs/dikemodel-r1.pdf', dpi = 300, bbox_inches = 'tight', transparent = True) plt.show() # - # + # FIM
codes/tests/.ipynb_checkpoints/test_Mag_prisma-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Model Checking # # After running an MCMC simulation, `sample` returns a `MultiTrace` object containing the samples for all the stochastic and deterministic random variables. The final step in Bayesian computation is model checking, in order to ensure that inferences derived from your sample are valid. There are two components to model checking: # # 1. Convergence diagnostics # 2. Goodness of fit # # Convergence diagnostics are intended to detect lack of convergence in the Markov chain Monte Carlo sample; it is used to ensure that you have not halted your sampling too early. However, a converged model is not guaranteed to be a good model. The second component of model checking, goodness of fit, is used to check the internal validity of the model, by comparing predictions from the model to the data used to fit the model. # ## Convergence Diagnostics # # Valid inferences from sequences of MCMC samples are based on the # assumption that the samples are derived from the true posterior # distribution of interest. Theory guarantees this condition as the number # of iterations approaches infinity. It is important, therefore, to # determine the **minimum number of samples** required to ensure a reasonable # approximation to the target posterior density. Unfortunately, no # universal threshold exists across all problems, so convergence must be # assessed independently each time MCMC estimation is performed. The # procedures for verifying convergence are collectively known as # *convergence diagnostics*. # # One approach to analyzing convergence is **analytical**, whereby the # variance of the sample at different sections of the chain are compared # to that of the limiting distribution. 
These methods use distance metrics # to analyze convergence, or place theoretical bounds on the sample # variance, and though they are promising, they are generally difficult to # use and are not prominent in the MCMC literature. More common is a # **statistical** approach to assessing convergence. With this approach, # rather than considering the properties of the theoretical target # distribution, only the statistical properties of the observed chain are # analyzed. Reliance on the sample alone restricts such convergence # criteria to **heuristics**. As a result, convergence cannot be guaranteed. # Although evidence for lack of convergence using statistical convergence # diagnostics will correctly imply lack of convergence in the chain, the # absence of such evidence will not *guarantee* convergence in the chain. # Nevertheless, negative results for one or more criteria may provide some # measure of assurance to users that their sample will provide valid # inferences. # # For most simple models, convergence will occur quickly, sometimes within # a the first several hundred iterations, after which all remaining # samples of the chain may be used to calculate posterior quantities. For # more complex models, convergence requires a significantly longer burn-in # period; sometimes orders of magnitude more samples are needed. # Frequently, lack of convergence will be caused by **poor mixing**. # Recall that *mixing* refers to the degree to which the Markov # chain explores the support of the posterior distribution. Poor mixing # may stem from inappropriate proposals (if one is using the # Metropolis-Hastings sampler) or from attempting to estimate models with # highly correlated variables. 
# %matplotlib inline import numpy as np import seaborn as sns; sns.set_context('notebook') import warnings warnings.filterwarnings("ignore", category=UserWarning) # + from pymc3 import Normal, Binomial, sample, Model from pymc3.math import invlogit # Samples for each dose level n = 5 * np.ones(4, dtype=int) # Log-dose dose = np.array([-.86, -.3, -.05, .73]) deaths = np.array([0, 1, 3, 5]) with Model() as bioassay_model: # Logit-linear model parameters alpha = Normal('alpha', 0, sd=100) beta = Normal('beta', 0, sd=100) # Calculate probabilities of death theta = invlogit(alpha + beta * dose) # Data likelihood obs_deaths = Binomial('obs_deaths', n=n, p=theta, observed=deaths) # - with bioassay_model: bioassay_trace = sample(1000) # + from pymc3 import traceplot traceplot(bioassay_trace, varnames=['alpha']) # - # ### Informal Methods # # The most straightforward approach for assessing convergence is based on # simply **plotting and inspecting traces and histograms** of the observed # MCMC sample. If the trace of values for each of the stochastics exhibits # asymptotic behavior over the last $m$ iterations, this may be # satisfactory evidence for convergence. traceplot(bioassay_trace, varnames=['beta']) # A similar approach involves # plotting a histogram for every set of $k$ iterations (perhaps 50-100) # beyond some burn in threshold $n$; if the histograms are not visibly # different among the sample intervals, this may be considered some evidence for # convergence. Note that such diagnostics should be carried out for each # stochastic estimated by the MCMC algorithm, because convergent behavior # by one variable does not imply evidence for convergence for other # variables in the analysis. 
# + import matplotlib.pyplot as plt beta_trace = bioassay_trace['beta'] fig, axes = plt.subplots(2, 5, figsize=(14,6)) axes = axes.ravel() for i in range(10): axes[i].hist(beta_trace[100*i:100*(i+1)]) plt.tight_layout() # - # An extension of this approach can be taken # when multiple parallel chains are run, rather than just a single, long # chain. In this case, the final values of $c$ chains run for $n$ # iterations are plotted in a histogram; just as above, this is repeated # every $k$ iterations thereafter, and the histograms of the endpoints are # plotted again and compared to the previous histogram. This is repeated # until consecutive histograms are indistinguishable. # Another *ad hoc* method for detecting lack of convergence is to examine # the traces of several MCMC chains initialized with different starting # values. Overlaying these traces on the same set of axes should (if # convergence has occurred) show each chain tending toward the same # equilibrium value, with approximately the same variance. Recall that the # tendency for some Markov chains to converge to the true (unknown) value # from diverse initial values is called *ergodicity*. This property is # guaranteed by the reversible chains constructed using MCMC, and should # be observable using this technique. Again, however, this approach is # only a heuristic method, and cannot always detect lack of convergence, # even though chains may appear ergodic. with bioassay_model: bioassay_trace = sample(1000, chains=2, start=[{'alpha':0.5}, {'alpha':5}]) bioassay_trace.get_values('alpha', chains=0)[0] plt.plot(bioassay_trace.get_values('alpha', chains=0)[:200], 'r--') plt.plot(bioassay_trace.get_values('alpha', chains=1)[:200], 'k--') # A principal reason that evidence from informal techniques cannot # guarantee convergence is a phenomenon called ***metastability***. 
Chains may # appear to have converged to the true equilibrium value, displaying # excellent qualities by any of the methods described above. However, # after some period of stability around this value, the chain may suddenly # move to another region of the parameter space. This period # of metastability can sometimes be very long, and therefore escape # detection by these convergence diagnostics. Unfortunately, there is no # statistical technique available for detecting metastability. # # ### Formal Methods # # Along with the *ad hoc* techniques described above, a number of more # formal methods exist which are prevalent in the literature. These are # considered more formal because they are based on existing statistical # methods, such as time series analysis. # # PyMC currently includes three formal convergence diagnostic methods. The # first, proposed by [Geweke (1992)](http://projecteuclid.org/DPubS?service=UI&version=1.0&verb=Display&handle=euclid.ss/1177011446), is a time-series approach that # compares the mean and variance of segments from the beginning and end of # a single chain. # # $$z = \frac{\bar{\theta}_a - \bar{\theta}_b}{\sqrt{S_a(0) + S_b(0)}}$$ # # where $a$ is the early interval and $b$ the late interval, and $S_i(0)$ is the spectral density estimate at zero frequency for chain segment $i$. If the # z-scores (theoretically distributed as standard normal variates) of # these two segments are similar, it can provide evidence for convergence. # PyMC calculates z-scores of the difference between various initial # segments along the chain, and the last 50% of the remaining chain. If # the chain has converged, the majority of points should fall within 2 # standard deviations of zero. # # In PyMC, diagnostic z-scores can be obtained by calling the `geweke` function. 
It # accepts either (1) a single trace, (2) a Node or Stochastic object, or # (4) an entire Model object: # + from pymc3 import geweke with bioassay_model: tr = sample(2000, tune=1000) z = geweke(tr, intervals=15) # - plt.scatter(*z[0]['alpha'].T) plt.hlines([-1,1], 0, 1000, linestyles='dotted') plt.xlim(0, 1000) # The arguments expected are the following: # # - `x` : The trace of a variable. # - `first` : The fraction of series at the beginning of the trace. # - `last` : The fraction of series at the end to be compared with the section at the beginning. # - `intervals` : The number of segments. # # Plotting the output displays the scores in series, making it is easy to # see departures from the standard normal assumption. # A second convergence diagnostic provided by PyMC is the Gelman-Rubin # statistic [Gelman and Rubin (1992)](http://projecteuclid.org/DPubS?service=UI&version=1.0&verb=Display&handle=euclid.ss/1177011136). This diagnostic uses multiple chains to # check for lack of convergence, and is based on the notion that if # multiple chains have converged, by definition they should appear very # similar to one another; if not, one or more of the chains has failed to # converge. # # The Gelman-Rubin diagnostic uses an analysis of variance approach to # assessing convergence. That is, it calculates both the between-chain # varaince (B) and within-chain varaince (W), and assesses whether they # are different enough to worry about convergence. Assuming $m$ chains, # each of length $n$, quantities are calculated by: # # $$\begin{align}B &= \frac{n}{m-1} \sum_{j=1}^m (\bar{\theta}_{.j} - \bar{\theta}_{..})^2 \\ # W &= \frac{1}{m} \sum_{j=1}^m \left[ \frac{1}{n-1} \sum_{i=1}^n (\theta_{ij} - \bar{\theta}_{.j})^2 \right] # \end{align}$$ # # for each scalar estimand $\theta$. 
Using these values, an estimate of # the marginal posterior variance of $\theta$ can be calculated: # # $$\hat{\text{Var}}(\theta | y) = \frac{n-1}{n} W + \frac{1}{n} B$$ # # Assuming $\theta$ was initialized to arbitrary starting points in each # chain, this quantity will overestimate the true marginal posterior # variance. At the same time, $W$ will tend to underestimate the # within-chain variance early in the sampling run. However, in the limit # as $n \rightarrow # \infty$, both quantities will converge to the true variance of $\theta$. # In light of this, the Gelman-Rubin statistic monitors convergence using # the ratio: # # $$\hat{R} = \sqrt{\frac{\hat{\text{Var}}(\theta | y)}{W}}$$ # # This is called the potential scale reduction, since it is an estimate of # the potential reduction in the scale of $\theta$ as the number of # simulations tends to infinity. In practice, we look for values of # $\hat{R}$ close to one (say, less than 1.1) to be confident that a # particular estimand has converged. In PyMC, the function # `gelman_rubin` will calculate $\hat{R}$ for each stochastic node in # the passed model: # + from pymc3 import gelman_rubin gelman_rubin(bioassay_trace) # - # For the best results, each chain should be initialized to highly # dispersed starting values for each stochastic node. # # By default, when calling the `forestplot` function using nodes with # multiple chains, the $\hat{R}$ values will be plotted alongside the # posterior intervals. # + from pymc3 import forestplot forestplot(bioassay_trace) # - # ## Autocorrelation # # In general, samples drawn from MCMC algorithms will be autocorrelated. This is not a big deal, other than the fact that autocorrelated chains may require longer sampling in order to adequately characterize posterior quantities of interest. 
The calculation of autocorrelation is performed for each lag $i=1,2,\ldots,k$ (the correlation at lag 0 is, of course, 1) by: # # $$\hat{\rho}_i = 1 - \frac{V_i}{2\hat{\text{Var}}(\theta | y)}$$ # # where $\hat{\text{Var}}(\theta | y)$ is the same estimated variance as calculated for the Gelman-Rubin statistic, and $V_i$ is the variogram at lag $i$ for $\theta$: # # $$\text{V}_i = \frac{1}{m(n-i)}\sum_{j=1}^m \sum_{k=i+1}^n (\theta_{jk} - \theta_{j(k-i)})^2$$ # # This autocorrelation can be visualized using the `autocorrplot` function in PyMC3: # + from pymc3 import autocorrplot autocorrplot(tr); # - # ### Effective sample size # # The effective sample size is estimated using the partial sum: # # $$\hat{n}_{eff} = \frac{mn}{1 + 2\sum_{i=1}^T \hat{\rho}_i}$$ # # where $T$ is the first odd integer such that $\hat{\rho}_{T+1} + \hat{\rho}_{T+2}$ is negative. # # The issue here is related to the fact that we are **estimating** the effective sample size from the fit output. Values of $n_{eff} / n_{iter} < 0.001$ indicate a biased estimator, resulting in an overestimate of the true effective sample size. # + from pymc3 import effective_n effective_n(bioassay_trace) # - # Both low $n_{eff}$ and high $\hat{R}$ indicate **poor mixing**. # # It is tempting to want to **thin** the chain to eliminate the autocorrelation (*e.g.* taking every 20th sample from the traces above), but this is a waste of time. Since thinning deliberately throws out the majority of the samples, no efficiency is gained; you ultimately require more samples to achive a particular desired sample size. # ## Diagnostics for Gradient-based Samplers # # Hamiltonian Monte Carlo is a powerful and efficient MCMC sampler when set up appropriately. However, this typically requires carefull tuning of the sampler parameters, such as tree depth, leapfrog step size and target acceptance rate. Fortunately, the NUTS algorithm takes care of some of this for us. 
Nevertheless, tuning must be carefully monitored for failures that frequently arise. This is particularly the case when fitting challenging models, such as those with high curvature or heavy tails. # # Fortunately, however, gradient-based sampling provides the ability to diagnose these pathologies. PyMC makes several diagnostic statistics available as attributes of the `MultiTrace` object returned by the `sample` function. bioassay_trace.stat_names # - `mean_tree_accept`: The mean acceptance probability for the tree that generated this sample. The mean of these values across all samples but the burn-in should be approximately `target_accept` (the default for this is 0.8). # - `diverging`: Whether the trajectory for this sample diverged. If there are many diverging samples, this usually indicates that a region of the posterior has high curvature. Reparametrization can often help, but you can also try to increase `target_accept` to something like 0.9 or 0.95. # - `energy`: The energy at the point in phase-space where the sample was accepted. This can be used to identify posteriors with problematically long tails. See below for an example. # - `energy_error`: The difference in energy between the start and the end of the trajectory. For a perfect integrator this would always be zero. # - `max_energy_error`: The maximum difference in energy along the whole trajectory. # - `depth`: The depth of the tree that was used to generate this sample # - `tree_size`: The number of leafs of the sampling tree, when the sample was accepted. This is usually a bit less than $2 ^ \text{depth}$. If the tree size is large, the sampler is using a lot of leapfrog steps to find the next sample. This can for example happen if there are strong correlations in the posterior, if the posterior has long tails, if there are regions of high curvature ("funnels"), or if the variance estimates in the mass matrix are inaccurate. 
Reparametrisation of the model or estimating the posterior variances from past samples might help. # - `tune`: This is `True`, if step size adaptation was turned on when this sample was generated. # - `step_size`: The step size used for this sample. # - `step_size_bar`: The current best known step-size. After the tuning samples, the step size is set to this value. This should converge during tuning. # If the name of the statistic does not clash with the name of one of the variables, we can use indexing to get the values. The values for the chains will be concatenated. # # We can see that the step sizes converged after the 2000 tuning samples for both chains to about the same value. The first 3000 values are from chain 1, the second from chain 2. with bioassay_model: trace = sample(1000, tune=2000, init=None, chains=2, discard_tuned_samples=False) plt.plot(trace['step_size_bar']) # The `get_sampler_stats` method provides more control over which values should be returned, and it also works if the name of the statistic is the same as the name of one of the variables. We can use the `chains` option, to control values from which chain should be returned, or we can set `combine=False` to get the values for the individual chains: # The `NUTS` step method has a maximum tree depth parameter so that infinite loops (which can occur for non-identified models) are avoided. When the maximum tree depth is reached (the default value is 10), the trajectory is stopped. However complex (but identifiable) models can saturate this threshold, which reduces sampling efficiency. # # The `MultiTrace` stores the tree depth for each iteration, so inspecting these traces can reveal saturation if it is occurring. sizes1, sizes2 = trace.get_sampler_stats('depth', combine=False) fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, sharey=True) ax1.plot(sizes1) ax2.plot(sizes2) # We can also check the acceptance for the trees that generated this sample. 
The mean of these values across all samples (except the tuning stage) is expected to be the same as `target_accept`, which is 0.8 by default. accept = trace.get_sampler_stats('mean_tree_accept', burn=1000) sns.distplot(accept, kde=False) # ### Divergent transitions # # Recall that simulating Hamiltonian dynamics via a symplectic integrator uses a discrete approximation of a continuous function. This is only a reasonable approximation when the step sizes of the integrator are suitably small. A divergent transition may indicate that the approximation is poor. # # If there are too many divergent transitions, then samples are not being drawn from the full posterior, and inferences based on the resulting sample will be biased # # If there are diverging transitions, PyMC3 will issue warnings indicating how many were discovered. We can obtain the indices of them from the trace. trace['diverging'].nonzero() # ### Bayesian Fraction of Missing Information # # The Bayesian fraction of missing information (BFMI) is a measure of how hard it is to # sample level sets of the posterior at each iteration. Specifically, it quantifies how well momentum resampling matches the marginal energy distribution. A small value indicates that the adaptation phase of the sampler was unsuccessful, and invoking the central limit theorem may not be valid. It indicates whether the sampler is able to adequately explore the posterior distribution. # # Though there is not an established rule of thumb for an adequate threshold, values close to one are optimal. Reparameterizing the model is sometimes helpful for improving this statistic. from pymc3 import bfmi bfmi(trace) # Another way of diagnosting this phenomenon is by comparing the overall distribution of # energy levels with the *change* of energy between successive samples. Ideally, they should be very similar. 
# # If the distribution of energy transitions is narrow relative to the marginal energy distribution, this is a sign of inefficient sampling, as many transitions are required to completely explore the posterior. On the other hand, if the energy transition distribution is similar to that of the marginal energy, this is evidence of efficient sampling, resulting in near-independent samples from the posterior. energy = trace['energy'] energy_diff = np.diff(energy) sns.distplot(energy - energy.mean(), label='energy') sns.distplot(energy_diff, label='energy diff') plt.legend() # If the overall distribution of energy levels has longer tails, the efficiency of the sampler will deteriorate quickly. # ## Goodness of Fit # # Checking for model convergence is only the first step in the evaluation # of MCMC model outputs. It is possible for an entirely unsuitable model # to converge, so additional steps are needed to ensure that the estimated # model adequately fits the data. One intuitive way of evaluating model # fit is to compare model predictions with the observations used to fit # the model. In other words, the fitted model can be used to simulate # data, and the distribution of the simulated data should resemble the # distribution of the actual data. # # Fortunately, simulating data from the model is a natural component of # the Bayesian modelling framework. Recall, from the discussion on # imputation of missing data, the posterior predictive distribution: # # $$p(\tilde{y}|y) = \int p(\tilde{y}|\theta) f(\theta|y) d\theta$$ # # Here, $\tilde{y}$ represents some hypothetical new data that would be # expected, taking into account the posterior uncertainty in the model # parameters. # # Sampling from the posterior predictive distribution is easy # in PyMC. The `sample_ppc` function draws posterior predictive checks from all of the data likelhioods. 
Consider the `gelman_bioassay` example, # where deaths are modeled as a binomial random variable for which # the probability of death is a logit-linear function of the dose of a # particular drug. # The posterior predictive distribution of deaths uses the same functional # form as the data likelihood, in this case a binomial stochastic. Here is # the corresponding sample from the posterior predictive distribution (we typically need very few samples relative to the MCMC sample): # + from pymc3 import sample_ppc with bioassay_model: deaths_sim = sample_ppc(bioassay_trace, samples=500) # - # The degree to which simulated data correspond to observations can be evaluated in at least two ways. First, these quantities can simply be compared visually. This allows for a qualitative comparison of model-based replicates and observations. If there is poor fit, the true value of the data may appear in the tails of the histogram of replicated data, while a good fit will tend to show the true data in high-probability regions of the posterior predictive distribution. The Matplot package in PyMC provides an easy way of producing such plots, via the `gof_plot` function. # + fig, axes = plt.subplots(1, 4, figsize=(14, 4)) for obs, sim, ax in zip(deaths, deaths_sim['obs_deaths'].T, axes): ax.hist(sim, bins=range(7)) ax.plot(obs+0.5, 1, 'ro') # - # ## Exercise: Meta-analysis of beta blocker effectiveness # # Carlin (1992) considers a Bayesian approach to meta-analysis, and includes the following examples of 22 trials of beta-blockers to prevent mortality after myocardial infarction. # # In a random effects meta-analysis we assume the true effect (on a log-odds scale) $d_i$ in a trial $i$ # is drawn from some population distribution. Let $r^C_i$ denote number of events in the control group in trial $i$, # and $r^T_i$ denote events under active treatment in trial $i$. 
Our model is: # # $$\begin{aligned} # r^C_i &\sim \text{Binomial}\left(p^C_i, n^C_i\right) \\ # r^T_i &\sim \text{Binomial}\left(p^T_i, n^T_i\right) \\ # \text{logit}\left(p^C_i\right) &= \mu_i \\ # \text{logit}\left(p^T_i\right) &= \mu_i + \delta_i \\ # \delta_i &\sim \text{Normal}(d, t) \\ # \mu_i &\sim \text{Normal}(m, s) # \end{aligned}$$ # # We want to make inferences about the population effect $d$, and the predictive distribution for the effect $\delta_{\text{new}}$ in a new trial. Build a model to estimate these quantities in PyMC, and (1) use convergence diagnostics to check for convergence and (2) use posterior predictive checks to assess goodness-of-fit. # # Here are the data: r_t_obs = [3, 7, 5, 102, 28, 4, 98, 60, 25, 138, 64, 45, 9, 57, 25, 33, 28, 8, 6, 32, 27, 22] n_t_obs = [38, 114, 69, 1533, 355, 59, 945, 632, 278,1916, 873, 263, 291, 858, 154, 207, 251, 151, 174, 209, 391, 680] r_c_obs = [3, 14, 11, 127, 27, 6, 152, 48, 37, 188, 52, 47, 16, 45, 31, 38, 12, 6, 3, 40, 43, 39] n_c_obs = [39, 116, 93, 1520, 365, 52, 939, 471, 282, 1921, 583, 266, 293, 883, 147, 213, 122, 154, 134, 218, 364, 674] N = len(n_c_obs) with Model() as meta_analysis: δ = Normal('δ', 0, sd = 10) μ = Normal('μ', -1, sd=10) p_control = invlogit(μ) p_treat = invlogit(μ + δ) control_obs = Binomial('control_obs', n=n_c_obs = p=p_control, observed=r_c_obs) treat_obs = Binomial('control_obs', n=n_t_obs = p=p_treat, observed=r_t_obs) with meta_analysis: tr = sample(1000, tune=2000, cores=2) traceplot(tr) from pymc3 import plot_posterior plot_posterior(tr) forestplot(tr) pred_data = sample_ppc(tr, samples=500, model=meta_analysis) pred_control_obs = pred_data['control_obs'] from scipy.stats import percentileofscore percentileofscore(pred_control_obs[:, 0], r_c_obs[0]) # --- # ## References # # - <NAME>., & <NAME>. (1992). Inference from iterative simulation using multiple sequences. Statistical Science. A Review Journal of the Institute of Mathematical Statistics, 457–472. 
# - <NAME>., <NAME>., & <NAME>. (1992). Evaluating the accuracy of sampling-based approaches to the calculation of posterior moments. In Bayesian Statistics 4.
# - <NAME>., <NAME>., & <NAME>. (2000). Bayesian Animal Survival Estimation. Statistical Science. A Review Journal of the Institute of Mathematical Statistics, 15(4), 357–376. doi:10.1214/ss/1177010123
# - <NAME>., <NAME>., & <NAME>. (1996). Posterior predictive assessment of model fitness via realized discrepancies with discussion. Statistica Sinica, 6, 733–807.
# - <NAME>. (2017). A Conceptual Introduction to Hamiltonian Monte Carlo. arXiv.org.
notebooks/Day4_4-Model-Checking.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# In this Lab you will load a customer dataset, fit the data, and use K-Nearest Neighbors to predict a data point. But what is **K-Nearest Neighbors**?

# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# **K-Nearest Neighbors** is an algorithm for supervised learning, where the data is 'trained' with data points corresponding to their classification. Once a point is to be predicted, it takes into account the 'K' nearest points to it to determine its classification.

# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### Here's a visualization of the K-Nearest Neighbors algorithm.
#
# <img src = "https://ibm.box.com/shared/static/mgkn92xck0z05v7yjq8pqziukxvc2461.png">

# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# In this case, we have data points of Class A and B. We want to predict what the star (test data point) is. If we consider a k value of 3 (the 3 nearest data points) we will obtain a prediction of Class B. Yet if we consider a k value of 6, we will obtain a prediction of Class A.

# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# In this sense, it is important to consider the value of k. But hopefully from this diagram, you should get a sense of what the K-Nearest Neighbors algorithm is. It considers the 'K' nearest neighbors (points) when it predicts the classification of the test point.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} # Lets load requiered libraries # + button=false new_sheet=false run_control={"read_only": false} import itertools import numpy as np import matplotlib.pyplot as plt from matplotlib.ticker import NullFormatter import pandas as pd import numpy as np import matplotlib.ticker as ticker from sklearn import preprocessing # %matplotlib inline # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ### About dataset # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Imagine a telecommunications provider has segmented its customer base by service usage patterns, categorizing the customers into four groups. If demographic data can be used to predict group membership, the company can customize offers for individual prospective customers. It is a classification problem. That is, given the dataset, with predefined labels, we need to build a model to be used to predict class of a new or unknown case. # # The example focuses on using demographic data, such as region, age, and marital, to predict usage patterns. # # The target field, called __custcat__, has four possible values that correspond to the four customer groups, as follows: # 1- Basic Service # 2- E-Service # 3- Plus Service # 4- Total Service # # Our objective is to build a classifier, to predict the class of unknown cases. We will use a specific type of classification called K nearest neighbour. 
# # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ### Load Data From CSV File # + button=false new_sheet=false run_control={"read_only": false} df = pd.read_csv('teleCust1000t.csv') df.head() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # # Data Visualization and Anylisis # # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # #### Let’s see how many of each class is in our data set # + button=false new_sheet=false run_control={"read_only": false} df['custcat'].value_counts() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # #### 281 Plus Service, 266 Basic-service, 236 Total Service, and 217 E-Service customers # # - # You can easily explore your data using visualization techniques: df.hist(column='income', bins=50) # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ### Feature set # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Lets defind feature sets, X: # - df.columns # To use scikit-learn library, we have to convert the Pandas data frame to a Numpy array: # + button=false new_sheet=false run_control={"read_only": false} X = df[['region', 'tenure','age', 'marital', 'address', 'income', 'ed', 'employ','retire', 'gender', 'reside']] .values #.astype(float) X[0:5] # + [markdown] button=false new_sheet=false run_control={"read_only": false} # What are our lables? 
# + button=false new_sheet=false run_control={"read_only": false} y = df['custcat'].values y[0:5] # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ## Normalize Data # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Data Standardization give data zero mean and unit variance, it is good practice, especially for algorithms such as KNN which is based on distance of cases: # + button=false new_sheet=false run_control={"read_only": false} X = preprocessing.StandardScaler().fit(X).transform(X.astype(float)) X[0:5] # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ### Train Test Split # Out of Sample Accuracy is the percentage of correct predictions that the model makes on data that that the model has NOT been trained on. Doing a train and test on the same dataset will most likely have low out-of-sample accuracy, due to the likelihood of being over-fit. # # It is important that our models have a high, out-of-sample accuracy, because the purpose of any model, of course, is to make correct predictions on unknown data. So how can we improve out-of-sample accuracy? One way is to use an evaluation approach called Train/Test Split. # Train/Test Split involves splitting the dataset into training and testing sets respectively, which are mutually exclusive. After which, you train with the training set and test with the testing set. # # This will provide a more accurate evaluation on out-of-sample accuracy because the testing dataset is not part of the dataset that have been used to train the data. It is more realistic for real world problems. 
# # + button=false new_sheet=false run_control={"read_only": false} from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=4) print ('Train set:', X_train.shape, y_train.shape) print ('Test set:', X_test.shape, y_test.shape) # + [markdown] button=false new_sheet=false run_control={"read_only": false} # # Classification # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ## K nearest neighbor (K-NN) # + [markdown] button=false new_sheet=false run_control={"read_only": false} # #### Import library # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Classifier implementing the k-nearest neighbors vote. # + button=false new_sheet=false run_control={"read_only": false} from sklearn.neighbors import KNeighborsClassifier # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ### Training # # Lets start the algorithm with k=4 for now: # + button=false new_sheet=false run_control={"read_only": false} k = 4 #Train Model and Predict neigh = KNeighborsClassifier(n_neighbors = k).fit(X_train,y_train) neigh # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ### Predicting # we can use the model to predict the test set: # + button=false new_sheet=false run_control={"read_only": false} yhat = neigh.predict(X_test) yhat[0:5] # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ### Accuracy evaluation # In multilabel classification, __accuracy classification score__ function computes subset accuracy. This function is equal to the jaccard_similarity_score function. Essentially, it calculates how match the actual labels and predicted labels are in the test set. 
# - from sklearn import metrics print("Train set Accuracy: ", metrics.accuracy_score(y_train, neigh.predict(X_train))) print("Test set Accuracy: ", metrics.accuracy_score(y_test, yhat)) # ## Practice # Can you build the model again, but this time with k=6? # + # write your code here k = 6 neigh6 = KNeighborsClassifier(n_neighbors = k).fit(X_train,y_train) yhat6 = neigh6.predict(X_test) print("Train set Accuracy: ", metrics.accuracy_score(y_train, neigh6.predict(X_train))) print("Test set Accuracy: ", metrics.accuracy_score(y_test, yhat6)) # - # Double-click __here__ for the solution. # # <!-- Your answer is below: # # # k = 6 # neigh6 = KNeighborsClassifier(n_neighbors = k).fit(X_train,y_train) # yhat6 = neigh6.predict(X_test) # print("Train set Accuracy: ", metrics.accuracy_score(y_train, neigh6.predict(X_train))) # print("Test set Accuracy: ", metrics.accuracy_score(y_test, yhat6)) # # --> # + [markdown] button=false new_sheet=false run_control={"read_only": false} # #### What about other K? # K in KNN, is the number of nearest neighbors to examine. It is supposed to be specified by User. So, how we choose right K? # The general solution is to reserve a part of your data for testing the accuracy of the model. Then chose k =1, use the training part for modeling, and calculate the accuracy of prediction using all samples in your test set. Repeat this process, increasing the k, and see which k is the best for your model. # # We can calucalte the accuracy of KNN for different Ks. 
# + button=false new_sheet=false run_control={"read_only": false}
Ks = 10
mean_acc = np.zeros((Ks - 1))
std_acc = np.zeros((Ks - 1))

# Train a classifier and record the test-set accuracy for each k in 1..Ks-1.
# (Removed the unused `ConfustionMx` list that was never filled or read.)
for n in range(1, Ks):
    neigh = KNeighborsClassifier(n_neighbors=n).fit(X_train, y_train)
    yhat = neigh.predict(X_test)
    mean_acc[n - 1] = metrics.accuracy_score(y_test, yhat)
    # Standard error of the per-sample accuracy (accuracy is a mean of the
    # Bernoulli indicators `yhat == y_test`).
    std_acc[n - 1] = np.std(yhat == y_test) / np.sqrt(yhat.shape[0])

mean_acc

# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# #### Plot model accuracy for different numbers of neighbors

# + button=false new_sheet=false run_control={"read_only": false}
plt.plot(range(1, Ks), mean_acc, 'g')
plt.fill_between(range(1, Ks), mean_acc - 1 * std_acc, mean_acc + 1 * std_acc, alpha=0.10)
# FIXED: the legend claimed '+/- 3xstd' but the shaded band is +/- 1 standard
# error; the label now matches what is actually plotted.
plt.legend(('Accuracy', '+/- 1xstd'))
plt.ylabel('Accuracy')
plt.xlabel('Number of Neighbors (K)')
plt.tight_layout()
plt.show()

# + button=false new_sheet=false run_control={"read_only": false}
print("The best accuracy was with", mean_acc.max(), "with k=", mean_acc.argmax() + 1)
.ipynb_checkpoints/K-Nearest-neighbors-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # # Exhaustive Search Results # import os import re import sys import qlib import pprint import numpy as np import pandas as pd from pathlib import Path __file__ = os.path.dirname(os.path.realpath("__file__")) root_dir = (Path(__file__).parent / "..").resolve() lib_dir = (root_dir / "lib").resolve() print("The root path: {:}".format(root_dir)) print("The library path: {:}".format(lib_dir)) assert lib_dir.exists(), "{:} does not exist".format(lib_dir) if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir)) import qlib from qlib import config as qconfig from qlib.workflow import R qlib.init(provider_uri='~/.qlib/qlib_data/cn_data', region=qconfig.REG_CN) # - from utils.qlib_utils import QResult # + def filter_finished(recorders): returned_recorders = dict() not_finished = 0 for key, recorder in recorders.items(): if recorder.status == "FINISHED": returned_recorders[key] = recorder else: not_finished += 1 return returned_recorders, not_finished def query_info(save_dir, verbose, name_filter, key_map): if isinstance(save_dir, list): results = [] for x in save_dir: x = query_info(x, verbose, name_filter, key_map) results.extend(x) return results # Here, the save_dir must be a string R.set_uri(str(save_dir)) experiments = R.list_experiments() if verbose: print("There are {:} experiments.".format(len(experiments))) qresults = [] for idx, (key, experiment) in enumerate(experiments.items()): if experiment.id == "0": continue if name_filter is not None and re.fullmatch(name_filter, experiment.name) is None: continue recorders = experiment.list_recorders() recorders, not_finished = filter_finished(recorders) if verbose: print( "====>>>> {:02d}/{:02d}-th experiment {:9s} has {:02d}/{:02d} finished recorders.".format( idx + 1, len(experiments), 
experiment.name, len(recorders), len(recorders) + not_finished, ) ) result = QResult(experiment.name) for recorder_id, recorder in recorders.items(): result.update(recorder.list_metrics(), key_map) result.append_path( os.path.join(recorder.uri, recorder.experiment_id, recorder.id) ) if not len(result): print("There are no valid recorders for {:}".format(experiment)) continue else: if verbose: print( "There are {:} valid recorders for {:}".format( len(recorders), experiment.name ) ) qresults.append(result) return qresults # + paths = [root_dir / 'outputs' / 'qlib-baselines-csi300'] paths = [path.resolve() for path in paths] print(paths) key_map = dict() for xset in ("train", "valid", "test"): key_map["{:}-mean-IC".format(xset)] = "IC ({:})".format(xset) key_map["{:}-mean-ICIR".format(xset)] = "ICIR ({:})".format(xset) qresults = query_info(paths, False, 'TSF-.*', key_map) # - import matplotlib from matplotlib import cm matplotlib.use("agg") import matplotlib.pyplot as plt import matplotlib.ticker as ticker def vis_dropouts(qresults, basenames, name2suffix, save_path): save_dir = (save_path / '..').resolve() save_dir.mkdir(parents=True, exist_ok=True) print('There are {:} qlib-results'.format(len(qresults))) name2qresult = dict() for qresult in qresults: name2qresult[qresult.name] = qresult # sort architectures accuracies = [] for basename in basenames: qresult = name2qresult[basename + '-drop0_0'] accuracies.append(qresult['ICIR (train)']) sorted_basenames = sorted(basenames, key=lambda x: accuracies[basenames.index(x)]) dpi, width, height = 200, 4000, 2000 figsize = width / float(dpi), height / float(dpi) LabelSize, LegendFontsize = 22, 22 font_gap = 5 colors = ['k', 'r'] markers = ['*', 'o'] fig = plt.figure(figsize=figsize) def plot_ax(cur_ax, train_or_test): for idx, (legend, suffix) in enumerate(name2suffix.items()): x_values = list(range(len(sorted_basenames))) y_values = [] for i, name in enumerate(sorted_basenames): name = '{:}{:}'.format(name, suffix) 
qresult = name2qresult[name] if train_or_test: value = qresult['IC (train)'] else: value = qresult['IC (valid)'] y_values.append(value) cur_ax.plot(x_values, y_values, c=colors[idx]) cur_ax.scatter(x_values, y_values, marker=markers[idx], s=3, c=colors[idx], alpha=0.9, label=legend) cur_ax.set_yticks(np.arange(4, 11, 2)) cur_ax.set_xlabel("sorted architectures", fontsize=LabelSize) cur_ax.set_ylabel("{:} IC (%)".format('training' if train_or_test else 'validation'), fontsize=LabelSize) for tick in cur_ax.xaxis.get_major_ticks(): tick.label.set_fontsize(LabelSize - font_gap) for tick in cur_ax.yaxis.get_major_ticks(): tick.label.set_fontsize(LabelSize - font_gap) cur_ax.legend(loc=4, fontsize=LegendFontsize) ax = fig.add_subplot(1, 2, 1) plot_ax(ax, True) ax = fig.add_subplot(1, 2, 2) plot_ax(ax, False) # fig.tight_layout() # plt.subplots_adjust(wspace=0.05)#, hspace=0.4) fig.savefig(save_path, dpi=dpi, bbox_inches="tight", format="pdf") plt.close("all") # + # Visualization names = [qresult.name for qresult in qresults] base_names = set() for name in names: base_name = name.split('-drop')[0] base_names.add(base_name) print(base_names) # filter filtered_base_names = set() for base_name in base_names: if (base_name + '-drop0_0') in names and (base_name + '-drop0.1_0') in names: filtered_base_names.add(base_name) else: print('Cannot find all names for {:}'.format(base_name)) # print(filtered_base_names) home_dir = Path.home() desktop_dir = home_dir / 'Desktop' print('The Desktop is at: {:}'.format(desktop_dir)) vis_dropouts(qresults, list(filtered_base_names), {'No-dropout': '-drop0_0', 'Ratio=0.1' : '-drop0.1_0'}, desktop_dir / 'es_csi300_drop.pdf')
notebooks/TOT/ES-Model-Drop.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # In yt 3.0, we want to make it easier to access "raw" simulation data that a code writes directly to disk. The new unit system makes it much simpler to convert back and forth between physical coordinates and the unscaled "raw" coordinate system used internally in the simulation code. In some cases, this conversion involves transforming to comoving coordinates, so that is also covered here. # ### Code units # Let's take a look at a cosmological enzo dataset to play with converting between physical units and code units: import yt ds = yt.load('Enzo_64/DD0043/data0043') # The conversion factors between Enzo's internal unit system and the physical CGS system are stored in the dataset's `unit_registry` object. Code units have names like `code_length` and `code_time`. Let's take a look at the names of all of the code units, along with their CGS conversion factors for this cosmological enzo dataset: # + reg = ds.unit_registry for un in reg.keys(): if un.startswith('code_'): fmt_tup = (un, reg.lut[un][0], str(reg.lut[un][1])) print ("Unit name: {:<15}\nCGS conversion: {:<15}\nDimensions: {:<15}\n".format(*fmt_tup)) # - fmt_tup # Most of the time you will not have to deal with the unit registry. For example, the conversion factors to code units are stored as attributes of the dataset object: print ("Length unit: ", ds.length_unit) print ("Time unit: ", ds.time_unit) print ("Mass unit: ", ds.mass_unit) print ("Velocity unit: ", ds.velocity_unit) # Conversion factors will be supplied in CGS by default. We can also ask what the conversion factors are in code units. 
print ("Length unit: ", ds.length_unit.in_units('code_length')) print ("Time unit: ", ds.time_unit.in_units('code_time')) print ("Mass unit: ", ds.mass_unit.in_units('code_mass')) print ("Velocity unit: ", ds.velocity_unit.in_units('code_velocity')) # as expected, all the conversion factors are unity in code units. # We can also play with unit conversions on `ds.domain_width`. First, we see for enzo how code length units are defined relative to the domain width: ds.domain_width ds.domain_width.in_cgs() ds.domain_width.in_units('Mpccm/h') # ### Comoving units # This last example uses a cosmological unit. In english, I asked for the domain width in comoving megaparsecs, scaled as if the hubble constant were 100 km/s/Mpc. Although $h$ isn't really a unit, yt treats it as one for the purposes of the unit system. # # As an aside, <NAME>'s [research note](http://arxiv.org/abs/1308.4150) on the history, use, and interpretation of $h$ as it appears in the astronomical literature is pretty much required reading for anyone who has to deal with factors of $h$ every now and then. # # In yt, comoving length unit symbols are named following the pattern “(length symbol)cm”, i.e. `pccm` for comoving parsec or `mcm` for a comoving meter. A comoving length unit is different from the normal length unit by a factor of $(1+z)$: # + z = ds.current_redshift print (ds.quan(1, 'Mpc')/ds.quan(1, 'Mpccm')) print (1+z) # - # As we saw before, $h$ is treated like any other unit symbol. It has `dimensionless` units, just like a scalar: print (ds.quan(1, 'Mpc')/ds.quan(1, 'Mpc/h')) print (ds.hubble_constant) # These units can be used in readily used in plots and anywhere a length unit is appropriate in yt. slc = yt.SlicePlot(ds, 0, 'density', width=(128, 'Mpccm/h')) slc.set_figure_size(6) # ### The unit registry # When you create a `YTArray` without referring to a unit registry, yt uses the default unit registry, which does not include code units or comoving units. 
# + from yt import YTQuantity a = YTQuantity(3, 'cm') print (a.units.registry.keys()) # - # When a dataset is loaded, yt infers conversion factors from the internal simulation unit system to the CGS unit system. These conversion factors are stored in a `unit_registry` along with conversion factors to the other known unit symbols. For the cosmological Enzo dataset we loaded earlier, we can see there are a number of additional unit symbols not defined in the default unit lookup table: print (sorted([k for k in ds.unit_registry.keys() if k not in a.units.registry.keys()])) # Since code units do not appear in the default unit symbol lookup table, one must explicitly refer to a unit registry when creating a `YTArray` to be able to convert to the unit system of a simulation. # To make this as clean as possible, there are array and quantity-creating convenience functions attached to the `Dataset` object: # # * `ds.arr()` # * `ds.quan()` # # These functions make it straightforward to create arrays and quantities that can be converted to code units or comoving units. For example: # + a = ds.quan(3, 'code_length') print (a) print (a.in_cgs()) print (a.in_units('Mpccm/h')) # - b = ds.arr([3, 4, 5], 'Mpccm/h') print (b) print (b.in_cgs()) # ### Overriding Code Unit Definitions # On occasion, you might have a dataset for a supported frontend that does not have the conversions to code units accessible (for example, Athena data) or you may want to change them outright. `yt` provides a mechanism so that one may provide their own code unit definitions to `load`, which override the default rules for a given frontend for defining code units. This is provided through the `units_override` dictionary. We'll use an example of an Athena dataset. 
First, a call to `load` without `units_override`: ds1 = yt.load("MHDSloshing/virgo_low_res.0054.vtk") print (ds1.length_unit) print (ds1.mass_unit) print (ds1.time_unit) sp1 = ds1.sphere("c",(0.1,"unitary")) print (sp1["density"]) # This is a galaxy cluster dataset, so it is not likely that the units of density are correct. We happen to know that the unit definitions are different, so we can override the units: units_override = {"length_unit":(1.0,"Mpc"), "time_unit":(1.0,"Myr"), "mass_unit":(1.0e14,"Msun")} # `units_override` can take the following keys: # # * `length_unit` # * `time_unit` # * `mass_unit` # * `magnetic_unit` # * `temperature_unit` # # and the associated values can be (value, unit) tuples, `YTQuantities`, or floats (in the latter case they are assumed to have the corresponding cgs unit). ds2 = yt.load("MHDSloshing/virgo_low_res.0054.vtk", units_override=units_override) print (ds2.length_unit) print (ds2.mass_unit) print (ds2.time_unit) sp2 = ds2.sphere("c",(0.1,"unitary")) print (sp2["density"]) # This option should be used very carefully, and *only* if you know that the dataset does not provide units or that the unit definitions generated are incorrect for some reason.
doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6.9 64-bit # metadata: # interpreter: # hash: 7981d81b3924aa8f8737ad1f515d2c1560025393c5e8fb9a51b730ca6a994a8e # name: Python 3.6.9 64-bit # --- # + # Python is a dynamic type language # which mean a variable does not hold a data type information # a variable is just a reference (memory address) to an object in memory # and the object in memory itself will store its data type # - a = 'Hello World!' type(a) a = 100 type(a) a = [1, 2, 4, 6] type(a) # + # As we see the same variable a can have differnet type at different time depending on what object it is refering to # - # when we assign a variable to an object, the variable is just pointing to the object # if we re-assign the variable, Python actually create a new object and refer the variable to that new object a = 10 id(a) a = 15 id(a) # We see a is now refering to a new object # Python does not change the value of the original object to the new value # even in this case a = a + 5 id(a) # + tags=[] # it's even more interesting when we declare a new variable with same value 10 a = 100 b = 100 print(id(a), id(b)) # we see that both of variables pointing to the same object # -
variable/dynamic_type.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: research # language: python # name: python3 # --- # # Week 6 - Make the kind of data you want to see in the world # # Welcome back! # # Last week we took a look at importing data into a Pands `DataFrame`, using Pathlib's `Path` objects. We also introduced you to the structure of a `DataFrame` through `class methods` like `.head()`, `.tail()` (which display the top and bottom of your `df` respectively) and `attributes` like the `shape` of the `DataFrame`, the `df.columns`, and the `df.index`. # # This week we're going to look at some options for cleaning a dataframe, such as `dropping` unneeded columns and `rows`, standardising or `renaming` columns, as well as some other cool things (like ) `slicing` your data, and 'fixing' inconsistent data. # # Before we get into that though, we need to import some data to work with so this is a good time to practice what you learned last week. In the cell below: # # 1. Import the packages pandas and pathlib (don't forget the 'as' bit; make sure you have selected the kernel related to your virutal environment) # 2. Create a Path() to either the .csv or .sav version of your data (Last week you should have resolved the issue with importing a .sav file, so feel free to choose either type of file) # 3. Import your data into a Dataframe by using the Path() you have created (n.b. assign the path to a variable, and remember that we often use the shorthand 'df' when doing so) # 4. Display the head and tail of your dataframe to ensure that it has been imported correctly. # # # + # Import the modules you need first # create the Path() to your file # use the pd.read_...() function to read in your data # display the head of your data #display the tail of your data # - # Well done, you now have data to work during this workshop. 
We also need some data to demonstrate examples. Run the cell below to generate our example data (``movies_df``). # # + # initialise some list objects that contains our data, this could also be a list of lists, or a dict of lists for example import numpy as np #importing a module to allow me to include 'missing data director = ['<NAME>', '', '<NAME>', '<NAME>', '<NAME>', '<NAME>', 'Coen Brothers', '<NAME>', '<NAME>', 'Coen Brothers'] names = ['The Thing', 'Blade Runner 2049', 'Drive', 'The Thing', 'Whiplash', 'Arrival', 'No Country for Old Men', 'Shrek 2', 'Hot Fuzz', 'Fargo'] genre = ['Horror', 'Sci-Fi', 'Action', 'Horror', 'Drama', 'Sci-Fi', 'Drama', 'Comedy', 'Comedy', 'Dark Comedy'] year = ['1982', '2017', '2011', '2011', '2014', '2016', '2007', '2004', '2007', '1996'] imdb_score = [82, 80, 78, 62, np.nan, 79, 82, 73, 78, 81] rt_critics = [82, 88, 93, 34, np.nan, 94, 93, 89, 91, 94] lead = ['male', 'Male', 'm', 'Female', 'm', 'Male', 'fem', 'Orgre', 'Male', 'Male'] cry = ["No", "No", "No", "No", "No", "Yes", "No", "Yes", "No", "No"] movies_df = pd.DataFrame(# opening brackets but moving to new line for readability, note the uppercase D and F in the call list(zip(director, names, genre, year, imdb_score, rt_critics, lead, cry)), #first argument note the comma at the end of it, this is a couple of nested functions columns = ['Participant Name', 'Title of Thing', 'Genre', 'Year of Release', 'ImdB Score', 'Rotten Tomatoes Score', 'Gender of Lead', "Make Ryan Cry?"]# second argument , passing a list to the columns arguments )# closing the first pair of brackets to complete the function call movies_df.head()# displaying the first 5 columns of the movies_df # - # ## A closer look at the data # # You already know how to take the wide view of your data with `head()`, `tail()` and `.shape`, and those things are really useful but, especially with really large dataframes or unprocessed (messy) dataframes, we also need to be able to get a more focused view of sections of our 
data. Pandas has a lot of different ways to do this, some of which are interchangeable, and we're going to spend the next little while introducing you to some of them. # # # ### Introduction to Slicing # # One of the most valuable operations you can perform to get a more focused view of your data is slicing. Before we learn about slicing, let's recap on indexing (slicing is essentially an extension of indexing). # # Remember that we have an object, for example if we have a string, list, or dict, then each element in that object has a position. Indexing enables us to identify the which element is in a given position within that object. Let's say we have a list assigned to the variable, my_list # # my_list = ["apples", "bananas", "cauliflower", "dorritos", "enchiladas", "fajitas", "ginger", "honey"] # If we want to identify the first, fourth, and last element within my list, then we can use indexing to do that. # # + #my_list = ["apples", "bananas", "cauliflower", "dorritos", "enchiladas", "fajitas", "ginger", "honey"] # 0, 1, 2, 3, 4, 5, 6, 7 -> positive indexing print(my_list[0]) #Remember that when we index with Python, we start at 0. print(my_list[3]) print(my_list[7]) # - # In this case, we used positive indexing (where the index positions we use to identify elements start from 0 and increase from there). But we can also use negative indexing, which is really handy when you have a large object, and want to identify the last element within that object, and you do not know how many elements are in that object. # + #my_list = ["apples", "bananas", "cauliflower", "dorritos", "enchiladas", "fajitas", "ginger", "honey"] # 0, 1, 2, 3, 4, 5, 6, 7 -> positive indexing # -8, -7, -6, -5, -4, -3, -2, -1 -> negative indexing print(my_list[-8]) print(my_list[-5]) print(my_list[-1]) # - # You can see that while the values we have used to index an element have changed, the output is the same. While indexing is useful, it is also limited. 
Each time we can extract only one particular element within my object. But what if we wanted to extract several elements all at once? # # This is where slicing comes in. Let's say we wanted to get the first three elements within our list, we can run the following code: print(my_list[0:3]) # We can see that our code returns a list of the first three elements within our object: apples, bananas, and califlower (not exactly my choice of a smoothie, but you do you) # # But what's going on here? Well like indexing, we use [] brackets to select elements within my object. However, unlike indexing, we insert two values to specify a range of elements we want to select within my object. The syntax for how Python slices this code is like this: # # `my_object[start_value:stop_value]` # # You may have noticed that while the element at the position of the start value is included in our list, the element at the position of our stop_value is not. This is because when we are slicing, our start_value is inclusive, but our stop_value is exclusive. If I wanted to include 'dorritos' into this smoothie of hell, I need to select one index position further print(my_list[0:4]) # We can that slicing is a rather apt name - it slices up our object to return a list of elements. We just need to tell Python how we want to slice our cake. # # For example, if I wanted to slice from our second element onwards, I could run the following code print(my_list[1:]) # By leaving an empty space in place of the stop_value, we are telling Python to begin at the start_value, and then include everything after that within our list. # # If we wanted to Python to start at the beginning of the object, and to return each element within a list up to a certain element, then we can leave an empty_space in our slice syntax print(my_list[:4]) # Hopefully you can that slicing gives us a lot of flexibility, even if you are probably wondering when such functionality will come in handy. 
We will get on to that throughout the workshops (including todays one), but for now, in the next code cell, try slicing the my_list variable in a number of ways. But this time, see what happens when you use negative indexing. # + # Slice 1 - Print the first three elements of my list using negative indexing #Slice 2 - Print the last three elements of my_list using negative indexing #Slice 3 - Print the elements at index positions 2-6 (use positive or negative indexing) #Slice 4 - Repeat slice 3, but this time, see what happens if you combine positive and negative indexing #Slice 5 - See what happens when you slice a string. Uncomment the code and see what happens #my_string = "what happens if we slice this?" #my_string[0:-7] # - # You can see that negative indexing also works with Python, and not only that, but you can also combined positive and negative indexing too. We can also use slicing on strings as well as lists. Again, this part of the workshop is just to get you used to the idea of slicing, and to demonstrate its functionality and flexibility. # # Before we move on to slicing a dataframe, there is one last piece of the slicing syntax that you need to know about. Not only can we specify a range for Python to slice out, we can specify the `steps` to that slicing: # # `my_object[start_value:stop_value: steps]` # # To demonstrate what I mean by steps, run the following code cell # + #example 1 print(my_list[::2]) #example 2 print(my_list[1:5:2]) # - # In example 1, since we have a left a blank space in our start_value and our stop_value, we have asked Python to essentially to span across all the elements within our object. # # # But the steps mean that Python will only return every select element within our specified range to our list. # # Okay, let's see what we can do with slicing in relating to dataframes. # ## Slicing our Dataframe # # Although we have only introduced the concept of slicing to you today, you've already sliced a dataframe before. 
For example, when you use the .head() or .tail() function on your dataframe, then you are slicing your dataframe to either return the top five rows `.head()` or the last five rows `.tail()`. # # But we can select, or slice, a custom number of rows from our dataframe, using the same syntax from earlier. # + #let's display the first four rows from our df_movies dataframe movies_df[0:4] # + #we can use negative indexing to display the last three rows from our df_movies dataframe movies_df[-3:] # - #write code below to display the first seven rows of our df_movies dataframe # + #write code below to display the last seven rows ouf our df_movies dataframe # + #We can use the step function to print out every second row in the dataframe movies_df[::2] # - # The syntax for slicing a dataframe goes something like this: # # `dataframe[row_start_value:row_end_value:steps]` # # You can see that slicing returns the rows of a dataframe. It will not select specific columns to display. # # If we want to select specific columns, then we need to use a specific method called `iloc`. This method enables us to return rows for specified columns. Those specified columns are located (loc) by it's index position (i) amongst the columns, hence why it is called `iloc`. # # This might be a bit too abstract. The basic idea is that each row will have an index position in relation to other rows; similarly, each column will have an index position in relation to other columns. # # Still too abstract? Well have ever seen a map (for example see this campus map: https://louisiana.edu/sites/louisiana/files/Campus_FullMap_121019.pdf) where the map is divided up into grids? # # To orient you around the map, it will index one axis by letters('A', 'B'....'Z') and the other axis by numbers (1, 2,.....a gazillion). The combination of these two numbers, the co-ordinates, can then orient you to a specific place on the map. 
You can refer to the directory to find the specific building or location your looking for (e.g., "Starbucks Coffee - B7"). You know then that to find Starbucks on the map, you go to B on the letter axis (in this example, the rows) and then go to 7 on the number axis (in this example, the columns). # # The same is true for slicing our dataframe using iloc. Both our columns and dataframes will have indices, and we can use the co-ordinates to identify specific values. The only difference between this case and our map example, is that both the row and columns co-ordinates are numerical, starting at 0 and increasing. # # The Co-Ordinates of Our Dataframe (this will look weird in normal mode - double click into this markdown cell to see how the table should appear) # # 0 1 2 3 4 # c1 c2 c3 c4 c5 # 0 x x x x x # 1 x x x x x # 2 x x x x x # 3 x x x x x # 4 x x x x x # # # Using iloc, let's return rows values only for specific columns, and then let's isolate specific values based on row x column co-ordinates. # # + ### Let's select all the values for first three columns movies_df.iloc[:, 0:3] # - # So what happened here? Well the syntax for this operation goes like this: # # `dataframe.iloc[row_start_value:row_stop_value, column_start_value:column_stop_value]` # # For this particular example, since I left empty spaces for the row_start_value and row_stop_value, Python selects each row. But it will only return the row values for the columns between index position 0 (because our start_value is inclusive) and index position 2 (because our stop_value is exclusive). # # + #Using the syntax above, return the first two rows for the first three columns # - # One thing you might have noticed is that, so far, is that slicing has always been used to return a continous range (e.g. 2-4, 1-7). What if we wanted to select rows/columns that were not continous (e.g., 1, 3, 8)? 
# # Slicing enables us to do that as well, we just need to pass a list of the rows/columns that we want to select # + #Let's select the values for the rows 1, 5; and the columns 2, 3, 7 movies_df.iloc[[0, 4], [1, 2, -3]] #You can see that negative indexing also works with iloc. # + # Finally we can also return specific values. Have a think, and select the value that is in the first row, and in the second column # (the output should be "The Thing" - one of the greatest horror movies ever made) print(movies_df.iloc[0:1, 1:2]) # - # There is a lot more ways we can slice or `select` data (For example, you can also select columns based on column names). But that's enough slicing for now, let's start working with our dataframe. # ## Working with Our dataframe # # Okay, so let's actually get working with the dataframe that we have. The first thing we are going to do is view our dataframe, just to remind ourselves what it looks like. # # Since the dataframe is relatively small, I am going to cheat a little bit and view the entire dataframe, rather than using the head or tail functions. # # Let's view our dataframe. movies_df # For the most part, the dataframe looks alright. But it is not perfectly clean. There are missing values, some of column titles are a bit awkward ("Title of Thing"), there are some inconsistent values ("Male" vs "m" vs "male"), classic data entry mistakes (Orgre when clearly it should be spelled Ogre), and there are columns that we are unlikely to need in our analysis (e.g., Did the movie make Ryan cry or not?). # # There also some data points that we may want to add to the dataframe (for example, average review score across ImdB and Rotten Tomatoes, length of the movie). Overall, the dataframe is not ready for meaningful statistical analysis. # # For the rest of this workshop, we are going to be showing you some tools that you can use to clean up the dataframe, so that it is ready for a more exhaustive analysis. 
# ### Missing Data # # Nearly every dataframe that you will work with is likely to contain missing data. # # There are two main types of missing data that you will come across with Python. Standard and non-standard. A standard missing value is typically recorded in Python as `NaN`, which refers to a "not a number" value, or a "null" value. # # The first thing we can do is check whether we have standard missing values labelled 'NaN'. We can do this by using the is.null() function. # movies_df.isnull() #This will go through our dataframe and check #for whether we have any missing values # The isnull() function will return our dataframe. It checks each entry our dataframe and asks the question "Is this a NaN" value? # # If there us NaN value, it returns a "False" result. # # If there is a NaN value, it returns a "True" result; meaning that there is NaN data within this cell. # # We can see from the output above that we have a couple of missing values in fifth row (index 4) under the columns `ImDb` score and `Rotten Tomatoes Score` # # In future weeks, we will be showing you what you can do with NaN results other than deleting them. But for now, we do not want to deal with the hassle of having NaN results, so we are going to drop any row that has a NaN result (where isnull() = True) # # We can do this by using the `dropna()` function - which means, "drop any row that has a null/NaN result". # + movies_df_cleanv1 = movies_df.dropna() movies_df_cleanv1 # - # I store the resulting dataframe, without the NA results, in a new variable called `movies_df_cleanv1`. This way we still have access to the raw file within this notebook if we need it. # # The other type of missing value is a non-standard missing value. A non-standard missing value usually occurs when within our dataframe there is a cell that from our perspective is "empty", but from Python's perspective there is a value there. 
# # For example, we can see that in the second row, that from our perspective, there is an empty cell under `participant name`. However, from Python's perspective, there is a value there: an empty string `" "`. # # There are multiple ways to deal with this, but given that we are only introducing missing values this week, what we are going to do is replace any missing values with a `NaN` result, and then remove that row from the dataframe. Basically, we convert a non-standard missing value into a missing value. # # # + #Replace cell/field that's entirely space or empty with NaN a = movies_df_cleanv1.replace(r'^\s*$', np.nan, regex=True) ## Hold on a second, what the hell is r'^\s*$'? and what the hell is a regex? # A regex is short for "regular expression". # Regex enables us to identify a pattern in text (think about using search function in Word) # The 'r' indicates that we are using a regular expression. # The '^\s*$' is regular expression syntax to identify empty strings #I save the result to a variable a, so I can use dropna() function on it movies_df_cleanv2 = a.dropna() movies_df_cleanv2 # - # In future weeks, we will show you more sophisticated ways of handling NaN data. But for now, we have a dataframe that has usable data in each cell. # # Our next steps are to clean up some entries in our dataframe, and fix inconsistent data entries. # ### Clean Data - Renaming Columns and Incorrect Entries # # #### Changing the name of a column # # Using a for loop, we can print out the names of our columns. Run the cell below. # # # + for col in movies_df_cleanv2: print(col) # The first line says iterate through each column name within our dataframe # The second line says that for each column, print out the name of that column # - # We can see that there are a couple of awkward names for our columns. A more fitting name for "Participant Name" is probably "Director". 
Similarly, a more appropriate (albeit less poetic due to the loss of alliteration) name for "Title of Thing" is "Movie" # # We can rename these columns with a function called `rename` # + movies_df_cleanv2.rename(columns={'Participant Name':'Director', 'Title of Thing': 'Movie'}, inplace= True) #the rename function takes the old value as the Dictionary Key, and replaces with the new value #{"Old Column Name": "New Column Name"} movies_df_cleanv2 # - # ### Dropping Columns # # Now that we have correct column names, let's fix up inconsistencies within our data. For example, it is unlikely that any scientific journals, even those low impact ones that constantly message you asking to submit any draft emails you have lying around are going to be interested in knowing whether Ryan cried during any of these movies. So for the sake of keeping our data clean and tidy (unlike Ryan's running mascara), we are going to chuck out that column. # # + movies_df_cleanv2.drop("Make Ryan Cry?", 1, inplace=True) movies_df_cleanv2 # - # ### Inconsistent Data # # Now that we have columns that we want to work with, let's clean up inconsistent data entries. # # We can see that within our column, "Gender of Lead", that there are inconsistent values. Male is sometimes coded as "male", or "m". Female is sometimes codes as "fem". But it is good to get a quantitative idea of the different values that are entered within this column. One function we can use to identify the different values that are within a column, and also how many times that value appears within the column, is the function `value_counts()`. # # The first thing we need to do is select (aka slice) the particular column that we want, and then use the value_counts function. movies_df_cleanv2.iloc[:, -1:].value_counts() # Okay we can see that our `Gender of Lead` is all over the place. Rather than having 6 Males and 2 females, we have 3 'Males', 1 'm', 1 'male', 1 'Orgre', 1 'Female', 1 'fem'. 
The values used to code our data is inconsistent. So let's fix it by using the `replace` function. # # # # # + movies_df_cleanv2.iloc[:, -1:] = movies_df_cleanv2.iloc[:, -1:].replace(to_replace = ["Orgre", "m"], value = "Male") #the syntax for the following code is: #dataframe.replace(list_with_values_to_replace = ["Old Value1"...."Old ValueN"], value = "New Value1") # - # Now we could have also replaced the lowercase `male`, but we will show another method you use to handle cases where inconsistent values are related to capitalisation. # # First let's fix the female values # + #Take the syntax from the previous code cell and use replace "fem" with "female" for the "Gender of Lead" column # - # Okay, now let's fix our capitalisation issue. We can do that by using the str.capitalise() method. # + movies_df_cleanv2['Gender of Lead'] = movies_df_cleanv2['Gender of Lead'].str.capitalize() movies_df_cleanv2 # - # Now there's a few things going on here. # # 1st. Instead of slicing our dataframe the way we have been showing you so far `movies_df_cleanv2.iloc[:, -1:]`, we instead selected our column by using actual name of the column. This is a perfectably acceptable way to select a column. # # 2nd. We use the str. function to treat everything inside our column as a string. # # 3rd. Then we call the capitalise() method on our strings in order to capitalise the first letter. There are other functions that we could apply instead of capitalise. For example, we could use str.upper() to transform every value into UPPER CASE. Or we could str.lower to transform every value into lower case. Feel free to toy around with them in the example above. # # # + #Use the value counts function on the Genre column and see what type of movie pops up the most often in our dataset. 
# - #Let's say for the sake of our analysis, we do not need to make the distinction between a comedy and a dark comedy #Using the replace function, change "Dark Comedy" to "Comedy" in our dataset for the Genre column. # + #Convert the genre column to either upper or lower case (dealers choice!) # - # ## Adding to our Dataframe # # Run the cell below to let us look at our dataframe again. # movies_df_cleanv2 # Hopefully if you have gotten to this stage, the columns are appropriately labelled, there are no missing values, and values are consistent within columns. It should look in a lot better shape. If not, then do not worry, just let us know. :) # # ### Add Rows # # Our dataframe is pretty small though right. And there aren't that many movies in there with a female lead, so how can we rectify that? # # Well with pandas, it is relatively handy to add (or append) rows to a dataframe. # + new_rows = {'Director': ['<NAME>', '<NAME>'], 'Movie': ['Alien', 'Lady Bird'], 'Genre': ['Horror', 'Drama'], "Year of Release": ['1979', '2017'], "ImdB Score": [85, 74], "Rotten Tomatoes Score": [98, 99], "Gender of Lead": ["Female", "Female"] } df2 = pd.DataFrame(new_rows) movies_df_cleanv3 = movies_df_cleanv2.append(df2, ignore_index="True") movies_df_cleanv3 # - # Wahey! Although there is still a gender imbalance, we are definitely healthier on male/female actor lead (the director situation is a different story). # # But what went on to achieve that? Well, there's a few things to note: # # The first thing is we created a dict object, `new_rows`, containing our column names (dict keys) and column values (dict values). Because we are adding more than one value per column name, we enter the column values as a list. This enables Python to create multiple rows per each column. # # The second thing is that we turned these dict into a dataframe, called `df`. 
This is because it is much more straightforward to add multiple rows to an existing dataframe, if those rows are in the shape of a dataframe. # # The third thing is that we set `ignore_index = True`. The index refers to the co-ordinate system we have been using to slice our dataframe. The two dataframes, movies_df_cleanv2 and df2, will have their own indices. For example, the value in row 1, column 1, in movies_df_cleanv2 is "<NAME>", whereas the same co-ordinates in df2 return "<NAME>". # # To set ignore_index to true simply means to combine the two dataframes under one co-ordinate system, rather than keeping their own specific co-ordinate systems. Don't worry if this is still sounding too abstract, you do not need to know it for now. # # # # + # Try to add another row to the dataframe. # Google any movie you like, create a dictionary containing its value for each of the columns # Turn that dict into a Dataframe # Then append that dataframe to our existing dataframe # - # ### Add column # # At the moment, our columns contain Rotten Tomatoes score from critics. But what if we wanted to add in the Rotten tomatoe scores from fans? We can do that fairly easily with pandas dataframes. # # + #first let's create a list that will be converted into a column within our dataframe rt_fans = [92.0, 79.0, 42.0, 82.0, 86.0, 69.0, 89.0, 93.0, 94.0, 79.0] #By the way, how in the hell does Shrek 2 only have a 69% on rotten tomatoes? What kind of sick joke is that? #Now let's create a column within our dataframe, and then set its values to rt_fans movies_df_cleanv3["Rotten Tomatoes Fans' Scores"] = rt_fans movies_df_cleanv3 # - # Nice, we have the fans opinion in there too for Rotten Tomatoes. But ideally we would want it next to the Rotten Tomatoes Critics scores. 
Let's re-order our dataframe # + movies_df_cleanv3 = movies_df_cleanv3[["Director", "Movie", "Genre", "Year of Release", "ImdB Score", "Rotten Tomatoes Score", "Rotten Tomatoes Fans' Scores", "Gender of Lead"]] movies_df_cleanv3.head() # + ## What if we wanted to know whether the lead actor won an oscar or not? #Take the following list, and add it as a column in the dataframe #oscar = ["No", "No", "No", "No", "Yes", "No", "No", "Yes", "No", "No"] # - # Well done. We have convered a lot this week. Do not worry if a lot of these ideas have not sunk in. That's normal. # # Our recommendation would be to practice with your own dataframes. Practice will help this all sink in. # # In the cells below this one you can practice with the csv version of the raw data. # # 1. save a version of the your df that doesn't have any empty cells # 2. scan for inconsistent data # 3. see if you can fix it # 4. drop columns you don't need, or use `iloc` to save a version of the data that doesn't have the extra columns in it. # # Next week, we will start looking at more sophisticated methods to handle missing values, how we can sort our dataframe, and start with calculating some descriptive statistics. # # Until next time, # # Kev & Ryan.
src/Week 6.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Locality Exploration # + pycharm={"is_executing": false} # %load_ext autoreload # %autoreload 2 # + pycharm={"is_executing": false} import itertools from src.autoks.core.active_set import ActiveSet from src.autoks.core.gp_model import GPModel from src.autoks.core.grammar import BomsGrammar from src.autoks.core.kernel_encoding import hd_kern_nodes from src.autoks.distance.distance import HellingerDistanceBuilder from src.evalg.fitness import structural_hamming_dist # %matplotlib inline from GPy.core.parameterization.priors import Gaussian from GPy.models import GPRegression from scipy.stats.stats import pearsonr, spearmanr import numpy as np from scipy.spatial.distance import squareform import matplotlib.pyplot as plt # + pycharm={"is_executing": false} def create_hellinger_db(active_models, data_X): """Create Hellinger distance builder with all active models being candidates""" lik_noise_std = np.log(0.01) lik_noise_mean = 1 noise_prior = Gaussian(lik_noise_std, lik_noise_mean) initial_model_indices = active_models.get_candidate_indices() num_samples = 20 max_num_hyperparameters = 40 max_num_kernels = 1000 builder = HellingerDistanceBuilder(noise_prior, num_samples, max_num_hyperparameters, max_num_kernels, active_models, initial_model_indices, data_X=data_X) return builder # + pycharm={"is_executing": false} num_points = 100 num_dimensions = 2 data_x = np.random.randn(num_points, num_dimensions) w = np.random.randn(num_dimensions, 1) y = data_x @ w # + pycharm={"is_executing": false} base_kernel_names = ['SE', 'RQ'] # + pycharm={"is_executing": false} grammar = BomsGrammar(base_kernel_names) grammar.build(num_dimensions) # + pycharm={"is_executing": false} se_1, se_2, rq_1, rq_2 = grammar.base_kernels # + pycharm={"is_executing": false} 
starting_cov = se_1 neighbors = grammar.expand_single_kernel(starting_cov) neighbors_2 = [] for n in neighbors: for nn in grammar.expand_single_kernel(n): neighbors_2.append(nn) # + pycharm={"is_executing": false} neighbors_0_1_2 = grammar.base_kernels + neighbors + neighbors_2 # + pycharm={"is_executing": false} random_kernels = grammar.expand_random(100) # + pycharm={"is_executing": false} all_kernels = random_kernels # + pycharm={"is_executing": false} all_kernels[0] # + pycharm={"is_executing": false} # Optionally, convert kernels to canonical form # for n in all_kernels: # n.raw_kernel = n.canonical() # + pycharm={"is_executing": false} def covariance_shd(cov_1, cov_2): tree_1 = cov_1.to_binary_tree() tree_2 = cov_2.to_binary_tree() return structural_hamming_dist(tree_1, tree_2, hd=hd_kern_nodes) # + pycharm={"is_executing": false} shd_dists = [] for cov_1, cov_2 in list(itertools.combinations(all_kernels, 2)): shd_dists.append(covariance_shd(cov_1, cov_2)) shd_dists = np.array(shd_dists) # - # Create Active Set # + pycharm={"is_executing": false} active_set = ActiveSet(1000) # + pycharm={"is_executing": false} models = [GPModel(cov) for cov in all_kernels] # + pycharm={"is_executing": false} all_candidate_ind = active_set.update(models) len(all_candidate_ind) # + pycharm={"is_executing": false} distance_builder = create_hellinger_db(active_set, data_x) # + pycharm={"is_executing": false} distance_builder.compute_distance(active_set, all_candidate_ind, all_candidate_ind) # + pycharm={"is_executing": false} hellinger_K = distance_builder.get_kernel(len(all_candidate_ind)) # + pycharm={"is_executing": false} pearsonr(shd_dists, squareform(hellinger_K)) # + pycharm={"is_executing": false} spearmanr(shd_dists, squareform(hellinger_K)) # + pycharm={"is_executing": false} plt.imshow(squareform(shd_dists)) # + pycharm={"is_executing": false} plt.imshow((hellinger_K - hellinger_K.mean()) / hellinger_K.std()) # + pycharm={"is_executing": false} r = 
np.random.randn(shd_dists.shape[0]) # + pycharm={"is_executing": false} spearmanr(shd_dists, r) # + pycharm={"is_executing": false} spearmanr(r, squareform(hellinger_K)) # + pycharm={"is_executing": false} norm = (shd_dists - shd_dists.mean()) / shd_dists.std() # + pycharm={"is_executing": false} a = squareform(hellinger_K) a = (a - a.mean()) / a.std() # + pycharm={"is_executing": false} plt.scatter(np.arange(shd_dists.shape[0]), a) plt.scatter(np.arange(shd_dists.shape[0]), norm) # - # ### Now, investigate fitness value correlation (TODO) # + pycharm={"is_executing": false} def fitness_val(model): gp_reg = GPRegression(data_x, y, kernel=model.covariance.raw_kernel) gp_reg.optimize() return gp_reg.log_likelihood() # + pycharm={"is_executing": false} fitness_values = [fitness_val(model) for model in models]
notebooks/exploratory/locality-exploration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #remove cell visibility from IPython.display import HTML tag = HTML('''<script> code_show=true; function code_toggle() { if (code_show){ $('div.input').hide() } else { $('div.input').show() } code_show = !code_show } $( document ).ready(code_toggle); </script> Toggle cell visibility <a href="javascript:code_toggle()">here</a>.''') display(tag) # ## Jordanova oblika - realne lastne vrednosti # # Ta interaktivni primer prikazuje uporabo Jordanove oblike dinamične matrike za linearne časovno neodvisne (LTI) sisteme. Jordanova matrika predstavlja posebno obliko matrike, ki poenostavi modalno analizo LTI sistemov na način, da poudari lastne vrednosti sistema in odvisnosti med posameznimi stanji; to velja še posebej v primeru več enakih lastnih vrednosti. # # Jordanovo obliko dinamične matrike $A$ sistema $\dot x = A x$ zapišemo kot: # # $$ # J = V^{-1}AV, # $$ # # pri čemer je $V$ matrika, ki je sestavljena iz t.i. generaliziranih lastnih vektorjev lastnih vrednosti matrike $A$. # # Ker sta $A$ in $J$ podobni matriki, imata enake lastne vrednosti in tudi modalne oblike sistema. # # Primer matrike zapisane v Jordanovi obliki (gre za blokovno diagonalno matriko) je npr.: # # $$ # J = \begin{bmatrix} # J_1 & 0 & 0 & ... \\ # 0 & J_2 & 0 & ... \\ # 0 & 0 & J_3 & ... \\ # ... & ... & ... & ...\\ # \end{bmatrix}, # $$ # # kjer so $J_i$ t.i. Jordanovi bloki, določeni kot: # # $$ # J_i = \begin{bmatrix} # \lambda_i & 1 & 0 & ... \\ # 0 & \lambda_i & 1 & ... \\ # 0 & 0 & \lambda_i & ... \\ # ... & ... & ... & ...\\ # \end{bmatrix}, # $$ # # kjer je $\lambda_i$ $i$-ta lastna vrednost matrike $J$ (in s tem tudi matrike $A$). # Število in dimenzija Jordanovih blokov sta odvisni od večkratnosti (angl. multiplicity) lastnih vrednosti. 
# # ### Kako upravljati s tem interaktivnim primerom? # # - Definiraj poljubno matriko in razišči njeno Jordanovo obliko; uporabi tako realne kot kompleksne lastne vrednosti. # - Bodi pozoren na to, da je Jordanova oblika matrike sistema s samimi različnimi lastnimi vrednostmi enaka diagonalni matriki (vsi Jordanovi bloki so velikost $1\times1$. # - Opazuj odvisnost med številom in velikostjo Jordanovih blokov lastne vrednosti in MODES, ki so povezani s to lastno vrednostjo. # - Bodi pozoren na to, da imata, v primeru kompleksnih lastnih vrednosti, tako matrika $J$ kot tudi marika generaliziranih lastnih vektorjev $V$ kompleksne elemente na diagonali. # - Ustvari poljubno matriko $A$ ali uporabi prednastavljene, izbrane primere matrik. # # [//]: # "This example shows the Jordan Form for the dynamic matrix of Linear Time Invariant (LTI) systems. # The Jordan form is a particular matrix form that simplifies the modal analysis of a LTI system by highlighting the eigenvalues of the system and the relations between states especially in the case of repeated eigenvalues. # # The Jordan form can be obtained from the dynamic matrix $A$ of the system $\dot x = A x$ as: # # $$ # J = V^{-1}AV, # $$ # # where $V$ is a matrix composed of the, so called, generalized eigenvectors of the eigenvalues of $A$. # # Clearly, since $A$ and $J$ are similar matrices, they share the same eigenvalues and also the same system modes. # # A matrix in Jordan form is a block diagonal matrix like: # # $$ # J = \begin{bmatrix} # J_1 & 0 & 0 & ... \\ # 0 & J_2 & 0 & ... \\ # 0 & 0 & J_3 & ... \\ # ... & ... & ... & ...\\ # \end{bmatrix}, # $$ # # where the elements $J_i$ are the so called Jordan mini-blocks that look like: # # $$ # J_i = \begin{bmatrix} # \lambda_i & 1 & 0 & ... \\ # 0 & \lambda_i & 1 & ... \\ # 0 & 0 & \lambda_i & ... \\ # ... & ... & ... & ...\\ # \end{bmatrix}, # $$ # # where $\lambda_i$ is the $i$-th eigenvalue of $J$ (and of $A$). 
# The number and dimensions of mini-blocks depends on the multiplicity of the eigenvalues.
#
# ### How to use this notebook?
#
# - Define a matrix and watch its Jordan form; experiment with both real and complex eigenvalues.
# - Note how the Jordan form of a system matrix with distinct eigenvalues is actually a diagonal matrix (all Jordan mini-blocks of dimension 1).
# - Note the relation between the number and dimension of the Jordan mini-blocks of an eigenvalue and the modes associated with it.
# - Note that in the case of complex eigenvalues the matrix $J$ has complex elements on the diagonal and so does the generalized eigenvectors matrix $V$.
# - Explore the effects of changing matrix $A$ values or load example matrices."

# +
# Preparatory cell: imports and the small helper functions / widgets used below.
import control
import numpy
import sympy
from IPython.display import display, Markdown
import ipywidgets as widgets
import matplotlib.pyplot as plt
from sympy import Matrix
from traitlets import Unicode
from ipywidgets import register


def bmatrix(a):
    """Return a LaTeX ``bmatrix`` string for a numpy array - by <NAME> (ICCT project).

    :a: numpy array (at most 2-D)
    :returns: LaTeX bmatrix as a string
    """
    if len(a.shape) > 2:
        raise ValueError('bmatrix can at most display two dimensions')
    lines = str(a).replace('[', '').replace(']', '').splitlines()
    rv = [r'\begin{bmatrix}']
    rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines]
    rv += [r'\end{bmatrix}']
    return '\n'.join(rv)


def vmatrix(a):
    """Return a LaTeX ``vmatrix`` string for a numpy array (same format as bmatrix)."""
    if len(a.shape) > 2:
        # fixed: the message previously said "bmatrix" (copy-paste leftover)
        raise ValueError('vmatrix can at most display two dimensions')
    lines = str(a).replace('[', '').replace(']', '').splitlines()
    rv = [r'\begin{vmatrix}']
    rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines]
    rv += [r'\end{vmatrix}']
    return '\n'.join(rv)


def createMatrixWidget(n, m):
    """Create an n x m grid of FloatText widgets (legacy helper, not used below)."""
    M = widgets.GridBox(children=[widgets.FloatText(layout=widgets.Layout(width='100px', height='40px'),
                                                    value=0.0, disabled=False, label=i)
                                  for i in range(n*m)],
                        layout=widgets.Layout(
                            grid_template_columns=''.join(['100px ' for i in range(m)]),
                            grid_row_gap='0px',
                            track_size='0px'))
    return M


def getNumpyMatFromWidget(M, n, m):
    """Extract an n x m numpy matrix from a grid widget (legacy helper, not used below)."""
    M_ = numpy.matrix(numpy.zeros((n, m)))
    for irow in range(0, n):
        for icol in range(0, m):
            M_[irow, icol] = M.children[irow*3+icol].value
    # fixed: the original built M_ but never returned it
    return M_


class floatWidget(widgets.FloatText):
    """Simple FloatText subclass used to experiment with ``interact``."""

    def __init__(self, **kwargs):
        self.value = 30.0
        widgets.FloatText.__init__(self, **kwargs)


# matrixWidget is a matrix-looking widget built with a VBox of HBox(es)
# that publishes a numpy matrix as its ``value``.
class matrixWidget(widgets.VBox):

    def updateM(self, change):
        # mirror every child FloatText into the backing matrix, then publish it
        for irow in range(0, self.n):
            for icol in range(0, self.m):
                self.M_[irow, icol] = self.children[irow].children[icol].value
        self.value = self.M_

    def dummychangecallback(self, change):
        pass

    def __init__(self, n, m):
        self.n = n
        self.m = m
        self.M_ = numpy.matrix(numpy.zeros((self.n, self.m)))
        self.value = self.M_
        widgets.VBox.__init__(self, children=[
            widgets.HBox(children=[widgets.FloatText(value=0.0, layout=widgets.Layout(width='90px'))
                                   for i in range(m)])
            for j in range(n)])
        # fill in the child widgets and call updateM each time a child changes value
        for irow in range(0, self.n):
            for icol in range(0, self.m):
                self.children[irow].children[icol].value = self.M_[irow, icol]
                self.children[irow].children[icol].observe(self.updateM, names='value')
        self.observe(self.updateM, names='value', type='All')

    def setM(self, newM):
        """Set the whole matrix at once; callbacks are suspended while updating."""
        self.unobserve(self.updateM, names='value', type='All')
        for irow in range(0, self.n):
            for icol in range(0, self.m):
                self.children[irow].children[icol].unobserve(self.updateM, names='value')
        self.M_ = newM
        self.value = self.M_
        for irow in range(0, self.n):
            for icol in range(0, self.m):
                self.children[irow].children[icol].value = self.M_[irow, icol]
        for irow in range(0, self.n):
            for icol in range(0, self.m):
                self.children[irow].children[icol].observe(self.updateM, names='value')
        self.observe(self.updateM, names='value', type='All')


# Overloaded StateSpace that does NOT remove "useless" states.
class sss(control.StateSpace):

    def __init__(self, *args):
        control.StateSpace.__init__(self, *args)

    def _remove_useless_states(self):
        # deliberately disabled: keep all states
        pass


# +
# define the 4x4 dynamic-matrix input widget
A = matrixWidget(4, 4)


def _preset_matrix(sel):
    """Return a fresh 4x4 example matrix for dropdown entry ``sel``.

    Returns None for manual input; 'ponastavi' (reset) and any unknown entry
    yield the zero matrix, matching the original behaviour.
    """
    if sel == 'ročna določitev sistema':
        return None
    M = numpy.zeros((4, 4))
    if sel == 'stabilni sistem brez para kompleksno konjugiranih polov':
        M[0, 0], M[1, 1], M[2, 2], M[3, 3] = -1, -2, -3, -4
    elif sel == 'stabilni sistem s parom kompleksno konjugiranih polov':
        M[0, 0] = M[1, 1] = -1
        M[0, 1], M[1, 0] = 3, -3
        M[2, 2], M[3, 3] = -3, -4
    elif sel == 'nestabilni sistem z nestabilnim realnim polom':
        M[0, 0], M[1, 1], M[2, 2], M[3, 3] = 1, -2, -3, -4
    elif sel == 'nestabilni sistem z nestabilnim parom kompleksno konjugiranih polov':
        M[0, 0] = M[1, 1] = 1
        M[0, 1], M[1, 0] = 3, -3
        M[2, 2], M[3, 3] = -3, -4
    return M


def _displayable(Ms):
    """Convert a 4x4 sympy matrix to a numpy matrix for LaTeX display.

    Complex entries are converted with the builtin ``complex``
    (``numpy.complex`` was removed in NumPy 1.24); real entries keep
    the original ``numpy.real`` treatment.
    """
    rows = []
    for i in range(4):
        row = []
        for j in range(4):
            if sympy.im(Ms[i, j]) != 0:
                row.append(complex(Ms[i, j]))
            else:
                row.append(numpy.real(Ms[i, j]))
        rows.append(row)
    return numpy.matrix(rows)


def main_callback(matA, DW, sel):
    """Display eigenvalues, Jordan form, generalized eigenvectors and modes of matA.

    Parameters
    ----------
    matA : current 4x4 matrix value of the input widget
    DW   : dummy widget used only to re-trigger this callback from the button
    sel  : dropdown selection (manual input or one of the preset examples)
    """
    preset = _preset_matrix(sel)
    if preset is not None:
        matA = preset
        A.setM(matA)

    # symbolic eigen-analysis
    matAs = sympy.Matrix(matA)
    dictEig = matAs.eigenvals()
    eigs = list(dictEig.keys())
    algMult = list(dictEig.values())

    # dimension of the largest Jordan block for each eigenvalue:
    # algebraic multiplicity - geometric multiplicity + 1
    dimJblock = [algMult[i] - len((matAs - eigs[i]*sympy.eye(4)).nullspace()) + 1
                 for i in range(len(eigs))]

    # Jordan form: matAs = matAs_P * matAs_J * matAs_P**-1
    matAs_P, matAs_J = matAs.jordan_form(chop=True)

    # compute mode simulations and build the mode description string
    timeVectors = []
    modeVectors = []
    modestring = ''
    for i, eig in enumerate(eigs):
        re_ = float(sympy.re(eig))
        im_ = float(sympy.im(eig))
        if sympy.re(eig) >= 0:
            # unstable or integrator-like: fixed 10 s window
            time = numpy.linspace(0, 10, 1000)
        else:
            # stable: window scaled by the eigenvalue magnitude
            time = numpy.linspace(0, 10*(1/float(sympy.Abs(eig))), 1000)
        sim = []
        for n in range(dimJblock[i]):
            if sympy.im(eig) != 0 and (sympy.conjugate(eig) not in eigs[0:i]):
                # complex eigenvalue: plot only once per conjugate pair
                sim.append(time**n*numpy.exp(re_*time)*numpy.cos(im_*time))
                if n == 0:
                    modestring = modestring + "$e^{%s t} cos(%s t + \phi)$ " % (str(re_), str(im_))
                else:
                    modestring = modestring + "$t^{%s}e^{%s t} cos(%s t + \phi)$ " % (str(n), str(re_), str(im_))
            elif sympy.im(eig) == 0:
                sim.append(time**n*numpy.exp(re_*time))
                if n == 0:
                    modestring = modestring + "$e^{%s t}$ " % (str(re_))
                else:
                    modestring = modestring + "$t^{%s}e^{%s t}$ " % (str(n), str(re_))
        if len(sim) != 0:
            timeVectors.append(time)
            modeVectors.append(sim)

    # textual output
    display(Markdown('Matrika $%s$ ima lastne vrednost $%s$' % (vmatrix(matA), vmatrix(numpy.array(numpy.linalg.eig(matA)[0])))))
    matJ = _displayable(matAs_J)
    display(Markdown('in njena Jordanova oblika je enaka: $%s$' % str(vmatrix(matJ))))
    matP = _displayable(matAs_P)
    display(Markdown('z generaliziranimi lastnimi vektorji $%s$.' % str(vmatrix(matP))))
    display(Markdown('MODES so: %s' % modestring))

    # one subplot per simulated mode
    totfig = 0
    for i in range(len(modeVectors)):
        totfig = totfig + len(modeVectors[i])
    fig = plt.figure(figsize=(20, 4))
    idx = 1
    for i in range(len(timeVectors)):
        for j in range(len(modeVectors[i])):
            sf = fig.add_subplot(1, totfig, idx)
            idx = idx + 1
            sf.plot(timeVectors[i], modeVectors[i][j])
            sf.grid(True)
            plt.xlabel(r'$t$ [s]')
            plt.axvline(x=0, color='black', linewidth=0.8)
            plt.axhline(y=0, color='black', linewidth=0.8)


# dummy widget whose value change re-triggers interactive_output
DW = widgets.FloatText(layout=widgets.Layout(width='0px', height='0px'))

# button widget
START = widgets.Button(
    description='Test',
    disabled=False,
    button_style='',  # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Test',
    icon='check'
)


def on_start_button_clicked(b):
    # Workaround to have interactive_output call the callback:
    # force the value of the dummy widget to change.
    if DW.value > 0:
        DW.value = -1
    else:
        DW.value = 1


START.on_click(on_start_button_clicked)

# dropdown with the preset examples
SELECT = widgets.Dropdown(
    options=['ročna določitev sistema',
             'ponastavi',
             'stabilni sistem brez para kompleksno konjugiranih polov',
             'stabilni sistem s parom kompleksno konjugiranih polov',
             'nestabilni sistem z nestabilnim realnim polom',
             'nestabilni sistem z nestabilnim parom kompleksno konjugiranih polov'],
    value='ročna določitev sistema',
    description='Primeri:',
    disabled=False,
)

# graphic structure holding all widgets together
alltogether = widgets.VBox([SELECT,
                            widgets.Label(''),
                            widgets.HBox([widgets.Label('$\dot{x}(t) = $', border=3),
                                          A,
                                          widgets.Label('$x(t)$', border=3),
                                          START])])
out = widgets.interactive_output(main_callback, {'matA': A, 'DW': DW, 'sel': SELECT})
out.layout.height = '600px'
display(alltogether, out)
ICCT_si/.ipynb_checkpoints/SS-05-Jordanova_oblika_realni_lastni_vektorji-checkpoint.ipynb
% ---
% jupyter:
%   jupytext:
%     text_representation:
%       extension: .m
%       format_name: light
%       format_version: '1.5'
%       jupytext_version: 1.14.4
%   kernelspec:
%     display_name: Octave
%     language: octave
%     name: octave
% ---

graphics_toolkit("gnuplot"); % use if "plot" does not work

% ## Analytical solution
% Diffusion in 1d + adding material interface, reactive term and convective term <br>
% ### Mixed boundary conditions (Dirichlet + Robin)

% ## Tasks:
% 1) Solve the following problems analytically. <br>
% 2) Plot solutions. <br>
% 3) Try changing input parameters ($L,f,k,U_0,U_L,\ldots$).

% + magic_args="Diffusion with mixed boundary conditions"
% Boundary value problem:
% -k*u''(x)=f in (0,L)
% u(0)=U
% -k*u'(L)=alfa*(u(L)-Uhat)
L=10;
f=0.2;    % sources (constant in whole domain)
k=1;      % material parameter (e.g. conductivity)
U=0;      % Dirichlet boundary condition
alfa=2;   % e.g. heat transfer coefficient
Uhat=2;   % e.g. surrounding temperature
% Integration constants of u(x) = -f/(2*k)*x^2 + C1*x + C2:
C2=U;     % from the Dirichlet condition u(0)=U
C1=       % TODO (student exercise, task 1): derive C1 from the Robin condition -k*u'(L)=alfa*(u(L)-Uhat)
u=@(x)-f/(2*k)*x.^2+C1*x+C2;
% -

% Let's plot the solution.
x=linspace(0,L,100); % points of visualization
points_items={x};
solution_items={u};  % create cell of solution handles
legend_items={'diffusion mixed'}; % create cell of legends

% Plotting function: draws every solution handle over its point set.
function plot_solutions(poi,sol,leg)
  figure; hold on
  for i=1:length(sol)
    plot(poi{i},sol{i}(poi{i}))
  end
  legend(leg); % show legend
  grid on
  xlabel('x')
  ylabel('u(x)')
end

plot_solutions(points_items,solution_items,legend_items)

% + magic_args="Diffusion with mixed boundary conditions + reaction"
% -k*u''(x)+k0*u(x)=g in (0,L)
% u(0)=U
% -k*u'(L)=alfa*(u(L)-Uhat)
k0=0.5;
g=f+k0*Uhat;
% analytical solution:
K=sqrt(k0/k);
C1=   % TODO (student exercise, task 1): solve the 2x2 linear system given by u(0)=U
C2=   % TODO (student exercise, task 1): and the Robin condition at x=L for C1, C2
u=@(x)C1*exp(-K*x)+C2*exp(K*x)+g/k0;
points_items{end+1}=x;
solution_items{end+1}=u;
legend_items{end+1}='d.+reaction mixed'; % append legend
plot_solutions(points_items,solution_items,legend_items)
% -

% ## Numerical experiments
% 1. Check that for $k_0 \rightarrow 0$ the solution of the boundary value problem with reaction converges to the solution of the problem without reaction.
exercises/Exercise02c_eng_analytical_mixed_Robin.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.5 64-bit
#     language: python
#     name: python3
# ---

# # &#x1f4dd; Exercise
# **Suppose you are given a photon with an energy of 2 eV. What is its momentum in
# $\frac{\text{m} \cdot \text{kg}}{\text{s}}$? What is its frequency in Hz?**

# The frequency of the photon follows from
#
# $$ \nu = \frac{E}{h} $$
#
# with the slight complication that the energy is given in electron-volts; luckily scipy.constants ships Planck's constant directly in eV/Hz.
#
# The momentum comes from the De Broglie relation,
#
# $$ p = \frac{h}{\lambda} = \frac{h}{\tfrac{c}{\nu}} = \frac{h \nu}{c} = \frac{E}{c} $$
#
# The last formula (Einstein/Compton) could be used directly, but since the energy is in electron-volts it is easier to use the next-to-last one.

# +
import scipy
from scipy import constants

# nu = E/h, with h fetched in eV/Hz so the 2 eV energy needs no unit conversion
photon_energy_eV = 2.0
planck_eV_per_Hz = scipy.constants.value("Planck constant in eV/Hz")
frequency_d9 = photon_energy_eV/planck_eV_per_Hz

# p = h*nu/c, now conveniently in plain SI units
momentum_d9 = constants.h*frequency_d9/constants.c

print(f"The frequency of a photon with an energy of 2 eV is {frequency_d9:.3e} Hz")
print(f"The momentum of a photon with an energy of 2 eV is {momentum_d9:.3e} m kg/s")
# -
book/IntroExercise1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Disciplina - DQF10648 Eletromagnetismo I # ## Aula em 06/07/2021 - Semestre 2021/1 EARTE # ### [DQF - CCENS](http://alegre.ufes.br/ccens/departamento-de-quimica-e-fisica) - [UFES/Alegre](http://alegre.ufes.br/) # + [markdown] slideshow={"slide_type": "slide"} # # Discussão sobre dificuldades na disciplina, ensino remoto, etc # # Discussão com toda a turma sobre dificuldades na disciplina, ensino remoto EARTE, etc. # # O que tomou bastante tempo, aproximadamente 1h, mas resultou em sugestões importantes para eventuais mudanças nas aulas e atividades da disciplina. # + [markdown] slideshow={"slide_type": "slide"} # # Configurando Jupyter Notebook com [RISE](https://rise.readthedocs.io/en/stable/) # + [markdown] slideshow={"slide_type": "fragment"} # Para visualizar os documentos Jupyter Notebook no modo apresentação tal como são mostrados durante as aulas de "Eletromagnetismo I", instale a extensão [RISE (Reveal.js - Jupyter/IPython Slideshow Extension](https://rise.readthedocs.io/en/stable/), que é um software gratuito e de código-fonte aberto, disponível em [repositório público GitHub](https://github.com/damianavila/RISE). # + [markdown] slideshow={"slide_type": "fragment"} # Para instalar RISE, com [Anaconda (distribuição Python)](https://www.anaconda.com/products/individual) instalado, digite no terminal : # ```bash # $ pip install RISE # ``` # + [markdown] slideshow={"slide_type": "slide"} # # Tirando dúvidas sobre resolução dos exercícios e trabalhos # + [markdown] slideshow={"slide_type": "fragment"} # ## Trabalho em Grupo : Operadores Diferenciais Vetoriais # + [markdown] slideshow={"slide_type": "fragment"} # Trabalho em grupo (até 3 alunos/as) sobre "Operadores Diferenciais Vetoriais". 
# # **Prazo de envio : 4a-feira, 07/07/2021, até 23h59.** # + [markdown] slideshow={"slide_type": "subslide"} # #### Aviso # # Aviso que grupos acima de 3 alunos serão penalizados, por exemplo de 4 alunos terá nota multiplicada por 3/4, então recomendo que quebrem o grupo de 4 alunos. # # No próximo trabalho em grupo não aceitarei grupo com número de alunos acima do especificado (nem corrigirei e a nota será 0). # + [markdown] slideshow={"slide_type": "subslide"} # Dicas : # + [markdown] slideshow={"slide_type": "fragment"} # - basear-se nas aulas das últimas 3 semanas e nas bibliografias oficial e alternativa para obter as expressões genéricas para coordenadas curvilíneas (que não precisam ser deduzidas) para gradiente, divergente, rotacional e laplaciano. Também não precisa deduzir os $h_i$ e $\hat{e}_i$ respectivos; # + [markdown] slideshow={"slide_type": "fragment"} # - checar seta de vetor em todos os operadores e no nabla/del : # # $$ \vec{F} $$ # # $$ \vec{\nabla} \,\,\,(\text{exceto em laplaciano}) $$ # # $$ \hat{i}, \hat{j}, \hat{k} $$ # # etc. # + [markdown] slideshow={"slide_type": "subslide"} # - o "operador" $\vec{\nabla}$ só deve ser escrito em coordenadas curvilíneas para criar gradiente, sendo que para divergente e rotacional tem $h_i$ extras (vide fórmula, aparece $h_1 h_2 h_3$, etc). Então use as expressões genéricas para coordenadas curvilíneas para divergente, rotacional e laplaciano, ao invés de $\vec{\nabla}$ com produto escalar e vetorial aplicados em funções. # Essa dúvida é muito comum, vide : # * [Improper use of [nabla operator] in vector analysis - PhysicsForum](https://www.physicsforums.com/threads/improper-use-of-nabla-operator-in-vector-analysis.131416/); # * [Why is the del operator sometimes considered to be a vector?](https://www.quora.com/Why-is-the-del-operator-sometimes-considered-to-be-a-vector) # + [markdown] slideshow={"slide_type": "fragment"} # Dúvidas dos alunos ? 
# # Alguns alunos mostraram a resolução, sendo feita discussão e dadas sugestões de melhorias nas resoluções. # + [markdown] slideshow={"slide_type": "subslide"} # ## Trabalho Individual : Coordenadas Curvilíneas # + [markdown] slideshow={"slide_type": "fragment"} # Trabalho Individual sobre coordenadas curvilíneas. # # **Prazo de envio : 5a-feira, 08/07/2021, até 23h59.** # + [markdown] slideshow={"slide_type": "fragment"} # Dicas : # + [markdown] slideshow={"slide_type": "fragment"} # - nos itens (2), (3) e (4), fazer sim para coord. cartesianas 2D e 3D, embora alguns cálculos sejam triviais, é importante vocês fazerem ao menos 1 vez na vida, por exemplo, h_x, h_y e h_z (todos resultam em 1, mas é para verificar esse resultado fazendo as contas); # + [markdown] slideshow={"slide_type": "fragment"} # - no item (4), é para colocar corretamente nomes nos eixos, desenhar e descrever as curvas u_i (2 para cada), e em 2 pontos desenhar e nomear os vetores unitários \hat{e}_i. Por exemplo, para coord. cartesianas 2D precisa mostrar curva x_1 (é uma reta horizontal), curva x_2 (idem), curva y_1 (reta vertical), curva y_2 (idem), escrevendo os nomes ao lado dessas curvas; # + [markdown] slideshow={"slide_type": "subslide"} # - essa dificuldade de se fazer os gráficos será motivação nossa para aprendermos depois a fazê-los via ferramentas computacionais, o que vai ser mostrado aos poucas nessa disciplina. Começamos com gráficos de campos vetoriais na semana passada. Porém é intencional vê-los fazendo gráficos à mão (no papel ou via software de desenho) para só depois usarmos ferramentas computacionais (com gráficos gerados via programação). # + [markdown] slideshow={"slide_type": "fragment"} # Dúvidas dos alunos ? # # Uso de papel para desenhos, ou softwares de desenho, ou GeoGebra, etc. # # Faltou tempo, só um aluno que começou a mostrar a resolução. # Isso será continuado na aula seguinte.
Aulas/Aula_20210706/Aula_EletromagI_20210706.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # MSDA Jupyterhub server # # ## What is Jupyterhub? # # Jupyterhub is a multi-user jupyter environment # # One person (me and research IT team) deals with installation and management of infrastructure # # Many people can use the server under their own account # # # ## Features (of our server) # # No individual installation necessary -- point your browser to the right url and you are good to go # # Hosted on AWS and "always on" # # Will scale to a more powerful machine during seminars so we can all use it at the same time (will scale back down on other days) # # Will have attached GPUs for accelerated model training, especially useful when we get to deep learning # # ## Usage # # You can use it too! How? # # 1. Send me a message with your desired username. This should be one word, all lowercase. E.g. my username is `slyon` # 2. Once I give you the OK, point your browser to https://172.16.58.3 # - You will get a warning about an unsecure certificate. This is expected and temporary. For now do what your browser needs you to do in order to accept the warnings and visit the site anyway # - This is a temporary access url that will be replaced by something more formal this week # 3. Sign in with the username you provided in step (1) and the password I sent out in slack # 4. You should land on a page that looks like this: ![jlab_landing](./jlab_landing.png) # 5. Open a terminal by clicking the terminal icon. You should see something like this ![terminal](terminal.png) # 6. Change the password by ... # - Enter the command `passwd` followed by Return or Enter # - Then enter the original password I sent you in slack. No characters will appear as you type, but it is working! # - Then enter your new password two times, pressing enter in between # 7. Enjoy! 
# # ## Getting these materials # # After logging in to the server you can automatically fetch the latest version of the materials using this link: https://172.16.58.3/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fsglyon%2FUCF-MSDA-workshop&urlpath=lab%2Ftree%2FUCF-MSDA-workshop%2FREADME.md # # I will post in slack also so you can click it and immediately pull all the latest materials from github # # In future sessions you can click the same link and it will grab any new materials you don't have yet
Year19-20/2019-07-15__sklearn/02_aws.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/worldbank/OpenNightLights/blob/master/onl/tutorials/mod4_2_histograms.ipynb)

# # Histograms
#
# Histograms are another very common and useful graph to create and can show important shifts in the distribution of data, such as nighttime light intensity.
#
# In this exercise, we'll look at Berlin, Germany. And as in {doc}`mod4_1_time_series_charts`, we'll need to extract the data from the raster.
#
# **Our tasks in this exercise:**
# 1. Extract data for Berlin from a 2019 VIIRS-DNB composite and convert it to a numpy array
# 2. Create a histogram of VIIRS data for Berlin in Dec 2019 and compare to Dec 2018
# 3. Create a histogram of DMSP-OLS data for Berlin in 1992 and 2013
#
# ## Extract data from a region around Berlin
#
# Let's define a region around the metro area of Berlin, Germany and we'll get a composite of VIIRS-DNB `avg_rad` representing the radiance for December 2019.
#
# We'll visualize it quickly to get our bearings (clipping our VIIRS layer to our rectangle).
# + import numpy as np # reminder that if you are installing libraries in a Google Colab instance you will be prompted to restart your kernal try: import geemap, ee import seaborn as sns import matplotlib.pyplot as plt except ModuleNotFoundError: if 'google.colab' in str(get_ipython()): print("package not found, installing w/ pip in Google Colab...") # !pip install geemap seaborn matplotlib else: print("package not found, installing w/ conda...") # !conda install mamba -c conda-forge -y # !mamba install geemap -c conda-forge -y # !conda install seaborn matplotlib -y import geemap, ee import seaborn as sns import matplotlib.pyplot as plt # + try: ee.Initialize() except Exception as e: ee.Authenticate() ee.Initialize() # define region of Berline berlin = ee.Feature(ee.FeatureCollection( "FAO/GAUL_SIMPLIFIED_500m/2015/level1").filter(ee.Filter.eq('ADM1_NAME', 'Berlin')).first()).geometry() # viirs for December 2019 viirsDec2019 = ee.ImageCollection("NOAA/VIIRS/DNB/MONTHLY_V1/VCMSLCFG").filterDate('2019-12-01','2019-12-31').select('avg_rad').first() berlinMap = geemap.Map() berlinMap.centerObject(berlin, zoom=10) berlinMap.addLayer(viirsDec2019.clip(berlin), {'min':1,'max':20}, opacity=.6) berlinMap # - # ### Extracting data to array # We'll use `geemap`'s `sampleRectangle()` method to extract an array of radiance values from pixels in our raster within our region and use this to create our numpy array from the `avg_rad` band we've already selected. dec19arr = np.array(viirsDec2019.sampleRectangle(region=berlin).get('avg_rad').getInfo()) # ### Plot histogram # # Once again, we use `seaborn` to plot a histogram with our array. 
# +
# first, we flatten our array to a 1-d array for the plot
data = dec19arr.flatten()

fig, ax = plt.subplots(figsize=(15,5))
sns.histplot(data, bins=100, label='Per-pixel radiance',legend=True, ax=ax)
plt.legend(fontsize=20)
# fixed label: the plotted composite is December 2019 (viirsDec2019),
# but the title previously said "Nov 2019"
plt.title('Distribution of VIIRS Dec 2019 nighttime lights (Berlin)', fontsize=20);
# -

# Here we have a histogram of the region. We see that the vast majority of values are close to zero while there seems to be some extreme values past 100 (Watts/cm2/sr).
#
# Often with such an extreme skew, it's helpful to visualize by applying a logscale, which we can do with `numpy`'s `.log()` function.

# +
# logscale the data
data = np.log(dec19arr).flatten()

fig, ax = plt.subplots(figsize=(15,5))
sns.histplot(data, bins=100, label='Per-pixel radiance',legend=True, ax=ax)
plt.legend(fontsize=20)
plt.title('Distribution of VIIRS Dec 2019 nighttime lights Berlin (logscale)', fontsize=20);
# -

# This lets us see the subtlety of the distribution a bit more. We can see somewhat of a bi-modal distribution: the large tendency near zero, but another concentration higher up.
#
# This may be useful in looking at trends, because we can see if the distribution changes. Let's compare this distribution from Dec 2019 to that of the previous year, Dec 2018.
#
# We'll use `seaborn`'s `kdeplot()`, which plots the probability density smoothed with a Gaussian kernel...it makes it a bit easier to compare multiple distributions.
# +
# VIIRS-DNB monthly composite for December 2018, extracted the same way as Dec 2019
viirsDec18 = ee.ImageCollection("NOAA/VIIRS/DNB/MONTHLY_V1/VCMSLCFG").filterDate('2018-12-01','2018-12-31').select('avg_rad').first()
dec18arr = np.array(viirsDec18.sampleRectangle(region=berlin).get('avg_rad').getInfo())

fig, ax = plt.subplots(figsize=(15,5))
sns.kdeplot(np.log(dec19arr).flatten(), label='Dec 2019',legend=True, ax=ax)
sns.kdeplot(np.log(dec18arr).flatten(), label='Dec 2018',legend=True, ax=ax)
plt.legend(fontsize=20)
# fixed label: the comparison is Dec 2019 vs Dec 2018 (the title previously said "vs 2020")
plt.title('Distribution of VIIRS Dec 2019 vs Dec 2018 nighttime lights (logscale)', fontsize=20);
# -

# 2019 seems to show a shift rightward for the right-most mode of our distribution. As we get data in for 2020, it would be interesting to compare this.
#
# We're looking at an entire region, but you can also think about ways to compare urban vs suburban change by creating masks for urban core versus suburban areas. And with distributions, you can use statistical tests of variance to support analytical comparisons.
#
# ## Histogram of DMSP-OLS for Berlin in 2013
#
# Because we'd need to calibrate for DMSP-OLS comparisons across satellites we won't compare years, but we can look at a histogram for 2013.

# +
# get annual composites for 2013, stable lights
dmsp13 = ee.ImageCollection("NOAA/DMSP-OLS/NIGHTTIME_LIGHTS").filterDate('2013-01-01','2013-12-31').select('stable_lights').first()

# extract data to array
arr13 = np.array(dmsp13.sampleRectangle(region=berlin).get('stable_lights').getInfo())
# -

# interactive map of the 2013 DMSP-OLS layer over Berlin
berlinMap2 = geemap.Map()
berlinMap2.centerObject(berlin, zoom=10)
berlinMap2.addLayer(dmsp13.clip(berlin), {'min':0,'max':63}, opacity=.6)
berlinMap2

# You can see that the lower resolution and saturation issue with DMSP-OLS means this distribution likely won't be as dynamic.
# compare sample sizes of the two sensors (VIIRS has much finer resolution)
print(f'VIIRS-DNB composite for Berlin has {dec19arr.size} datapoints')
print(f'DMSP-OLS composite for Berlin has {arr13.size} datapoints')

fig, ax = plt.subplots(figsize=(15,5))
# NOTE(review): stable_lights pixels can be 0, so np.log produces -inf values
# here (numpy will warn); verify whether zero pixels should be masked first.
sns.kdeplot(np.log(arr13).flatten(), legend=False, ax=ax)
plt.title('Distribution of DMSP-OLS 2013 nighttime lights Berlin (logscale)', fontsize=20);

# We have a lot of saturated pixels, which would explain our high concentration at the high end of the range.

# ## References:
# ```{bibliography} ../references.bib
# :filter: docname in docnames
# ```
onl/tutorials/mod4_2_histograms.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Evaluating KNetworks

# +
import matplotlib.pyplot as plt
from knetworks import knetworks, Network
import numpy as np
from testdata.generator import generateFromGenFile
import torch
import os
from sklearn.model_selection import KFold

# %matplotlib inline
# %config InlineBackend.figure_format='retina'

# Train on the GPU when one is available, otherwise fall back to the CPU.
if torch.cuda.is_available():
    train_on = "cuda"
else:
    train_on = "cpu"
# Fixed: use the detected backend instead of unconditionally requesting CUDA,
# which raised on CPU-only machines.
device = torch.device(train_on)  # I AM SPEEEEED
# -

# ## Read in the features and the user data
# We use a dataset of 19 users and over 500 shopping lists.

# +
# Product vocabulary, one product name per line.
filename = os.getcwd() + "/../" + "allproducts2.txt"  # may not work for windows
with open(filename, "r") as file:
    f = file.read()
products = [p.upper() for p in f.split("\n")]

features = []
data = [[] for i in range(19)]  # one list of shopping lists per user

# Parse the per-user files testdata/gu_1.txt ... gu_19.txt. The first line
# holds comma-separated feature (product) names; every following line is a
# 0/1 vector marking which products appear in that shopping list.
for f in range(1, 20):
    with open("testdata/gu_" + str(f) + ".txt", "r") as file:
        lines = file.readlines()
    features = sorted(str(lines[0]).replace(" ", "").upper().strip().split(","))
    for l in range(len(lines) - 1):
        data[f - 1].append([])
        vec = lines[l + 1].upper().split(",")
        for i in range(len(vec)):
            if int(float(vec[i])) == 1:
                data[f - 1][l].append(features[i])

# One binary vector per shopping list, indexed over the full product list.
# Fixed: np.int was removed in NumPy 1.20+; the builtin int is equivalent.
vectors = np.array([np.array([np.zeros(len(products), dtype=int)
                              for gr_list in range(len(data[user]))])
                    for user in range(len(data))])
for i, x in enumerate(data):
    for j, y in enumerate(x):
        for k, f in enumerate(products):
            if f in y:
                vectors[i][j][k] = 1
# -

# ## Kmeans Test
# This cell plots a scatter plot with two features of the users and the calculated centroids.
i_butter = products.index("BUTTER") i_apple = products.index("APFEL") meanVecs = [] for u in vectors: meanVec = np.mean(u, axis=0) meanVecs.append(meanVec) meanVecs = np.array(meanVecs) meanVecs.shape plt.scatter(meanVecs[:,i_butter], meanVecs[:,i_apple], marker="x", s=90, c=knet.km.affiliations) plt.xlabel("bought butter") plt.ylabel("bought apple(s)") plt.scatter(knet.km.centroids[:,i_butter], knet.km.centroids[:,i_apple], c=[0,1,2,3,4,5])#why is the purple dot not in the middle of the purple crosses? # ## The eval functions # The following are functions used to evaluate the performance of the trained model. def eval1(knet): r_gs = [] f_ps = [] f_ns = [] t_ps = [] predictions = [] n = len(products) for user in range(19): prediction = np.array(np.squeeze(knet.predict(vectors[user][:-4,:], future=1))) # the prediction for user wrong_guesses = np.sum(np.absolute((np.absolute(np.round(prediction)) - vectors[user][-4,:]))) # the right guessed features for one receipt into the future false_postives = np.sum(np.maximum((np.absolute(np.round(prediction)) - vectors[user][-4,:]), 0)) false_negatives = np.sum(np.maximum((vectors[user][-4,:] - np.absolute(np.round(prediction))), 0)) true_positives = np.sum((np.round(prediction) + vectors[user][-4,:])//2) right_guesses = n - wrong_guesses true_negatives = right_guesses - true_positives r_gs.append(right_guesses) f_ps.append(false_postives) f_ns.append(false_negatives) t_ps.append(true_positives) predictions.append(prediction) predictfourlast = np.array(np.squeeze(knet.predict(vectors[user][:-8,:], future=4))) predictionfourlasts.append(predictfourlast) #print(false_postives, false_negatives) #print("["+ str(user) + "] Accuracy: " + str(len(products)-int(wrong_guesses)) + "/" + str(len(products)) + " (" + str((len(products)-int(wrong_guesses))/len(products)) + ")") m_r_gs = np.mean(r_gs) m_f_ps = np.mean(f_ps) m_f_ns = np.mean(f_ns) m_t_ps = np.mean(t_ps) acc = m_r_gs / n recall = m_t_ps / (m_t_ps + m_f_ns) precision = m_t_ps 
/ (m_t_ps + m_f_ps) print("TP,FP,FN: ", m_t_ps, m_f_ps, m_f_ns) print("Accuracy: ", acc) print("True positive rate (recall) (correctly predicted of all YES values): ", recall) print("Precision (correctly predicted of all YES predicted): ", precision) return acc, recall, precision, predictions def eval2(knet, u1=0, u2=19): r_gs = [] f_ps = [] f_ns = [] t_ps = [] predictions = [] n = len(products) for user in range(u1,u2): prediction = np.array(np.squeeze(knet.predict(vectors[user][:-5,:], future=1)+0.15)) # the prediction for user wrong_guesses = np.sum(np.absolute((np.absolute(np.round(prediction)) - vectors[user][-5,:]))) # the right guessed features for one receipt into the future false_postives = np.sum(np.maximum((np.absolute(np.round(prediction)) - vectors[user][-5,:]), 0)) false_negatives = np.sum(np.maximum((vectors[user][-5,:] - np.absolute(np.round(prediction))), 0)) true_positives = np.sum((np.round(prediction) + vectors[user][-5,:])//2) right_guesses = n - wrong_guesses true_negatives = right_guesses - true_positives r_gs.append(right_guesses) f_ps.append(false_postives) f_ns.append(false_negatives) t_ps.append(true_positives) predictions.append(prediction) predictfourlast = np.array(np.squeeze(knet.predict(vectors[user][:-8,:], future=4))) predictionfourlasts.append(predictfourlast) #print(false_postives, false_negatives) #print("["+ str(user) + "] Accuracy: " + str(len(products)-int(wrong_guesses)) + "/" + str(len(products)) + " (" + str((len(products)-int(wrong_guesses))/len(products)) + ")") m_r_gs = np.mean(r_gs) m_f_ps = np.mean(f_ps) m_f_ns = np.mean(f_ns) m_t_ps = np.mean(t_ps) acc = m_r_gs / n recall = m_t_ps / (m_t_ps + m_f_ns) precision = m_t_ps / (m_t_ps + m_f_ps) print("TP,FP,FN: ", m_t_ps, m_f_ps, m_f_ns) print("Accuracy: ", acc) print("True positive rate (recall) (correctly predicted of all YES values): ", recall) print("Precision (correctly predicted of all YES predicted): ", precision) return acc, recall, precision, predictions # ## 
Testing KNetworks # The following function trains KNetworks with $k=1\dots15$ and plots the performances. # We divide the dataset in a training (`vectors[:][:-4,:]`) and a test (`vectors[:][-4:,:]`) dataset. # The four last list of every user are not feeded into the network during training and are beeing predicted. # The `eval1()` function evaluates the model based on this prediction. performances = [] predictions = [] predictionfourlasts = [] def test_diffrent_ks(): for i in [1,2,3,4,5,6,7,8,10,12,15]: knet = knetworks(i, vectors[:][:-4][:], len(products), device) knet.fit() knet.train(round(500/np.sqrt(i)),1)#not the same amount for every network, to be a little more fair #training time is now O(sqrt(k)) print("k: ", i) acc, rec, prec, preds = eval1(knet) print("\n") performances.append([i,acc,rec,prec]) predictions.append(preds) torch.cuda.empty_cache() return np.array(performances) performances = test_diffrent_ks() # calulate the confidence confidencess = [] for k in range(11): confidences = [] for user in range(19): pos = predictions[k][user][np.round(predictions[k][user]) == 1] if len(pos) > 0: confidences.append(np.sum(pos) / len(pos)) #print("k =", k ,":", np.sum(np.array(confidences)) / len(confidences)) confidencess.append(np.sum(np.array(confidences)) / len(confidences)) def plot_performances(performances): plt.plot(performances[:,0], performances[:,1]) plt.plot(performances[:,0], performances[:,2]) plt.plot(performances[:,0], performances[:,3]) plt.plot(performances[:,0], confidencess) plt.xlabel("k") plt.legend(['Accuracy', 'Recall', "Precision", "Confidence"], loc=4) plt.savefig("performances.png") plt.show() plot_performances(np.array(performances)) predictionfourlasts = np.array(predictionfourlasts).reshape(11,19,4,333) def plotForK(k): f, axes = plt.subplots(19, 5, figsize=(8, 12)) X1 = np.linspace(0, 5, 5) X2 = np.linspace(0, 4, 4) X3 = np.linspace(3, 5, 2) X4 = np.linspace(0,5,5) for user in range(19): summ = 
np.sum(np.round(predictionfourlasts[k][user]), axis=0) interests = np.where(summ > 0) for j in range(5): future = predictions[k][user][j] if len(interests[0]) >= 5: predictionsFour = np.append(predictionfourlasts[k][user][:,interests[0][j]], future) gt = vectors[user][-8:-3,interests[0][j]] else: predictionsFour = np.append(predictionfourlasts[k][user][:,j], future) gt = vectors[user][-8:-3,j] axes[user][j].plot(X1, predictionsFour, c="green") axes[user][j].plot(X1, gt, c="red") axes[user][j].axvline(4, 0, 1) axes[user][j].plot(X1, np.round(predictionsFour), c="purple") f.legend(['Predictions', 'Ground truth', 'End of training dataset', 'Rounded prediction'], bbox_to_anchor=(0., 0, 1, .102), loc='lower left', ncol=2, mode="expand", borderaxespad=0.) f.savefig(str(k) + "_plot.png") for k in range(11): plotForK(k) # ## Testing KNetworks using cross-validation (KFold) (TODO) X = [0,1,2,3,4,5,6,7,8,9,10,11,12,14,15,16,17,18,19] kf = KFold(n_splits=8, random_state=None, shuffle=False) folds = [] tests = [] for train_index, test_index in kf.split(X): folds.append(train_index) tests.append(test_index) print("TRAIN:", train_index, "TEST:", test_index) for fold, test in zip(folds, tests): print("test:", test[0], "-" , test[-1]) knet = knetworks(8, vectors[:test[0]][:-4], len(products), device) knet.fit() knet.train(500,1) for t in test: eval2(knet, t, t+1) print("\n") eval1(knet) #knet.save("saves") # # Training a good model for the webapp knet = knetworks(7, vectors, len(products), device) knet.fit() knet.train(500,1) knet.save("saves")
webapp/precommender/KNetworks_Evaluation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import re
import os
import copy
import matplotlib.pyplot as plt

# ### Build

# ### Run

# ###### Running the cell below will generate the sensorfusion.txt file, which is needed for the rest of the analysis. I have included the file already; so, I made this a markup cell. If you have set up the environment, and wish to run it, please convert to code cell.

# import subprocess
# command = "D:/code/C++/RT-Cadmium-FDD/top_model/main.exe"
# completed_process = subprocess.run(command, shell=False, capture_output=True, text=True)
# #print(completed_process.stdout)
#

# ### Read from file

# +
fileName = "SensorFusion.txt"

with open(fileName, "r") as f:
    lines = f.readlines()

# Map the raw fault-set notation emitted by the supervisor to fault codes.
faults = {"{1}": "A1",
          "{2}": "B1",
          "{3}": "C1",
          "{4}": "D1",
          "{1, 2}": "A1B1",
          "{1, 3}": "A1C1",
          "{1, 4}": "A1D1",
          "{2, 3}": "B1C1",
          "{3, 4}": "C1D1",
          }
pure_fault_dict = {'A1': 1, 'B1': 2, 'C1': 3, 'D1': 4}

# Count how often the supervisor reported each fault (pure or compound).
fault_freq = {}
for key in faults.values():
    fault_freq[key] = 0

for line in lines:
    if re.search("supervisor", line) is None:
        continue
    interest = re.findall(r"\{\d+[,]*\s*\d*\}", line)[0]
    fault_freq[faults[interest]] += 1

with open(fileName, "r") as f:
    output = f.read()

# Count how many faults each generator produced in total.
generators = {'A': 0, 'B': 0, 'C': 0, 'D': 0}
for key in generators.keys():
    generators[key] = len(re.findall("faultGen" + key, output))

# Fixed: this previously referenced an undefined name `fault_codes`
# (NameError); the frequency table built above is `fault_freq`.
values = list(fault_freq.values())
keys = list(fault_freq.keys())
# -

generators

fault_freq


def sumFromSupervisor(code):
    '''
    Returns the number of times faults associated with a particular pure fault (the parameter) were output by the supervisor
    @param code: string, a pure fault code such as "A1"
    @return int, total frequency over all supervisor fault keys containing `code`
    '''
    total = 0  # renamed from `sum`, which shadowed the builtin
    for key, value in fault_freq.items():
        if code in key:
            total += value
    return total


# ### ANALYSIS / VERIFICATION

# #### Definitions

# **Pure Fault**: Faults from a single generator.
# **Compound Faults**: Faults formed from the combination of pure faults. # ### Premise # Fault $A1$: Should have no discarded entry, because it has the highest priority # Fault $B1$: Should have some discarded value, for the case $BD$, which is not available # Fault $C1$: Higher percentage of discarded cases than $C$, because of its lower priority # Fault $D1$: Highest percentage of discarded cases, because it has the lowest priority # Generator $output_{A1} = n({A1}) + n({A1} \cap {B1}) + n({A1} \cap {C1}) + n({A1} \cap {D1}) + discarded_{A1}$ # Generator $output_{B1} = n({B1}) + n({A1} \cap {B1}) + n({B1} \cap {C1}) + discarded_{B1}$ # Generator $output_{C1} = n({C1}) + n({A1} \cap {C1}) + n({B1} \cap {C1}) + n({C1} \cap {D1}) + discarded_{C1}$ # Generator $output_{D1} = n({D1}) + n({A1} \cap {D1}) + n({C1} \cap {D1}) + discarded_{D1}$ # # Where $discarded_{D1} \equiv 0$, because A has the highest priority # The sole reason for the existence of a discarded $B1$ fault is the priority system. It happens at line $226$, time $00:00:05:580$, where fault generators $A1$ and $B1$ send their signals simultaneously, and state $A1$ alone gets set to true, because by the priority system, the external transition function returns immediately after the first loop it hits, which in this case, is the $A1$ loop. If you remove the return statements in the loops of the external transition function, all $B1$ faults are handled, and $discarded_{B1}$ becomes $0$. 
# ### Discarded discarded = {} discarded['A'] = generators['A'] - sumFromSupervisor("A1") discarded['B'] = generators['B'] - sumFromSupervisor("B1") discarded['C'] = generators['C'] - sumFromSupervisor("C1") discarded['D'] = generators['D'] - sumFromSupervisor("D1") discarded #plt.title('Discarded Bar') plt.bar(discarded.keys(), discarded.values()) plt.grid() #plt.show() plt.savefig('discarded bar.png', format='png') # + keys, values = list(discarded.keys()), list(discarded.values()) legend_keys = copy.copy(keys) for i in range(len(keys)): legend_keys[i] = str(legend_keys[i]) + " = " + str(values[i]) # Remove wedgeprops to make pie wedges, texts = plt.pie(values, textprops=dict(color="w"), wedgeprops=dict(width=0.5)) plt.legend(wedges, legend_keys, title="Fault Codes", loc="center left", bbox_to_anchor=(1, 0, 0.5, 1)) #plt.title("Discarded Pie") #plt.show() plt.savefig('discard pie.png', format='png') # - # ### Single Run # + chart_data = copy.copy(fault_freq) values = list(chart_data.values()) keys = list(chart_data.keys()) plt.title('Single-Run') plt.bar(keys, values) plt.grid() plt.show() #plt.savefig('single-run bar.png') # + # Remove wedgeprops to make pie wedges, texts = plt.pie(values, textprops=dict(color="w"), wedgeprops=dict(width=0.5)) legend_keys = copy.copy(keys) for i in range(len(keys)): legend_keys[i] = str(legend_keys[i]) + " " + str(values[i]) + " " + "times" plt.legend(wedges, legend_keys, title="Fault Codes", loc="center left", bbox_to_anchor=(1, 0, 0.5, 1)) plt.title("Single-Run") plt.show() #plt.savefig('single-run pie.png') # - # ### Bar Chat #plt.title('Sensor Fusion') plt.bar(keys, values) plt.grid() #plt.show() plt.savefig('single-run pie.png') # ### Doughnut # + legend_keys = copy.copy(keys) for i in range(len(keys)): legend_keys[i] += " with frequency " + str(values[i]) # Remove wedgeprops to make pie wedges, texts = plt.pie(values, textprops=dict(color="w"), wedgeprops=dict(width=0.5)) plt.legend(wedges, legend_keys, title="Fault 
Codes", loc="center left", bbox_to_anchor=(1, 0, 0.5, 1)) plt.title("Sensor Fusion") plt.show() # - # ### Internal Transition # + from graphviz import Digraph dot = Digraph(node_attr={'shape': 'box'}, format='png', filename='internal transition.png', graph_attr = {'splines': 'ortho'}) dot.edge_attr.update(arrowhead='vee', arrowsize='1') dot.node('S', shape='oval', label='Start') dot.node('0', label='state.idle = false') dot.node('1', label='Reset all fault states\nto 0') dot.node('E', shape='oval', label='End') dot.edge('S', '0') dot.edge('0', '1') dot.edge('1', 'E') dot.save() dot.render(view=False) dot # - # ### External Transition # + from graphviz import Digraph dot = Digraph(node_attr={'shape': 'box'}, format='png', filename='external transition.png', graph_attr = {'splines': 'ortho'}) dot.edge_attr.update(arrowhead='vee', arrowsize='1') dot.node('S', shape='oval', label='Start') dot.node('0', label='state.idle = false') dot.node('1', label='Save only the output of the\ngenerator of highest priority\nthat produced an output\nin corresponding state variables,\nand ignore any other that\nproduced an output simultaneously') dot.node('E', shape='oval', label='End') dot.edge('S', '0') dot.edge('0', '1') dot.edge('1', 'E') dot.save() dot.render(view=False) dot # - # ### Output # + dot = Digraph(node_attr={'shape': 'box'}, format='png', filename='output.png') dot.edge_attr.update(arrowhead='vee', arrowsize='1') dot.node('S', shape='oval', label='Start') dot.node('0', label='Get first pure\nfault state\nand\nput into key') dot.node('1', label='key = "" & fault_key = ""') dot.node('2', label='Is this\nfault present?', shape='diamond') dot.node('3', label='Print to cout\nthat the fault\nis present') dot.node('4', label='Increment the\nfrequency of the\nfault') dot.node('5', label='Print to cout\nthat a new fault type\nis present') dot.node('6', label='Add new fault\ntype to fault\nlist with frequency of 1') dot.node('7', label='Fault_key = key') dot.node('8', 
label='Push fault_key into message bag') dot.node('9', label='More\nfaults?', shape='diamond') dot.node('11', label='Move to next fault') dot.node('12', label='Is this\nfault present?', shape='diamond') dot.node('13', label='Print to cout\nthat the fault\nis present') dot.node('14', label='Increment the\nfrequency of the\nfault') dot.node('15', label='Print to cout\nthat a new fault type\nis present') dot.node('16', label='Add new fault\ntype to fault\nlist with frequency of 1') dot.node('17', label='Is key C1\nor D1?', shape='diamond') dot.node('18', label='Is fault_key\nlength < 4?', shape='diamond') dot.node('19', label='Fault_key = fault_key + key') dot.node('20', label='Push fault_key into message bag') dot.node('21', label='Is fault_key\npresent in\nfault list?', shape='diamond') dot.node('22', label='Print to cout that fault_key is present') dot.node('23', label='Increment fault frequency') dot.node('24', label='Print to cout\nthat a new fault type\nis present') dot.node('25', label='Add new fault\ntype to fault\nlist with frequency of 1') dot.node('26', label='Save fault codes') dot.node('27', label='Return message bag') dot.node('E', shape='oval', label='End') dot.edge('S', '0') dot.edge('0', '1') dot.edge('1', '2') dot.edge('2', '3', 'Yes') dot.edge('3', '4') dot.edge('2', '5', 'No') dot.edge('5', '6') dot.edge('6', '7') dot.edge('4', '7') dot.edge('7', '8') dot.edge('8', '9') dot.edge('9', '11') dot.edge('11', '12') dot.edge('12', '13', 'Yes') dot.edge('13', '14') dot.edge('12', '15', 'No') dot.edge('15', '16') dot.edge('14', '17') dot.edge('16', '17') dot.edge('17', '18') dot.edge('18', '19') dot.edge('19', '20') dot.edge('20', '21') dot.edge('21', '22', 'Yes') dot.edge('22', '23') dot.edge('21', '24', 'No') dot.edge('24', '25') dot.edge('23', '26') dot.edge('25', '26') dot.edge('26', '27') dot.edge('27', 'E') dot.save() dot.render(view=False) dot # -
Sensor Fusion/.ipynb_checkpoints/sensor_fusion-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Check data from hubs # Testing the 29 limit proximity scans # # Badges - # 32 badges with older version # One badge with new version # 5 beacons (with old version, doesn’t matter) # # I started the hub, activated the badges, then marke all old version badges as active and restarted the hub (so that there is no issue with pulling data) # # # Had some issues with other badges around. Had to restart the whole thing and change the project id to 13 # # tests: # ~14:16- - 32 badges, 5 beacons, all very close. Expecting to see at least 4 beacons in a scan # ~14:22 - 32 badges very close, 5 beacons further away. Expecting to see only 4 # ~14:24 ~ - moved them even further away. Expecting to see only 4 # ~14:27~ - Only 2 beacons very close, 3 are turned off. Expecing to see 2 # ~14:30~ - turn off all # # # # The badge that was used for the data itself: # FA:6F:8C:0C:39:0D, E92D598WEX # # # + #import sys # #!{sys.executable} -m pip install sklearn # + from __future__ import absolute_import, division, print_function import pandas as pd import numpy as np import pytz import gzip import os # Import the data analysis tools import openbadge_analysis as ob import openbadge_analysis.preprocessing # + # Matplotlib for additional customization from matplotlib import pyplot as plt # %matplotlib inline # Seaborn for plotting and styling import seaborn as sns # + # Settings time_zone = 'US/Eastern' # Your local timezone hublog_time_zone = 'UTC' log_version = '2.0' time_bins_size = '15S' # For proximity scans folder = '../data/raw/20181121_test_fw3_proximity/' proximity_filename = folder+'proximity_archive.txt.gz' audio_filename = folder+'audio_archive.txt.gz' hublog_filename = folder+'hub.log.gz' # - # # Load data with gzip.open(proximity_filename, 'r') as f: 
m2badge = ob.preprocessing.member_to_badge_proximity(f, time_bins_size, tz=time_zone) m2badge = m2badge.reset_index() m2badge.head() # # Calc data m2badge['is_beacon'] = m2badge.apply(lambda row: 1 if row.observed_id >= 16000 else 0 ,axis=1) m2badge.groupby('datetime')['is_beacon'].sum().plot() # # plot number of beacons and RSSIs #rssi_means = m2badge.groupby(['datetime','is_beacon'])[['rssi']].mean() #rssi_means #ax = sns.lineplot(rssi_means) ax = sns.lineplot(x="datetime", y="rssi", hue="is_beacon", style="is_beacon", data=m2badge)
notebooks/20181121_test_fw3_proximity.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 1. Example of training GNNs for Node Classification # GRB provides easy-to-use APIs to train GNNs, facilitating the entire process from loading graph data, building GNN models, to evaluation and inference. Here is an example for the task of graph classification. # Contents # - [Load Dataset](#Load-Dataset) # - [Build Model](#Build-Model) # - [Training](#Training) # - [Inference](#Inference) # - [Evaluation](#Evaluation) # + tags=[] import os import torch import grb.utils as utils # + [markdown] tags=[] # ## 1.1. Load Dataset # - # GRB datasets are named by the prefix *grb-*. There are four *mode* ('easy', 'medium', 'hard', 'full') for test set, representing different average degrees of test nodes, thus different difficulty for attacking them. The node features are processed by *arctan* normalization (first standardization then arctan function), which makes node features fall in the same scale. # + tags=[] from grb.dataset import Dataset dataset_name = "grb-cora" dataset = Dataset(name=dataset_name, data_dir="../data/", mode="full", feat_norm="arctan") # + [markdown] tags=[] # ## 1.2. Build Model # - # GRB supports models based on pure Pytorch, CogDL or DGL. The following is an example of GCN implemented by pure Pytorch. Other models can be found in ``grb/model/torch``, ``grb/model/cogdl``, or ``grb/model/dgl``. # + [markdown] tags=[] # ### 1.2.1. 
GCN (Graph Convolutional Network) # + tags=[] from grb.model.torch import GCN from grb.utils.normalize import GCNAdjNorm model_name = "gcn" model = GCN(in_features=dataset.num_features, out_features=dataset.num_classes, hidden_features=64, n_layers=3, adj_norm_func=GCNAdjNorm, layer_norm=True, residual=False, dropout=0.5) print("Number of parameters: {}.".format(utils.get_num_params(model))) print(model) # + [markdown] tags=[] # ### 1.2.2. GAT (Graph Attention Network) # + tags=[] from grb.model.dgl import GAT model_name = "gat" model = GAT(in_features=dataset.num_features, out_features=dataset.num_classes, hidden_features=64, n_layers=3, n_heads=4, adj_norm_func=None, layer_norm=False, residual=False, feat_dropout=0.6, attn_dropout=0.6, dropout=0.5) print("Number of parameters: {}.".format(utils.get_num_params(model))) print(model) # + [markdown] tags=[] # ### 1.2.3. APPNP (Approximated Personalized Propagation of Neural Predictions) # + tags=[] from grb.model.torch import APPNP from grb.utils.normalize import GCNAdjNorm model_name = "appnp" model = APPNP(in_features=dataset.num_features, out_features=dataset.num_classes, hidden_features=128, n_layers=3, adj_norm_func=GCNAdjNorm, layer_norm=False, edge_drop=0.1, alpha=0.01, k=3, dropout=0.5) print("Number of parameters: {}.".format(utils.get_num_params(model))) print(model) # + [markdown] tags=[] # ### 1.2.4. GIN (Graph Isomorph Network) # + tags=[] from grb.model.torch import GIN model_name = "gin" model = GIN(in_features=dataset.num_features, out_features=dataset.num_classes, hidden_features=64, n_layers=3, adj_norm_func=None, layer_norm=False, batch_norm=True, dropout=0.5) print("Number of parameters: {}.".format(utils.get_num_params(model))) print(model) # + [markdown] tags=[] # ### 1.2.5. 
GraphSAGE # + tags=[] from grb.model.torch import GraphSAGE from grb.utils.normalize import SAGEAdjNorm model_name = "graphsage" model = GraphSAGE(in_features=dataset.num_features, out_features=dataset.num_classes, hidden_features=64, n_layers=3, adj_norm_func=SAGEAdjNorm, layer_norm=False, dropout=0.5) print("Number of parameters: {}.".format(utils.get_num_params(model))) print(model) # + [markdown] tags=[] # ### 1.2.6. SGCN (Simplified Graph Convolutional Network) # + tags=[] from grb.model.torch import SGCN from grb.utils.normalize import GCNAdjNorm model_name = "sgcn" model = SGCN(in_features=dataset.num_features, out_features=dataset.num_classes, hidden_features=64, n_layers=3, adj_norm_func=GCNAdjNorm, k=4, dropout=0.5) print("Number of parameters: {}.".format(utils.get_num_params(model))) print(model) # + [markdown] tags=[] # ### 1.2.7. TAGCN (Topological Adaptive Graph Convolutional Network) # + tags=[] from grb.model.torch import TAGCN from grb.utils.normalize import GCNAdjNorm model_name = "tagcn" model = TAGCN(in_features=dataset.num_features, out_features=dataset.num_classes, hidden_features=64, n_layers=3, adj_norm_func=GCNAdjNorm, k=2, dropout=0.5) print("Number of parameters: {}.".format(utils.get_num_params(model))) print(model) # + [markdown] tags=[] # ### 1.2.8. MLP (Multi-layer Perceptron) # + tags=[] from grb.model.torch import MLP model_name = "mlp" model = MLP(in_features=dataset.num_features, out_features=dataset.num_classes, hidden_features=64, n_layers=3, dropout=0.5) print("Number of parameters: {}.".format(utils.get_num_params(model))) print(model) # + [markdown] tags=[] # ## 1.3. Training # - # GRB provides ``grb.utils.trainer`` that facilitates the training process of GNNs. The training mode can be chosen from ``inductive`` or ``transductive``. In the inductive mode, only train nodes can be seen during training, train+val nodes can be seen during validation, train+val+test nodes can be seen during testing. 
In the transductive mode, all nodes are available for each process. # + tags=[] save_dir = "./saved_models/{}/{}".format(dataset_name, model_name) save_name = "model.pt" device = "cuda:0" feat_norm = None train_mode = "inductive" # "transductive" # + tags=[] from grb.trainer.trainer import Trainer trainer = Trainer(dataset=dataset, optimizer=torch.optim.Adam(model.parameters(), lr=0.01), loss=torch.nn.functional.cross_entropy, lr_scheduler=False, early_stop=True, early_stop_patience=500, feat_norm=feat_norm, device=device) # + tags=[] trainer.train(model=model, n_epoch=2000, eval_every=1, save_after=0, save_dir=save_dir, save_name=save_name, train_mode=train_mode, verbose=False) # - # ## 1.4. Inference # + tags=[] model = torch.load(os.path.join(save_dir, save_name)) model = model.to(device) model.eval() # + tags=[] # by trainer pred = trainer.inference(model) # + tags=[] # by utils pred = utils.inference(model, features=dataset.features, feat_norm=feat_norm, adj=dataset.adj, adj_norm_func=model.adj_norm_func, device=device) # - # ## 1.5. Evaluation # + tags=[] # by trainer test_score = trainer.evaluate(model, dataset.test_mask) print("Test score: {:.4f}".format(test_score)) # + tags=[] # by utils test_score = utils.evaluate(model, features=dataset.features, adj=dataset.adj, labels=dataset.labels, feat_norm=feat_norm, adj_norm_func=model.adj_norm_func, mask=dataset.test_mask, device=device) print("Test score: {:.4f}".format(test_score)) # - # For further information, please refer to the [GRB Documentation](https://grb.readthedocs.io/en/latest/).
examples/node_classification/training.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # [ATM 623: Climate Modeling](../index.ipynb) # [<NAME>](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany # # Lecture 17: Ice albedo feedback in the EBM # ### About these notes: # # This document uses the interactive [`IPython notebook`](http://ipython.org/notebook.html) format (now also called [`Jupyter`](https://jupyter.org)). The notes can be accessed in several different ways: # # - The interactive notebooks are hosted on `github` at https://github.com/brian-rose/ClimateModeling_courseware # - The latest versions can be viewed as static web pages [rendered on nbviewer](http://nbviewer.ipython.org/github/brian-rose/ClimateModeling_courseware/blob/master/index.ipynb) # - A complete snapshot of the notes as of May 2015 (end of spring semester) are [available on Brian's website](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/Notes/index.html). # # Many of these notes make use of the `climlab` package, available at https://github.com/brian-rose/climlab # ## Contents # # 1. [Interactive snow and ice line in the EBM](#section1) # 2. [Polar-amplified warming in the EBM](#section2) # 3. [Effects of diffusivity in the annual mean EBM with albedo feedback](#section3) # 4. [Diffusive response to a point source of energy](#section4) # ____________ # <a id='section1'></a> # # ## 1. 
Interactive snow and ice line in the EBM # ____________ # # ### The annual mean EBM # # the equation is # # $$ C(\phi) \frac{\partial T_s}{\partial t} = (1-\alpha) ~ Q - \left( A + B~T_s \right) + \frac{D}{\cos⁡\phi } \frac{\partial }{\partial \phi} \left( \cos⁡\phi ~ \frac{\partial T_s}{\partial \phi} \right) $$ # # # # ### Temperature-dependent ice line # # Let the surface albedo be larger wherever the temperature is below some threshold $T_f$: # # $$ \alpha\left(\phi, T(\phi) \right) = \left\{\begin{array}{ccc} # \alpha_0 + \alpha_2 P_2(\sin\phi) & ~ & T(\phi) > T_f \\ # a_i & ~ & T(\phi) \le T_f \\ # \end{array} \right. $$ # # %matplotlib inline import numpy as np import matplotlib.pyplot as plt import climlab # for convenience, set up a dictionary with our reference parameters param = {'A':210, 'B':2, 'a0':0.3, 'a2':0.078, 'ai':0.62, 'Tf':-10.} model1 = climlab.EBM_annual( num_lat=180, D=0.55, **param ) print model1 # Because we provided a parameter `ai` for the icy albedo, our model now contains several sub-processes contained within the process called `albedo`. Together these implement the step-function formula above. # # The process called `iceline` simply looks for grid cells with temperature below $T_f$. 
print model1.param def ebm_plot( model, figsize=(8,12), show=True ): '''This function makes a plot of the current state of the model, including temperature, energy budget, and heat transport.''' templimits = -30,35 radlimits = -340, 340 htlimits = -7,7 latlimits = -90,90 lat_ticks = np.arange(-90,90,30) fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(3,1,1) ax1.plot(model.lat, model.Ts) ax1.set_xlim(latlimits) ax1.set_ylim(templimits) ax1.set_ylabel('Temperature (deg C)') ax1.set_xticks( lat_ticks ) ax1.grid() ax2 = fig.add_subplot(3,1,2) ax2.plot(model.lat, model.diagnostics['ASR'], 'k--', label='SW' ) ax2.plot(model.lat, -model.diagnostics['OLR'], 'r--', label='LW' ) ax2.plot(model.lat, model.diagnostics['net_radiation'], 'c-', label='net rad' ) ax2.plot(model.lat, model.heat_transport_convergence(), 'g--', label='dyn' ) ax2.plot(model.lat, model.diagnostics['net_radiation'].squeeze() + model.heat_transport_convergence(), 'b-', label='total' ) ax2.set_xlim(latlimits) ax2.set_ylim(radlimits) ax2.set_ylabel('Energy budget (W m$^{-2}$)') ax2.set_xticks( lat_ticks ) ax2.grid() ax2.legend() ax3 = fig.add_subplot(3,1,3) ax3.plot(model.lat_bounds, model.heat_transport() ) ax3.set_xlim(latlimits) ax3.set_ylim(htlimits) ax3.set_ylabel('Heat transport (PW)') ax3.set_xlabel('Latitude') ax3.set_xticks( lat_ticks ) ax3.grid() return fig model1.integrate_years(5) f = ebm_plot(model1) model1.diagnostics['icelat'] # ____________ # <a id='section2'></a> # # ## 2. Polar-amplified warming in the EBM # ____________ # # # ### Add a small radiative forcing # # The equivalent of doubling CO2 in this model is something like # # $$ A \rightarrow A - \delta A $$ # # where $\delta A = 4$ W m$^{-2}$. # # + deltaA = 4. 
model2 = climlab.process_like(model1) model2.subprocess['LW'].A = param['A'] - deltaA model2.integrate_years(5, verbose=False) plt.plot(model1.lat, model1.Ts) plt.plot(model2.lat, model2.Ts) # - # The warming is polar-amplified: more warming at the poles than elsewhere. # # Why? # # Also, the current ice line is now: model2.diagnostics['icelat'] # There is no ice left! # # Let's do some more greenhouse warming: # + model3 = climlab.process_like(model1) model3.subprocess['LW'].A = param['A'] - 2*deltaA model3.integrate_years(5, verbose=False) plt.plot(model1.lat, model1.Ts) plt.plot(model2.lat, model2.Ts) plt.plot(model3.lat, model3.Ts) plt.xlim(-90, 90) plt.grid() # - # In the ice-free regime, there is no polar-amplified warming. A uniform radiative forcing produces a uniform warming. # ____________ # <a id='section3'></a> # # ## 3. Effects of diffusivity in the annual mean EBM with albedo feedback # ____________ # # # ### In-class investigation: # # We will repeat the exercise from Lecture 14, but this time with albedo feedback included in our model. # # - Solve the annual-mean EBM (integrate out to equilibrium) over a range of different diffusivity parameters. 
# - Make three plots: # - Global-mean temperature as a function of $D$ # - Equator-to-pole temperature difference $\Delta T$ as a function of $D$ # - Poleward heat transport across 35 degrees $\mathcal{H}_{max}$ as a function of $D$ # - Choose a value of $D$ that gives a reasonable approximation to observations: # - $\Delta T \approx 45$ ºC # # Use these parameter values: param = {'A':210, 'B':2, 'a0':0.3, 'a2':0.078, 'ai':0.62, 'Tf':-10.} print param # ### One possible way to do this: Darray = np.arange(0., 2.05, 0.05) model_list = [] Tmean_list = [] deltaT_list = [] Hmax_list = [] for D in Darray: ebm = climlab.EBM_annual(num_lat=360, D=D, **param ) #ebm.subprocess['insolation'].s2 = -0.473 ebm.integrate_years(5., verbose=False) Tmean = ebm.global_mean_temperature() deltaT = np.max(ebm.Ts) - np.min(ebm.Ts) HT = ebm.heat_transport() #Hmax = np.max(np.abs(HT)) ind = np.where(ebm.lat_bounds==35.5)[0] Hmax = HT[ind] model_list.append(ebm) Tmean_list.append(Tmean) deltaT_list.append(deltaT) Hmax_list.append(Hmax) # + color1 = 'b' color2 = 'r' fig = plt.figure(figsize=(8,6)) ax1 = fig.add_subplot(111) ax1.plot(Darray, deltaT_list, color=color1, label='$\Delta T$') ax1.plot(Darray, Tmean_list, '--', color=color1, label='$\overline{T}$') ax1.set_xlabel('D (W m$^{-2}$ K$^{-1}$)', fontsize=14) ax1.set_xticks(np.arange(Darray[0], Darray[-1], 0.2)) ax1.set_ylabel('Temperature ($^\circ$C)', fontsize=14, color=color1) for tl in ax1.get_yticklabels(): tl.set_color(color1) ax1.legend(loc='center right') ax2 = ax1.twinx() ax2.plot(Darray, Hmax_list, color=color2) ax2.set_ylabel('Poleward heat transport across 35.5$^\circ$ (PW)', fontsize=14, color=color2) for tl in ax2.get_yticklabels(): tl.set_color(color2) ax1.set_title('Effect of diffusivity on EBM with albedo feedback', fontsize=16) ax1.grid() # - # ____________ # <a id='section4'></a> # # ## 4. 
# Diffusive response to a point source of energy
# ____________
#
# Let's add a point heat source to the EBM and see what sets the spatial structure of the response.
#
# We will add a heat source at about 45º latitude.
#
# First, we will calculate the response in a model **without albedo feedback**.

# Same parameters as before, but without an icy-albedo value `ai`,
# so the ice-albedo feedback is disabled in this experiment.
param_noalb = {'A': 210, 'B': 2, 'D': 0.55, 'Tf': -10.0, 'a0': 0.3, 'a2': 0.078}
m1 = climlab.EBM_annual(num_lat=180, **param_noalb)
# FIX: was the Python-2 statement `print m1`, a SyntaxError on Python 3.
print(m1)

m1.integrate_years(5.)

m2 = climlab.process_like(m1)

# +
# Attach a 100 W m^-2 heat source in the single grid cell at 45.5ºN.
point_source = climlab.process.energy_budget.ExternalEnergySource(state=m2.state)
ind = np.where(m2.lat == 45.5)
point_source.heating_rate['Ts'][ind] = 100.

m2.add_subprocess('point source', point_source)
print(m2)
# -

m2.integrate_years(5.)

plt.plot(m2.lat, m2.Ts - m1.Ts)
plt.xlim(-90, 90)
plt.grid()

# The warming effects of our point source are felt **at all latitudes** but the effects decay away from the heat source.
#
# Some analysis will show that the length scale of the warming is proportional to
#
# $$ \sqrt{\frac{D}{B}} $$
#
# so increases with the diffusivity.

# Now repeat this calculation **with ice albedo feedback**

m3 = climlab.EBM_annual(num_lat=180, **param)
m3.integrate_years(5.)
m4 = climlab.process_like(m3)

# Same point source as above; `ind` (the 45.5ºN index) is reused from the
# no-feedback experiment, which is valid because both grids are identical
# (num_lat=180 in both cases).
point_source = climlab.process.energy_budget.ExternalEnergySource(state=m4.state)
point_source.heating_rate['Ts'][ind] = 100.
m4.add_subprocess('point source', point_source)

m4.integrate_years(5.)

plt.plot(m4.lat, m4.Ts - m3.Ts)
plt.xlim(-90, 90)
plt.grid()

# Now the maximum warming **does not coincide with the heat source at 45º**!
#
# Our heat source has led to melting of snow and ice, which induces an additional heat source in the high northern latitudes.
# # **Heat transport communicates the external warming to the ice cap, and also communicates the increased shortwave absorption due to ice melt globally!**

# <div class="alert alert-success">
# [Back to ATM 623 notebook home](../index.ipynb)
# </div>

# ____________
# ## Credits
#
# The author of this notebook is [<NAME>](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany.
#
# It was developed in support of [ATM 623: Climate Modeling](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/), a graduate-level course in the [Department of Atmospheric and Environmental Sciences](http://www.albany.edu/atmos/index.php), offered in Spring 2015.
# ____________

# ____________
# ## Version information
# ____________

# %install_ext http://raw.github.com/jrjohansson/version_information/master/version_information.py
# %load_ext version_information
# %version_information numpy, climlab
Lectures/Lecture17 -- Ice albedo feedback in the EBM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] tags=[] # # Fingerprints # # In this notebook I will be exclusively looking at the QM9 dataset. # # ### QM9 dataset # # QM9 provides quantum chemical properties for a relevant, consistent, and comprehensive chemical space of small organic molecules. This dataset consists of 130,831 molecules with 12 regression targets. # - import os import pandas as pd import numpy as np import tqdm.notebook as tqdm # + # RDkit imports import rdkit from rdkit import Chem from rdkit.Chem import Draw from rdkit.Chem.Draw import IPythonConsole #Needed to show molecules print(rdkit.__version__) Chem.WrapLogs() lg = rdkit.RDLogger.logger() lg.setLevel(rdkit.RDLogger.CRITICAL) # - qm9_data_csv = pd.read_csv(os.path.join('small_molecule_data/qm9.csv')) qm9_data_csv.head(3) qm9_data_csv.shape # The QM9 dataset from the [MoleculeNet: A Benchmark for Molecular Machine Learning](https://arxiv.org/abs/1703.00564) paper, consisting of about 130,000 molecules with multiple regression targets. # # Each molecule includes complete spatial information for the single low energy conformation of the atoms in the molecule. 
# # More information on each descriptor [here](https://www.kaggle.com/zaharch/quantum-machine-9-qm9) # # * [Recent paper on exploring the QM9 dataset](https://pubs.acs.org/doi/10.1021/acs.jpca.0c05969) mol_temp = qm9_data_csv.iloc[125559] mol_temp mol_obj = Chem.MolFromSmiles(mol_temp['smiles']) mol_obj # To output x y z of the molecule print(Chem.MolToMolBlock(mol_obj)) # Take a small sample from QM9 dataset QM9_df_smol = qm9_data_csv.sample(10).reset_index(drop=True) QM9_df_smol.head(2) QM9_df_smol.shape # `PandasTools` module helps add `mol` molecule objects from RDKit as per the SMILES in the dataframe from rdkit.Chem import PandasTools PandasTools.AddMoleculeColumnToFrame(QM9_df_smol, smilesCol='smiles') # Check the new `ROMol` columns being appended in the dataframe QM9_df_smol.columns QM9_df_smol['ROMol'][0] # Visualize the dataframe, add properties of interest at the bottom, you can add index too if need PandasTools.FrameToGridImage(QM9_df_smol, legendsCol='gap', molsPerRow=3, subImgSize=(200,200)) # + [markdown] tags=[] # ## Fingerprints # # Compress molecules into vectors for mathetical operations and comparisons. First we will look at `MorganFingerprint` method. For this method we have to define the radius and the size of the vector being used. 
# More information on Morgan Fingerprints can be read at this [blogpost](https://depth-first.com/articles/2019/01/11/extended-connectivity-fingerprints/)
#
# * [Nice Review on this matter from <NAME>](https://pubmed.ncbi.nlm.nih.gov/20838967/)
# * [Presentation by <NAME> (creator of RDkit) on Fingerprints](https://www.rdkit.org/UGM/2012/Landrum_RDKit_UGM.Fingerprints.Final.pptx.pdf)
# * [Official Documentation on Fingerprints in RDkit](https://www.rdkit.org/docs/GettingStartedInPython.html#fingerprinting-and-molecular-similarity)
# -

# Fingerprints
from rdkit.Chem import AllChem

# Fingerprint hyper-parameters used throughout this notebook.
_radius = 2
_nBits = 2 ** 10

# FIX: the comprehension previously passed the undefined name `radius`
# (NameError at runtime); the constant defined above is `_radius`.
ECFP6 = [AllChem.GetMorganFingerprint(m, _radius) for m in QM9_df_smol['ROMol']]

len(ECFP6)

# ### Types of fingerprints to consider:
#
# 1. Descriptor based fingerprints - more information [here](https://www.rdkit.org/docs/GettingStartedInPython.html#list-of-available-descriptors)
#
# 2. Count or binary-based fingerprints
#
# 2.1. Circular Fingerprints (Morgan) - Extended Connectivity (ECFP)
#
# 2.2. Atom pair
#
# 2.3. Torsion
#
# 2.4. MACCS Keys
#
# 2.5. RDkit
#
# 3. Data-driven fingerprints
#
# ```python
# fps1 = [Chem.RDKFingerprint(x, fpSize=1024, minPath=1, maxPath=4) for x in suppl]
# fps2 = [Chem.GetHashedMorganFingerprint(x, radius=2, nBits=1024) for x in suppl]
# fps3 = [Chem.GetMorganFingerprint(x, radius=2, useCounts= True) for x in suppl]
# fps4 = [Pairs.GetAtomPairFingerprintAsIntVect(x) for x in suppl]
# arr = np.zeros((4,1024), dtype = np.int8)
# for i in range(0,len(suppl)):
#     DataStructs.ConvertToNumpyArray(fps2[i], arr[i])
# print(arr)
# ```

# + [markdown] tags=[]
# ## 2.
# Count or binary fingerprint
# -

from rdkit.Chem import AllChem
# FIX: DataStructs is used in the cell below but was only imported two
# cells later, so running the notebook top-to-bottom raised a NameError.
from rdkit import DataStructs

fp = AllChem.GetMorganFingerprintAsBitVect(mol_obj, _radius, nBits= _nBits)
fp_array = [int(x) for x in fp.ToBitString()]

# Pairs.GetHashedAtomPairFingerprint
# GetMorganFingerprintAsBitVect
# GetHashedMorganFingerprint

from rdkit.Chem.AtomPairs import Pairs, Torsions

# Hashed (fixed-length) vs. unhashed (sparse) atom-pair fingerprints.
fpvect1 = Pairs.GetHashedAtomPairFingerprint(mol_obj)
fpvect2 = Pairs.GetAtomPairFingerprint(mol_obj)

fp1 = np.zeros((1,))
fp2 = np.zeros((1,))

#DataStructs.ConvertToNumpyArray(fp_vect, fp)
#print(type(fp))
DataStructs.ConvertToNumpyArray(fpvect1, fp1)
DataStructs.ConvertToNumpyArray(fpvect2, fp2)

fp1.shape

fp2.shape

from rdkit.Chem.AtomPairs import Pairs, Torsions
from rdkit import Chem, DataStructs

# Same comparison for Morgan fingerprints: hashed (1024 bits) vs. unhashed (sparse).
fpvect1 = AllChem.GetHashedMorganFingerprint(mol_obj, 2, nBits= 1024)
fpvect2 = AllChem.GetMorganFingerprint(mol_obj, 2)

fp1 = np.zeros((1,))
fp2 = np.zeros((1,))

DataStructs.ConvertToNumpyArray(fpvect1, fp1)
DataStructs.ConvertToNumpyArray(fpvect2, fp2)

# +
from rdkit import Chem, DataStructs
from rdkit.Chem import MACCSkeys
from rdkit.Chem.AtomPairs import Pairs, Torsions


def get_fingerprint(smiles: str,
                    radius: int = 2,
                    num_bits: int = 2048,
                    use_counts: bool = False,
                    type_fp: str = 'Morgan') -> np.ndarray:
    """
    Generates a fingerprint for a smiles string (or an rdkit Mol object).

    :param smiles: A smiles string for a molecule, or an already-built rdkit Mol.
    :param radius: The radius of the fingerprint (Morgan only).
    :param num_bits: The number of bits to use in the fingerprint (Morgan only).
    :param use_counts: Whether to use counts or just a bit vector for the fingerprint (Morgan only).
    :param type_fp: Which fingerprint family to compute: 'Morgan', 'MACCS' or 'RDkit'.
    :return: A 1-D numpy array containing the fingerprint.
    :raises ValueError: If `type_fp` is not one of the supported families.
    """
    # Accept either a SMILES string or a pre-parsed Mol object.
    if isinstance(smiles, str):
        mol = Chem.MolFromSmiles(smiles)
    else:
        mol = smiles

    if type_fp == 'Morgan':
        if use_counts:
            fp_vect = AllChem.GetHashedMorganFingerprint(mol, radius, nBits=num_bits)
        else:
            fp_vect = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=num_bits)
    elif type_fp == 'MACCS':
        fp_vect = MACCSkeys.GenMACCSKeys(mol)
    elif type_fp == 'RDkit':
        # FIX: this branch previously called Chem.RDKFingerprint(x) on an
        # undefined name and discarded the result, so 'RDkit' always crashed.
        fp_vect = Chem.RDKFingerprint(mol)
    else:
        # FIX: an unknown type_fp previously fell through to an UnboundLocalError.
        raise ValueError("Unknown fingerprint type: {}".format(type_fp))

    fp = np.zeros((1,))
    DataStructs.ConvertToNumpyArray(fp_vect, fp)
    return fp
# -

# ## Similarity

# RDKit provides tools for different kinds of similarity search, including Tanimoto, Dice, Cosine, Sokal, Russel… and more. Tanimoto is a very widely used similarity search metric because it incorporates substructure matching. Here is an example

ref_mol = QM9_df_smol.iloc[3]['ROMol']
ref_mol

# Generate fingerprint based representation for that molecule

ref_ECFP4_fps = AllChem.GetMorganFingerprintAsBitVect(ref_mol, radius= _radius, nBits=_nBits)

QM9_smol_ECFP4_fps = [AllChem.GetMorganFingerprintAsBitVect(x, _radius, _nBits) for x in QM9_df_smol['ROMol']]

from rdkit import DataStructs

similarity_efcp4 = [DataStructs.FingerprintSimilarity(ref_ECFP4_fps, x) for x in QM9_smol_ECFP4_fps]

# FIX: the similarity scores were computed but never attached to the dataframe,
# so the sort below raised a KeyError on the missing column.
QM9_df_smol['Tanimoto_Similarity (ECFP4)'] = similarity_efcp4

QM9_df_smol = QM9_df_smol.sort_values(['Tanimoto_Similarity (ECFP4)'], ascending=False)

PandasTools.FrameToGridImage(QM9_df_smol, legendsCol="Tanimoto_Similarity (ECFP4)", molsPerRow=4)
_notebooks/data/Fingerprints.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <a id="top"> # # Machine Learning Data Preparation Using scikit-learn # **Features Data Preparation:** # # - Numerical # - [min-max scaling](#min_max) (a.k.a. normalization) # - susceptible to outliers # - [standardization](#standardization) # - Text # - nominal (order does not matter) # - [label encode (0 to n) and then one-hot encode (matrix of 0s and 1s)](#label_1hot) # - ordinal (order does matter) # - [label encode](#label_1hot) # - document type (free-hand text) # - [CountVectorize()](#count_vectorize) # - [remove STOP WORDS to improve model accuracy](#stop_words) # - ensure such words can be safely removed # - [tfidftransform()](#tfidf) # # ** Target/Class Data Preparation:** # - Text: [LabelBinarizer](#labelbinarizer) # ## Features Data Preparation # <a id="min_max"> # ### Min-Max Scaling # **WARNING:** Be careful of outliers. 
Remove them if using min-max scaling # [[back to top]](#top) # Rescale data (between 0 and 1) import pandas import scipy import numpy from sklearn.preprocessing import MinMaxScaler url = "https://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data" names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class'] dataframe = pandas.read_csv(url, names=names) array = dataframe.values # separate array into input and output components X = array[:,0:8] Y = array[:,8] scaler = MinMaxScaler(feature_range=(0, 1)) rescaledX = scaler.fit_transform(X) # summarize transformed data numpy.set_printoptions(precision=3) print(rescaledX[0:5,:]) # <a id="standardization"> # ### Standardization # [[back to top]](#top) # Standardize data (0 mean, 1 stdev) from sklearn.preprocessing import StandardScaler import pandas import numpy url = "https://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data" names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class'] dataframe = pandas.read_csv(url, names=names) array = dataframe.values # separate array into input and output components X = array[:,0:8] Y = array[:,8] scaler = StandardScaler().fit(X) rescaledX = scaler.transform(X) # summarize transformed data numpy.set_printoptions(precision=3) print(rescaledX[0:5,:]) # <a id="label_1hot"> # ### Label Encode and One-Hot Encode Multiple Columns # [[back to top]](#top) import pandas as pd from sklearn.preprocessing import LabelEncoder, OneHotEncoder df = pd.read_csv('titanic_data.csv') df.head() # limit to categorical data using df.select_dtypes() X = df.select_dtypes(include=[object]).fillna('') X.columns X.head() # LabelEncoder() only accepts 1-D array, so need to use DataFrame's apply() function per this SO [question](https://stackoverflow.com/questions/24458645/label-encoding-across-multiple-columns-in-scikit-learn) to label encode across all columns: le = 
LabelEncoder() X_le = X.apply(le.fit_transform) X_le.head() X_le.shape # **OneHotEncoder() accepts multidimensional array, but it returns sparse matrix. Use .toarray() to obtain just the array** onehot_enc = OneHotEncoder() X_1hot = onehot_enc.fit_transform(X_le).toarray() X_1hot.shape X_1hot # Alternatively, instead of using scikit-learn's OneHotEncoder(), you can use pd.get_dummies() X_1hot2 = pd.get_dummies(data=X_le, columns=X_le.columns) X_1hot2.head() X_1hot2.shape # <a id="count_vectorize"> # ### CountVectorize() # [[back to top]](#top) # + from sklearn.datasets import fetch_20newsgroups categories = ['alt.atheism', 'soc.religion.christian', 'comp.graphics', 'sci.med'] twenty_train = fetch_20newsgroups(subset='train', categories=categories, shuffle=True, random_state=42) # + from sklearn.feature_extraction.text import CountVectorizer count_vect = CountVectorizer() X_train_counts = count_vect.fit_transform(twenty_train.data) X_train_counts.shape # - # <a id="stop_words"> # ### Stop Words # [[back to top]](#top) # + from sklearn.feature_extraction import text len(text.ENGLISH_STOP_WORDS) # - text.ENGLISH_STOP_WORDS # If you want to add additional stop words, then use the union() function since the built-in English stop word is of type Python **```set```** data structure my_additional_stop_words = ['customer','state','states','cust','advise'] updated_stop_words = text.ENGLISH_STOP_WORDS.union(my_additional_stop_words) len(updated_stop_words) updated_stop_words # With updated stop words list, pass the new list to the CountVectorizer constructor: from sklearn.feature_extraction.text import CountVectorizer count_vect = CountVectorizer(stop_words=updated_stop_words) # <a id="tfidf"> # ### TfidfTransform() # [[back to top]](#top) # + from sklearn.feature_extraction.text import TfidfTransformer tfidf_transformer = TfidfTransformer() X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts) X_train_tfidf.shape # - # <a id="labelbinarizer"> # ## Target or Label 
# Data Preparation
# [[back to top]](#top)

# **If your target or label data is text, you can apply both transformations (label encode and one-hot encode) in one shot using ```LabelBinarizer```:**

# +
from sklearn.preprocessing import LabelBinarizer

encoder = LabelBinarizer()
# NOTE(review): `X` here is the multi-column titanic text-feature frame from the
# earlier label-encoding section, not a 1-D target/label column. LabelBinarizer
# expects a single target vector (e.g. df['Survived']) — presumably this cell
# was meant to binarize one column; confirm and pass a 1-D array instead.
class_1hot3 = encoder.fit_transform(X)
# -

# This returns a regular/dense matrix. To return a sparse matrix, just pass ```sparse_output=True``` to the constructor:

# Same transformer, configured to emit a scipy sparse matrix on transform.
encoder = LabelBinarizer(sparse_output=True)
jupyter_notebooks/machine_learning/2_Data_Preparation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Link prediction example and calibration: GraphSAGE on the PubMed-Diabetes citation dataset # In this example, we use our implementation of the [GraphSAGE](http://snap.stanford.edu/graphsage/) algorithm to build a model that predicts citation links in the PubMed-Diabetes dataset (see below). The problem is treated as a supervised link prediction problem on a homogeneous citation network with nodes representing papers (with attributes such as binary keyword indicators and categorical subject) and links corresponding to paper-paper citations. # # To address this problem, we build a model with the following architecture. First we build a two-layer GraphSAGE model that takes labeled `(paper1, paper2)` node pairs corresponding to possible citation links, and outputs a pair of node embeddings for the `paper1` and `paper2` nodes of the pair. These embeddings are then fed into a link classification layer, which first applies a binary operator to those node embeddings (e.g., concatenating them) to construct the embedding of the potential link. Thus obtained link embeddings are passed through the dense link classification layer to obtain link predictions - probability for these candidate links to actually exist in the network. The entire model is trained end-to-end by minimizing the loss function of choice (e.g., binary cross-entropy between predicted link probabilities and true link labels, with true/false citation links having labels 1/0) using stochastic gradient descent (SGD) updates of the model parameters, with minibatches of 'training' links fed into the model. # # Lastly, we investigate the nature of prediction probabilities. We want to know if GraphSAGE's prediction probabilities are well calibrated or not. 
In the latter case, we present two methods for calibrating the model's output. # # **References** # # 1. Inductive Representation Learning on Large Graphs. W.L. Hamilton, <NAME>, and <NAME> arXiv:1706.02216 # [cs.SI], 2017. ([link](http://snap.stanford.edu/graphsage/)) # # 2. On Calibration of Modern Neural Networks. <NAME>, <NAME>, <NAME>, and <NAME>. # ICML 2017. ([link](https://geoffpleiss.com/nn_calibration)) # ### Loading the PubMed Diabetes network data # + import networkx as nx import pandas as pd import numpy as np import itertools import os import matplotlib.pyplot as plt import stellargraph as sg from stellargraph.data import EdgeSplitter from stellargraph.mapper import GraphSAGELinkGenerator from stellargraph.layer import GraphSAGE, link_classification from stellargraph import expected_calibration_error, plot_reliability_diagram from stellargraph import IsotonicCalibration, TemperatureCalibration from tensorflow import keras from sklearn import preprocessing, feature_extraction, model_selection from sklearn.calibration import calibration_curve from sklearn.isotonic import IsotonicRegression from sklearn.metrics import accuracy_score from stellargraph import globalvar from stellargraph import datasets from IPython.display import display, HTML # %matplotlib inline # - # ### Global parameters # Specify the minibatch size (number of node pairs per minibatch) and the number of epochs for training the model: # + tags=["parameters"] batch_size = 50 epochs = 20 # The number of training epochs for training the GraphSAGE model. 
# train, test, validation split train_size = 0.2 test_size = 0.15 val_size = 0.2 # - # ### Loading the PubMed Diabetes network data dataset = datasets.PubMedDiabetes() display(HTML(dataset.description)) dataset.download() # Load the graph from edgelist edgelist = pd.read_csv( os.path.join(dataset.data_directory, "Pubmed-Diabetes.DIRECTED.cites.tab"), sep="\t", skiprows=2, header=None, ) edgelist.drop(columns=[0, 2], inplace=True) edgelist.columns = ["source", "target"] # delete unneccessary prefix edgelist["source"] = edgelist["source"].map(lambda x: x.lstrip("paper:")) edgelist["target"] = edgelist["target"].map(lambda x: x.lstrip("paper:")) edgelist["label"] = "cites" # set the edge type edgelist.head() G = nx.from_pandas_edgelist(edgelist, edge_attr="label") # Load the features and subject for the nodes # + nodes_as_dict = [] with open(os.path.join(dataset.data_directory, "Pubmed-Diabetes.NODE.paper.tab")) as fp: for line in itertools.islice(fp, 2, None): line_res = line.split("\t") pid = line_res[0] feat_name = ["pid"] + [l.split("=")[0] for l in line_res[1:]][ :-1 ] # delete summary feat_value = [l.split("=")[1] for l in line_res[1:]][:-1] # delete summary feat_value = [pid] + [float(x) for x in feat_value] # change to numeric from str row = dict(zip(feat_name, feat_value)) nodes_as_dict.append(row) # Create a Pandas dataframe holding the node data node_data = pd.DataFrame(nodes_as_dict) node_data.fillna(0, inplace=True) node_data["label"] = node_data["label"].astype(int) node_data["label"] = node_data["label"].astype(str) # - node_data.head() node_data.index = node_data["pid"] node_data.drop(columns=["pid", "label"], inplace=True) node_data.head() # Define a set of node features that will be used by the model as the difference between the set of all node features and a list of user-defined node attributes to ignore: feature_names = node_data.columns feature_names # We need to convert node features that will be used by the model to numeric values that are 
required for GraphSAGE input. Note that all node features in the PubMed dataset, except the categorical "label" feature, are already numeric, and don't require the conversion. # node features are already numeric, no further conversion is needed node_features = node_data[feature_names].values node_features.shape # Add node data to G: for nid, f in zip(node_data.index, node_features): G.nodes[str(nid)][globalvar.TYPE_ATTR_NAME] = "paper" # specify node type G.nodes[str(nid)]["feature"] = f # We aim to train a link prediction model, hence we need to prepare the train and test sets of links and the corresponding graphs with those links removed. # # We are going to split our input graph into a train and test graphs using the EdgeSplitter class in `stellargraph.data`. We will use the train graph for training the model (a binary classifier that, given two nodes, predicts whether a link between these two nodes should exist or not) and the test graph for evaluating the model's performance on hold out data. # Each of these graphs will have the same number of nodes as the input graph, but the number of links will differ (be reduced) as some of the links will be removed during each split and used as the positive samples for training/testing the link prediction classifier. 
# From the original graph G, extract a randomly sampled subset of validation edges (true and false citation links) and the reduced graph G_test with the positive test edges removed: # + # Define an edge splitter on the original graph G: edge_splitter_test = EdgeSplitter(G) # Randomly sample a fraction p=0.1 of all positive links, and same number of negative links, from G, and obtain the # reduced graph G_test with the sampled links removed: G_test, edge_ids_test, edge_labels_test = edge_splitter_test.train_test_split( p=test_size, method="global", keep_connected=True ) # - # The reduced graph G_test, together with the test ground truth set of links (edge_ids_test, edge_labels_test), will be used for testing the model. # # Now repeat this procedure to obtain the validation data for the model. From the reduced graph G_test, extract a randomly sampled subset of validation edges (true and false citation links) and the reduced graph G_val with the positive train edges removed: # + # Define an edge splitter on the reduced graph G_test: edge_splitter_val = EdgeSplitter(G_test) # Randomly sample a fraction p=0.1 of all positive links, and same number of negative links, from G_test, and obtain the # reduced graph G_train with the sampled links removed: G_val, edge_ids_val, edge_labels_val = edge_splitter_val.train_test_split( p=val_size, method="global", keep_connected=True ) # - # The reduced graph G_val, together with the validation ground truth set of links (edge_ids_val, edge_labels_val), will be used for validating the model (can also be used to tune the model parameters). # # Now repeat this procedure to obtain the training data for the model. 
From the reduced graph G_val, extract a randomly sampled subset of train edges (true and false citation links) and the reduced graph G_train with the positive train edges removed: # + # Define an edge splitter on the reduced graph G_test: edge_splitter_train = EdgeSplitter(G_val) # Randomly sample a fraction p=0.1 of all positive links, and same number of negative links, from G_test, and obtain the # reduced graph G_train with the sampled links removed: G_train, edge_ids_train, edge_labels_train = edge_splitter_train.train_test_split( p=train_size, method="global", keep_connected=True ) # - # G_train, together with the train ground truth set of links (edge_ids_train, edge_labels_train), will be used for training the model. # Convert G_val, G_train and G_test to StellarGraph objects (undirected, as required by GraphSAGE) for ML: G_val = sg.StellarGraph(G_val, node_features="feature") G_train = sg.StellarGraph(G_train, node_features="feature") G_test = sg.StellarGraph(G_test, node_features="feature") # Summary of G_train, G_val and G_test - note that they have the same set of nodes, only differing in their edge sets: print(G_train.info()) print(G_val.info()) print(G_test.info()) # Next, we create the link generators for sampling and streaming train and test link examples to the model. The link generators essentially "map" pairs of nodes `(paper1, paper2)` to the input of GraphSAGE: they take minibatches of node pairs, sample 2-hop subgraphs with `(paper1, paper2)` head nodes extracted from those pairs, and feed them, together with the corresponding binary labels indicating whether those pairs represent true or false citation links, to the input layer of the GraphSAGE model, for SGD updates of the model parameters. # # # Specify the sizes of 1- and 2-hop neighbour samples for GraphSAGE: # # Note that the length of `num_samples` list defines the number of layers/iterations in the GraphSAGE model. In this example, we are defining a 2-layer GraphSAGE model. 
num_samples = [10, 5] train_gen = GraphSAGELinkGenerator(G_train, batch_size, num_samples) val_gen = GraphSAGELinkGenerator(G_val, batch_size, num_samples) test_gen = GraphSAGELinkGenerator(G_test, batch_size, num_samples) # # GraphSAGE part of the model, with hidden layer sizes of 50 for both GraphSAGE layers, a bias term, and dropout. # # Note that the length of layer_sizes list must be equal to the length of num_samples, as len(num_samples) defines the number of hops (layers) in the GraphSAGE model. layer_sizes = [32, 32] graphsage = GraphSAGE( layer_sizes=layer_sizes, generator=train_gen, bias=True, dropout=0.2 ) # Build the model and expose input and output sockets of graphsage, for node pair inputs: x_inp, x_out = graphsage.build() # Final link classification layer that takes a pair of node embeddings produced by graphsage, applies a binary operator to them to produce the corresponding link embedding ('ip' for inner product; other options for the binary operator can be seen by running a cell with `?link_classification` in it), and passes it through a dense layer: # + logits = link_classification( output_dim=1, output_act="linear", edge_embedding_method="ip" )(x_out) prediction = keras.layers.Activation(keras.activations.sigmoid)(logits) # - # Stack the GraphSAGE and prediction layers into a Keras model, and specify the loss # + model = keras.Model(inputs=x_inp, outputs=prediction) model.compile( optimizer=keras.optimizers.Adam(lr=1e-3), loss=keras.losses.binary_crossentropy, metrics=[keras.metrics.binary_accuracy], ) # - # Evaluate the initial (untrained) model on the train, val and test sets: train_flow = train_gen.flow(edge_ids_train, edge_labels_train, shuffle=True) val_flow = val_gen.flow(edge_ids_val, edge_labels_val) test_flow = test_gen.flow(edge_ids_test, edge_labels_test) # + init_train_metrics = model.evaluate_generator(train_flow) init_val_metrics = model.evaluate_generator(val_flow) init_test_metrics = model.evaluate_generator(test_flow) 
print("\nTrain Set Metrics of the initial (untrained) model:") for name, val in zip(model.metrics_names, init_train_metrics): print("\t{}: {:0.4f}".format(name, val)) print("\nValidation Set Metrics of the initial (untrained) model:") for name, val in zip(model.metrics_names, init_val_metrics): print("\t{}: {:0.4f}".format(name, val)) print("\nTest Set Metrics of the initial (untrained) model:") for name, val in zip(model.metrics_names, init_test_metrics): print("\t{}: {:0.4f}".format(name, val)) # - # Train the model: history = model.fit_generator( train_flow, epochs=epochs, validation_data=val_flow, verbose=0, shuffle=True, ) # Plot the training history: def plot_history(history): metrics = sorted(history.history.keys()) metrics = metrics[: len(metrics) // 2] for m in metrics: # summarize history for metric m plt.plot(history.history[m]) plt.plot(history.history["val_" + m]) plt.title(m) plt.ylabel(m) plt.xlabel("epoch") plt.legend(["train", "test"], loc="upper right") plt.show() plot_history(history) # Evaluate the trained model on test citation links: # + train_metrics = model.evaluate_generator(train_flow) val_metrics = model.evaluate_generator(val_flow) test_metrics = model.evaluate_generator(test_flow) print("\nTrain Set Metrics of the trained model:") for name, val in zip(model.metrics_names, train_metrics): print("\t{}: {:0.4f}".format(name, val)) print("\nValidation Set Metrics of the trained model:") for name, val in zip(model.metrics_names, val_metrics): print("\t{}: {:0.4f}".format(name, val)) print("\nTest Set Metrics of the trained model:") for name, val in zip(model.metrics_names, test_metrics): print("\t{}: {:0.4f}".format(name, val)) # - num_tests = 1 # the number of times to generate predictions all_test_predictions = [ model.predict_generator(test_flow, verbose=True) for _ in np.arange(num_tests) ] # ### Diagnosing model miscalibration # # We are going to use method from scikit-learn.calibration module to calibrate the binary classifier. 
calibration_data = [ calibration_curve( y_prob=test_predictions, y_true=edge_labels_test, n_bins=10, normalize=True ) for test_predictions in all_test_predictions ] # Let' calculate the expected calibration error on the test set before calibration. for fraction_of_positives, mean_predicted_value in calibration_data: ece_pre_calibration = expected_calibration_error( prediction_probabilities=all_test_predictions[0], accuracy=fraction_of_positives, confidence=mean_predicted_value, ) print("ECE: (before calibration) {:.4f}".format(ece_pre_calibration)) # Now let's plot the reliability diagram. This is a visual aid for the diagnosis of a poorly calibrated binary classifier. plot_reliability_diagram( calibration_data, np.array(all_test_predictions[0]), ece=[ece_pre_calibration] ) # ## Model Calibration # # Next, we are going to use our validation set to calibrate the model. # # We will consider two different approaches for calibrating a binary classifier, Platt scaling and Isotonic regression. # # ### Platt Scaling # # $q_i = \sigma(\alpha z_i+\beta)$ where $z_i$ is the GraphSAGE output (before the last layer's activation function is applied), $q_i$ is the calibrated probability, and $\sigma()$ is the sigmoid function. # # $\alpha$ and $\beta$ are the model's trainable parameters. # # For more information see: # - https://en.wikipedia.org/wiki/Platt_scaling # # ### Isotonic Regression # # Isotonic Regression is a regression technique that fits a piece-wise, non-decreasing, linear function to data. For more information see: # - https://scikit-learn.org/stable/modules/generated/sklearn.isotonic.IsotonicRegression.html#sklearn.isotonic.IsotonicRegression # - https://en.wikipedia.org/wiki/Isotonic_regression # Select the calibration method. use_platt = False # True for Platt scaling or False for Isotonic Regression # For simplicity, we are going to calibrate using a single prediction per query point. 
num_tests = 1

# Model that emits the raw (pre-sigmoid) link scores. Platt scaling is fit
# on these logits; isotonic regression works on the sigmoid probabilities.
score_model = keras.Model(inputs=x_inp, outputs=logits)

if use_platt:
    # Platt scaling: collect logits for val/test, plus probabilistic test
    # outputs for the later accuracy comparison.
    all_val_score_predictions = [
        score_model.predict_generator(val_flow, verbose=True)
        for _ in np.arange(num_tests)
    ]
    all_test_score_predictions = [
        score_model.predict_generator(test_flow, verbose=True)
        for _ in np.arange(num_tests)
    ]
    all_test_probabilistic_predictions = [
        model.predict_generator(test_flow, verbose=True)
        for _ in np.arange(num_tests)
    ]
else:
    # Isotonic regression: calibrate directly on probabilities.
    all_val_score_predictions = [
        model.predict_generator(val_flow, verbose=True)
        for _ in np.arange(num_tests)
    ]
    all_test_probabilistic_predictions = [
        model.predict_generator(test_flow, verbose=True)
        for _ in np.arange(num_tests)
    ]

# Average over the repeated prediction runs (a single run here).
val_predictions = np.mean(np.array(all_val_score_predictions), axis=0)

val_predictions.shape

# These are the uncalibrated prediction probabilities.

if use_platt:
    test_predictions = np.mean(np.array(all_test_score_predictions), axis=0)
    test_predictions.shape
else:
    test_predictions = np.mean(np.array(all_test_probabilistic_predictions), axis=0)
    test_predictions.shape

if use_platt:
    # for binary classification this class performs Platt Scaling
    lr = TemperatureCalibration()
else:
    lr = IsotonicCalibration()

val_predictions.shape, edge_labels_val.shape

# Fit the calibrator on held-out validation predictions, then apply it to
# the test-set predictions.
lr.fit(val_predictions, edge_labels_val)

lr_test_predictions = lr.predict(test_predictions)

lr_test_predictions.shape

# Let's check if these predictions are calibrated!
#
# If calibration is successful then the ECE after calibration will be lower and the calibration curve will track the ideal diagonal line more closely.
# Reliability data for the *calibrated* test predictions.
calibration_data = [
    calibration_curve(
        y_prob=lr_test_predictions, y_true=edge_labels_test, n_bins=10, normalize=True
    )
]

for fraction_of_positives, mean_predicted_value in calibration_data:
    ece_post_calibration = expected_calibration_error(
        prediction_probabilities=lr_test_predictions,
        accuracy=fraction_of_positives,
        confidence=mean_predicted_value,
    )
    print("ECE (after calibration): {:.4f}".format(ece_post_calibration))

plot_reliability_diagram(
    calibration_data, lr_test_predictions, ece=[ece_post_calibration]
)

# As a final test, check if the accuracy of the model changes after calibration.

# Hard 0/1 labels from the *uncalibrated* probabilities at a 0.5 threshold.
y_pred = np.zeros(len(test_predictions))
if use_platt:
    # the true predictions are the probabilistic outputs
    test_predictions = np.mean(np.array(all_test_probabilistic_predictions), axis=0)
y_pred[test_predictions.reshape(-1) > 0.5] = 1
print(
    "Accuracy of model before calibration: {:.2f}".format(
        accuracy_score(y_pred=y_pred, y_true=edge_labels_test)
    )
)

# Hard labels from the *calibrated* probabilities, same 0.5 threshold.
y_pred = np.zeros(len(lr_test_predictions))
y_pred[lr_test_predictions[:, 0] > 0.5] = 1
print(
    "Accuracy for model after calibration: {:.2f}".format(
        accuracy_score(y_pred=y_pred, y_true=edge_labels_test)
    )
)

# ## Conclusion
#
# This notebook demonstrated how to use Platt scaling and isotonic regression to calibrate a GraphSAGE model used for link prediction in a paper citation network. Importantly, it showed that using calibration can improve the classification model's accuracy.
demos/calibration/calibration-pubmed-link-prediction.ipynb
# +
# setup
from mlwpy import *
# %matplotlib inline

import cv2
# -

# Tiny corpus used to demonstrate bag-of-words / TF-IDF by hand.
docs = ["the cat in the hat",
        "the cow jumped over the moon",
        "the cat mooed and the cow meowed",
        "the cat said to the cow cow you are not a cat"]

vocabulary = set(" ".join(docs).split())

# Remove stop words before building the document-term representations.
common_words = set(['a', 'to', 'the', 'in', 'and', 'are'])
vocabulary = vocabulary - common_words
print(textwrap.fill(str(vocabulary)))

# {k:v for k in lst} creates a dictionary from keys:values
# it is called a "dictionary comprehension"
doc_contains = [{w: (w in d) for w in vocabulary} for d in docs]
display(pd.DataFrame(doc_contains))

c1 = ['cat', 'cow', 'hat', 'jumped', 'meowed']
c2 = ['mooed', 'moon', 'not', 'over', 'said', 'you']
doc_contains = [{w: (w in d) for w in vocabulary} for d in docs]
display(pd.DataFrame(doc_contains)[c1])
display(pd.DataFrame(doc_contains)[c2])

# Word counts (term frequencies) per document.
word_count = [{w: d.count(w) for w in vocabulary} for d in docs]
wcs = pd.DataFrame(word_count)
display(wcs)

import sklearn.feature_extraction.text as sk_txt
sparse = sk_txt.CountVectorizer(stop_words='english').fit_transform(docs)
sparse

sparse.todense()

# wcs.values.sum(axis=0, keepdims=True)
# Document frequency: in how many docs does each word appear at least once?
# FIX: use builtin `bool` — the `np.bool` alias was deprecated in NumPy 1.20
# and removed in NumPy 1.24.
doc_freq = pd.DataFrame(wcs.astype(bool).sum(axis='rows')).T
display(doc_freq)

idf = np.log(len(docs) / doc_freq)
# == np.log(len(docs)) - np.log(doc_freq)
display(idf)

tf_idf = wcs * idf.iloc[0]  # aligns columns for multiplication
display(tf_idf)

skpre.Normalizer(norm='l1').fit_transform(wcs)

sparse = (sk_txt.TfidfVectorizer(norm='l1', stop_words='english')
                .fit_transform(docs))
sparse.todense()

from sklearn.datasets import fetch_20newsgroups
twenty_train = fetch_20newsgroups(subset='train')

print("the groups:")
print(textwrap.fill(str(twenty_train.target_names)))

print("\n".join(twenty_train.data[0].splitlines()[:10]))

# +
# Counts first, then TF-IDF reweighting on top of the counts.
ct_vect = sk_txt.CountVectorizer()
tfidf_xform = sk_txt.TfidfTransformer()

docs_as_counts = ct_vect.fit_transform(twenty_train.data)
docs_as_tfidf = tfidf_xform.fit_transform(docs_as_counts)
# -

model = naive_bayes.MultinomialNB().fit(docs_as_tfidf, twenty_train.target)

# Equivalent workflow as a single pipeline (count -> tfidf -> NB).
doc_pipeline = pipeline.make_pipeline(sk_txt.CountVectorizer(),
                                      sk_txt.TfidfTransformer(),
                                      naive_bayes.MultinomialNB())

categories = ['misc.forsale', 'comp.graphics', 'sci.med', 'sci.space']

# +
twenty_train = fetch_20newsgroups(subset='train',
                                  categories=categories,
                                  shuffle=True,
                                  random_state=42)

# TfidfVectorizer == CountVectorizer + TfidfTransformer in one step.
doc_pipeline = pipeline.make_pipeline(sk_txt.TfidfVectorizer(),
                                      naive_bayes.MultinomialNB())
model = doc_pipeline.fit(twenty_train.data, twenty_train.target)
# -

# +
twenty_test = fetch_20newsgroups(subset='test',
                                 categories=categories,
                                 shuffle=True,
                                 random_state=42)
doc_preds = model.predict(twenty_test.data)

cm = metrics.confusion_matrix(twenty_test.target, doc_preds)
ax = sns.heatmap(cm, annot=True,
                 xticklabels=twenty_test.target_names,
                 yticklabels=twenty_test.target_names,
                 fmt='3d')  # cells are counts
ax.set_xlabel('Predicted')
ax.set_ylabel('Actual');
# -

# +
# Side demo: PCA to 2-D, then k-means clustering vs. true iris labels.
iris = datasets.load_iris()
twod_iris = (decomposition.PCA(n_components=2, whiten=True)
                          .fit_transform(iris.data))

clusters = cluster.KMeans(n_clusters=3).fit(twod_iris)

fig, axes = plt.subplots(1, 2, figsize=(8, 4))
axes[0].scatter(*twod_iris.T, c=iris.target)
axes[1].scatter(*twod_iris.T, c=clusters.labels_)
axes[0].set_title("Truth"), axes[1].set_title("Clustered");
# -

# +
# exploring the data
# NOTE(review): expects the Caltech-101 image set on disk at this path.
objcat_path = "./data/101_ObjectCategories"
cat_paths = glob.glob(osp.join(objcat_path, "*"))
all_categories = [d.split('/')[-1] for d in cat_paths]

print("number of categories:", len(all_categories))
print("first 10 categories:\n", textwrap.fill(str(all_categories[:10])))
# -

# +
from skimage.io import imread
test_path = osp.join(objcat_path, 'accordion', 'image_0001.jpg')
test_img = imread(test_path)

fig, ax = plt.subplots(1, 1, figsize=(2, 2))
ax.imshow(test_img)
ax.axis('off');
# -


# +
def img_to_local_words(img):
    """Extract SIFT descriptors ("local visual words") from one image.

    Returns an (n_keypoints, 128) array of descriptors, or None when no
    keypoints are found.
    """
    # NOTE(review): cv2.xfeatures2d requires opencv-contrib < 4.4; on
    # OpenCV >= 4.4 SIFT is available as cv2.SIFT_create() — confirm the
    # installed version before changing this.
    sift = cv2.xfeatures2d.SIFT_create()
    key_points, descriptors = sift.detectAndCompute(img, None)
    return descriptors


def id_to_path(img_id):
    """Map a (category, image_number) id to its file path on disk."""
    cat, num = img_id
    return osp.join(objcat_path, cat,
                    "image_{:04d}.jpg".format(num))


def add_local_words_for_img(local_ftrs, img_id):
    """Load the image for `img_id` and append its descriptor table to
    `local_ftrs[category]` (updates `local_ftrs` in place)."""
    cat, _ = img_id
    img_path = id_to_path(img_id)
    img = imread(img_path)
    local_ftrs.setdefault(cat, []).append(img_to_local_words(img))
# -

# +
# setup a few constants
use_cats = ['accordion', 'airplanes', 'anchor']
use_imgs = range(1, 11)
img_ids = list(it.product(use_cats, use_imgs))
num_imgs = len(img_ids)
global_vocab_size = 20
# -

# turn each img into table of local visual words
# (1 table per image, 1 word per row)
local_words = {}
for img_id in img_ids:
    add_local_words_for_img(local_words, img_id)

print(local_words.keys())

# +
# itcfi is basically a way to get each individual item from an
# iterator of items; it's a long name, so I abbreviate it
itcfi = it.chain.from_iterable

img_local_word_cts = [lf.shape[0] for lf in itcfi(local_words.values())]
print("num of local words for images:")
print(textwrap.fill(str(img_local_word_cts), width=50))
# -

# +
# how wide are the local word tables
num_local_words = local_words[use_cats[0]][0].shape[1]

# how many local words are there total?
all_local_words = list(itcfi(local_words.values()))
tot_num_local_words = sum(lw.shape[0] for lw in all_local_words)
print('total num local words:', tot_num_local_words)

# construct joined local tables to perform clustering
# np_array_fromiter is described at the end of the chapter
lwa_shape = (tot_num_local_words, num_local_words)
local_word_arr = np_array_fromiter(itcfi(all_local_words), lwa_shape)
print('local word tbl:', local_word_arr.shape)
# -

# cluster (and translate) the local words to global words
translator = cluster.KMeans(n_clusters=global_vocab_size)
global_words = translator.fit_predict(local_word_arr)
print('translated words shape:', global_words.shape)

# +
# which image do the local words belong to
# enumerate_outer is described at the end of the chapter
which_img = enumerate_outer(all_local_words)
print('which img len:', len(which_img))

# img by global words -> img by histogram
counts = co.Counter(zip(which_img, global_words))
imgs_as_bogvw = np.zeros((num_imgs, global_vocab_size))
for (img, global_word), count in counts.items():
    imgs_as_bogvw[img, global_word] = count
print('shape hist table:', imgs_as_bogvw.shape)
# -

# bit of a hack; local_ftrs.values() gives
# [[img1, img2], [img3, img4, img5], etc.]
# answers: what category am i from?
img_tgts = enumerate_outer(local_words.values())
print('img tgt values:', img_tgts[:10])

# build learning model
std_svc = pipeline.make_pipeline(skpre.StandardScaler(), svm.SVC())
svc = std_svc.fit(imgs_as_bogvw, img_tgts)


def image_to_example(img_id, translator):
    """Convert a single image id into a 1-row bag-of-global-visual-words
    histogram suitable for `svc.predict`."""
    img_local = img_to_local_words(imread(id_to_path(img_id)))
    img_global = translator.predict(img_local)
    img_bogvw = np.bincount(img_global, minlength=translator.n_clusters)
    return img_bogvw.reshape(1, -1).astype(np.float64)


# Sanity check: classify an unseen image (number 12) from each category.
for cat in use_cats:
    test = image_to_example((cat, 12), translator)
    print(svc.predict(test))


class BOVW_XForm:
    """sklearn-style transformer: image ids -> bag-of-global-visual-words
    histograms, using a KMeans "translator" fit on SIFT descriptors."""

    def __init__(self):
        pass

    def _to_local_words(self, img_ids):
        # turn each img into table of local visual words (1 word per row)
        local_words = {}
        for img_id in img_ids:
            add_local_words_for_img(local_words, img_id)
        itcfi = it.chain.from_iterable
        all_local_words = list(itcfi(local_words.values()))
        return all_local_words

    def fit(self, img_ids, tgt=None):
        """Learn the global visual vocabulary by clustering all local words."""
        all_local_words = self._to_local_words(img_ids)
        tot_num_local_words = sum(lw.shape[0] for lw in all_local_words)
        local_word_arr = np_array_fromiter(itcfi(all_local_words),
                                           (tot_num_local_words, num_local_words))
        self.translator = cluster.KMeans(n_clusters=global_vocab_size)
        self.translator.fit(local_word_arr)
        return self

    def transform(self, img_ids, tgt=None):
        """Histogram each image's local words against the learned vocabulary."""
        all_local_words = self._to_local_words(img_ids)
        tot_num_local_words = sum(lw.shape[0] for lw in all_local_words)
        local_word_arr = np_array_fromiter(itcfi(all_local_words),
                                           (tot_num_local_words, num_local_words))
        global_words = self.translator.predict(local_word_arr)

        # img by global words -> img by histogram
        which_img = enumerate_outer(all_local_words)
        counts = co.Counter(zip(which_img, global_words))
        imgs_as_bogvw = np.zeros((len(img_ids), global_vocab_size))
        for (img, global_word), count in counts.items():
            imgs_as_bogvw[img, global_word] = count
        return imgs_as_bogvw


# +
use_cats = ['watch', 'umbrella', 'sunflower', 'kangaroo']
use_imgs = range(1, 40)

img_ids = list(it.product(use_cats, use_imgs))
num_imgs = len(img_ids)

# hack
cat_id = {c: i for i, c in enumerate(use_cats)}
img_tgts = [cat_id[ii[0]] for ii in img_ids]
# -

(train_img, test_img,
 train_tgt, test_tgt) = skms.train_test_split(img_ids, img_tgts)

bovw_pipe = pipeline.make_pipeline(BOVW_XForm(),
                                   skpre.StandardScaler(),
                                   svm.SVC())
bovw_pipe.fit(train_img, train_tgt);

img_preds = bovw_pipe.predict(test_img)
cm = metrics.confusion_matrix(test_tgt, img_preds)
ax = sns.heatmap(cm, annot=True,
                 xticklabels=use_cats,
                 yticklabels=use_cats,
                 fmt='3d')
ax.set_xlabel('Predicted')
ax.set_ylabel('Actual');


# +
def enumerate_outer(outer_seq):
    '''repeat the outer idx based on len of inner'''
    return np.repeat(*zip(*enumerate(map(len, outer_seq))))


def np_array_fromiter(itr, shape, dtype=np.float64):
    ''' helper since np.fromiter only does 1D'''
    arr = np.empty(shape, dtype=dtype)
    for idx, itm in enumerate(itr):
        arr[idx] = itm
    return arr
# -

enumerate_outer([[0, 1], [10, 20, 30], [100, 200]])

np_array_fromiter(enumerate(range(0, 50, 10)), (5, 2))
14_Feature_Engineering_III_Domain_code.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #hide from fastai_v2_notes.metaclass import * # # fastai_v2_notes # # > Notes relating to Fastai version 2 # ## Usage # `git clone https://github.com/owejow/fastai-v2-notes` # ## How to use # Explore the notebooks in the nbs/ directory # You can also visit the webpage on github.io # # [fastai-v2-notes](https://owejow.github.io/fastai-v2-notes/)
nbs/index.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Face detection using opencv

import numpy as np
import matplotlib.pyplot as plt
import cv2

# **object recognition/detection with cascade files**

# you first need cascade files. For the extremely popular tasks, these already exist. Detecting things like faces, cars, smiles, eyes, and license plates for example are all pretty prevalent.

# **download the opencv Api for object detection **

# <a href="https://github.com/opencv/opencv/blob/master/data/haarcascades/haarcascade_eye.xml">Eye detection</a>

# +
# Haar cascade XML files must be present in the working directory;
# download them from the OpenCV repository linked above.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
# -

# Here, we begin with import cv2 and numpy, then we load in our face and eye cascades. Simple enough so far.

# Open capture device 0 — the default webcam.
cap = cv2.VideoCapture(0)

# + active=""
# while 1:
#     ret, img = cap.read()
#     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#     faces = face_cascade.detectMultiScale(gray, 1.3, 5)
# -

# In all seriousness, "eye detection" probably wouldn't find an eyeball laying around. Most eye detection uses the surrounding skin, eye lids, eye lashes, and eye brows to also make the detection. Thus, our next step is to break down the faces first, before getting to the eyes:

# + active=""
# for (x,y,w,h) in faces:
#     cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
#     roi_gray = gray[y:y+h, x:x+w]
#     roi_color = img[y:y+h, x:x+w]
# -

# **Here, we're finding faces, their sizes, drawing rectangles**

# Next, we poke around for some eyes:

# + active=""
# eyes = eye_cascade.detectMultiScale(roi_gray)
# for (ex,ey,ew,eh) in eyes:
#     cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
# -

# If we find those, we'll go ahead and make some more rectangles

# + active=""
# cv2.imshow('img',img)
# k = cv2.waitKey(30) & 0xff
# if k == 27:
#     break
#
# cap.release()
# cv2.destroyAllWindows()
# -

# **Full code **

# +
cap = cv2.VideoCapture(0)

while 1:
    # Grab a frame; cascade detection runs on the grayscale version.
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # args after the image: scaleFactor=1.3, minNeighbors=5
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    for (x,y,w,h) in faces:
        # Blue box around each detected face.
        cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
        # Search for eyes only inside the detected face region (ROI).
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]

        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex,ey,ew,eh) in eyes:
            # Green box around each detected eye, drawn in ROI coordinates.
            cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)

    cv2.imshow('img',img)
    # Exit the loop when Esc (key code 27) is pressed.
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
# -
student/Face detection using opencv.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="pRzAXrPVzsHX"
# # Finetune RuGPTs in megatron and deepspeed
# How to finetune RuGPTs models with megatron and deepspeed. Example for RuGPT3Small. Note for other models it will take more GPU memory.
#
# This notebook is valid for all RuGPTs models except RuGPT3XL.
# ## Install env

# + id="hu1OzWZ6zqQv"
# !pip3 install transformers==3.5.0

# + id="Us5HeUgOfors" outputId="43fea060-b3b6-490e-afa4-41e0fb41f055" colab={"base_uri": "https://localhost:8080/"}
import subprocess

# Parse the CUDA release number out of `nvcc --version` so the matching
# torch wheel suffix can be chosen below.
CUDA_version = [s for s in subprocess.check_output(["nvcc", "--version"]).decode("UTF-8").split(", ") if s.startswith("release")][0].split(" ")[-1]
print("CUDA version:", CUDA_version)

if CUDA_version == "10.0":
    torch_version_suffix = "+cu100"
elif CUDA_version == "10.1":
    torch_version_suffix = "+cu101"
elif CUDA_version == "10.2":
    # cu102 is the default torch 1.7.1 build, so no suffix is needed.
    torch_version_suffix = ""
else:
    torch_version_suffix = "+cu110"

# + [markdown] id="Maf99CebV3oT"
# If code below doesn't work, check your cuda version and installation here https://pytorch.org/get-started/previous-versions/

# + id="8uNRRWUaVQN0"
# !pip install torch==1.7.1{torch_version_suffix} torchvision==0.8.2{torch_version_suffix} torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html

# + colab={"base_uri": "https://localhost:8080/"} id="ozJOYbK-11pk" outputId="255c20ac-262d-4595-b6fc-e110866eca6e"
# %%writefile setup.sh
git clone https://github.com/NVIDIA/apex
# cd apex
pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./

# + id="M46Pk6DJ19Jk"
# !sh setup.sh

# + id="_gE7xBM_z-uW"
# !git clone https://github.com/sberbank-ai/ru-gpts

# + id="-bVWryahFmtx"
# !pip install deepspeed==0.3.7

# + [markdown] id="8di4sCoS0Pyw"
# ## Download files

# + id="96qG_A1n0CiF"
# !wget -O train.txt https://www.dropbox.com/s/oa3v9c7g9bp40xw/train.txt?dl=0
# !wget -O valid.txt https://www.dropbox.com/s/mworl3ld6r3bg62/valid.txt?dl=0

# + [markdown] id="jqW38Hni64xH"
# ## Prepare data for parallel
# We use custom implementation of distributed dataset. For training and evaluating we should specify file `file.list` with list of paths to txt files. All files from `file.list` will be split between available GPUs. The logic of splitting is described by the following code:
#
# ```python
# shard_size = len(files) // world_size
# shard_start = rank * shard_size
# shard_end = (rank + 1) * shard_size
# files = files[shard_start:shard_end]
# ```
#
# For more details please see full code of dataset: `src.dataset_rugpt3.RuGpt3TextDataset`.

# + id="wtItuLGA38db"
# !echo train.txt > train.list
# !echo valid.txt > valid.list

# + [markdown] id="-EF0JepF0S41"
# ## Train
# Load model from Huggingface and finetune on essays.
#
# This will take around ten minutes.

# + id="XHluAlFh0SJo"
# !export PYTHONPATH=${PYTHONPATH}:${HOME}/ru-gpts
# !USE_DEEPSPEED=1 python -m torch.distributed.launch --nproc_per_node 1 ru-gpts/pretrain_gpt3.py \
#   --train-data-path "train.list" \
#   --test-data-path "valid.list" \
#   --max-files-per-process 100 \
#   --logging-dir="log" \
#   --save model \
#   --load-huggingface sberbank-ai/rugpt3small_based_on_gpt2 \
#   --save-interval 1000 \
#   --log-interval 100 \
#   --eval-interval 1000 \
#   --eval-iters 100 \
#   --model-parallel-size 1 \
#   --num-layers 12 \
#   --hidden-size 768 \
#   --num-attention-heads 12 \
#   --batch-size 1 \
#   --seq-length 2048 \
#   --max-position-embeddings 2048 \
#   --train-iters 2000 \
#   --resume-dataloader \
#   --distributed-backend "nccl" \
#   --lr 0.00015 \
#   --lr-decay-style "cosine" \
#   --lr-decay-iters 3200 \
#   --clip-grad 0.5 \
#   --warmup .004 \
#   --fp16 \
#   --checkpoint-activations \
#   --deepspeed-activation-checkpointing \
#   --deepspeed \
#   --deepspeed_config ru-gpts/src/deepspeed_config/gpt3_small_2048.json \
#

# + [markdown] id="ALvcD5SE8RtP"
# At the end of training output should be something like this:
#
# "-----------------------------------------------------------------------------------------
#
# validation loss at the end of training for test data | LM loss: 3.0002 | LM PPL: 20.090
#
# -----------------------------------------------------------------------------------------"

# + [markdown] id="0HmKilrb8lQm"
# ## Generate
#
# Load pretrained model from dir and generate.

# + colab={"base_uri": "https://localhost:8080/"} id="kAH-WpCG8lmG" outputId="2fa6583f-61af-477e-d3bf-89c6b946a98f"
# !export PYTHONPATH=${PYTHONPATH}:${HOME}/ru-gpts
# !python ru-gpts/generate_samples.py \
#   --load model/ \
#   --model-parallel-size 1 \
#   --num-layers 12 \
#   --hidden-size 768 \
#   --num-attention-heads 12 \
#   --batch-size 1 \
#   --seq-length 500 \
#   --max-position-embeddings 2048 \
#   --distributed-backend "nccl" \
#   --tokenizer-path sberbank-ai/rugpt3small_based_on_gpt2 \
#   --no-load-optim

# + [markdown] id="VCapfDfeBq0x"
# ### Convert checkpoint to Huggingface format

# + colab={"base_uri": "https://localhost:8080/"} id="4JnhIyqd-Eeo" outputId="ff100716-ff84-436d-88af-e6f3c5224d8c"
# !export PYTHONPATH=${PYTHONPATH}:${HOME}/ru-gpts
# !python ru-gpts/convert2huggingface.py \
#   --load model/ \
#   --model-parallel-size 1 \
#   --num-layers 12 \
#   --hidden-size 768 \
#   --num-attention-heads 12 \
#   --max-position-embeddings 2048 \
#   --tokenizer-path sberbank-ai/rugpt3small_based_on_gpt2 \
#   --no-load-optim \
#   --export-huggingface model_hf

# + [markdown] id="KRlEwlPdE0L8"
# #### Test load

# + id="5U81i24aEEm0"
from transformers import GPT2LMHeadModel

# + id="eBRatZnJEcCX"
# Load the converted checkpoint back through Huggingface to verify the export.
model = GPT2LMHeadModel.from_pretrained("model_hf")

# + id="WoBMYR5ZpnmY"
examples/Finetune_and_generate_RuGPTs_deepspeed_megatron.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## import包 import sys sys.path.append('../AutoX') import datetime import pandas as pd import os from autox.autox_recommend.recall_and_rank.feature_engineer import feature_engineer from autox.autox_recommend.recall_and_rank.ranker import ranker, ranker_test, inference from autox.autox_recommend.recall_and_rank.recalls import binary_recall from autox.autox_recommend.recall_and_rank.recalls import history_recall from autox.autox_recommend.recall_and_rank.recalls import itemcf_recall from autox.autox_recommend.recall_and_rank.recalls import popular_recall from autox.autox_recommend.recall_and_rank.recalls import w2v_concent_recall from autox.autox_recommend.metrics import mapk # + # from autox import AutoXRecommend # - import pandas as pd import numpy as np import os from tqdm import tqdm # ## 读取数据 path = '~/AutoX/autox/autox_recommend/datasets/MovieLens_AutoX/' inter_df = pd.read_csv(path + 'inter_df.csv') item_df = pd.read_csv(path + 'item_df.csv') test = pd.read_csv(path + 'test.csv') # ## 配置参数 uid = 'userId' iid = 'movieId' time_col = 'time' recall_num = 100 # ## 准备测试集结果 inter_df[time_col].min(), inter_df[time_col].max() test[time_col].min(), test[time_col].max() assert(test[time_col].min() > inter_df[time_col].max()) # + positive_items_test = test.groupby([uid])[iid].apply(list) test_users = positive_items_test.keys() test_items = [] for i, user in tqdm(enumerate(test_users)): test_items.append(positive_items_test[user]) print("Total users in testidation:", len(test_users)) # - # ## 时间列转化 inter_df[time_col] = pd.to_datetime(inter_df[time_col]) inter_df.head() # ## 执行AutoX class AutoXRecommend(): def __init__(self): pass def fit(self, inter_df, user_df, item_df, uid, iid, time_col, recall_num, time_decay=0.8, debug=False, debug_save_path=None): 
self.inter_df = inter_df self.user_df = user_df self.item_df = item_df self.uid = uid self.iid = iid self.time_col = time_col self.recall_num = recall_num self.time_decay = time_decay self.debug = debug if debug: assert debug_save_path is not None path_output = debug_save_path self.path_output = path_output os.makedirs(path_output, exist_ok=True) temp_date = datetime.datetime.strptime(str(inter_df[time_col].max()), '%Y-%m-%d %H:%M:%S') + \ datetime.timedelta(days=1) valid_date = str(datetime.datetime(temp_date.year, temp_date.month, temp_date.day)) self.valid_date = valid_date train_date = datetime.datetime.strptime(valid_date, '%Y-%m-%d %H:%M:%S') - datetime.timedelta(days=7) train_date = str(train_date) print('\npopular_recall') print('train') popular_recall_train = popular_recall(None, inter_df, date=train_date, uid=uid, iid=iid, time_col=time_col, last_days=7, recall_num=recall_num, dtype='train') print('valid') popular_recall_valid = popular_recall(None, inter_df, date=valid_date, uid=uid, iid=iid, time_col=time_col, last_days=7, recall_num=recall_num, dtype='train') print('\nhistory_recall') print('train') history_recall_train = history_recall(None, inter_df, date=train_date, uid=uid, iid=iid, time_col=time_col, last_days=7, recall_num=recall_num, dtype='train') print('valid') history_recall_valid = history_recall(None, inter_df, date=valid_date, uid=uid, iid=iid, time_col=time_col, last_days=7, recall_num=recall_num, dtype='train') print('\nitemcf_recall') print('train') if os.path.exists(f'{path_output}/itemcf_recall_train.hdf'): itemcf_recall_train = pd.read_hdf(f'{path_output}/itemcf_recall_train.hdf') else: itemcf_recall_train = itemcf_recall(None, inter_df, date=train_date, uid=uid, iid=iid, time_col=time_col, last_days=7, recall_num=recall_num, dtype='train', topk=1000, use_iif=False, sim_last_days=14, time_decay=time_decay) print('valid') if os.path.exists(f'{path_output}/itemcf_recall_valid.hdf'): itemcf_recall_valid = 
pd.read_hdf(f'{path_output}/itemcf_recall_valid.hdf') else: itemcf_recall_valid = itemcf_recall(None, inter_df, date=valid_date, uid=uid, iid=iid, time_col=time_col, last_days=7, recall_num=recall_num, dtype='train', topk=1000, use_iif=False, sim_last_days=14, time_decay=time_decay) if debug: itemcf_recall_train.to_hdf(f'{path_output}/itemcf_recall_train.hdf', 'w', complib='blosc', complevel=5) itemcf_recall_valid.to_hdf(f'{path_output}/itemcf_recall_valid.hdf', 'w', complib='blosc', complevel=5) print('\nbinary_recall') print('train') if os.path.exists(f'{path_output}/binary_recall_train.hdf'): binary_recall_train = pd.read_hdf(f'{path_output}/binary_recall_train.hdf') else: binary_recall_train = binary_recall(None, inter_df, date=train_date, uid=uid, iid=iid, time_col=time_col, last_days=7, recall_num=recall_num, dtype='train', topk=1000) print('valid') if os.path.exists(f'{path_output}/binary_recall_valid.hdf'): binary_recall_valid = pd.read_hdf(f'{path_output}/binary_recall_valid.hdf') else: binary_recall_valid = binary_recall(None, inter_df, date=valid_date, uid=uid, iid=iid, time_col=time_col, last_days=7, recall_num=recall_num, dtype='train', topk=1000) if debug: binary_recall_train.to_hdf(f'{path_output}/binary_recall_train.hdf', 'w', complib='blosc', complevel=5) binary_recall_valid.to_hdf(f'{path_output}/binary_recall_valid.hdf', 'w', complib='blosc', complevel=5) print('\nw2v_content_recall') print('train') if os.path.exists(f'{path_output}/w2v_content_recall_train.hdf'): w2v_content_recall_train = pd.read_hdf(f'{path_output}/w2v_content_recall_train.hdf') else: w2v_content_recall_train = w2v_concent_recall(None, inter_df, date=train_date, uid=uid, iid=iid, time_col=time_col, last_days=7, dtype='train', topn = 20, topk = 20, prefix = 'w2v') print('valid') if os.path.exists(f'{path_output}/w2v_content_recall_valid.hdf'): w2v_content_recall_valid = pd.read_hdf(f'{path_output}/w2v_content_recall_valid.hdf') else: w2v_content_recall_valid = 
w2v_concent_recall(None, inter_df, date=valid_date, uid=uid, iid=iid, time_col=time_col, last_days=7, dtype='train', topn=20, topk=20, prefix='w2v') if debug: w2v_content_recall_train.to_hdf(f'{path_output}/w2v_content_recall_train.hdf', 'w', complib='blosc', complevel=5) w2v_content_recall_valid.to_hdf(f'{path_output}/w2v_content_recall_valid.hdf', 'w', complib='blosc', complevel=5) # 合并召回数据 print('\nmerge recalls') print('train') history_recall_train.drop_duplicates(subset=[uid, iid, 'label'], keep='first', inplace=True) itemcf_recall_train.drop_duplicates(subset=[uid, iid, 'label'], keep='first', inplace=True) binary_recall_train.drop_duplicates(subset=[uid, iid, 'label'], keep='first', inplace=True) train = popular_recall_train.append(history_recall_train) train.drop_duplicates(subset=[uid, iid], keep='first', inplace=True) train = train.merge(itemcf_recall_train, on=[uid, iid, 'label'], how='outer') train = train.merge(binary_recall_train, on=[uid, iid, 'label'], how='outer') train = train.merge(w2v_content_recall_train, on=[uid, iid, 'label'], how='outer') print('valid') history_recall_valid.drop_duplicates(subset=[uid, iid, 'label'], keep='first', inplace=True) itemcf_recall_valid.drop_duplicates(subset=[uid, iid, 'label'], keep='first', inplace=True) binary_recall_valid.drop_duplicates(subset=[uid, iid, 'label'], keep='first', inplace=True) valid = popular_recall_valid.append(history_recall_valid) valid.drop_duplicates(subset=[uid, iid], keep='first', inplace=True) valid = valid.merge(itemcf_recall_valid, on=[uid, iid, 'label'], how='outer') valid = valid.merge(binary_recall_valid, on=[uid, iid, 'label'], how='outer') valid = valid.merge(w2v_content_recall_valid, on=[uid, iid, 'label'], how='outer') # 特征工程 print('\nfeature engineer') print('train') if os.path.exists(f'{path_output}/train_fe.hdf'): train_fe = pd.read_hdf(f'{path_output}/train_fe.hdf') else: train_fe = feature_engineer(train, inter_df, date=train_date, user_df=user_df, item_df=item_df, 
uid=uid, iid=iid, time_col=time_col, last_days=7, dtype='train') print('valid') if os.path.exists(f'{path_output}/valid_fe.hdf'): valid_fe = pd.read_hdf(f'{path_output}/valid_fe.hdf') else: valid_fe = feature_engineer(valid, inter_df, date=valid_date, user_df=user_df, item_df=item_df, uid=uid, iid=iid, time_col=time_col, last_days=7, dtype='train') if debug: train_fe.to_hdf(f'{path_output}/train_fe.hdf', 'w', complib='blosc', complevel=5) valid_fe.to_hdf(f'{path_output}/valid_fe.hdf', 'w', complib='blosc', complevel=5) iid2idx = {} idx2iid = {} for idx, cur_iid in enumerate(train_fe[iid].unique()): iid2idx[cur_iid] = idx idx2iid[idx] = cur_iid self.iid2idx = iid2idx train_fe[iid + '_idx'] = train_fe[iid].map(iid2idx) valid_fe[iid + '_idx'] = valid_fe[iid].map(iid2idx) print(f"train_fe shape: {train_fe.shape}") print(f"valid_fe shape: {valid_fe.shape}") print('\nranker') # todo: 检查train_fe中是否有冗余特征, 方差为0的特征 lgb_ranker, valid_pred = ranker(train_fe, valid_fe, uid=uid, iid=iid, time_col=time_col) print('\nlocal result calculation') # 离线结果打印 valid_pred = valid_pred.sort_values('prob', ascending=False) valid_pred = valid_pred.groupby(uid).head(12).groupby(uid)[iid].agg(list).reset_index() begin_date = datetime.datetime.strptime(valid_date, '%Y-%m-%d %H:%M:%S') - datetime.timedelta(days=7) begin_date = str(begin_date) valid_true = inter_df.loc[inter_df[uid].isin(valid_pred[uid])] valid_true = valid_true[(valid_true[time_col] <= valid_date) & (valid_true[time_col] > begin_date)] print(valid_true[time_col].min(), valid_true[time_col].max()) valid_true = valid_true.groupby(uid)[iid].agg(list).reset_index() print("mAP Score on Validation set:", mapk(valid_true[iid], valid_pred[iid])) self.best_iteration_ = lgb_ranker.best_iteration_ print("#" * 30) print('retrain') # 重新训练 train_date = valid_date # train_date = '2022-04-07 00:00:00' print('\npopular_recall') popular_recall_train = popular_recall(None, inter_df, date=train_date, uid=uid, iid=iid, time_col=time_col, last_days=7, 
recall_num=recall_num, dtype='train') print('\nhistory_recall') history_recall_train = history_recall(None, inter_df, date=train_date, uid=uid, iid=iid, time_col=time_col, last_days=7, recall_num=recall_num, dtype='train') print('\nitemcf_recall') if os.path.exists(f'{path_output}/itemcf_recall_train_all.hdf'): itemcf_recall_train = pd.read_hdf(f'{path_output}/itemcf_recall_train_all.hdf') else: itemcf_recall_train = itemcf_recall(None, inter_df, date=train_date, uid=uid, iid=iid, time_col=time_col, last_days=7, recall_num=recall_num, dtype='train', topk=1000, use_iif=False, sim_last_days=14, time_decay=time_decay) if debug: itemcf_recall_train.to_hdf(f'{path_output}/itemcf_recall_train_all.hdf', 'w', complib='blosc', complevel=5) print('\nbinary_recall') if os.path.exists(f'{path_output}/binary_recall_train_all.hdf'): binary_recall_train = pd.read_hdf(f'{path_output}/binary_recall_train_all.hdf') else: binary_recall_train = binary_recall(None, inter_df, date=train_date, uid=uid, iid=iid, time_col=time_col, last_days=7, recall_num=recall_num, dtype='train', topk=1000) if debug: binary_recall_train.to_hdf(f'{path_output}/binary_recall_train_all.hdf', 'w', complib='blosc', complevel=5) print('\nw2v_content_recall') if os.path.exists(f'{path_output}/w2v_content_recall_train_all.hdf'): w2v_content_recall_train = pd.read_hdf(f'{path_output}/w2v_content_recall_train_all.hdf') else: w2v_content_recall_train = w2v_concent_recall(None, inter_df, date=train_date, uid=uid, iid=iid, time_col=time_col, last_days=7, dtype='train', topn=20, topk=20, prefix='w2v') if debug: w2v_content_recall_train.to_hdf(f'{path_output}/w2v_content_recall_train_all.hdf', 'w', complib='blosc', complevel=5) # 合并召回数据 print('\nmerge recalls') history_recall_train.drop_duplicates(subset=[uid, iid, 'label'], keep='first', inplace=True) itemcf_recall_train.drop_duplicates(subset=[uid, iid, 'label'], keep='first', inplace=True) binary_recall_train.drop_duplicates(subset=[uid, iid, 'label'], keep='first', 
inplace=True) train = popular_recall_train.append(history_recall_train) train.drop_duplicates(subset=[uid, iid], keep='first', inplace=True) train = train.merge(itemcf_recall_train, on=[uid, iid, 'label'], how='outer') train = train.merge(binary_recall_train, on=[uid, iid, 'label'], how='outer') train = train.merge(w2v_content_recall_train, on=[uid, iid, 'label'], how='outer') # 特征工程 print('\nfeature engineer') if os.path.exists(f'{path_output}/train_fe_all.hdf'): train_fe = pd.read_hdf(f'{path_output}/train_fe_all.hdf') else: train_fe = feature_engineer(train, inter_df, date=train_date, user_df=user_df, item_df=item_df, uid=uid, iid=iid, time_col=time_col, last_days=7, dtype='train') if debug: train_fe.to_hdf(f'{path_output}/train_fe_all.hdf', 'w', complib='blosc', complevel=5) train_fe[iid + '_idx'] = train_fe[iid].map(iid2idx) print(f"train_fe shape: {train_fe.shape}") print('\nranker') self.model, self.feats = ranker_test(train_fe, self.best_iteration_, uid=uid, iid=iid, time_col=time_col) def transform(self, uids): test_date = self.valid_date # test_date = '2022-04-07 00:00:00' print('\npopular recall, test') popular_recall_test = popular_recall(uids, self.inter_df, date=test_date, uid=self.uid, iid=self.iid, time_col=self.time_col, last_days=7, recall_num=self.recall_num, dtype='test') print('\nhistory recall, test') history_recall_test = history_recall(uids, self.inter_df, date=test_date, uid=self.uid, iid=self.iid, time_col=self.time_col, last_days=7, recall_num=self.recall_num, dtype='test') print('\nitemcf recall, test') if os.path.exists(f'{self.path_output}/itemcf_recall_test.hdf'): itemcf_recall_test = pd.read_hdf(f'{self.path_output}/itemcf_recall_test.hdf') else: itemcf_recall_test = itemcf_recall(uids, self.inter_df, date=test_date, uid=self.uid, iid=self.iid, time_col=self.time_col, last_days=7, recall_num=self.recall_num, dtype='test', topk=1000, use_iif=False, sim_last_days=14, time_decay=self.time_decay) if self.debug: 
itemcf_recall_test.to_hdf(f'{self.path_output}/itemcf_recall_test.hdf', 'w', complib='blosc', complevel=5) print('\nbinary recall, test') if os.path.exists(f'{self.path_output}/binary_recall_test.hdf'): binary_recall_test = pd.read_hdf(f'{self.path_output}/binary_recall_test.hdf') else: binary_recall_test = binary_recall(uids, self.inter_df, date=test_date, uid=self.uid, iid=self.iid, time_col=self.time_col, last_days=7, recall_num=self.recall_num, dtype='test', topk=1000) if self.debug: binary_recall_test.to_hdf(f'{self.path_output}/binary_recall_test.hdf', 'w', complib='blosc', complevel=5) print('\nw2v_content_recall, test') if os.path.exists(f'{self.path_output}/w2v_content_recall_test.hdf'): w2v_content_recall_test = pd.read_hdf(f'{self.path_output}/w2v_content_recall_test.hdf') else: w2v_content_recall_test = w2v_concent_recall(uids, self.inter_df, date=test_date, uid=self.uid, iid=self.iid, time_col=self.time_col, last_days=7, dtype='test', topn=20, topk=20, prefix='w2v') if self.debug: w2v_content_recall_test.to_hdf(f'{self.path_output}/w2v_content_recall_test.hdf', 'w', complib='blosc', complevel=5) print('\nmerge recalls') history_recall_test.drop_duplicates(subset=[self.uid, self.iid], keep='first', inplace=True) itemcf_recall_test.drop_duplicates(subset=[self.uid, self.iid], keep='first', inplace=True) binary_recall_test.drop_duplicates(subset=[self.uid, self.iid], keep='first', inplace=True) test = popular_recall_test.append(history_recall_test) test.drop_duplicates(subset=[self.uid, self.iid], keep='first', inplace=True) test = test.merge(itemcf_recall_test, on=[self.uid, self.iid], how='outer') test = test.merge(binary_recall_test, on=[self.uid, self.iid], how='outer') test = test.merge(w2v_content_recall_test, on=[self.uid, self.iid], how='outer') print('\nfeature engineer') if os.path.exists(f'{self.path_output}/test_fe.hdf'): test_fe = pd.read_hdf(f'{self.path_output}/test_fe.hdf') else: test_fe = feature_engineer(test, self.inter_df, 
date=test_date, user_df=self.user_df, item_df=self.item_df, uid=self.uid, iid=self.iid, time_col=self.time_col, last_days=7, dtype='test') # if self.debug: # test_fe.to_hdf(f'{self.path_output}/test_fe.hdf', 'w', complib='blosc', complevel=5) test_fe[self.iid + '_idx'] = test_fe[self.iid].map(self.iid2idx) print(f"test_fe shape: {test_fe.shape}") print('\ninference') bs = 60000 recs = inference(self.model, self.feats, test_fe, uids, uid=self.uid, iid=self.iid, time_col=self.time_col, batch_size=bs) return recs # + autoXRecommend = AutoXRecommend() autoXRecommend.fit(inter_df = inter_df, user_df = None, item_df = item_df, uid = uid, iid = iid, time_col = time_col, recall_num = recall_num, time_decay = 0.99, debug = True, debug_save_path = './temp_MovieLens') # - res = autoXRecommend.transform(test_users) # ## 查看结果 # + def apk(actual, predicted, k=12): if len(predicted)>k: predicted = predicted[:k] score = 0.0 num_hits = 0.0 for i,p in enumerate(predicted): if p in actual and p not in predicted[:i]: num_hits += 1.0 score += num_hits / (i+1.0) if not actual: return 0.0 return score / min(len(actual), k) def mapk(actual, predicted, k=12): return np.mean([apk(a,p,k) for a,p in zip(actual, predicted)]) # - outputs = res['prediction'] print("mAP Score on Validation set:", mapk(test_items, outputs)) # mAP Score on Validation set: 0.08030425675382308
autox/autox_recommend/demo/MovieLens/MovieLens_AutoX_recall_and_rank_0520.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Extracting tweets from twitter using tweepy
#
# #### install tweepy using pip command: pip install tweepy
#
# Create an account on Twitter and request access to the Twitter API.
# Once the request is granted, fill in the consumer key, consumer secret, access token and access secret below.
# You can then access the news accounts you follow on your timeline: ETNOW, Business Standard, etc.
#
# #### for example in my twitter account I am following ETNOW and Business standard... so I will get all tweets related to it.

# The Twitter API (tweepy) has some limitations: access rates are limited, as is the type of data you can access. You cannot access past data with a free account; you need to upgrade to an enterprise account.

# +
import tweepy
from tweepy import OAuthHandler
import json
import pandas as pd
import csv

# Twitter API credentials (redacted).
ckey = "<KEY>"
csecret = "<KEY>"
atoken = "<KEY>"
asecret = "<KEY>"

# OAuth 1.0a user-context authentication.
auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)

api = tweepy.API(auth)

# newline='' is required by the csv module so csv.writer controls line
# endings itself (otherwise blank rows appear on Windows); the with-block
# guarantees the file is closed even if the API call raises mid-loop.
with open('file.csv', 'a', newline='') as csvFile:
    csvWriter = csv.writer(csvFile)

    # Cursor transparently paginates the authenticated user's home timeline.
    for status in tweepy.Cursor(api.home_timeline).items(100):
        # Process a single status
        print(status.text)
        print(status.created_at)
        print(status.id)
        print(status.in_reply_to_status_id_str)
        csvWriter.writerow([status.created_at, status.id,
                            status.retweet_count,
                            status.text.encode('utf-8')])
# -

# #### OUTPUT
#
# RT @talktotarun: #CBDT chairman @sushil Chandra sir got 1 year extension till 31.05.2019.
He supposed to retire this month.@IncomeTaxIndia… # 2018-05-25 09:25:24 # 999944513148850177 # None # Delhi PWD asks officials to beautify road stretches, flyover walls https://t.co/cF9smmajGA # 2018-05-25 09:25:04 # 999944427723468800 # None # #EarningsWithETNOW | CADILA HEALTH Q4FY18: https://t.co/F84Rqy10yw # 2018-05-25 09:23:32 # 999944040714878976 # None # India Cements gains 7% post Q4 numbers; profit jumps 3% at Rs 35.3 cr https://t.co/jsCEWNdSd5 # 2018-05-25 09:20:06 # 999943178311651329 # None # India Cements gains 7% post Q4 numbers; profit jumps 3% at Rs 35.3 cr https://t.co/aj1AgFOzIc # 2018-05-25 09:20:06 # 999943177388920832 # None # Q4 review: Global brokerages cut target prices on these 18 largecaps, do you own any? https://t.co/zOlAAoeHZA # 2018-05-25 09:15:09 # 999941931630317569 # None # #4YearReportCard of Modi Govt | Road Minister @nitin_gadkari talks about #TuticorinTurmoil to @SupriyaShrinate, say… https://t.co/FOC8hzDueo # 2018-05-25 09:13:30 # 999941516704526338 # None # #Exclsuive | LoUs were banned by @RBI after #PNBScam. The decision to search for alternatives follows a meeting b/w… https://t.co/vCSocDNkXh # 2018-05-25 09:10:42 # 999940811335729153 # 999940259126304768 # Facebook enables two-factor authentication for privacy - here's how it works https://t.co/WonHskF9kk # 2018-05-25 09:10:06 # 999940662274478080 # None # Govt &amp; @RBI have decided to set up a committee to come up with an alternative trade credit instrument to letters of… https://t.co/Wx0k0QHK9g # 2018-05-25 09:08:30 # 999940259126304768 # None # <NAME> – a sound business at an attractive valuation @NitinAgrawal65 https://t.co/PkKFB1hrRd # 2018-05-25 09:05:06 # 999939402267873285 # None # Crude oil prices have jumped nearly 50% in the last 12 months! # #FuelPriceHike #FuelOnFire https://t.co/5TzWAAyd37 # 2018-05-25 09:01:49 # 999938576598028288 # None # WATCH | Oppo Realme 1 to go on sale today! 
https://t.co/VRnBUjfLOp # 2018-05-25 09:00:14 # 999938181087879171 # None # Lupin's biosimilar Etanercept accepted by Europe drug regulator for review https://t.co/Yg6K3Rq8ok # 2018-05-25 08:50:04 # 999935621287239680 # None # #Exclusive | Will offer lower prices, faster checkouts and convenience of shopping from home, says Kishore Biyani,… https://t.co/dLnpGMebPZ # 2018-05-25 08:46:52 # 999934817188831233 # 999934302384082945 # NZ minister in charge of aviation safety offers to quit for making a phone call on a plane https://t.co/urZjrXfrWa # 2018-05-25 08:45:07 # 999934373842468865 # None # <NAME>, Founder of Future Group wants to offer a new retail experience to his customers. Listen in to an… https://t.co/eOIo50QXsT # 2018-05-25 08:44:50 # 999934302384082945 # None # Panel formed to suggest measures for cleaning of rivers https://t.co/dWdrDwha3f # 2018-05-25 08:35:04 # 999931845310537729 # None # Panel formed to suggest measures for cleaning of rivers https://t.co/5CE9WQJDmB # 2018-05-25 08:34:20 # 999931662610743297 # None # Mirc Electronics, maker of Onida, looks at 20% revenue from non-captive manufacturing in FY19 https://t.co/ULDQRQu1Fx # 2018-05-25 08:05:07 # 999924310541135873 # None # In an #Exclusive interaction, Siby Antony, chairman and MD, Edelweiss ARC tells @nehabothra_7 that Edelweiss is bui… https://t.co/aic3rguDOb # 2018-05-25 08:00:57 # 999923258987196416 # None # Avenue Supermarts up 5% as promoter offload stake in Co to meet minimum public shareholding https://t.co/YdakbArqDi # 2018-05-25 08:00:18 # 999923096017514498 # None # IBC ordinance: Jaypee homebuyers may approach NCLT https://t.co/Ca87FxXbaa @vandanaramnani1 # 2018-05-25 07:55:04 # 999921779396173824 # None # North Korea open to US talks 'any time' despite Trump axing summit https://t.co/oR3lxv79Fa # 2018-05-25 07:50:17 # 999920573709864963 # None # Special NIA court convicts five IM militants in Bodh Gaya serial blasts case https://t.co/rLxrEsCZaD # 2018-05-25 07:50:05 # 
999920526297448448 # None # Gold prices to trade sideways today: Angel Commodities https://t.co/gM3paMj1jy # 2018-05-25 07:45:06 # 999919271172964352 # None # #EarningsWithETNOW | INDIA CEMENTS Q4: https://t.co/OlOxz9TmmP # 2018-05-25 07:42:52 # 999918707257180160 # 999917843821948928 # RT @Maamitalks: Mirc Electronics, maker of Onida, looks at 20% revenue from non-captive manufacturing in FY19 # https://t.co/PbDHqiL0nG # #reve… # 2018-05-25 07:41:44 # 999918422212329474 # None # Reliance Nippon Life AMC appoints <NAME> as fund manager for overseas investments https://t.co/AlfoZe3mRz # 2018-05-25 07:40:05 # 999918008129667072 # None # #EarningsWithETNOW | INDIA CEMENTS Q4 # PAT at Rs 35.3 cr vs Rs 34.3 cr YoY # Revenue at Rs 1397.8 cr vs Rs 1522.6 cr Y… https://t.co/AonVhHxXEo # 2018-05-25 07:39:26 # 999917843821948928 # None # Stock Picks Podcast on May 25, 2018: 3 stocks that could return up to 10% https://t.co/ctO0mx8Oxl via @Audioboom # 2018-05-25 07:35:12 # 999916779693473793 # None # <NAME> returns to cricket with Canadian T20 league https://t.co/qvtzfZncw0 # 2018-05-25 07:35:05 # 999916749427372032 # None # GLOBAL NEWS | China says always encouraged US-North Korea direct talks: Agencies # 2018-05-25 07:32:54 # 999916199310868480 # None # India-US trade dispute may widen further. 
Here's why # https://t.co/C0TtH4Kv8O # 2018-05-25 07:31:40 # 999915890547159043 # None # #BuffetTrail | Q- The only foreign language in which proceedings of BH AGM were simultaneously translated # Answer- M… https://t.co/ZMRMLOlfAb # 2018-05-25 07:30:21 # 999915557985042438 # None # Western Railway sets up biogas plant at Mumbai Central station https://t.co/VKHHr1uYyq # 2018-05-25 07:30:16 # 999915537873301504 # None # <NAME> returns to cricket with Canadian T20 league # https://t.co/KULmnSRpvP # 2018-05-25 07:30:16 # 999915537688805378 # None # <NAME> retires: Here’s a look at the career stats of 'Mr 360' # https://t.co/TPyYgF1OQe # 2018-05-25 07:25:06 # 999914236145844227 # None # Avenue Supermarts up 5% as promoter offload stake in Co to meet minimum public shareholding https://t.co/9939uOtvkc # 2018-05-25 07:25:05 # 999914232157093889 # None # #Exclusive | <NAME>, Chairman of IBBI cheers the revamped #IBC. He says that amendments propose to protect intere… https://t.co/pTGI4Ms0BK # 2018-05-25 07:22:43 # 999913640110186496 # 999911603804950529 # United Spirits, United Breweries, Associated Spirits, Radico Khaitan to get astrological support: <NAME> https://t.co/7MApQ7qpqg # 2018-05-25 07:20:05 # 999912976701313024 # None # #Exclusive | @rasheshshah of @EdelweissFin welcomes the new #IBC reforms. However, he believes Lenders approval for… https://t.co/FSKQsWNI1k # 2018-05-25 07:18:54 # 999912677651632129 # None # Oppo Realme 1 to go on sale today exclusively on Amazon India; here's a look at its price, specs and features https://t.co/6OuAm5Nn3E # 2018-05-25 07:15:07 # 999911724177215488 # None # <NAME>, Chairman of IBBI cheers the revamped #IBC as it recognises MSMEs' pain. 
Talking about 270 day deadline, h… https://t.co/6knvKMsptn # 2018-05-25 07:14:38 # 999911603804950529 # None # Special NIA court convicts five IM militants in Bodh Gaya serial blasts case # https://t.co/iu1Vqgd0tJ # 2018-05-25 07:13:41 # 999911363618091008 # None # These 38 multibaggers stocks rose up to 600% in 4 years; do you own any? https://t.co/OQ1msufKCb @kshanand # 2018-05-25 07:10:08 # 999910470180982785 # None # Private oil companies have suggested cutting duties on petrol &amp; diesel rather than reversing deregulation: Reports… https://t.co/Gn3JGfwYVw # 2018-05-25 07:06:50 # 999909639050248194 # None # Mood of the Nation Survey: Key takeaways # https://t.co/KPKQLBii69 # 2018-05-25 07:05:07 # 999909208471433217 # None # KARNATAKA FLOOR TEST: Congress MLA KR <NAME> elected as Speaker # @INCIndia #FloorTest https://t.co/RWF4U5DSfy # 2018-05-25 07:01:33 # 999908311150428160 # None # Media outlets slam Musk after his plans to launch site to rate credibility of journalists https://t.co/kOFJat8gH3 # 2018-05-25 07:00:09 # 999907957226725379 # None # The Karnataka Assembly commences today at 12:30 PM &amp; the #FloorTest is expected after lunch break. 
@dayamarahul joi… https://t.co/fKPAJa88Lf # 2018-05-25 06:57:55 # 999907398084001792 # None # Vedanta falls 3.5% as Tamil Nadu govt mulls permanent closure of copper smelter at Thoothukudi https://t.co/qEPoKJjqfA # 2018-05-25 06:55:05 # 999906682535690240 # None # Avenue Supermarts up 5% as promoter offload stake in Co to meet minimum public shareholding # https://t.co/z86NNO5Wpf # 2018-05-25 06:53:37 # 999906313495760897 # None # #JUSTNOW | Lupin submits application for Etanercept biosimilar in Europe https://t.co/GDQ802DOjq # 2018-05-25 06:43:46 # 999903838151720961 # None # Sterlite Industries was allowed to bypass mandatroy public hearing by UPA, NDA govts for plant https://t.co/sHUaFQQ2kQ # 2018-05-25 06:42:31 # 999903521599188992 # None # UNITED SPIRITS CONCALL: Have reduced debt by 20% in FY1 https://t.co/NaFXYEXbZG # 2018-05-25 06:38:24 # 999902484721057792 # None # Sun Pharma Q4 preview: Profits to drop due to pricing pressure, lack of approvals # https://t.co/UWOA4llF8P # 2018-05-25 06:35:04 # 999901648620212229 # None # #BuffettTrail | 'Lunch break with Raamdeo' | Ans 4 simple questions, in the comment section below, b/w 9 am &amp; 1 pm… https://t.co/MA5EffI2DG # 2018-05-25 06:31:46 # 999900816000483329 # 999886028713246720 # #BuffetTrail | Q- What was Berkshire Hathaway's original business? 
# Answer- Textile # The 3rd luck winner of 'Lunch b… https://t.co/o97w6E5hhn # 2018-05-25 06:30:24 # 999900471669145600 # None # Here's a look at Royal Enfield's latest launch - the RE Classic 500 Pegasus # https://t.co/IiCruheMQr # 2018-05-25 06:30:14 # 999900431005368321 # None # Digitised policies have higher renewal rates: CAMS Repository CEO @Maamitalks https://t.co/yDa2bUmoHL # 2018-05-25 06:30:14 # 999900430141308928 # None # #EarningsWithETNOW | KARUR VYSYA BANK Q4FY18: https://t.co/EPG57UzI5a # 2018-05-25 06:26:05 # 999899385914802176 # 999898882988433413 # #EarningsWithETNOW | KARUR VYSYA BANK Q4FY18: # PAT at Rs 50.6 cr vs Rs 218 cr YoY # NII at Rs 642.1 cr vs Rs 580 cr YoY https://t.co/1DlKLNT4MZ # 2018-05-25 06:24:05 # 999898882988433413 # None # By the time you are done reading this, you won't be clueless about PF, Gratuity, HRA, allowances, reimbursements, b… https://t.co/XGW4MK3Val # 2018-05-25 06:16:02 # 999896857198215168 # None # WATCH | Bringing petrol/diesel under GST could bring down prices https://t.co/K6ZewI18HS # 2018-05-25 06:15:09 # 999896636074545152 # None # L&amp;T Construction bags Rs 3,191 cr order for Dhaka Metro https://t.co/lmYt0xwFl7 # 2018-05-25 06:14:17 # 999896418092367873 # None # See Nifty at fresh record high in FY19; 'quality stocks' available post correction: Reliance Sec https://t.co/JiP55iUshS # 2018-05-25 06:05:04 # 999894098277416960 # None # Cox &amp; Kings stock zooms 14% as subsidiary receives NBFC license from RBI https://t.co/Ji8Z2NJ4aO # 2018-05-25 06:00:08 # 999892857304498176 # None # Karnataka LIVE: Congress MLAs arrive at Vidhana Soudha ahead of Speaker election, floor test https://t.co/xHnvFMNMtw # 2018-05-25 05:55:11 # 999891609075384323 # None # India state banks' bailout stumbles as losses mount https://t.co/wTd7c5PCXY # 2018-05-25 05:55:03 # 999891576905060353 # None # Trump Admin | Rescinding of H-4 visa work permit in final stages: PTI # Alert: H-4 visas are issued to the spouses o… 
https://t.co/bT1IzXalMu # 2018-05-25 05:53:11 # 999891106534838273 # None # About 15 people injured in blast at restaurant in Canada - Media https://t.co/Up5w9gFkIv # 2018-05-25 05:50:03 # 999890318630645761 # None # #MarketUpdate https://t.co/IelJF3lKkA # 2018-05-25 05:46:20 # 999889383779024896 # None # Bank of Baroda likely to post 60% drop in net profit for Q4 FY18 https://t.co/kgHzzcaRmY # 2018-05-25 05:45:05 # 999889069357154304 # None # Bringing auto fuel under GST could bring down prices by as much as Rs 29 https://t.co/NPu2MLLTnE # 2018-05-25 05:45:04 # 999889065599164416 # None # Market Update: Nifty midcap outshines, TCS hits new 52-week high; Bajaj Finserv jumps 3% @im_sandip https://t.co/OMsLO83SaU # 2018-05-25 05:40:04 # 999887803780493313 # None # 4 largecap stock ideas which could return up to 12% in 30 days https://t.co/ps3QoGX6mD # 2018-05-25 05:35:03 # 999886542876299264 # None # #BuffettTrail | 'Lunch break with Raamdeo' | Don't miss this opportunity. Answer four simple questions, in the comm… https://t.co/xbSV70GVmI # 2018-05-25 05:33:00 # 999886028713246720 # 999870599714369536 # #BuffetTrail | Q- Where is Berkshire's HQ situated? # Answer- Omaha # The 2nd luck winner of 'Lunch break with Raamdeo'… https://t.co/wIc3qZDR8r # 2018-05-25 05:31:39 # 999885686512668672 # None # In an #Exclusive conversation with ET NOW's @NayantaraRai, <NAME> # Director of Varun Beverages says acquired… https://t.co/maFDk8eNp4 # 2018-05-25 05:31:36 # 999885675754237957 # 999884853632876544 # SC may mull plea for special probe on #Tuticorin police action on May 28: Cogencies # #SterliteProtests… https://t.co/RrGLt2pPF0 # 2018-05-25 05:29:29 # 999885142075129857 # None # #Exclusive | ET NOW's @NayantaraRai was at the factory of Bottling major Varun Beverages, earlier this week. 
For th… https://t.co/YVwhUsK4qO # 2018-05-25 05:28:20 # 999884853632876544 # None # <NAME>, ED &amp; COO of Endurance Technologies says that scooters currently contribute about 30% of sales; Aim… https://t.co/uGMjeRvUE8 # 2018-05-25 05:21:38 # 999883168613191681 # 999882740827701248 # #CorporateView | Motorcycles market moved better than scooters in FY18. Have 68% presence in motorcycle mkt; Expect… https://t.co/OpbisnDzzg # 2018-05-25 05:19:56 # 999882740827701248 # None # Bank Nifty resumes uptrend; these 3 stocks could deliver 8-11% returns in the short term https://t.co/8P1ruBXtuE # 2018-05-25 05:15:04 # 999881513704480768 # None # #Exclusive | Kotak Mahindra AMC's @NileshShah68 on #Modi4Years: # # #StateOfTheEconomy @tanvirgill2 @MubinaKapasi https://t.co/DsekF6mXmS # 2018-05-25 05:13:25 # 999881098212532224 # 999878982907584512 # #Exclusive | Tax compliance remained poor for decades; Saw great improvements over the last 4 years. #GST is a game… https://t.co/evg7pKu0EU # 2018-05-25 05:11:21 # 999880577774899200 # None # Granules India touches 52-week low on 55% dip in Q4 profit https://t.co/hMQ4Cyu1Vp # 2018-05-25 05:10:04 # 999880253949427713 # None # #Exclusive | Huge NPAs going unrecognised for a very long time needed to be addressed. Over the last 4 years, we ha… https://t.co/PQUWvk2Bms # 2018-05-25 05:08:42 # 999879913556488192 # None # 4 largecap stock ideas which could return up to 12% in 30 days https://t.co/Eiv7uZ9I4v # 2018-05-25 05:05:04 # 999878995587059713 # None # PNC Infratech gains 3% on declaring lowest bidder for project worth Rs 1738 cr https://t.co/w2hlEh1KR4 # 2018-05-25 05:05:03 # 999878995301761024 # None # #Exclusive | Matter of great pride that our economy is now bigger than that of UK. 
India has seen continuity in pol… https://t.co/KfFr8AEqpo # 2018-05-25 05:05:01 # 999878982907584512 # None # Strong support for Nifty at 10,418: Dynamic Levels https://t.co/R2ZZvFLeZ7 # 2018-05-25 05:00:06 # 999877748775702529 # None # A month after hitting $100-bn mark, TCS hits m-cap of Rs 7 lakh crore https://t.co/UvQ69Qg8yk # 2018-05-25 04:57:08 # 999876999165464576 # None # Dutch PM <NAME> cuts short India visit, to return to Netherlands https://t.co/48nBdZmxGt # 2018-05-25 04:55:03 # 999876475234017283 # None # United Spirits rises 4% post company reports profit in Q4 https://t.co/AAFthJrjrn # 2018-05-25 04:50:03 # 999875219052474368 # None # Gujarat Government to conduct 'Yagna' for good monsoons https://t.co/UeaheToqDN # 2018-05-25 04:45:05 # 999873967610564608 # None # See Nifty at fresh record high in FY19; 'quality stocks' available post correction: Reliance Sec https://t.co/bm0zv20lwN # 2018-05-25 04:40:03 # 999872701853585408 # None # 60-70% of portfolio must be invested in small caps for next 3 years. Must look at investing in the leaders or likel… https://t.co/xsQNoMmwm5 # 2018-05-25 04:36:19 # 999871761532444673 # None # Petrol rate was hiked by 36 paise, crossing the Rs 80-mark in more than 10 cities # #FuelPriceHike #FuelOnFire # https://t.co/zQ0Kftz8OM # 2018-05-25 04:35:36 # 999871580762136581 # None #
Scraping/TwitterScraping/Accessing Timeline/TwitterScrappingNameWise.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="O5m8cxLuGjCn" colab_type="code" colab={}
# Install the data.world client (with pandas support) inside Colab.
# !pip install datadotworld
# !pip install datadotworld[pandas]

# + id="hTTAMotQH_bn" colab_type="code" colab={}
# Configure data.world API credentials interactively.
# !dw configure

# + id="9UeTnB2KITJG" colab_type="code" colab={}
from google.colab import drive
import pandas as pd
import numpy as np
import datadotworld as dw

# + id="GrJ8ZK-jIZiN" colab_type="code" colab={}
drive.mount('/content/drive')

# + id="1ADacTVGIwzV" colab_type="code" colab={}
# cd 'drive/My Drive/Colab Notebooks/dw_matrix'

# + id="pqnw1BAMJG-9" colab_type="code" colab={}
# Load the men's shoe prices dataset and pick its single dataframe.
data = dw.load_dataset('datafiniti/mens-shoe-prices')
df = data.dataframes['7004_1']

# + id="fBMrOcQ2JSUl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="83b8ff78-0619-483b-ea93-2c51421aa437" executionInfo={"status": "ok", "timestamp": 1581624270582, "user_tz": -60, "elapsed": 1049, "user": {"displayName": "Micha\u0142 Ma\u0142aj", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCjN8N9Bv0rIEvuhWaN8EP7y12AVRwUsm01FTC8=s64", "userId": "02760722482031341131"}}
# Keep USD-priced rows only, then drop the top 1% of prices as outliers
# before plotting the histogram.
df_usd = df[df.prices_currency == 'USD'].copy()
# np.float was deprecated in NumPy 1.20 and removed in 1.24 — use the
# builtin float, which is what the alias pointed to anyway.
df_usd['prices_amountmin'] = df_usd.prices_amountmin.astype(float)
filter_max = np.percentile(df_usd['prices_amountmin'], 99)
df_usd_filter = df_usd[df_usd['prices_amountmin'] < filter_max]
df_usd_filter['prices_amountmin'].hist(bins=100)

# + id="B7SYzwo6Knxa" colab_type="code" colab={}
df_usd_filter.to_csv('data/shoes_prices_filter.csv', index=False)
matrix_one/4day_meta.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] tags=["remove_cell"] # # Grover's Algorithm # - # In this section, we introduce Grover's algorithm and how it can be used to solve unstructured search problems. We then implement the quantum algorithm using Qiskit, and run on a simulator and device. # # # ## Contents # # 1. [Introduction](#introduction) # 2. [Example: 2 Qubits](#2qubits) # 2.1 [Simulation](#2qubits-simulation) # 2.2 [Device](#2qubits-device) # 3. [Example: 3 Qubits](#3qubits) # 3.1 [Simulation](#3qubits-simulation) # 3.2 [Device](#3qubits-device) # 4. [Problems](#problems) # 5. [References](#references) # # ## 1. Introduction <a id='introduction'></a> # # You have likely heard that one of the many advantages a quantum computer has over a classical computer is its superior speed searching databases. Grover's algorithm demonstrates this capability. This algorithm can speed up an unstructured search problem quadratically, but its uses extend beyond that; it can serve as a general trick or subroutine to obtain quadratic run time improvements for a variety of other algorithms. This is called the amplitude amplification trick. # # ### Unstructured Search # # Suppose you are given a large list of $N$ items. Among these items there is one item with a unique property that we wish to locate; we will call this one the winner $w$. Think of each item in the list as a box of a particular color. Say all items in the list are gray except the winner $w$, which is pink. # # ![image1](images/grover_search.png) # # To find the pink box -- the *marked item* -- using classical computation, one would have to check on average $N/2$ of these boxes, and in the worst case, all $N$ of them. 
On a quantum computer, however, we can find the marked item in roughly $\sqrt{N}$ steps with Grover's amplitude amplification trick. A quadratic speedup is indeed a substantial time-saver for finding marked items in long lists. Additionally, the algorithm does not use the list's internal structure, which makes it *generic;* this is why it immediately provides a quadratic quantum speed-up for many classical problems. # # ### Oracle # # How will the list items be provided to the quantum computer? A common way to encode such a list is in terms of a function $f$ which returns $f(x) = 0$ for all unmarked items $x$ and $f(w) = 1$ for the winner. To use a quantum computer for this problem, we must provide the items in superposition to this function, so we encode the function into a unitary matrix called an *oracle*. First we choose a binary encoding of the items $x, w \in \{0,1\}^n$ so that $N = 2^n$; now we can represent it in terms of qubits on a quantum computer. Then we define the oracle matrix $U_f$ to act on any of the simple, standard basis states $| x \rangle$ by $U_f | x \rangle = (-1)^{f(x)} | x \rangle.$ # # We see that if $x$ is an unmarked item, the oracle does nothing to the state. However, when we apply the oracle to the basis state $| w \rangle$, it maps $U_f | w \rangle = -| w \rangle$. Geometrically, this unitary matrix corresponds to a reflection about the origin for the marked item in an $N = 2^n$ dimensional vector space. # # ### Amplitude Amplification # # So how does the algorithm work? Before looking at the list of items, we have no idea where the marked item is. 
Therefore, any guess of its location is as good as any other, which can be expressed in terms of a # uniform superposition: $|s \rangle = \frac{1}{\sqrt{N}} \sum_{x = 0}^{N -1} | x # \rangle.$ # # If at this point we were to measure in the standard basis $\{ | x \rangle \}$, this superposition would collapse, according to the fifth quantum law, to any one of the basis states with the same probability of $\frac{1}{N} = \frac{1}{2^n}$. Our chances of guessing the right value $w$ is therefore $1$ in $2^n$, as could be expected. Hence, on average we would need to try about $N = 2^n$ times to guess the correct item. # # Enter the procedure called amplitude amplification, which is how a quantum computer significantly enhances this probability. This procedure stretches out (amplifies) the amplitude of the marked item, which shrinks the other items' amplitude, so that measuring the final state will return the right item with near-certainty. # # This algorithm has a nice geometrical interpretation in terms of two reflections, which generate a rotation in a two-dimensional plane. The only two special states we need to consider are the winner $| w \rangle$ and the uniform superposition $| s \rangle$. These two vectors span a two-dimensional plane in the vector space $\mathbb{C}^N.$ They are not quite perpendicular because $| w \rangle$ occurs in the superposition with amplitude $N^{-1/2}$ as well. # We can, however, introduce an additional state $|s'\rangle$ that is in the span of these two vectors, which is perpendicular to $| w \rangle$ and is obtained from $|s \rangle$ by removing $| w \rangle$ and # rescaling. # # **Step 1**: The amplitude amplification procedure starts out in the uniform superposition $| s \rangle$, which is easily constructed from $| s \rangle = H^{\otimes n} | 0 \rangle^n$. 
# # ![image2](images/grover_step1.png) # # # The left graphic corresponds to the two-dimensional plane spanned by perpendicular vectors $|w\rangle$ and $|s'\rangle$ which allows to express the initial state as $|s\rangle = \sin \theta | w \rangle + \cos \theta | s' \rangle,$ where $\theta = \arcsin \langle s | w \rangle = \arcsin \frac{1}{\sqrt{N}}$. The right graphic is a bar graph of the amplitudes of the state $| s \rangle$ for the case $N = 2^2 = 4$. The average amplitude is indicated by a dashed line. # # **Step 2**: We apply the oracle reflection $U_f$ to the state $|s\rangle$. # # ![image3](images/grover_step2.png) # # Geometrically this corresponds to a reflection of the state $|s\rangle$ about $|s'\rangle$. This transformation means that the amplitude in front of the $|w\rangle$ state becomes negative, which in turn means that the average amplitude has been lowered. # # **Step 3**: We now apply an additional reflection $U_s$ about the state $|s\rangle$: $U_s = 2|s\rangle\langle s| - \mathbb{1}$. This transformation maps the state to $U_s U_f| s \rangle$ and completes the transformation. # # ![image4](images/grover_step3.png) # # Two reflections always correspond to a rotation. The transformation $U_s U_f$ rotates the initial state $|s\rangle$ closer towards the winner $|w\rangle$. The action of the reflection $U_s$ in the amplitude bar diagram can be understood as a reflection about the average amplitude. Since the average amplitude has been lowered by the first reflection, this transformation boosts the negative amplitude of $|w\rangle$ to roughly three times its original value, while it decreases the other amplitudes. We then go to **step 2** to repeat the application. This procedure will be repeated several times to zero in on the winner. # # After $t$ steps we will be in the state $|\psi_t\rangle$ where: $| \psi_t \rangle = (U_s U_f)^t | s \rangle.$ # # How many times do we need to apply the rotation? 
It turns out that roughly $\sqrt{N}$ rotations suffice. This becomes clear when looking at the amplitudes of the state $| \psi \rangle$. We can see that the amplitude of $| w \rangle$ grows linearly with the number of applications $\sim t N^{-1/2}$. However, since we are dealing with amplitudes and not probabilities, the vector space's dimension enters as a square root. Therefore it is the amplitude, and not just the probability, that is being amplified in this procedure. # # In the case that there are multiple solutions, $M$, it can be shown that roughly $\sqrt{(N/M)}$ rotations will suffice. # # ![image5](images/grover_algorithm.png) # # ## 2. Example: 2 Qubits <a id='2qubits'></a> # # Let's first have a look at the case of Grover's algorithm for $N=4$ which is realized with 2 qubits. In this particular case, contrary to inuition, only <b>one rotation</b> is required which will rotate the initial state $|s\rangle$ to the winner $|w\rangle$ which can easily be shown [3]: # <ol> # <li> # Following the above introduction, in the case $N=4$ we have # # $$\theta = \arcsin \frac{1}{2} = \frac{\pi}{6}.$$ # # # </li> # <li> # After $t$ steps, we have $$(U_s U_f)^t \lvert s \rangle = \sin \theta_t \lvert w \rangle + \cos \theta_t \lvert s' \rangle ,$$where $$\theta_t = (2t+1)\theta.$$ # </li> # <li> # In order to obtain $\lvert w \rangle$ we need $\theta_t = \frac{\pi}{2}$, which with $\theta=\frac{\pi}{6}$ inserted above results to $t=1$. This implies that after $t=1$ rotation the searched element is found. # </li> # </ol> # # Now let us look into the possible oracles. We have $N=4$ possible elements, i.e. $\lvert 00 \rangle, \lvert 01 \rangle, \lvert 10 \rangle, \lvert 11 \rangle$ and hence require in total $4$ oracles.<br> # # #### Oracle for $\lvert w \rangle = \lvert 11 \rangle$ # Let us start with the case $\lvert w \rangle = \lvert 11 \rangle$. 
The oracle $U_f$ in this case acts as follows: # # $$U_f \lvert s \rangle = U_f\frac{1}{2}\left( \lvert 00 \rangle + \lvert 01 \rangle + \lvert 10 \rangle + \lvert 11 \rangle \right) = \frac{1}{2}\left( \lvert 00 \rangle + \lvert 01 \rangle + \lvert 10 \rangle - \lvert 11 \rangle \right).$$ # # In order to realize the sign flip for $\lvert 11 \rangle$ we simply need to apply a controlled Z gate to the initial state. This leads to the following circuit: # # ![image6](images/grover_circuit_2qbuits_oracle_11.png) # # #### Oracle for $\lvert w \rangle = \lvert 00 \rangle$ # In the case of $\lvert w \rangle = \lvert 00 \rangle$ the oracle $U_f$ acts as follows: # # $$U_f \lvert s \rangle = U_f\frac{1}{2}\left( \lvert 00 \rangle + \lvert 01 \rangle + \lvert 10 \rangle + \lvert 11 \rangle \right) = \frac{1}{2}\left( -\lvert 00 \rangle + \lvert 01 \rangle + \lvert 10 \rangle + \lvert 11 \rangle \right).$$ # # In order to realize the sign flip for $\lvert 00 \rangle$ we need to apply an "inverted" controlled Z gate to the initial state leading to the following circuit: # # ![image7](images/grover_circuit_2qbuits_oracle_00.png) # # #### Oracles for $\lvert w \rangle = \lvert 01 \rangle$ and $\lvert w \rangle = \lvert 10 \rangle$ # Following the above logic one can straight forwardly construct the oracles for $\lvert w \rangle = \lvert 01 \rangle$ (left circuit) and $\lvert w \rangle = \lvert 10 \rangle$ (right circuit): # # ![image8](images/grover_circuit_2qbuits_oracle_01_10.png) # # #### Reflection $U_s$ # In order to complete the circuit we need to implement the additional reflection $U_s = 2|s\rangle\langle s| - \mathbb{1}$ which acts as follows # # $$U_s \frac{1}{2}\left( \lvert 00 \rangle + \lvert 01 \rangle + \lvert 10 \rangle + \lvert 11 \rangle \right) = \frac{1}{2}\left( \lvert 00 \rangle - \lvert 01 \rangle - \lvert 10 \rangle - \lvert 11 \rangle \right),$$ # # i.e. the signs of each state are flipped except for $\lvert 00 \rangle$. 
As can easily be verified, one way of implementing $U_s$ is the following circuit: # # ![image9](images/grover_circuit_2qbuits_reflection.png) # # #### Full Circuit for $\lvert w \rangle = \lvert 00 \rangle$ # Since in the particular case of $N=4$ only one rotation is required we can combine the above components to build the full circuit for Grover's algorithm for the case $\lvert w \rangle = \lvert 00 \rangle$: # # ![image10](images/grover_circuit_2qubits_full_00.png) # # The other three circuits can be constructed in the same way and will not be depicted here. # # ### 2.1 Qiskit Implementation # # We now implement Grover's algorithm for the above case of 2 qubits for $\lvert w \rangle = \lvert 00 \rangle$. # + #initialization import matplotlib.pyplot as plt import numpy as np # %matplotlib inline # %config InlineBackend.figure_format = 'svg' # Makes the images look nice # importing Qiskit from qiskit import IBMQ, Aer from qiskit.providers.ibmq import least_busy from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute # import basic plot tools from qiskit.visualization import plot_histogram # - # We start by preparing a quantum circuit with two qubits: n = 2 grover_circuit = QuantumCircuit(n) # Then we simply need to write out the commands for the circuit depicted above. 
First, initialize the state
Monitor the execution of the job in the queue from qiskit.tools.monitor import job_monitor job = execute(grover_circuit, backend=device, shots=1024, max_credits=10) job_monitor(job, interval = 2) # Get the results from the computation results = job.result() answer = results.get_counts(grover_circuit) plot_histogram(answer) # We confirm that in the majority of the cases the element $|00\rangle$ is found. The other results are due to errors in the quantum computation. # # ## 3. Example: 3 Qubits <a id='3qubits'></a> # # We now go through the example of Grover's algorithm for 3 qubits with two marked states $\lvert101\rangle$ and $\lvert110\rangle$, following the implementation found in Reference [2]. The quantum circuit to solve the problem using a phase oracle is: # # ![image11](images/grover_circuit_3qubits.png) # # <ol> # <li> # Apply Hadamard gates to $3$ qubits initialised to $\lvert000\rangle$ to create a uniform superposition: # $$\lvert \psi_1 \rangle = \frac{1}{\sqrt{8}} \left( # \lvert000\rangle + \lvert001\rangle + \lvert010\rangle + \lvert011\rangle + # \lvert100\rangle + \lvert101\rangle + \lvert110\rangle + \lvert111\rangle \right) $$ # </li> # # <li> # Mark states $\lvert101\rangle$ and $\lvert110\rangle$ using a phase oracle: # $$\lvert \psi_2 \rangle = \frac{1}{\sqrt{8}} \left( # \lvert000\rangle + \lvert001\rangle + \lvert010\rangle + \lvert011\rangle + # \lvert100\rangle - \lvert101\rangle - \lvert110\rangle + \lvert111\rangle \right) $$ # </li> # # <li> # Perform the reflection around the average amplitute: # # <ol> # <li> Apply Hadamard gates to the qubits # $$\lvert \psi_{3a} \rangle = \frac{1}{2} \left( # \lvert000\rangle +\lvert011\rangle +\lvert100\rangle -\lvert111\rangle \right) $$ # </li> # # <li> Apply X gates to the qubits # $$\lvert \psi_{3b} \rangle = \frac{1}{2} \left( # -\lvert000\rangle +\lvert011\rangle +\lvert100\rangle +\lvert111\rangle \right) $$ # </li> # # <li> Apply a doubly controlled Z gate between the 1, 2 (controls) and 3 
(target) qubits # $$\lvert \psi_{3c} \rangle = \frac{1}{2} \left( # -\lvert000\rangle +\lvert011\rangle +\lvert100\rangle -\lvert111\rangle \right) $$ # </li> # <li> Apply X gates to the qubits # $$\lvert \psi_{3d} \rangle = \frac{1}{2} \left( # -\lvert000\rangle +\lvert011\rangle +\lvert100\rangle -\lvert111\rangle \right) $$ # </li> # <li> Apply Hadamard gates to the qubits # $$\lvert \psi_{3e} \rangle = \frac{1}{\sqrt{2}} \left( # -\lvert101\rangle -\lvert110\rangle \right) $$ # </li> # </ol> # </li> # # <li> # Measure the $3$ qubits to retrieve states $\lvert101\rangle$ and $\lvert110\rangle$ # </li> # </ol> # # Note that since there are 2 solutions and 8 possibilities, we will only need to run one iteration (steps 2 & 3). # # ### 3.1 Qiskit Implementation <a id='3qubit-implementation'></a> # # We now implement Grover's algorithm for the above [example](#3qubits) for $3$-qubits and searching for two marked states $\lvert101\rangle$ and $\lvert110\rangle$. **Note:** Remember that Qiskit orders it's qubits the opposite way round to this resource, so the circuit drawn will appear flipped about the horizontal. # # We create a phase oracle that will mark states $\lvert101\rangle$ and $\lvert110\rangle$ as the results (step 1). def phase_oracle(circuit): circuit.cz(0, 2) circuit.cz(1, 2) # Next we set up the circuit for inversion about the average (step 2), also known as the diffusion operator: def diffuser(circuit): """Apply inversion about the average step of Grover's algorithm.""" qubits = circuit.qubits nqubits = len(qubits) for q in range(nqubits): circuit.h(q) circuit.x(q) # Do controlled-Z circuit.h(2) circuit.ccx(0,1,2) circuit.h(2) for q in range(nqubits): circuit.x(q) circuit.h(q) # Now we put the pieces together, with the creation of a uniform superposition at the start of the circuit and a measurement at the end. Note that since there are 2 solutions and 8 possibilities, we will only need to run one iteration. 
# + n = 3 barriers = True grover_circuit = QuantumCircuit(n) for qubit in range(n): grover_circuit.h(qubit) if barriers: grover_circuit.barrier() phase_oracle(grover_circuit) if barriers: grover_circuit.barrier() diffuser(grover_circuit) grover_circuit.measure_all() # - grover_circuit.draw(output="mpl") # ### 3.1.1 Experiment with Simulators <a id='3qubits-simulation'></a> # # We can run the above circuit on the simulator. backend = Aer.get_backend('qasm_simulator') shots = 1024 results = execute(grover_circuit, backend=backend, shots=shots).result() answer = results.get_counts() plot_histogram(answer) # As we can see, the algorithm discovers our marked states $\lvert101\rangle$ and $\lvert110\rangle$. # ### 3.1.2 Experiment with Real Devices <a id='3qubits-device'></a> # # We can run the circuit on the real device as below. # + tags=["uses-hardware"] backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 3 and not x.configuration().simulator and x.status().operational==True)) print("least busy backend: ", backend) # + tags=["uses-hardware"] # Run our circuit on the least busy backend. Monitor the execution of the job in the queue from qiskit.tools.monitor import job_monitor shots = 1024 job = execute(grover_circuit, backend=backend, shots=shots, optimization_level=3) job_monitor(job, interval = 2) # + tags=["uses-hardware"] # Get the results from the computation results = job.result() answer = results.get_counts(grover_circuit) plot_histogram(answer) # - # As we can (hopefully) see, the algorithm discovers our marked states $\lvert101\rangle$ and $\lvert110\rangle$. The other results are due to errors in the quantum computation. # ## 4. Problems <a id='problems'></a> # # 1. The above [example](#example) and [implementation](#implementation) of Grover is to find the two marked $3$-qubit states $\lvert101\rangle$ and $\lvert110\rangle$. Modify the implementation to find one marked $2$-qubit state $\lvert01\rangle$. 
Are the results what you expect? Explain. # # 2. The above [example](#example) and [implementation](#implementation) of Grover is to find the two marked $3$-qubit states $\lvert101\rangle$ and $\lvert110\rangle$. Modify the implementation to find one marked $4$-qubit state $\lvert0101\rangle$. Are the results what you expect? Explain. # ## 5. References <a id='references'></a> # # 1. <NAME> (1996), "A fast quantum mechanical algorithm for database search", Proceedings of the 28th Annual ACM Symposium on the Theory of Computing (STOC 1996), [doi:10.1145/237814.237866](http://doi.acm.org/10.1145/237814.237866), [arXiv:quant-ph/9605043](https://arxiv.org/abs/quant-ph/9605043) # 2. <NAME>, <NAME>, <NAME>, <NAME>, <NAME> & <NAME> (2017), "Complete 3-Qubit Grover search on a programmable quantum computer", Nature Communications, Vol 8, Art 1918, [doi:10.1038/s41467-017-01904-7](https://doi.org/10.1038/s41467-017-01904-7), [arXiv:1703.10535 ](https://arxiv.org/abs/1703.10535) # 3. <NAME> & <NAME>, "Quantum Computation and Quantum Information", Cambridge: Cambridge University Press, 2000. import qiskit qiskit.__qiskit_version__
content/ch-algorithms/grover.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_tensorflow_p36 # language: python # name: conda_tensorflow_p36 # --- # # TensorFlow Script Mode - Using Shell scripts # # Starting from TensorFlow version 1.11, you can use a shell script as # your training entry point. Shell scripts are useful for many use cases including: # # - Invoking Python scripts with specific parameters # - Configuring framework dependencies # - Training using different programming languages # # For this example, we use [a Keras implementation of the Deep Dream algorithm](https://github.com/keras-team/keras/blob/2.2.4/examples/deep_dream.py). We can use the same technique for other scripts or repositories including [TensorFlow Model Zoo](https://github.com/tensorflow/models) and [TensorFlow benchmark scripts](https://github.com/tensorflow/benchmarks/tree/master/scripts/tf_cnn_benchmarks). # # Getting the image for training # For training data, let's download a public domain image: # + import os data_dir = os.path.join(os.getcwd(), 'training') os.makedirs(data_dir, exist_ok=True) data_dir # - # !wget -O training/dark-forest-landscape.jpg https://www.goodfreephotos.com/albums/other-landscapes/dark-forest-landscape.jpg from IPython.display import Image Image(filename='training/dark-forest-landscape.jpg') # ## Download the training script # # Let's start by downloading the [deep_dream](https://github.com/keras-team/keras/blob/2.2.4/examples/deep_dream.py) example script from Keras repository. This script takes an image and uses deep dream algorithm to generate # transformations of that image. # !wget https://raw.githubusercontent.com/keras-team/keras/2.2.4/examples/deep_dream.py # The script **deep_dream.py** takes two positional arguments: # - `base_image_path`: Path to the image to transform. # - `result_prefix`: Prefix of all generated images. 
# # ### Creating the launcher script # # We need to create a launcher script that sets the `base_image_path` # and `result_prefix`, and invokes **deep_dream.py**: # + # %%writefile launcher.sh BASE_IMAGE_PATH="${SM_CHANNEL_TRAINING}/dark-forest-landscape.jpg" RESULT_PREFIX="${SM_MODEL_DIR}/dream" python deep_dream.py ${BASE_IMAGE_PATH} ${RESULT_PREFIX} # echo "Generated image $(ls ${SM_MODEL_DIR})" # - # **SM_CHANNEL_TRAINING** and **SM_MODEL** are environment variables created by the SageMaker TensorFlow # Container in the beginning of training. Let's take a more detailed look at then: # # - **SM_MODEL_DIR**: the directory inside the container where the training model data must be saved inside the container, i.e. /opt/ml/model. # - **SM_TRAINING_CHANNEL**: the directory containing data in the 'training' channel. # # For more information about training environment variables, please visit [SageMaker Containers](https://github.com/aws/sagemaker-containers#list-of-provided-environment-variables-by-sagemaker-containers). # ## Test locally using SageMaker Python SDK TensorFlow Estimator # You can use the SageMaker Python SDK TensorFlow estimator to easily train locally and in SageMaker. # Let's set **launcher.sh** as the entry-point and **deep_dream.py** as a dependency: entry_point='launcher.sh' dependencies=['deep_dream.py'] # For more information about the arguments `entry_point` and `dependencies` see the [SageMaker TensorFlow](https://github.com/aws/sagemaker-python-sdk/blob/master/src/sagemaker/tensorflow/README.rst#sagemakertensorflowtensorflow-class) documentation. # # This notebook shows how to use the SageMaker Python SDK to run your code in a local container before deploying to SageMaker's managed training or hosting environments. Just change your estimator's train_instance_type to local or local_gpu. For more information, see: https://github.com/aws/sagemaker-python-sdk#local-mode. 
# # In order to use this feature you'll need to install docker-compose (and nvidia-docker if training with a GPU). Running following script will install docker-compose or nvidia-docker-compose and configure the notebook environment for you. # # Note, you can only run a single local notebook at a time. # !/bin/bash ./setup.sh # Let's train locally here to make sure everything runs smoothly first. train_instance_type='local' # We create the TensorFlow Estimator, passing the flag `script_mode=True`. For more information about script mode, see https://github.com/aws/sagemaker-python-sdk/blob/master/src/sagemaker/tensorflow/README.rst#preparing-a-script-mode-training-script: # + import sagemaker from sagemaker.tensorflow import TensorFlow estimator = TensorFlow(entry_point=entry_point, dependencies=dependencies, train_instance_type='local', train_instance_count=1, role=sagemaker.get_execution_role(), framework_version='1.12.0', py_version='py3', script_mode=True) # - # To start a training job, we call `estimator.fit(inputs)`, where inputs is a dictionary where the keys, named **channels**, have values pointing to the data location: # + inputs = {'training': f'file://{data_dir}'} estimator.fit(inputs) # - # `estimator.model_data` contains the S3 location where the contents of **/opt/ml/model** # were save as tar.gz file. 
Let's untar and download the model: # !aws s3 cp {estimator.model_data} model.tar.gz # !tar -xvzf model.tar.gz # We can see the resulting image now: from IPython.display import Image Image(filename='dream.png') # # Training in SageMaker # After you test the training job locally, upload the dataset to an S3 bucket so SageMaker can access the data during training: # + import sagemaker training_data = sagemaker.Session().upload_data(path='training', key_prefix='datasets/deep-dream') # - # The `upload_data` call above returns an S3 location that can be used during the SageMaker Training Job training_data # To train in SageMaker: # - change the estimator argument **train_instance_type** to any SageMaker ML Instance Type available for training. # - set the **training** channel to a S3 location. # + estimator = TensorFlow(entry_point='launcher.sh', dependencies=['deep_dream.py'], train_instance_type='ml.c4.xlarge', train_instance_count=1, role=sagemaker.get_execution_role(), framework_version='1.12.0', py_version='py3', script_mode=True) estimator.fit(training_data) # - # The `estimator.fit` call bellow starts training and creates a data channel named `training` with the contents of the # S3 location `training_data`. estimator.fit(training_data)
sagemaker-python-sdk/tensorflow_script_mode_using_shell_commands/tensorflow_script_mode_using_shell_commands.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: nlp # language: python # name: nlp # --- # + #default_exp ocr_attention_training # - #export from fastai import * from fastai.vision import * import pandas as pd import numpy as np import cv2 from tqdm.notebook import tqdm import matplotlib.pyplot as plt # %matplotlib inline #export from ocr.core import save_inference, load_inference from ocr.ocr_dataset_fontsynth import create_df as create_fontsynth_df from ocr.ocr_dataset_sroie2019 import create_df as create_sroie_df from ocr.ocr_dataset_brno import create_df as create_brno_df from ocr.ocr_dataset_sroie2019 import sroie_ocr_config, char_freq from ocr.ocr_dataset_fontsynth import fontsynth_config, char_freq from ocr.ocr_dataset_brno import brno_ocr_config PAD = sroie_ocr_config.PAD # PAD - how much is data padded PAD = 0 # + #export allowed_chars = {'N', '3', 'V', 'P', '7', '1', '#', '9', '"', 'C', 'Q', 'B', 'E', '>', '@', ',', 'M', '{', ']', ';', '^', "'", '&', '6', 'Z', '*', '<', '+', 'G', 'X', '!', ':', '-', '[', '|', '$', '5', 'I', 'H', '=', 'Y', '.', 'R', 'S', '/', 'T', '}', 'K', '0', '?', 'U', ')', '_', 'D', 'J', 'L', '4', 'W', '%', '(', ' ', 'F', '8', '~', '\\', 'A', '2', 'O'} allowed_chars = fontsynth_config.allowed_chars allowed_fonts = ['Unknown', 'Andale_Mono', 'Arial', 'Arial_Black', 'Arial_Bold', 'Arial_Bold_Italic', 'Arial_Italic', 'Comic_Sans_MS_Bold', 'Courier_New', 'Courier_New_Bold', 'Courier_New_Bold_Italic', 'Courier_New_Italic', 'Georgia', 'Georgia_Bold', 'Georgia_Bold_Italic', 'Georgia_Italic', 'Impact', 'Times_New_Roman', 'Times_New_Roman_Bold', 'Times_New_Roman_Bold_Italic', 'Times_New_Roman_Italic', 'Trebuchet_MS', 'Trebuchet_MS_Bold', 'Trebuchet_MS_Bold_Italic', 'Trebuchet_MS_Italic', 'Verdana', 'Verdana_Bold', 'Verdana_Bold_Italic', 'Verdana_Italic', 'brno_easy', 'brno_medium', 'sroie2019', 'Comic_Sans_MS'] class 
attention_config: LINE_HEIGHT = 48 USE_DEFAULT_CLASSES = True label_delim = '`' pad_idx = 0 # aka: label_delim idx allowed_chars = allowed_chars allowed_fonts = allowed_fonts # - # ### Prep print(char_freq) chars = list(char_freq.keys()) label_delim = attention_config.label_delim label_delim in chars allowed_chars = set(chars) - set(['·', label_delim]) print(allowed_chars) len(allowed_chars) allowed_chars = attention_config.allowed_chars # + split_chars = lambda string, delim: ''.join([char+delim for char in string])[:-1] def preprocess_string(string): string = string.replace('·', '.') string = string.replace('`', "'") string = split_chars(string, attention_config.label_delim) return string split_chars('qwerty', label_delim) # - sroie_df = create_sroie_df() print(len(sroie_df)) sroie_df.head() brno_df = create_brno_df() brno_df = brno_df[ brno_df['dataset'].apply(lambda x: 'hard' not in x) ] brno_df['valid'] = False print(len(brno_df)) brno_df.head() fontsynth_df = create_fontsynth_df() fontsynth_df['valid'] = False print(len(fontsynth_df)) fontsynth_df.head() # main_df = pd.concat([sroie_df, fontsynth_df, brno_df]) main_df = sroie_df.copy() def filter_elems_with_unknown_chars(string): for char in string: if char not in attention_config.allowed_chars: return False return True valid_elems = main_df['string'].apply(filter_elems_with_unknown_chars) print('bfr: {} aftr: {}'.format(len(main_df), len(main_df[ valid_elems ]))) main_df = main_df[ valid_elems ] main_df['string'] = main_df['string'].map(preprocess_string) main_df c = '\n' assert tensor(list(main_df['string'].apply(lambda x: c not in x))).all() main_df['string'] = (main_df['string'] + c + main_df['dataset']).map(lambda x: x.split(c)) main_df sparse_df = pd.concat([ main_df[ main_df['valid'] == False ].iloc[0:10000:2], main_df[ main_df['valid'] == True ].iloc[:500] ]) sparse_df.head() # ### Databunch #export from ocr.ocr_crnn_training import TextlineProcessor, TextlineAndFont, TextlineList, MyImageList, 
im2seq_data_collate from ocr.ocr_crnn_training import one_hot_text, decode_single_ctc, decode_ctc, gaussian_blur, rand_resize #export class TextlineProcessor(TextlineProcessor): def __init__(self, ds:ItemList): self.create_classes(ds.classes, ds.font_classes) self.use_default_classes = attention_config.USE_DEFAULT_CLASSES self.default_classes = attention_config.allowed_chars self.default_font_classes = attention_config.allowed_fonts def create_classes(self, classes, font_classes): self.classes, self.font_classes = classes, font_classes if classes is not None: self.classes = [attention_config.label_delim] + classes self.c2i = {v:k for k,v in enumerate(self.classes)} self.f2i = {v:k for k,v in enumerate(font_classes)} #export class TextlineAndFont(ItemBase): ''' F = font, S = string data: tensor(S), tensor(F) obj: str(S), str(F) raw: str(S), list(F) ''' def __init__(self, data, obj, raw):self.data, self.obj, self.raw = data, obj, raw def __str__(self, n=20): string = self.obj[0][:n]+['...'] if len(self.obj[0]) > n else self.obj[0] return self.obj[1][:5] +'...'+ attention_config.label_delim.join([str(o) for o in string]) def __hash__(self): return hash(str(self)) #export class TextlineList(ItemList): _processor = TextlineProcessor def __init__(self, items:Iterator, classes=None, font_classes=None, label_delim:str=None, one_hot:bool=False, **kwargs): self.classes = classes self.font_classes = font_classes items = [(string.split(attention_config.label_delim),font) for string,font in items] # CHANGED super().__init__(items, **kwargs) self.processor = [TextlineProcessor(self)] def get(self, i): stridxs, fontidx = self.items[i] # int, list of ints return TextlineAndFont( (tensor(stridxs), tensor(fontidx)), ([self.classes[c] for c in stridxs], self.font_classes[fontidx]), self.items[i]) def analyze_pred(self, nn_output, thresh=0.5, _=None): font_pred, y_pred = nn_output # [c1], [s_e,c2] assert len(listify(y_pred.shape)) == 2 # (no batch inputs) return 
font_pred.argmax(dim=-1), decode_single_ctc(y_pred.argmax(dim=-1)), _, _ # [1], [seq_len], _, _ def reconstruct(self, data_out): fontidx, t_argmax, _, lengths = data_out # output from data / output from nn_out -> analyze_pred stridxs = [int(i) for i in t_argmax] fontidx = int(fontidx) return TextlineAndFont((one_hot_text(stridxs, self.c), fontidx), ([self.classes[c] for c in stridxs], self.font_classes[fontidx]), data_out) @property def c(self): return len(self.classes) #export def im2seq_data_collate(batch:ItemsList, pad_idx:int=0)->Tensor: if isinstance(batch[0][1], int): return data_collate(batch) "Convert `batch` items to tensor data." data = to_data(batch) # list of (image, text) pairs # image: [3,48,w], text: [n,c], where n's and w's are different max_w = max([image.shape[2] for image, (text,font) in data]) max_h = max([image.shape[1] for image, (text,font) in data]) max_n = max([text.shape[0] for image, (text,font) in data]) # _, num_classes = data[0][1].shape images = torch.zeros(len(batch), 3, max_h, max_w) fonts = torch.zeros(len(batch)).long() # texts = torch.zeros(len(batch), max_n, num_classes) texts = [] nn_out_seq_len, texts_len = [], [] for i, (image, (text,font)) in enumerate(data): fonts[i] = font c,h,w = image.shape images[i, : , : , :w ] = image images[i, : , : , w: ] = image[:,:,w-1].unsqueeze(2).expand(c,h,max_w-w) nn_out_seq_len.append( image_width2seq_len(w) ) n = text.size(0) texts.append( tensor(text) ) # texts[i, :n , : ] = tensor(text) # texts[i, n: , -1 ] = 1 texts_len.append(n) # texts = torch.cat(texts, axis=0) return images, (fonts, texts, tensor(nn_out_seq_len).type(torch.int), tensor(texts_len).type(torch.int)) # + #export train_transforms = [ rand_resize(pad=(0,PAD), p=1.0), rotate(degrees=(-2, 2), p=0.6), symmetric_warp(magnitude=(-0.03, 0.03), p=0.3), rand_zoom(scale=(0.9,1.03), p=0.5), brightness(change=(0.35, 0.65), p=0.4), contrast(scale=(0.7,1.3), p=0.4), gaussian_blur(size=(1, 7), p=0.2), # squish(scale=(0.85,1.15), p=0.3), 
# cutout(n_holes=(0,6), length=(1,10)), # black rect # tilt(direction=(0,3), magnitude=(-0.2,0.2), p=0.3) ] valid_transforms = [ rand_resize(pad=(0,0), p=1.0) # (no padding, but need to resize) ] # + #export def create_data(df, bs=32): ''' DataFrame (df) -> Dataloader (dl) ''' data = (MyImageList.from_df(df, path='.', cols='image_path') .split_from_df(col='valid') .label_from_df(cols='string', label_cls=TextlineList, label_delim=attention_config.label_delim) .transform((train_transforms, valid_transforms), tfm_y=False) .databunch(bs=bs, collate_fn=partial(im2seq_data_collate, pad_idx=attention_config.pad_idx)) .normalize() ) # data.train_dl.numworkers=0 # data.valid_dl.numworkers=0 # def add_beggining_and_end(b): # x,y = b # y = F.pad(y, (1, 0), value=bos_idx) # y = F.pad(y, (0, 1), value=eos_idx) # return x,y # data.add_tfm(add_beggining_and_end) return data # - # ### Model #export def conv_output(w, ss, ps=None, ks=3): ''' image width, strides, pools, kernel sizes ''' for s,p,k in zip(ss,ps,ks): s = s[1] if isinstance(s, tuple) else s w = w if w%s == 0 else w + 1 w = (w - k + 2*p)/s + 1 if p is not None else w/s return int(w) conv_output(129, [2, 1, 2, 1, (2,1), (2,1), 1], [None] * 6 + [0], [3, 3, 3, 3, 3, 3, 3]) # + #export _apply_layer = lambda args: args[1](args[0]) # args[0]: x, args[1]: layer => layer(x) class MultiHeadAttention(nn.Module): def __init__(self, n_heads, d_model, d_head=None, p=0., bias=True, scale=True, shared_qk=False): super().__init__() d_head = ifnone(d_head, d_model//n_heads) self.n_heads,self.d_head,self.scale = n_heads,d_head,scale self.q_wgt, self.v_wgt = [nn.Linear(d_model, n_heads*d_head, bias=bias) for o in range(2)] self.k_wgt = self.q_wgt if shared_qk else nn.Linear(d_model, n_heads*d_head, bias=bias) self.out = nn.Linear(n_heads * d_head, d_model, bias=bias) self.drop_att,self.drop_res = nn.Dropout(p),nn.Dropout(p) self.ln = nn.LayerNorm(d_model) def forward(self, q, kv, mask=None): ''' [b,s_d,512], [b,s_e,512], [1,1,s_d,s_e] -> 
[b,s_d,512] ''' bs,seq_len = q.size(0),q.size(1) wq,wk,wv = map(_apply_layer, zip([q,kv,kv], [self.q_wgt,self.k_wgt,self.v_wgt])) # [b,s_d,h*512], [b,s_e,h*512] x 2 wq,wk,wv = map(lambda x:x.view(bs, x.size(1), self.n_heads, self.d_head), (wq,wk,wv)) # [b,s_d,h,512], [b,s_e,h,512] x 2 wq,wv = map(lambda x:x.permute(0, 2, 1, 3), (wq,wv)) # [b,h,s_d,512], [b,h,s_e,512] wk = wk.permute(0, 2, 3, 1) # [b,h,512,s_e] attn_score = torch.matmul(wq, wk) # [b,h,s_d,s_e] if self.scale: attn_score.div_(self.d_head ** 0.5) if mask is not None: # NOTE: masks only ones, not zeros! attn_score = attn_score.float().masked_fill(mask, -float('inf')).type_as(attn_score) # [b,h,s_d,s_e] attn_prob = self.drop_att(F.softmax(attn_score, dim=-1)) # [b,h,s_d,s_e] attn_vec = torch.matmul(attn_prob, wv) # [b,h,s_d,512] attn_vec = attn_vec.permute(0, 2, 1, 3).contiguous().contiguous() # [b,s_d,h,512] attention = attn_vec.view(bs, seq_len, -1) # [b,s_d,h*512] return self.ln(q + self.drop_res(self.out(attention))) # - mha = MultiHeadAttention(n_heads=8, d_model=512) mha(torch.zeros(4,24,512), torch.zeros(4,16,512), torch.zeros(1,1,24,16, dtype=bool)).shape #export def feed_forward(d_model:int, d_ff:int, ff_p:float=0., activ_func=partial(nn.ReLU, inplace=True), double_drop:bool=True): ''' [...,d] -> [...,d] ''' layers = [nn.Linear(d_model, d_ff), activ_func()] if double_drop: layers.append(nn.Dropout(ff_p)) return SequentialEx(*layers, nn.Linear(d_ff, d_model), nn.Dropout(ff_p), MergeLayer(), nn.LayerNorm(d_model)) #export class EncoderBlock(nn.Module): "Encoder block of a Transformer model." 
def __init__(self, n_heads, d_model, d_inner, p=0., bias=True, scale=True, double_drop=True): super().__init__() self.mha = MultiHeadAttention(n_heads, d_model, p=p, bias=bias, scale=scale) self.ff = feed_forward(d_model, d_inner, ff_p=p, double_drop=double_drop) def forward(self, x, mask=None): ''' [b,s_e,512], [1,1,s_e,s_e] -> [b,s_e,512] ''' return self.ff(self.mha(x, x, mask=mask)) e = EncoderBlock(n_heads=8, d_model=512, d_inner=1024) e(torch.zeros(4,16,512), torch.zeros(1,1,16,16, dtype=bool)).shape #export class DecoderBlock(nn.Module): "Decoder block of a Transformer model." def __init__(self, n_heads, d_model, d_inner, p=0., bias=True, scale=True, double_drop=True): super().__init__() self.mha1 = MultiHeadAttention(n_heads, d_model, p=p, bias=bias, scale=scale) self.mha2 = MultiHeadAttention(n_heads, d_model, p=p, bias=bias, scale=scale) self.ff = feed_forward(d_model, d_inner, ff_p=p, double_drop=double_drop) def forward(self, x, enc, mask_out=None): ''' [b,s_d,512], [b,s_e,512], [1,1,s_d,s_d] -> [b,s_d,512] ''' return self.ff(self.mha2(self.mha1(x, x, mask_out), enc)) d = DecoderBlock(n_heads=8, d_model=512, d_inner=1024) d(torch.zeros(4,24,512), torch.zeros(4,16,512), torch.zeros(1,1,24,24, dtype=bool)).shape #export def get_output_mask(inp, pad_idx=1): ''' [b,s_e,...] -> [1,1,s_e,s_e] ''' return torch.triu(inp.new_ones(inp.size(1),inp.size(1)), diagonal=1)[None,None].type(torch.bool) get_output_mask(torch.ones(4,4)).type(torch.int).squeeze() #export class PositionalEncoding(Module): "Encode the position with a sinusoid." 
def __init__(self, d:int): self.register_buffer('freq', 1 / (10000 ** (torch.arange(0., d, 2.)/d))) def forward(self, pos:Tensor): inp = torch.ger(pos, self.freq) enc = torch.cat([inp.sin(), inp.cos()], dim=-1) return enc #export class TransformerEmbedding(nn.Module): "Embedding + positional encoding + dropout" def __init__(self, emb_sz, vocab_sz=None, drop=0.): super().__init__() self.emb_sz = emb_sz if vocab_sz is None: self.embed = None else: self.embed = nn.Embedding(vocab_sz, emb_sz) self.pos_enc = PositionalEncoding(emb_sz) self.drop = nn.Dropout(drop) self.alpha = nn.Parameter(tensor(1.)) def forward(self, inp): ''' [] -> [] ''' pos = torch.arange(0, inp.size(1), device=inp.device).float() if self.embed is not None: inp = self.embed(inp) return self.drop(inp + self.alpha * self.pos_enc(pos)) # return self.drop(inp * math.sqrt(self.emb_sz) + self.pos_enc(pos)) #export def compose(funcs): def func_out(x, *args): for f in listify(funcs): x = f(x, *args) return x return func_out #export class TransformerEncoder(Module): def __init__(self, n_layers=6, n_heads=8, d_model=512, d_inner=1024, p=0.1, bias=True, scale=True, double_drop=True): self.enc_emb = TransformerEmbedding(d_model, drop=0.) 
# no need to embed encoding from cnn output args = (n_heads, d_model, d_inner, p, bias, scale, double_drop) self.encoders = nn.ModuleList([EncoderBlock(*args) for _ in range(n_layers)]) def forward(self, inp): ''' [b,s_e,512], [b,s_d,c] or [b,s_d] -> [b,s_d,c] (c - num classes) ''' enc = self.enc_emb(inp) # return compose(self.encoders)(enc) # [b,s_e,512] TransformerEncoder()( torch.zeros(4,12,512) ).shape #export class CNN(nn.Module): def __init__(self, d_model, cnn_layers, kernels, strides, channels, padding, nc=3): super().__init__() layers = [] for layer,i,o,k,s,p in zip(cnn_layers, [nc] + channels[:-1], channels, kernels, strides, padding): layers.append( layer(ni=i, nf=o, ks=k, stride=s, padding=p) ) self.cnn = nn.Sequential(*layers) b,c,h,w = self.cnn(torch.zeros(1,3,48,128)).shape self.out = nn.Linear(h*c, d_model) print('CNN output = h:{} c:{}'.format(h,c)) def forward(self, x): x = self.cnn(x).permute(0,3,1,2) b,w,c,h = x.shape return self.out(x.view(b,w,-1)) # [b,c,h,w] # + #export import revtorch as rv def RevConv(ni, nf, ks, stride, padding): assert ni == nf and stride == 1 f_func = conv_layer(ni//2, nf//2, ks, stride=stride, padding=padding) g_func = conv_layer(ni//2, nf//2, ks, stride=stride, padding=padding) layers = nn.ModuleList([rv.ReversibleBlock(f_func, g_func)]) return rv.ReversibleSequence(layers, eagerly_discard_variables = True) # - m = nn.AdaptiveAvgPool2d([2,512]) # this([h,w])([B,H,W]) -> [B,h,w] input = torch.randn(1, 64, 512) output = m(input) print(output.shape) #export def get_normal_cnn(dx=1): strides = [2, 1, (2,1), 1, (2,1), 1, (2,1), 1] channels = [int(c*dx) for c in [64, 64, 128, 128, 256, 256, 512, 512]] cnn_layers = [conv_layer] * len(strides) kernels = [3] * len(strides) padding = [None] * len(strides) # None - out size doesnt change return cnn_layers, channels, kernels, strides, padding cnn_layers, channels, kernels, strides, padding = get_normal_cnn() cnn = CNN(512, cnn_layers, kernels, strides, channels, padding) 
cnn(torch.zeros(2,3,48,128)).shape #export def get_partially_rev_cnn(dx=1): strides = [2, 1, (2,1), 1, (2,1), 1, (2,1), 1] channels = [int(c*dx) for c in [64, 64, 128, 128, 256, 256, 512, 512]] cnn_layers = [conv_layer, RevConv] * (len(strides)//2) kernels = [3] * len(strides) padding = [None] * len(strides) # None - out size doesnt change return cnn_layers, channels, kernels, strides, padding cnn_layers, channels, kernels, strides, padding = get_partially_rev_cnn(dx=1/2) cnn = CNN(512, cnn_layers, kernels, strides, channels, padding) cnn(torch.zeros(2,3,48,128)).shape #export class AttentionModel(nn.Module): def __init__(self, nclass=10, fclass=10, nc=3, n_layers=6, d_model=512, d_ff=1024, use_rnn=False, bidirectional=False): super().__init__() cnn_layers, self.channels, self.kernels, self.strides, self.padding = get_partially_rev_cnn(dx=1/2) self.cnn = CNN(d_model, cnn_layers, self.kernels, self.strides, self.channels, self.padding, nc=nc) # font prediction h,w = 2,d_model self.adaptive_pool = nn.AdaptiveAvgPool2d([h,w]) # this([h,w])([B,H,W]) -> [B,h,w] f_model = 2 # font embedding self.font_ff = nn.Sequential(nn.Linear(h*w, fclass*f_model), nn.ReLU()) self.font_out = nn.Linear(f_model, 1) # self.font_emb = nn.Linear(f_model, d_model) # text prediction self.transformer = TransformerEncoder(n_layers=n_layers, n_heads=8, d_model=d_model, d_inner=d_ff) self.use_rnn = use_rnn if self.use_rnn: self.rnn = nn.LSTM(d_model, d_model, bidirectional=bidirectional) mult = 1 if not bidirectional else 2 d_model = d_model * mult self.out = nn.Linear(d_model, nclass) self.nclass, self.d_model, self.fclass, self.f_model = nclass, d_model, fclass, f_model def forward(self, x): ''' [b,c,h,w], [b,s_d] ''' b,c,h,w = x.shape x = self.cnn(x) # [b,w,512] f = self.adaptive_pool(x).view(b,-1) # [b,h_a x w_a] (_a = adaptive pool params) f_enc = self.font_ff(f).view(-1, self.fclass, self.f_model) f_out = self.font_out(f_enc).view(-1, self.fclass) # f_emb = self.font_emb(f_enc) # [b,f,512] 
x = self.transformer(x) if self.use_rnn: x, _ = self.rnn(x) return f_out, self.out(x) font_pred, y_pred = AttentionModel()(torch.zeros(2,3,48,128)) y_pred.shape model = AttentionModel() image_width2seq_len = lambda w: conv_output(w, model.strides, model.padding, model.kernels) image_width2seq_len(129) # ### Data data = create_data(sparse_df, bs=8) data # + # data.show_batch(4, figsize=(10,10)) # - dl = iter(data.valid_dl) i, o = next(dl) i.shape data.c data.x[0] data.y[0] data.y[0].data del data # ### Learner #export from ocr.ocr_crnn_training import CTCFontLoss, AddLossMetrics, WordErrorRate data = create_data(main_df, bs=12) model = AttentionModel(nclass=data.c, fclass=len(data.font_classes)) loss_func = CTCFontLoss(ctc_pad_idx=data.classes.index(attention_config.label_delim)) data learner = Learner(data, model, loss_func=loss_func, callback_fns=[ShowGraph, AddLossMetrics, WordErrorRate]) learner.lr_find() learner.recorder.plot(skip_end=7,suggestion=True) learner.fit_one_cycle(1, 1e-4) save_inference(learner, 'attention_ocr') learner.fit_one_cycle(10, 1e-4) learner.fit_one_cycle(20, 1e-4) save_inference(learner, 'attention_ocr') learner.show_results(DatasetType.Valid, rows=4, figsize=(10,10)) # ### Examples learner = load_inference('attention_ocr') # learner.data = data from pathlib import PosixPath from ocr.core import * im = cv2.imread(str(PosixPath('../data/ocr/fontsynth_lines/').ls()[0])) plot(im) pred,_,_ = learner.predict( Image(tensor(im).permute(2,0,1) / 255.) 
) chars, font = pred.obj string = ''.join(chars) string, font # ### Eval def calc_precision_recall(true_words, pred_words): num_preds, num_trues = len(pred_words), len(true_words) correct = 0 for true_word in true_words: if true_word in pred_words: pred_words.remove(true_word) correct += 1 continue precision = correct / num_preds recall = correct / num_trues # print(precision, recall, correct, num_preds) return precision, recall # + def wer(s1,s2): ''' s1 - true text, s2 - pred text ''' d = np.zeros([len(s1)+1,len(s2)+1]) d[:,0] = np.arange(len(s1)+1) d[0,:] = np.arange(len(s2)+1) for j in range(1,len(s2)+1): for i in range(1,len(s1)+1): if s1[i-1] == s2[j-1]: d[i,j] = d[i-1,j-1] else: d[i,j] = min(d[i-1,j]+1, d[i,j-1]+1, d[i-1,j-1]+1) return d[-1,-1]/len(s1) word_error = wer( 'black frog jumped away'.split(' '), 'black frog jumped awayyy'.split(' ') ) char_error = wer( 'black frog jumped away', 'black frog jumped awayyy' ) char_error, word_error # - def idx2text(indexes): for i in indexes: try: data.classes[i] except: print('asd', i) return ''.join([data.classes[i] for i in indexes]) # + def evaluate_model(model, dl, t2list=lambda t: list(t.cpu().numpy()), total=None): total = len(dl) if total is None else total errors = 0 ps, rs, ws, cs = [], [], [], [] for images, (font, texts, im_lens, text_lens) in tqdm(iter(dl), total=total): font_pred, y_pred = model(images) y_pred = y_pred.argmax(-1) pred_texts = decode_ctc(y_pred) for true_words, pred_words in zip(texts, pred_texts): # go through batch if list(pred_words.shape) == []: # print('list(pred_words.shape) == []') errors += 1 continue true_words, pred_words = t2list(true_words), t2list(pred_words) true_str, pred_str = idx2text(true_words), idx2text(pred_words) p,r = calc_precision_recall(true_words, pred_words) c = wer(true_str, pred_str) w = wer(true_str.split(' '), pred_str.split(' ')) ps.append(p); rs.append(r); ws.append(w); cs.append(c) print('errors:', errors) return np.array(ps), np.array(rs), 
np.array(ws), np.array(cs) # p, r, w, c = evaluate_model(learner.model.train(), data.valid_dl) # - model = learner.model.eval() p, r, w, c = evaluate_model(model, learner.data.train_dl) c.mean(), w.mean(), p.mean(), r.mean() p, r, w, c = evaluate_model(model, learner.data.valid_dl) c.mean(), w.mean(), p.mean(), r.mean()
nbs/07_ocr_attention_training.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.7.0
#     language: julia
#     name: julia-1.7
# ---

# # Playground with sampled data from KiT-RT
#
# ### Setup

using KitBase, Plots, JLD2, Distributions, LinearAlgebra, Flux, NPZ
using Flux: @epochs

# +
# Build one labelled sample for the flow-regime classifier.
#   w  - conservative variables, sw - their spatial slopes,
#   f  - particle distribution sampled on velocity grid u,
#   K  - internal degrees of freedom, Kn - Knudsen number.
# Returns (x, y): x = [w; sw; tau] is the feature vector, y is a binary label
# (0.0 when the Chapman-Enskog reconstruction matches f, i.e. continuum/NS
# regime; 1.0 otherwise, i.e. rarefied regime).
# NOTE(review): relies on KitBase helpers (conserve_prim, gauss_moments,
# chapman_enskog, ...); the μ/ω defaults come from ref_vhs_vis — confirm their
# physical meaning against the KitBase documentation.
function regime_data(w, sw, f, u, K, Kn, μ=ref_vhs_vis(Kn, 1.0, 0.5), ω=0.81)
    gam = heat_capacity_ratio(K, 1)
    prim = conserve_prim(w, gam)
    Mu, Mxi, _, _1 = gauss_moments(prim, K)
    a = pdf_slope(prim, sw, K)
    swt = -prim[1] .* moments_conserve_slope(a, Mu, Mxi, 1)
    A = pdf_slope(prim, swt, K)
    tau = vhs_collision_time(prim, μ, ω)
    # Chapman-Enskog reconstruction of the distribution from the macro state.
    fr = chapman_enskog(u, prim, a, A, tau)
    # Density-normalized distance between the kinetic and reconstructed pdfs.
    L = norm((f .- fr) ./ prim[1])

    x = [w; sw; tau]
    # Threshold 0.005 separates the NS label (0.0) from rarefied (1.0).
    y = ifelse(L <= 0.005, 0.0, 1.0)

    return x, y
end

# Print how many columns of the label matrix Y carry regime label rg
# (default 0 = NS regime). Purely diagnostic; returns nothing.
function regime_number(Y, rg=0)
    idx = 0
    for i in axes(Y, 2)
        if Y[1, i] == rg
            idx += 1
        end
    end
    println("NS regime: $(idx) of $(size(Y, 2))")

    return nothing
end

# Classification accuracy in [0, 1]: fraction of columns where the rounded
# network output nn(X) agrees with the rounded reference labels Z.
function accuracy(nn, X, Z)
    Z1 = nn(X)
    ZA1 = [round(Z1[1, i]) for i in axes(Z1, 2)]
    ZA = [round(Z[1, i]) for i in axes(Z, 2)]

    # Local `accuracy` deliberately shadows the function name in this scope.
    accuracy = 0.0
    for i in eachindex(ZA)
        if ZA[i] == ZA1[i]
            accuracy += 1.0
        end
    end
    accuracy /= length(ZA)

    return accuracy
end
# -

# ### Dataset

# Each CSV row: the first column is skipped (parsing starts at column 2 —
# presumably an index; confirm against the data files). The first two kept
# rows are used below as the velocity grid and quadrature weights; rows 3+
# are sampled pdfs.
file = open("../../../data/1d/a2_ev10.csv")
data = []
for line in eachline(file)
    a = split(line, ",")
    b = [parse(Float64, a[i]) for i = 2:length(a)]
    push!(data, b)
end
pdfs = data[3:end];

# +
# Append the pdfs from a second sample file to the pool.
file = open("../../../data/1d/a3_ev10.csv")
for line in eachline(file)
    a = split(line, ",")
    b = [parse(Float64, a[i]) for i = 2:length(a)]
    push!(data, b)
end
pdfs = [pdfs; data[3:end]]

#=file = open("../../../data/1d/a8_ev5.csv")
for line in eachline(file)
    a = split(line, ",")
    b = [parse(Float64, a[i]) for i = 2:length(a)]
    push!(data, b)
end
pdfs = [pdfs; data[3:end]]=#

# Each generated sample consumes two pdfs (left/right state), hence ÷ 2.
nd = length(pdfs) ÷ 2
# -

# 1-D velocity space built from the grid (data[1]) and weights (data[2]).
vs = VSpace1D(-5.0, 5.0, length(data[1]), data[1], data[1][2:end] .- data[1][1:end-1], data[2])
# Heaviside step over velocity: 1 for u > 0, 0 for u < 0 (upwind selector).
δ = heaviside.(vs.u);
# Random cell sizes (dxs), Knudsen numbers (kns) and density scalings (rhos)
# for the nd generated samples.
dist = Uniform(0.005, 0.1)
dxs = rand(dist, nd)
dist = Uniform(0.0001, 1.0)
kns = rand(dist, nd);
dist = Uniform(0.5, 1.0)
rhos = rand(dist, nd);

# Seed the feature/label matrices with one dummy column; real samples are
# hcat'ed on below (the dummy column stays in the dataset).
X = [1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
Y = [0.0]
for i = 1:nd
    try
        #fL = pdfs[i]; fR = pdfs[nd*2-i]
        # Draw two pdfs from the pool and rescale by random densities.
        fL = pop!(pdfs) .* pop!(rhos); fR = pop!(pdfs) .* pop!(rhos) # shuffle
        wL = moments_conserve(fL, vs.u, vs.weights); wR = moments_conserve(fR, vs.u, vs.weights)
        #@show wL, wR

        # Upwind splice: left state for u > 0, right state for u < 0.
        f = @. fL * δ + fR * (1.0 - δ)
        w = moments_conserve(f, vs.u, vs.weights)
        #f = @. (fL + fR) / 2
        #w = @. (wL + wR) / 2
        # Finite-difference slope of the conservative variables over dxs[i].
        sw = @. (wR - wL) / dxs[i]

        tmpx, tmpy = regime_data(w, sw, f, vs.u, 0, kns[i])
        X = hcat(X, tmpx)
        Y = hcat(Y, tmpy)
    # NOTE(review): the bare catch silently drops every sample where
    # regime_data fails; consider logging the exception or counting drops.
    catch
    end
end

regime_number(Y)

Y

X

# Persist the dataset for reuse from Python/NumPy.
npzwrite("data.npz", Dict("X" => X, "Y" => Y))

# Plot one randomly chosen pdf as a sanity check.
# NOTE(review): Int(floor(rand() * n)) can yield 0, which is out of bounds
# for 1-based indexing — rand(1:n) would be safer.
idx = Int(floor(rand() * size(X, 2)))
plot(data[1], data[idx], ylabel="$(idx)-th pdf")

# ### Model

# Load the pre-trained classifier and fine-tune it on the sampled data.
@load "../nn_scalar.jld2" nn

accuracy(nn, X, Y)

# NOTE: `data` is rebound here from the raw CSV rows to a Flux DataLoader.
data = Flux.Data.DataLoader((X, Y), shuffle = true)
ps = Flux.params(nn)

sqnorm(x) = sum(abs2, x)  # squared L2 norm; only used by the commented-out MSE loss
#loss(x, y) = sum(abs2, nn(x) - y) / size(x, 2) #+ 1e-6 * sum(sqnorm, ps)
loss(x, y) = Flux.binarycrossentropy(nn(x), y)
# Throttled callback: report full-dataset loss at most once per second.
cb = () -> println("loss: $(loss(X, Y))")
opt = ADAM()

@epochs 2 Flux.train!(loss, ps, data, opt, cb = Flux.throttle(cb, 1))

cd(@__DIR__)
@save "nn_rif.jld2" nn # reinforcement neural model

# ### Test

accuracy(nn, X, Y)

nn(X)

X

Y
src/1d/sampler/.ipynb_checkpoints/scalar-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline import numpy as np import matplotlib.pyplot as plt from timml import * # ### first test of `HeadLineSinkContainer` # Still under development ml = ModelMaq() lsc = HeadLineSinkContainer(ml, xydict={0:[(0, 0), (10, 0), (10, 10)], 1:[(0, -5), (0, -10)]}, laydict={0: 0, 1: 0}, hls=10) rf = Constant(ml, 0, -100, 20) ml.solve() ml.contour([-20, 20, -20, 20], ngr=40)
notebooks/lscontainer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Results of the estimator study (Section 4.3.1) # + # ~~~ # This file is part of the paper: # # "A NON-CONFORMING DUAL APPROACH FOR ADAPTIVE TRUST-REGION REDUCED BASIS # APPROXIMATION OF PDE-CONSTRAINED OPTIMIZATION" # # https://github.com/TiKeil/NCD-corrected-TR-RB-approach-for-pde-opt # # Copyright 2019-2020 all developers. All rights reserved. # License: Licensed as BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause) # ~~~ # - from matplotlib import pyplot as plt import matplotlib as mpl mpl.rcParams['figure.figsize'] = (12.0, 8.0) mpl.rcParams['font.size'] = 12 mpl.rcParams['savefig.dpi'] = 300 # + [markdown] heading_collapsed=true # ## Define data points from experiments # + [markdown] hidden=true # We gather all results from the notebooks # # EXC10-estimator_study_with_basis_size_16 # EXC10-estimator_study_with_basis_size_24 # EXC10-estimator_study_with_basis_size_32 # EXC10-estimator_study_with_basis_size_40 # EXC10-estimator_study_with_basis_size_48 # EXC10-estimator_study_with_basis_size_56 # + hidden=true data_points = [16, 24, 32, 40, 48, 56] ## functional max_J_error = [ 1.5602058, 0.0913711, 0.0766135, 0.0203013, 0.0069036, 0.0049428 ] max_J_estimator = [ 5.9913766, 0.1355419, 0.1245623, 0.0218672, 0.0070929, 0.0050998 ] max_NCD_J_error = [ 0.0960233, 0.0078391, 0.0060158, 0.0000622, 0.0000290, 0.0000253 ] max_NCD_J_estimator = [ 5.1036908, 0.1120268, 0.0782738, 0.0024091, 0.0001877, 0.0001684 ] ## gradient of the functional max_DJ_error = [ 60.3425755, 11.1470314, 6.4303011, 0.4347226, 0.3154153, 0.3015742 ] max_DJ_estimator = [ 24291.8313696, 5358.8100309, 4503.6642106, 756.5755578, 321.9380287, 297.4647676 ] max_NCD_DJ_error = [ 6.3335463, 0.7063809, 0.3138649, 0.0082424, 0.0067837, 0.0063186 ] max_NCD_DJ_estimator 
= [ 636.4101662, 15.3648749, 10.5050809, 0.4986274, 0.0712033, 0.0608080 ] max_approx_DJ_error = [ 1.2382556, 0.0897214, 0.0900121, 0.0006126, 0.0002131, 0.0002262 ] max_approx_DJ_estimator = [ 513.9431314, 12.7039995, 8.8771813, 0.2603803, 0.0277693, 0.0249135 ] ## u sensitivities max_d_u_error = [ 2.5355797, 0.9132762, 0.8533939, 0.1498923, 0.0852684, 0.0790111 ] max_d_u_estimator = [ 8.4667177, 1.3766618, 1.2687267, 0.3461900, 0.1496438, 0.1305220 ] max_d_u_approx_error = [ 1.3156686, 0.1176869, 0.1127435, 0.0245833, 0.0091088, 0.0087819 ] max_d_u_approx_estimator = [ 6.9827936, 0.8449806, 0.7053271, 0.1680877, 0.0418925, 0.0395744 ] ## p sensitivities max_d_p_error = [ 496.3981420, 479.5000612, 398.5507306, 41.8563170, 38.5226734, 37.0718470 ] max_d_p_estimator = [ 5114.7334754, 1286.4496500, 1032.0519773, 198.5964992, 119.5864251, 109.1262473 ] max_d_p_approx_error = [ 103.0201017, 89.1512782, 74.6967860, 6.1232911, 4.0908619, 3.0154769 ] max_d_p_approx_estimator = [ 4379.3465189, 930.7437716, 783.3274162, 112.6418339, 44.4683584, 42.1310719 ] color0 = (0.65,0,0.15) color1 = (0.84,0.19,0.15) color2 = (0.96,0.43,0.26) color4 = (1,0.88,0.56) color3 = (0.99,0.68,0.38) color6 = (0.27,0.46,0.71) color5 = (0.67,0.85,0.91) # - # ## Result for J (Figure 4A) # + plt.semilogy(data_points, max_J_error, 'o-', color=color4, label='error J') plt.semilogy(data_points, max_NCD_J_error, 'v-', color=color1, label='error NCD-J') plt.semilogy(data_points, max_J_estimator, 'o--', color=color4, label='estimate J') plt.semilogy(data_points, max_NCD_J_estimator, 'v--', color=color1, label='estimate NCD-J') plt.xlabel('greedy extension step') plt.grid() plt.legend() # - # ## Result for gradient of J (Figure 4B) # + plt.semilogy(data_points, max_DJ_error, 'o-', color=color0, label='error DJ') plt.semilogy(data_points, max_NCD_DJ_error, 'v-', color=color3, label='error NCD-DJ') plt.semilogy(data_points, max_approx_DJ_error, 'd-', color=color6, label='error approx.DJ') 
plt.semilogy(data_points, max_DJ_estimator, 'o--', color=color0, label='estimate DJ') plt.semilogy(data_points, max_NCD_DJ_estimator, 'v--', color=color3, label='estimate NCD-DJ') plt.semilogy(data_points, max_approx_DJ_estimator, 'd--', color=color6, label='estimate approx.DJ') plt.xlabel('greedy extension step') plt.grid() plt.legend() # - # ## Result for u sensitivities # + plt.semilogy(data_points, max_d_u_error, 'o-', color=color0, label='error u_dmu') plt.semilogy(data_points, max_d_u_approx_error, 'v-', color=color3, label='error u_dmu-approx') plt.semilogy(data_points, max_d_u_estimator, 'o--', color=color0, label='estimate u_dmu') plt.semilogy(data_points, max_d_u_approx_estimator, 'v--', color=color3, label='estimate u_dmu-approx') plt.xlabel('greedy extension step') plt.grid() plt.legend() # - # ## Result for p sensitivities # + plt.semilogy(data_points, max_d_p_error, 'o-', color=color0, label='error p_dmu') plt.semilogy(data_points, max_d_p_approx_error, 'v-', color=color3, label='error p_dmu-approx') plt.semilogy(data_points, max_d_p_estimator, 'o--', color=color0, label='estimate p_dmu') plt.semilogy(data_points, max_d_p_approx_estimator, 'v--', color=color3, label='estimate p_dmu-approx') plt.xlabel('greedy extension step') plt.grid() plt.legend()
notebooks/Paper1_simulations/Model_Problem_2_Estimator_study/Extended_results_of_the_estimator_study(Figure_4).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + """ @author: bangfu.tao """ import numpy as np import pandas as pd import scipy.signal as signal import matplotlib.pyplot as plt import time from datetime import datetime # + # convert data from tab format to csv format f0 = open("data-input-tab.txt", "r") f1 = open("output.txt", "w") for x0 in f0: x1 = x0.replace('\t', ',') f1.write(x1) f0.close() f1.close() # - raw_df = pd.read_csv("data-input-csv.txt") #raw_df.head(10) # + def datetime2sec(dt): obj = datetime.strptime(dt, "%d/%m/%Y %H:%M:%S") return obj.timestamp() def date2sec(dt): obj = datetime.strptime(dt, "%d/%m/%Y") return obj.timestamp() #a = datetime2sec("25/09/2018 13:30:00") #b = date2sec("25/09/2018") #print((a-b)/3600) # - core_df = raw_df.loc[:, ['ticket #', 'checkout date', 'checkin date', 'Unit #', 'miles drive']] # + #core_df.head(10) # + bad_rows = [] for i in range(core_df.shape[0]): row = core_df.iloc[i, :] dt0 = datetime2sec(row[1].strip()) dt1 = datetime2sec(row[2].strip()) unit = row[3] miles = row[4] if not (dt0 > 0 and dt1 > dt0 and unit > 0 and miles >0): bad_rows.append(i) print(row) if (len(bad_rows) > 0): core_df.drop(bad_rows, inplace = True) # + core_df['out-date-only'] = [ x[0:10].strip() for x in core_df['checkout date'] ] core_df['out-date-only-mins'] = [ date2sec(x.strip())/60 for x in core_df['out-date-only'] ] core_df['in-date-only'] = [ x[0:10].strip() for x in core_df['checkin date'] ] core_df['in-date-only-mins'] = [ date2sec(x.strip())/60 for x in core_df['in-date-only'] ] # - core_df['out-datetime-mins'] = [ datetime2sec(x.strip())/60 for x in core_df['checkout date'] ] core_df['in-datetime-mins'] = [ datetime2sec(x.strip())/60 for x in core_df['checkin date'] ] core_df['rental-minutes'] = core_df["in-datetime-mins"] - core_df["out-datetime-mins"] # + 
#core_df.head(10) # - #car_sorted_df = core_df.sort_values(by = 'Unit #') car_sorted_df = core_df.sort_values(['Unit #', 'out-datetime-mins']) ## two level sorted !!! # + #car_sorted_df.head(10) # - """ fp = open("tmp.txt", "w") tmp = car_sorted_df.to_csv(index=False) fp.write(tmp) fp.close() """ # + free_mins = [] for i in range(car_sorted_df.shape[0]): curr = car_sorted_df.iloc[i, :] free_mins.append(-1.0) if (i > 0 and curr['Unit #'] == prev['Unit #']): free_mins[-2] = curr['out-datetime-mins'] - prev['in-datetime-mins'] prev = curr; car_sorted_df['free-minutes'] = free_mins # + #car_sorted_df.head(10) # - ret_date_sorted_df = core_df.sort_values(['in-date-only-mins', 'Unit #']) ## two level sorted !!! # + # Q2: per day N = ret_date_sorted_df.shape[0] prev = ret_date_sorted_df.iloc[0, :] groups = [] a = 0 for i in range(N): curr = ret_date_sorted_df.iloc[i, :] if (i == 0): a = 0 elif (i < N - 1): dt0 = prev['in-date-only'] dt1 = curr['in-date-only'] if (dt1 != dt0): groups.append((a, i)) a = i #else: #print(dt0, dt1) else: dt0 = prev['in-date-only'] dt1 = curr['in-date-only'] if (dt1 != dt0): groups.append((a, i)) groups.append((i, i+1)) else: groups.append((a, i+1)) prev = curr; print("=== groups ===") print(len(groups)) ret_dates = [] ret_cars_list = [] for m, n in groups: df = ret_date_sorted_df.iloc[m:n, :] for dt in df['in-date-only']: ret_dates.append(dt) break; tmp = [] for car in df['rental-minutes']: tmp.append(car) ret_cars_list.append(tmp) print("=== ret_dates ===") print(len(ret_dates)) print("=== ret_cars_list ===") print(len(ret_cars_list)) # - Q2_df = pd.DataFrame() Q2_df['return-date'] = ret_dates Q2_df['num-cars'] = [ len(cars) for cars in ret_cars_list ] Q2_df['total-minutes'] = [ np.array(cars).sum() for cars in ret_cars_list ] Q2_df['individual-minutes'] = [ str(cars) for cars in ret_cars_list ] """ fp = open("Q2-df.txt", "w") fp.write("return-date\tnum-cars\ttotal-minutes\tindividual-minutes\n") N = Q2_df.shape[0] for i in range(N): ds = 
Q2_df.iloc[i, :] text = f"{ds[0]}\t{ds[1]}\t{ds[2]}\t{ds[3]}\n" fp.write(text) fp.close() """ """ Q1_df = car_sorted_df fp = open("Q1-df.txt", "w") fp.write("car-unit-id\tout-datetime\tin-datetime\tdrive-miles\trental-minutes\tfree-in-branch-minutes\n") N = Q1_df.shape[0] for i in range(N): ds = Q1_df.iloc[i, :] text = f"{ds['Unit #']}\t{ds['checkout date']}\t{ds['checkin date']}\t{ds['miles drive']}\t{ds['rental-minutes']}\t{ds['free-minutes']}\n" fp.write(text) fp.close() """ fig = plt.figure() plt.plot(Q2_df['num-cars']) plt.grid() plt.xlabel("day index") plt.ylabel("num cars returned") fig.savefig('Q2-test1.png') fig = plt.figure() plt.plot(Q2_df['total-minutes']/60) plt.grid() plt.xlabel("day index") plt.ylabel("total hours") fig.savefig('Q2-test2.png') import matplotlib.pyplot as plt import matplotlib.dates as mdates # + """ fig = plt.figure() plt.plot(Q2_df['num-cars']) plt.grid() plt.xlabel("day index") plt.ylabel("num cars returned") fig.savefig('Q2-test1.png') """ fig = plt.figure() short_dates = list(Q2_df['return-date']) for i in range(len(short_dates)): short_dates[i] = '' if (i % 30) == 0: short_dates[i] = (Q2_df['return-date'][i]).strip() x=list(range(1, len(Q2_df['return-date'])+1)) plt.xticks(x, short_dates) plt.plot(x, Q2_df['num-cars']) plt.ylabel("num cars returned") plt.grid() plt.title("cars returned per day") _=plt.xticks(rotation=90) fig.savefig('Q2-test1.png') ## fig = plt.figure() x=list(range(1, len(Q2_df['return-date'])+1)) plt.xticks(x, short_dates) plt.plot(x, Q2_df['total-minutes']/60) # to hour plt.ylabel("total hours") plt.grid() plt.title("total hours per day") _=plt.xticks(rotation=90) fig.savefig('Q2-test2.png') # + #car_sorted_df.head(10) N = car_sorted_df.shape[0] prev = car_sorted_df.iloc[0, :] groups = [] a = 0 for i in range(N): curr = car_sorted_df.iloc[i, :] if (i == 0): a = 0 elif (i < N - 1): dt0 = prev['Unit #'] dt1 = curr['Unit #'] if (dt1 != dt0): groups.append((a, i)) a = i #else: #print(dt0, dt1) else: dt0 = 
prev['Unit #'] dt1 = curr['Unit #'] if (dt1 != dt0): groups.append((a, i)) groups.append((i, i+1)) else: groups.append((a, i+1)) prev = curr; print("=== car groups ===") print(len(groups)) list_cars = [] list_rental_minutes = [] list_miles = [] for m, n in groups: df = car_sorted_df.iloc[m:n, :] for dt in df['Unit #']: list_cars.append(dt) break; tmp = [] for car in df['rental-minutes']: tmp.append(car) list_rental_minutes.append(np.array(tmp).sum()) tmp = [] for car in df['miles drive']: tmp.append(car) list_miles.append(np.array(tmp).sum()) print("=== list cars ===") print(len(list_cars)) print("=== list rental minutes ===") print(len(list_rental_minutes)) print("=== list miles ===") print(len(list_miles)) # - Q1_df = pd.DataFrame() Q1_df['car-unit'] = list_cars Q1_df['total-minutes'] = list_rental_minutes Q1_df['total-miles'] = list_miles #Q1_df.head(10) # + #xstr = list(Q1_df['car-unit']) #for i in range(len(xstr)): # xstr[i] = '' # if (i % 30) == 0: # xstr[i] = str(Q1_df['car-unit'][i]).strip() fig = plt.figure() #plt.plot(Q1_df['total-minutes']/60, 'o') # to hour plt.plot(Q1_df['total-minutes']/60) # to hour plt.ylabel("total hours") plt.grid() plt.title("total hours per car") #_=plt.xticks(rotation=90) fig.savefig('Q1-test1.png') fig = plt.figure() plt.plot(Q1_df['total-miles']) plt.ylabel("total miles") plt.grid() plt.title("total miles per car") #_=plt.xticks(rotation=90) fig.savefig('Q1-test2.png') # - (Q1_df['total-minutes']/60/24).describe() # convert to total days per car Q1_df['total-miles'].describe() # total miles per car Q2_df['num-cars'].describe() # returned cars per day (Q2_df['total-minutes']/60/24).describe() # returned time (in length of days) per day
python/car-rental.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Build a toy vocabulary (word <-> integer id mappings) for word2vec experiments.
#
# BUG FIX: the original first cell read `corpus_raw` before it was defined
# (NameError on a fresh kernel run) and tested `word != '.'` four times over.
# That cell was a broken duplicate of the working one below, so the two are
# merged into a single, correctly ordered version.

import numpy as np

try:
    import tensorflow as tf  # imported by the original notebook; unused in this snippet
except ImportError:  # keep the snippet importable where TF is not installed
    tf = None

corpus_raw = 'He is the king . The king is royal . She is the royal queen '

# convert to lower case
corpus_raw = corpus_raw.lower()

# Tokenize on whitespace and drop the '.' pseudo-token: it only marks
# sentence boundaries and must not be treated as a word.
words = set(word for word in corpus_raw.split() if word != '.')

vocab_size = len(words)  # total number of unique words

# Sort before enumerating so the id assignment is deterministic across runs
# (iteration order of a raw set depends on hash seeding).
word2int = {word: i for i, word in enumerate(sorted(words))}  # word -> id
int2word = {i: word for word, i in word2int.items()}          # id -> word
jupyter/.ipynb_checkpoints/Untitled-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import re import tarfile import requests from pugnlp.futil import path_status, find_files # + # From the nlpia package for downloading data too big for the repo BIG_URLS = { 'w2v': ( 'https://www.dropbox.com/s/965dir4dje0hfi4/GoogleNews-vectors-negative300.bin.gz?dl=1', 1647046227, ), 'slang': ( 'https://www.dropbox.com/s/43c22018fbfzypd/slang.csv.gz?dl=1', 117633024, ), 'tweets': ( 'https://www.dropbox.com/s/5gpb43c494mc8p0/tweets.csv.gz?dl=1', 311725313, ), 'lsa_tweets': ( 'https://www.dropbox.com/s/rpjt0d060t4n1mr/lsa_tweets_5589798_2003588x200.tar.gz?dl=1', 3112841563, # 3112841312, ), 'imdb': ( 'https://www.dropbox.com/s/yviic64qv84x73j/aclImdb_v1.tar.gz?dl=1', 3112841563, # 3112841312, ), } # + # These functions are part of the nlpia package which can be pip installed and run from there. 
def dropbox_basename(url):
    """Return the local filename for a Dropbox share URL, stripping the ?dl=N flag."""
    filename = os.path.basename(url)
    match = re.findall(r'\?dl=[0-9]$', filename)
    if match:
        return filename[:-len(match[0])]
    return filename


def download_file(url, data_path='.', filename=None, size=None, chunk_size=4096, verbose=True):
    """Uses stream=True and a reasonable chunk size to be able to download large (GB) files over https"""
    if filename is None:
        filename = dropbox_basename(url)
    file_path = os.path.join(data_path, filename)
    if url.endswith('?dl=0'):
        url = url[:-1] + '1'  # noninteractive download
    # BUG FIX: the original assigned `tqdm_prog = tqdm` / `no_tqdm` here, but
    # neither name was ever imported or used afterwards, so every call raised
    # NameError.  The dead progress-bar hook has been removed.
    if verbose:
        print('requesting URL: {}'.format(url))
    r = requests.get(url, stream=True, allow_redirects=True)
    size = r.headers.get('Content-Length', None) if size is None else size
    print('remote size: {}'.format(size))
    stat = path_status(file_path)
    print('local size: {}'.format(stat.get('size', None)))
    if stat['type'] == 'file' and stat['size'] == size:  # TODO: check md5 or get the right size of remote file
        r.close()
        return file_path
    print('Downloading to {}'.format(file_path))
    with open(file_path, 'wb') as f:
        for chunk in r.iter_content(chunk_size=chunk_size):
            if chunk:  # filter out keep-alive chunks
                f.write(chunk)
    r.close()
    return file_path


def untar(fname):
    """Extract a .tar.gz archive into the current working directory."""
    if fname.endswith("tar.gz"):
        with tarfile.open(fname) as tf:
            tf.extractall()
    else:
        print("Not a tar.gz file: {}".format(fname))

# +
# UNCOMMENT these 2 lines if you haven't already download the word2vec model and the imdb dataset
# download_file(BIG_URLS['w2v'][0])
# untar(download_file(BIG_URLS['imdb'][0]))

# +
import glob
import os
from random import shuffle


def pre_process_data(filepath):
    """
    This is dependent on your training data source but we will try to generalize it as best as possible.

    Reads the aclImdb-style layout (<filepath>/pos/*.txt, <filepath>/neg/*.txt)
    and returns a shuffled list of (label, text) pairs with pos=1, neg=0.
    """
    positive_path = os.path.join(filepath, 'pos')
    negative_path = os.path.join(filepath, 'neg')
    pos_label = 1
    neg_label = 0
    dataset = []
    for filename in glob.glob(os.path.join(positive_path, '*.txt')):
        with open(filename, 'r') as f:
            dataset.append((pos_label, f.read()))
    for filename in glob.glob(os.path.join(negative_path, '*.txt')):
        with open(filename, 'r') as f:
            dataset.append((neg_label, f.read()))
    # Interleave the classes before the sequential train/test split downstream.
    shuffle(dataset)
    return dataset

# +
from nltk.tokenize import TreebankWordTokenizer
from gensim.models.keyedvectors import KeyedVectors
word_vectors = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin.gz',
                                                 binary=True, limit=200000)


def tokenize_and_vectorize(dataset):
    """Turn each (label, text) sample into a list of word2vec token vectors.

    Tokens missing from the Google News vocabulary are silently dropped.
    (CLEANUP: removed the original's unused local `expected = []`.)
    """
    tokenizer = TreebankWordTokenizer()
    vectorized_data = []
    for sample in dataset:
        tokens = tokenizer.tokenize(sample[1])
        sample_vecs = []
        for token in tokens:
            try:
                sample_vecs.append(word_vectors[token])
            except KeyError:
                pass  # No matching token in the Google w2v vocab
        vectorized_data.append(sample_vecs)
    return vectorized_data
# -

word_vectors["dog"]


def collect_expected(dataset):
    """ Peel of the target values from the dataset """
    expected = []
    for sample in dataset:
        expected.append(sample[0])
    return expected


dataset = pre_process_data('./aclImdb_v1/train')
vectorized_data = tokenize_and_vectorize(dataset)
expected = collect_expected(dataset)

# +
# 80/20 sequential train/test split (data was already shuffled above).
split_point = int(len(vectorized_data)*.8)

x_train = vectorized_data[:split_point]
y_train = expected[:split_point]
x_test = vectorized_data[split_point:]
y_test = expected[split_point:]

# +
maxlen = 400
batch_size = 32         # How many samples to show the net before backpropogating the error and updating the weights
embedding_dims = 300    # Length of the token vectors we will create for passing into the Convnet
epochs = 2
# -


def pad_trunc(data, maxlen):
    """
    For a given dataset pad with zero vectors or truncate to maxlen
    """
    new_data = []
    # Create a vector of 0's the length of our word vectors
    zero_vector = []
    for _ in range(len(data[0][0])):
        zero_vector.append(0.0)
    for sample in data:
        if len(sample) > maxlen:
            temp = sample[:maxlen]
        elif len(sample) < maxlen:
            # BUG FIX: the original did `temp = sample` and then appended the
            # padding vectors, mutating the caller's sample in place.  Copy the
            # list first so the input dataset is left untouched.
            temp = list(sample)
            additional_elems = maxlen - len(sample)
            for _ in range(additional_elems):
                temp.append(zero_vector)
        else:
            temp = sample
        new_data.append(temp)
    return new_data


# +
import numpy as np

x_train = pad_trunc(x_train, maxlen)
x_test = pad_trunc(x_test, maxlen)

# (num_samples, maxlen, embedding_dims) tensors for the RNN.
x_train = np.reshape(x_train, (len(x_train), maxlen, embedding_dims))
y_train = np.array(y_train)
x_test = np.reshape(x_test, (len(x_test), maxlen, embedding_dims))
y_test = np.array(y_test)

# +
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, SimpleRNN

num_neurons = 50

print('Build model...')
model = Sequential()
model.add(SimpleRNN(num_neurons, return_sequences=True,
                    input_shape=(maxlen, embedding_dims)))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())

# +
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test))

# Persist architecture (JSON) and weights (HDF5) separately.
model_structure = model.to_json()
with open("simplernn_model1.json", "w") as json_file:
    json_file.write(model_structure)
model.save_weights("simplernn_weights1.h5")
print('Model saved.')

# +
from keras.models import model_from_json
with open("simplernn_model1.json", "r") as json_file:
    json_string = json_file.read()
model = model_from_json(json_string)

model.load_weights('simplernn_weights1.h5')
# -

sample_1 = "I'm hate that the dismal weather that had me down for so long, when will it break! Ugh, when does happiness return? The sun is blinding and the puffy clouds are too thin. I can't wait for the weekend."

# +
# We pass a dummy value in the first element of the tuple just because our helper expects it from the way processed the initial data.  That value won't ever see the network, so it can be whatever.
vec_list = tokenize_and_vectorize([(1, sample_1)]) # Tokenize returns a list of the data (length 1 here) test_vec_list = pad_trunc(vec_list, maxlen) test_vec = np.reshape(test_vec_list, (len(test_vec_list), maxlen, embedding_dims)) # - model.predict_classes(test_vec) # + from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten, SimpleRNN num_neurons = 100 print('Build model...') model = Sequential() model.add(SimpleRNN(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims))) model.add(Dropout(.2)) model.add(Flatten()) model.add(Dense(1, activation='sigmoid')) model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy']) print(model.summary()) # + model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_test, y_test)) model_structure = model.to_json() with open("simplernn_model2.json", "w") as json_file: json_file.write(model_structure) model.save_weights("simplernn_weights2.h5") print('Model saved.')
src/nlpia/book/examples/ch08.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# # Day 1. Chapter 6. R Programming
#
# ## Lab 5. R Functions Exercise - Solution
#
# In this exercise, let's check your knowledge of functions in R! There are several tasks dealing with the creation of functions. These functions should take certain parameters and produce certain results. Simply follow the instructions and use the two introductory examples as a guide.

# **Example 1: Create a function that takes a name as a parameter and produces "Hallo *name*" as print output**

hallo_du <- function(name){
    print(paste('Hallo',name))
}

hallo_du('Sam')

# **Example 2: Create a function that takes a name as a parameter and produces "Hallo *name*" as return output**

hallo_du2 <- function(name){
    return(paste('Hallo',name))
}

print(hallo_du2('Sam'))

# **Create a function that returns (-> return) the product of two integer values.**

# NOTE(review): this masks base R's built-in prod() for the rest of the session.
prod <- function(num1,num2){
    return(num1*num2)
}

prod(7,7)

# **Create a function that takes two parameters: an integer and a vector of integer values. It should return TRUE if the integer is contained in the vector. Otherwise the result should be FALSE. Pay close attention to the position of the FALSE return in the code.**

num_seek <- function(num,v){
    for (item in v){
        if (item == num){
            # Success! Stop and return the value.
            return(TRUE)
        }
    }
    # No match anywhere in the vector.
    return(FALSE)
}

num_seek(2,c(1,2,3))

num_seek(2,c(1,4,5))

# **Create a function that takes two parameters: an integer and a vector of integer values.
# The function should return the number of occurrences of the integer in the vector.**

num_anzahl <- function(num,v){
    # anzahl (German: "count") accumulates the matches.
    anzahl = 0
    for (x in v){
        if (x == num){
            anzahl <- anzahl + 1
        }
    }
    return(anzahl)
}

num_anzahl(2,c(1,1,2,2,3,3))

num_anzahl(1,c(1,1,2,2,3,1,4,5,5,2,2,1,3))

# **We now want to ship blocks of aluminium. For this, we create a function that accepts an integer representing the requested amount (in kg). To fulfil such an order we can choose from 5kg and 1kg blocks. The function should return the smallest possible number of blocks.**
#
# **For example, an order of 6kg requires at least two blocks: 1 x 5kg and 1 x 1kg. A load of 17kg requires at least 5 blocks (3 x 5kg and 2 x 1kg).**

bar_anzahl <- function(ladung){
    # Remainder after using as many 5kg blocks as possible = number of 1kg blocks.
    anzahl_von_einsen = ladung %% 5
    anzahl_von_fuenfen = (ladung - anzahl_von_einsen)/5
    return(anzahl_von_einsen+anzahl_von_fuenfen)
}

bar_anzahl(6)

bar_anzahl(17)

# **Create a function that accepts 3 integers as input and returns their sum. However, any value that is evenly divisible by 3 (3,6,9,...) does not count towards the sum. If all values are evenly divisible by 3, 0 should be returned. Hint: append().**

summe <- function(a, b, c){
    # Start with a 0 element so sum() works even when nothing is appended.
    out <- c(0)
    if (a %% 3 != 0){
        out <- append(a,out)
    }
    if (b %% 3 != 0){
        out <- append(b,out)
    }
    if (c %% 3 != 0){
        out <- append(c,out)
    }
    return(sum(out))
}

summe(7,2,3)

summe(3,6,9)

summe(9,11,12)

# **Create a function that returns TRUE if the input integer is a prime number. Otherwise the result should be FALSE.**

# This task can be solved in many different ways. You do not necessarily have to handle edge cases such as negative numbers, but you are very welcome to add checks for them!
prime_check <- function(num) {
  # Returns TRUE if `num` is prime, FALSE otherwise.
  # ROBUSTNESS FIX: guard num < 2 explicitly.  The original fed descending
  # sequences like 2:0 into `num %% 2:(num-1)` for small inputs (including a
  # `%% 0` that yields NaN) and relied on accidental behaviour; negative
  # numbers, 0 and 1 are simply not prime.
  if (num < 2) {
    return(FALSE)
  } else if (num == 2) {
    return(TRUE)
  } else if (any(num %% 2:(num-1) == 0)) {
    # Vectorised trial division: TRUE if any candidate in 2..num-1 divides num.
    return(FALSE)
  } else {
    return(TRUE)
  }
}

# +
# Alternative: explicit loop over the candidate divisors.
prime_check <- function(num){
    # Same num < 2 guard as above; this also keeps the loop range 2:(num-1)
    # well-formed (ascending) for every remaining input.
    if (num < 2) {
        return(FALSE)
    }
    if (num == 2) {
        return(TRUE)
    }
    for (x in 2:(num-1)){
        if ((num%%x) == 0){
            return(FALSE)
        }
    }
    return(TRUE)
}
# -

prime_check(2)

prime_check(5)

prime_check(4)

prime_check(237)

prime_check(131)

# Congratulations! You are done with Lab 5!
1.6 R Programming/de-DE/1.6.34 R - Functions. Lab5. Solutions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda root]
#     language: python
#     name: conda-root-py
# ---

# +
# NOTE(review): this notebook uses the pre-1.0 TensorFlow API
# (tf.scalar_summary, tf.merge_all_summaries, tf.initialize_all_variables,
# tf.contrib.slim, tf.train.SummaryWriter) and will not run on TF >= 1.0
# without renaming those calls.
import tensorflow.contrib.learn.python.learn as learn
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from sklearn import datasets, metrics
from sklearn.decomposition import PCA
import tensorflow.contrib.slim as slim
import numpy as np
import collections
import numpy as np  # NOTE(review): duplicate import, harmless
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import sklearn
import seaborn as sns
import six.moves.cPickle as pickle
import sys
from pandas import *
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import LogisticRegression
from sklearn import svm
# %matplotlib inline
# -


def accuracy_fn(predictions, labels):
    # Percentage of rows where the argmax of the prediction row matches the
    # argmax of the (one-hot) label row.
    return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0])


mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Reshape the flat 784-pixel rows into (N, 28, 28, 1) image tensors.
train_set= mnist.train.images.reshape(-1,28,28, 1)
test_set = mnist.test.images.reshape(-1,28,28, 1)


def variable_summaries(name, var):
    """Attach a lot of summaries to a Tensor."""
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.scalar_summary('mean/' + name, mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))
        tf.scalar_summary('sttdev/' + name, stddev)
        tf.scalar_summary('max/' + name, tf.reduce_max(var))
        tf.scalar_summary('min/' + name, tf.reduce_min(var))
        tf.histogram_summary(name, var)


def train_model(train_dataset, train_labels, test_dataset, test_labels,
                train_tensor, accuracy, tf_batch_data, tf_batch_labels,
                log_dir='./logs', num_steps=20000, batch_size=10,
                test_steps=1000, log_steps=100, predictor=None, last_test='np'):
    # Generic mini-batch training loop with TensorBoard logging and a final
    # test-set evaluation.
    # NOTE(review): the feed dicts below reference the *global* placeholder
    # `keep_prob`, so this function only works when that name is in scope.
    with tf.Session() as session:
        summaries = tf.merge_all_summaries()
        # Start each run with a clean log directory.
        if tf.gfile.Exists(log_dir):
            tf.gfile.DeleteRecursively(log_dir)
        train_writer = tf.train.SummaryWriter(log_dir + '/train', session.graph)
        test_writer = tf.train.SummaryWriter(log_dir + '/test')
        session.run(tf.initialize_all_variables())
        # Shuffle once so mini-batches are drawn in a random order.
        shuffle_train = np.random.permutation(train_dataset.shape[0])
        train_dataset = train_dataset[shuffle_train]
        train_labels = train_labels[shuffle_train]
        for step in range(num_steps):
            # Pick an offset within the training data, which has been randomized.
            # Note: we could use better randomization across epochs.
            offset = ((step * batch_size) % (train_labels.shape[0] - batch_size))
            # Generate a minibatch.
            batch_data = train_dataset[offset:(offset + batch_size)]
            batch_labels = train_labels[offset:(offset + batch_size)]
            # Prepare a dictionary telling the session where to feed the minibatch.
            # The key of the dictionary is the placeholder node of the graph to be fed,
            # and the value is the numpy array to feed to it.
            feed_dict = {
                tf_batch_data : batch_data,
                tf_batch_labels : batch_labels,
                keep_prob: 0.5
            }
            if step % test_steps == 0:
                # Full-trace run: also report the train accuracy.
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                _, acc, summary = session.run([train_tensor, accuracy, summaries],
                                              feed_dict=feed_dict,
                                              run_metadata=run_metadata,
                                              options=run_options)
                print("Train accuracy at step %s: %.1f%%" % (step, acc))
                train_writer.add_run_metadata(run_metadata, "step%d" % step)
                train_writer.add_summary(summary, step)
            elif step % log_steps == 0:
                # Full-trace run: log summaries only.
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                _, summary = session.run([train_tensor, summaries],
                                         feed_dict=feed_dict,
                                         run_metadata=run_metadata,
                                         options=run_options)
                train_writer.add_run_metadata(run_metadata, "step%d" % step)
                train_writer.add_summary(summary, step)
            else:
                # NOTE(review): `run_options` here is the object left over from
                # a previous logging iteration (step 0 is always a test step, so
                # the name is bound) -- full tracing is therefore enabled on
                # every plain training step as well, which is probably
                # unintended and slows training down.
                session.run(train_tensor, feed_dict=feed_dict, options=run_options)
        feed_dict = {
            tf_batch_data : test_dataset,
            tf_batch_labels : test_labels,
            keep_prob: 1
        }
        if last_test == 'splitted':
            # Evaluate the test set in chunks of ~16 samples to bound memory.
            predictions = np.empty([0,10])
            for batch in np.array_split(test_dataset, test_dataset.shape[0] / 16):
                tmp = session.run(predictor, feed_dict={
                    tf_batch_data: batch,
                    # batch_labels: np.array([]),
                    keep_prob: 1.0
                })
                predictions = np.vstack((predictions, tmp))
            acc = accuracy_fn(predictions, test_labels)
        elif accuracy is not None:
            acc = session.run(accuracy, feed_dict=feed_dict)
        print("Test accuracy: %.3f%%" % acc)

# # MNIST

# +
def convnet(inputs, keep_prob):
    # LeNet-style stack: 2 x (conv 5x5 + max-pool), then FC-1024 + dropout +
    # FC-10.  Returns both the raw logits and the softmax predictor.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
                        weights_regularizer=slim.l2_regularizer(0.0005)):
        net = slim.conv2d(inputs, 32, [5, 5], scope='conv1')
        net = slim.max_pool2d(net, [2, 2], scope='pool1')
        net = slim.conv2d(net, 64, [5, 5], scope='conv2')
        net = slim.max_pool2d(net, [2, 2], scope='pool2')
        # net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
        # net = slim.max_pool2d(net, [2, 2], scope='pool2')
        # net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
        # net = slim.max_pool2d(net, [2, 2], scope='pool3')
        # net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
        # net = slim.max_pool2d(net, [2, 2], scope='pool4')
        # net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
        # net = slim.max_pool2d(net, [2, 2], scope='pool5')
        net = slim.flatten(net)
        net = slim.fully_connected(net, 1024, scope='fc6')
        net = slim.dropout(net, keep_prob, scope='dropout6')
        # net = slim.fully_connected(net, 4096, scope='fc7')
        # net = slim.dropout(net, 0.5, scope='dropout7')
        net = slim.fully_connected(net, 10, activation_fn=None, scope='fc8')
        predictor = slim.softmax(net)
        return net, predictor


image_size = 28
num_labels = 10
num_channels = 1

g = tf.Graph()
with g.as_default():
    batch_data = tf.placeholder(tf.float32, shape=(None, image_size, image_size, num_channels))
    batch_labels = tf.placeholder(tf.float32, shape=(None, num_labels))
    keep_prob = tf.placeholder(tf.float32)
    last_layer, predictor = convnet(batch_data, keep_prob)
    print(last_layer)
    print(batch_labels)
    slim.losses.softmax_cross_entropy(last_layer, batch_labels)
    total_loss = slim.losses.get_total_loss()
    tf.scalar_summary('losses/total_loss', total_loss)
    # NOTE(review): `accuracy` is only assigned a few lines below; on a fresh
    # kernel this line raises NameError (it only "works" if a stale `accuracy`
    # survives from an earlier cell execution).
    tf.scalar_summary('accuracy', accuracy)
    optimizer = tf.train.AdamOptimizer()
    train_tensor = slim.learning.create_train_op(total_loss, optimizer)
    correct_prediction = tf.equal(tf.argmax(predictor,1), tf.argmax(batch_labels,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    train_model(train_dataset=train_set, train_labels=mnist.train.labels,
                test_dataset=test_set, test_labels=mnist.test.labels,
                train_tensor=train_tensor, accuracy=accuracy,
                last_test='splitted', predictor=predictor,
                tf_batch_data=batch_data,
                log_dir='mnist_conv_max_conv_max_flatten_fc_d_sm_autoADAM',
                tf_batch_labels=batch_labels,
                batch_size=16, num_steps=20000, test_steps=100)
# -

# * [conv(5,32)-max(2,2)]*1 - flatten - 10, adams, dropout, 20k steps, l2=5e-3: 2.7%
# * [conv(5,32)-max(2,2)]*1 - flatten - fully_1024 - 10, adams, dropout, 20k steps, l2=5e-3: 1.8%

# # CIFAR - 10

# +
def unpickle(file):
    # CIFAR-10 batches were pickled under Python 2, hence the latin-1 encoding.
    # NOTE(review): `dict` shadows the builtin within this function.
    fo = open(file, 'rb')
    dict = pickle.load(fo, encoding='latin-1')
    fo.close()
    return dict


def from_flat_to_3d(image):
    # 3072-element row -> (32, 32, 3): three consecutive 1024-pixel planes.
    # print(image.shape)
    return np.dstack((image[0:1024].reshape(32,32),
                      image[1024:2048].reshape(32,32),
                      image[2048:3072].reshape(32,32)))


cifar_test = unpickle('cifar-10-batches-py/test_batch')
cifar_test['data_3d'] = np.array([from_flat_to_3d(image) for image in cifar_test['data']])

# Concatenate the five training batches into one dict.
cifar = unpickle('cifar-10-batches-py/data_batch_1')
for i in range(2, 6):
    tmp = unpickle('cifar-10-batches-py/data_batch_' + str(i))
    cifar['data'] = np.vstack((cifar['data'], tmp['data']))
    cifar['labels'] = np.concatenate((cifar['labels'], tmp['labels']))
cifar['data_3d'] = np.array([from_flat_to_3d(image) for image in cifar['data']])

# cifar['data_bw'] = (cifar['data'][:,0:1024] + cifar['data'][:,1024:2048] + cifar['data'][:, 2048:3072]) / 3
# cifar_test['data_bw'] = (cifar_test['data'][:,0:1024] +
# (continuation of the commented-out grayscale conversion above)
# cifar_test['data'][:,1024:2048] + cifar_test['data'][:, 2048:3072]) / 3

# One-hot encode the integer class labels for the softmax loss.
enc = OneHotEncoder()
cifar['labels_oh'] = enc.fit_transform(cifar['labels'].reshape(-1, 1))
cifar['labels_oh'] = cifar['labels_oh'].toarray()

cifar_test['labels'] = np.array(cifar_test['labels'])
cifar_test['labels_oh'] = enc.fit_transform(cifar_test['labels'].reshape(-1, 1))
cifar_test['labels_oh'] = cifar_test['labels_oh'].toarray()

# pca = PCA(whiten=True)
# cifar['data_bw_whitened'] = pca.fit_transform(cifar['data_bw'])
# cifar_test['data_bw_whitened'] = pca.fit_transform(cifar_test['data_bw'])
# -

cifar['data_3d'].shape

# +
def convnet(inputs, keep_prob, is_training):
    # CIFAR variant of the MNIST net: adds local response normalisation around
    # the conv blocks, a second FC-1024 layer, and per-layer activation
    # summaries.  Returns (logits, softmax predictor).
    # NOTE(review): `is_training` is accepted but never used in the body.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
                        weights_regularizer=slim.l2_regularizer(0.0005)):
        net = slim.conv2d(inputs, 32, [5, 5], scope='conv1')
        variable_summaries('conv1', net)
        net = slim.max_pool2d(net, [2, 2], scope='pool1')
        net = tf.nn.lrn(net, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
        net = slim.conv2d(net, 64, [5, 5], scope='conv2')
        variable_summaries('conv2', net)
        net = tf.nn.lrn(net, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
        net = slim.max_pool2d(net, [2, 2], scope='pool2')
        # net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
        # net = slim.max_pool2d(net, [2, 2], scope='pool2')
        # net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
        # net = slim.max_pool2d(net, [2, 2], scope='pool3')
        # net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
        # net = slim.max_pool2d(net, [2, 2], scope='pool4')
        # net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
        # net = slim.max_pool2d(net, [2, 2], scope='pool5')
        net = slim.flatten(net)
        net = slim.fully_connected(net, 1024, scope='fc6')
        variable_summaries('fc1', net)
        net = slim.dropout(net, keep_prob, scope='dropout6')
        net = slim.fully_connected(net, 1024, scope='fc7')
        variable_summaries('fc2', net)
        net = slim.dropout(net, keep_prob, scope='dropout7')
        net = slim.fully_connected(net, 10, activation_fn=None, scope='fc8')
        predictor = slim.softmax(net)
        return net, predictor


image_size = 32
num_labels = 10
num_channels = 3

g = tf.Graph()
with g.as_default():
    batch_data = tf.placeholder(tf.float32, shape=(None, image_size, image_size, num_channels), name='batch_data')
    batch_labels = tf.placeholder(tf.float32, shape=(None, num_labels), name='batch_labels')
    keep_prob = tf.placeholder(tf.float32)
    is_training = tf.placeholder(tf.bool)
    last_layer, predictor = convnet(batch_data, keep_prob, is_training)
    print(last_layer)
    print(batch_labels)
    slim.losses.softmax_cross_entropy(last_layer, batch_labels)
    total_loss = slim.losses.get_total_loss()
    tf.scalar_summary('losses/total_loss', total_loss)
    optimizer = tf.train.AdamOptimizer(1e-4)
    train_tensor = slim.learning.create_train_op(total_loss, optimizer)
    correct_prediction = tf.equal(tf.argmax(predictor,1), tf.argmax(batch_labels,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.scalar_summary('accuracy', accuracy)
    train_model(train_dataset=cifar['data_3d'], train_labels=cifar['labels_oh'],
                test_dataset=cifar_test['data_3d'], test_labels=cifar_test['labels_oh'],
                train_tensor=train_tensor, accuracy=accuracy,
                last_test='splitted', predictor=predictor,
                tf_batch_data=batch_data, tf_batch_labels=batch_labels,
                log_dir='cifar_conv_max_lrn_conv_lrn_max_flatten_fc_d_fc_d_sm_autoADAM',
                batch_size=32, num_steps=30000, test_steps=1000)
# -

# * cifar_conv_lrn_max_flatten_fc_d_fc_d_sm_autoADAMTest accuracy: 66.770%
# * cifar_conv_max_flatten_fc_d_fc_d_sm_autoADAMTest accuracy: 66.480%
# * cifar_conv_max_conv_max_flatten_fc_d_fc_d_sm_autoADAM accuracy: 73.090%
# * cifar_conv_max_lrn_conv_lrn_max_flatten_fc_d_fc_d_sm_autoADAM: 74.040%

# NOTE(review): these presumably intended `100 - <accuracy>` (error rate);
# `1 - 66.77` etc. prints negative nonsense -- verify intent.
print(1 - 66.77)
print(1 - 66.48)
print(1 - 73.09)

# NOTE(review): scratch evaluation cell -- `session` is not defined at this
# scope (it only exists inside train_model), so this cell fails as written.
predictions = np.empty([0,10])
for batch in np.array_split(cifar_test['data_3d'], cifar_test['data_3d'].shape[0] / 16):
    tmp = session.run(predictor, feed_dict={
        tf_batch_data: batch,
        # batch_labels: np.array([]),
        keep_prob: 1.0
    })
    predictions = np.vstack((predictions, tmp))
# NOTE(review): accuracy_fn argmaxes axis 1 of its labels argument, but
# cifar_test['labels'] is the 1-D integer-label array (the one-hot version is
# cifar_test['labels_oh']) -- confirm which was intended.
acc = accuracy_fn(predictions, cifar_test['labels'])

# * [conv(5,32)-max(2,2)]*2 - flatten - 10, adams, dropout, 20k steps, l2=5e-3: 52.4%
# * [conv(5,32)-max(2,2)]*2 - flatten - fc(1024) - 10, adams, dropout, 20k steps, l2=5e-3: 66.4%

# NOTE(review): broken scratch cell -- `session` is undefined here too, and it
# overwrites `predictions` with the result of running itself.
for batch in np.array_split(cifar_test['data_3d'], cifar_test['data_3d'].shape[0] / 16):
    predictions = session.run(predictions, feed_dict={
        batch_data: batch,
        batch_labels: np.array([]),
        keep_prob: 1.0
    })

# NOTE(review): dangling leftover -- binds the np.array *function* to `a`.
a = np.array
.ipynb_checkpoints/convnet_experiments-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <p style="border: 1px solid #e7692c; border-left: 15px solid #e7692c; padding: 10px; text-align:justify;"> # <strong style="color: #e7692c">Tip.</strong> <a style="color: #000000;" href="https://nbviewer.jupyter.org/github/PacktPublishing/Hands-On-Computer-Vision-with-TensorFlow-2/blob/master/Chapter06/ch6_nb2_denoise_with_autoencoders.ipynb" title="View with Jupyter Online">Click here to view this notebook on <code>nbviewer.jupyter.org</code></a>. # <br/>These notebooks are better read there, as Github default viewer ignores some of the formatting and interactive content. # </p> # <table style="font-size: 1em; padding: 0; margin: 0;"> # <tr style="vertical-align: top; padding: 0; margin: 0;background-color: #ffffff"> # <td style="vertical-align: top; padding: 0; margin: 0; padding-right: 15px;"> # <p style="background: #363636; color:#ffffff; text-align:justify; padding: 10px 25px;"> # <strong style="font-size: 1.0em;"><span style="font-size: 1.2em;"><span style="color: #e7692c;">Hands-on</span> Computer Vision with TensorFlow 2</span><br/>by <em><NAME></em> & <em><NAME></em> (Packt Pub.)</strong><br/><br/> # <strong>> Chapter 6: Enhancing and Segmenting Images </strong><br/> # </p> # # <h1 style="width: 100%; text-align: left; padding: 0px 25px;"><small style="color: #e7692c;"> # Notebook 2:</small><br/>Denoising with Auto-Encoders</h1> # <br/> # <p style="border-left: 15px solid #363636; text-align:justify; padding: 0 10px;"> # Reusing the simple fully-connected auto-encoder implemented in the previous notebook, we will now demonstrate how such network can be used to <strong>recover corrupted/noisy images</strong>. 
# </p> # <br/> # <p style="border-left: 15px solid #e7692c; padding: 0 10px; text-align:justify;"> # <strong style="color: #e7692c;">Tip.</strong> The notebooks shared on this git repository illustrate some notions from the book "<em><strong>Hands-on Computer Vision with TensorFlow 2</strong></em>" written by <NAME> and <NAME>, published by Packt. If you enjoyed the insights shared here, <a href="https://www.amazon.com/Hands-Computer-Vision-TensorFlow-processing/dp/1788830644" title="Learn more about the book!"><strong>please consider acquiring the book!</strong></a> # <br/><br/> # The book provides further guidance for those eager to learn about computer vision and to harness the power of TensorFlow 2 and Keras to build efficient recognition systems for object detection, segmentation, video processing, smartphone applications, and more.</p> # </td> # <td style="vertical-align: top; padding: 0; margin: 0; width: 280px;"> # <a href="https://www.amazon.com/Hands-Computer-Vision-TensorFlow-processing/dp/1788830644" title="Learn more about the book!" target="_blank"> # <img src="../banner_images/book_cover.png" width=280> # </a> # <p style="background: #e7692c; color:#ffffff; padding: 10px; text-align:justify;"><strong>Leverage deep learning to create powerful image processing apps with TensorFlow 2 and Keras. <br/></strong>Get the book for more insights!</p> # <ul style="height: 32px; white-space: nowrap; text-align: center; margin: 0px; padding: 0px; padding-top: 10px;"> # <li style="display: block;height: 100%;float: left;vertical-align: middle;margin: 0 25px 10px;padding: 0px;"> # <a href="https://www.amazon.com/Hands-Computer-Vision-TensorFlow-processing/dp/1788830644" title="Get the book on Amazon (paperback or Kindle version)!" 
target="_blank"> # <img style="vertical-align: middle; max-width: 72px; max-height: 32px;" src="../banner_images/logo_amazon.png" width="75px"> # </a> # </li> # <li style="display: inline-block;height: 100%;vertical-align: middle;float: right;margin: -5px 25px 10px;padding: 0px;"> # <a href="https://www.packtpub.com/application-development/hands-computer-vision-tensorflow-2" title="Get your Packt book (paperback, PDF, ePUB, or MOBI version)!" target="_blank"> # <img style="vertical-align: middle; max-width: 72px; max-height: 32px;" src="../banner_images/logo_packt.png" width="75px"> # </a> # </li> # </ul> # </td> # </tr> # </table> # + import tensorflow as tf import os import math import numpy as np from matplotlib import pyplot as plt # Choosing which GPU this notebook can access # (useful when running multiple experiments in parallel, on different GPUs): os.environ["CUDA_VISIBLE_DEVICES"]= "0" # Some hyper-parameters: batch_size = 32 # Images per batch (reduce/increase according to the machine's capability) num_epochs = 50 # Max number of training epochs random_seed = 42 # Seed for some random operations, for reproducibility # - # ## Preparing the Dataset # Once again, we will first demonstrate on the [MNIST](http://yann.lecun.com/exdb/mnist) dataset[$^1$](#ref). Though we could use again the efficient `tensorflow-datasets` module to instantiate and prepare the data (c.f. previous notebook), we will stick this time to using the MNIST dataset as `numpy` array provided by Keras, simply to illustrate some of the Keras utilities to handle input data. 
# + (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() img_height, img_width = x_train.shape[1:] img_channels = 1 input_shape = (img_height, img_width, img_channels) # For the following experiments, we will however not use the class labels: del y_train del y_test # Normalizing the image data: x_train, x_test = x_train / 255.0, x_test / 255.0 # Even though we will use again a basic fully-coonected network, we need to preserve the image format # of the sample this time, to use the Keras image pre-processing tool to add noise. Therefore, # the augmented images will be flattened by the network itself via a initial Flatten() layer. x_train = x_train.reshape((-1, img_height, img_width, img_channels)) x_test = x_test.reshape((-1, img_height, img_width, img_channels)) print("Shape of training set: {}".format(x_train.shape)) print("Shape of testing set: {}".format(x_test.shape)) # Setting some variables: batch_size = 64 train_steps_per_epoch = len(x_train) // batch_size val_steps_per_epoch = len(x_test) // batch_size # - # ## Building an Auto-Encoder for Image Denoising # As mentioned in the book, denoising auto-encoders are normal auto-encoders, but **fed with corrupted samples** while still trying to recover the original images. Therefore, we need to instantiate our network, and build a method to corrupt our images. 
# ### Simple auto-encoder
# For this experiment, we will reuse the exact same architecture as in the previous notebook:

code_size = 32  # dimensionality of the latent code (the bottleneck layer)

# +
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input, Flatten, Reshape

inputs = Input(shape=input_shape, name='input')
# As previously mentioned, the image flattening is done here:
inputs_flat = Flatten()(inputs)

# Encoding layers (784 -> 128 -> 64 -> code_size):
enc_1 = Dense(128, activation='relu', name='enc_dense1')(inputs_flat)
enc_2 = Dense(64, activation='relu', name='enc_dense2')(enc_1)
code = Dense(code_size, activation='relu', name='enc_dense3')(enc_2)

# Decoding layers (mirror of the encoder, code_size -> 64 -> 128 -> 784):
dec_1 = Dense(64, activation='relu', name='dec_dense1')(code)
dec_2 = Dense(128, activation='relu', name='dec_dense2')(dec_1)
decoded = Dense(np.prod(input_shape), activation='sigmoid', name='dec_dense3')(dec_2)
# note: we use a sigmoid for the last activation, as we want the output values
# to be between 0 and 1, like the input ones.

# Finally, we reshape the decoded data so it has the same shape as the input samples:
decoded_reshape = Reshape(input_shape)(decoded)

# Auto-encoder model:
autoencoder = Model(inputs, decoded_reshape)
autoencoder.summary()
# -

# ### Generator of noisy images
# We now set up our input pipeline, adding a pre-processing step to corrupt our images. We will use this simple function for that:

def add_noise(img, min_noise_factor=.3, max_noise_factor=.6):
    """
    Add some random noise to an image, from a uniform distribution.
    :param img:               Image to corrupt
    :param min_noise_factor:  Min. value for the noise random average amplitude
    :param max_noise_factor:  Max. value for the noise random average amplitude
    :return:                  Corrupted image
    """
    # Generating and applying noise to image: draw a random amplitude, then
    # add Gaussian noise of that scale to every pixel.
    noise_factor = np.random.uniform(min_noise_factor, max_noise_factor)
    noise = np.random.normal(loc=0.0, scale=noise_factor, size=img.shape)
    img_noisy = img + noise
    # Making sure the image values are still in the proper range:
    img_noisy = np.clip(img_noisy, 0., 1.)
    return img_noisy

# Let us have a look how this affects our images. To visualize some results, we first implement the following method to draw grids of results. This will come in handy later.

# +
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec


def plot_image_grid(images, titles=None, figure=None, grayscale=False, transpose=False):
    """
    Plot a grid of n x m images.
    :param images:     Images in a n x m array
    :param titles:     (opt.) List of m titles for each image column
    :param figure:     (opt.) Pyplot figure (if None, will be created)
    :param grayscale:  (opt.) Flag to draw the images in grayscale
    :param transpose:  (opt.) Flag to transpose the grid
    :return:           Pyplot figure filled with the images
    """
    num_cols, num_rows = len(images), len(images[0])
    img_ratio = images[0][0].shape[1] / images[0][0].shape[0]
    # Grid geometry depends on whether rows and columns are swapped.
    if transpose:
        vert_grid_shape, hori_grid_shape = (1, num_rows), (num_cols, 1)
        figsize = (int(num_rows * 5 * img_ratio), num_cols * 5)
        wspace, hspace = 0.2, 0.
    else:
        vert_grid_shape, hori_grid_shape = (num_rows, 1), (1, num_cols)
        figsize = (int(num_cols * 5 * img_ratio), num_rows * 5)
        hspace, wspace = 0.2, 0.
    if figure is None:
        figure = plt.figure(figsize=figsize)
    imshow_params = {'cmap': plt.get_cmap('gray')} if grayscale else {}
    # One outer GridSpec per column (or row when transposed), nested per image.
    grid_spec = gridspec.GridSpec(*hori_grid_shape, wspace=0, hspace=0)
    for j in range(num_cols):
        grid_spec_j = gridspec.GridSpecFromSubplotSpec(
            *vert_grid_shape, subplot_spec=grid_spec[j], wspace=wspace, hspace=hspace)
        for i in range(num_rows):
            ax_img = figure.add_subplot(grid_spec_j[i])
            # Hide the axes ticks without removing the axes themselves:
            # ax_img.axis('off')
            ax_img.set_yticks([])
            ax_img.set_xticks([])
            if titles is not None:
                if transpose:
                    ax_img.set_ylabel(titles[j], fontsize=25)
                else:
                    ax_img.set_title(titles[j], fontsize=15)
            ax_img.imshow(images[j][i], **imshow_params)
    figure.tight_layout()
    return figure
# -

# **Note:** We will save this method in [plot_utils.py](./plot_utils.py) and use it in the next notebooks.
# +
num_show = 12
random_image_indices = np.random.choice(len(x_test), size=num_show)
orig_samples = x_test[random_image_indices]
noisy_samples = add_noise(orig_samples)

fig = plot_image_grid([np.squeeze(orig_samples), np.squeeze(noisy_samples)],
                      grayscale=True, transpose=True)
fig.show()
# -

# Our corrupting function is doing its job, adding some _salt-and-pepper_ noise. Some digits are even hard to recognize... Let us see how our denoising model will manage...
#
# We now have a choice. We could simply apply our noise function to the whole training set (`x_noisy = add_noise(x_train)`) and pass it to our model for training (`autoencoder.fit(x_noisy, x_train)`). While straightforward, this solution has one drawback: each original image has only one noisy version. Epoch after epoch, the network will receive the same noisy images. There is thus a risk that it may overfit some of the data.
#
# Another solution would be to corrupt each batch of images at each iteration, thus creating different corrupted versions each time. While a bit heavier, this solution provides our network with new samples each time, making it more robust. We will therefore opt for this solution.
#
# To implement it, we will use a _**generator**_, a Python object yielding new elements each time it is called. Keras models can be trained directly on datasets (`model.fit(...)`), or on generators (`model.fit_generator(...)`). Though less advanced than TensorFlow `tf.data.Dataset`, generators share some common advantages (e.g., for datasets too big to be loaded at once, a generator can be used to load only the images for the next batches).
#
# Keras offers several pre-implemented generators, to iterate over image folders, image arrays, etc. Here, we will use `ImageDataGenerator`, which can iterate over numpy arrays to generate batches. This generator can also be configured to pre-process each batch before yielding it.
# This is exactly what we need:

# +
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# We define our Keras generator, passing our noise function as pre-processing step:
train_datagen = ImageDataGenerator(preprocessing_function=add_noise)

# Then we pass our dataset to the generator and specify how the yielded batch should be
# (batch size, shuffling or not, etc.).
train_generator = train_datagen.flow(x_train, x_train, batch_size=batch_size, shuffle=True)
train_generator
# -

# Our input pipeline is ready, though we need to prepare the validation data too.
#
# To be able to consistently compare metrics from one epoch to another, we augment the validation images with noise only once, and save the resulting images so they can be reused for each epoch:

# +
x_test_noisy = add_noise(x_test)
# This validation generator deliberately applies NO pre-processing: the images in
# `x_test_noisy` are already corrupted and must stay identical across epochs.
val_datagen = tf.keras.preprocessing.image.ImageDataGenerator()
# Fix: flow through `val_datagen` (identity generator), not `train_datagen` —
# the latter would add a second layer of noise to the validation images on
# every epoch, making the validation metrics inconsistent and pessimistic.
val_generator = val_datagen.flow(x_test_noisy, x_test, batch_size=batch_size, shuffle=False)
# -

# ## Training and Monitoring

# As in previous experiments, we will set up some monitoring tools, then train our model.

# ### PSNR as custom metric

# We reuse PSNR, to evaluate how well our model recovers the corrupted samples, compared to the original ones:

# +
import functools

psnr_metric = functools.partial(tf.image.psnr, max_val=1.)
psnr_metric.__name__ = 'psnr'  # Keras uses `__name__` to label the metric in the training logs
# -

# ### Custom callback to plot result grids

# To keep experimenting with Keras and TensorFlow, we will implement another **custom callback**. Instead of waiting for the end of the training to visualize some predicted images, would it not be better to **plot some result grids after each epoch** for instance? Even better: we could reuse the same input/target images each time, so we can compare the predicted versions from one epoch to another!
#
# The following custom callback does exactly that. At the end of each epoch, it feeds the model with some pre-selected images, then uses `matplotlib` to draw a result grid.
# It then converts the `pyplot.Figure` into a `tensorflow.Summary` which can be saved by TensorFlow and read by Tensorboard for display (image summaries can be seen in [http://localhost:6006/#images](http://localhost:6006/#images) once Tensorboard is started).
#
# This is what will appear in Tensorboard (with the slider to visualize other steps/epochs):
# ![Tensorboard screenshot](./notebook_images/tensorboard_result_grid.png)

# +
import io


def figure_to_rgb_array(fig):
    """
    Render a figure and return its PNG-encoded content.

    NOTE: despite the historical name, this returns the PNG *byte string* of the
    rendered figure, not a decoded RGB array — decoding into a tensor is done by
    the caller with `tf.image.decode_png()`. (Name kept for compatibility.)

    :param fig: PyPlot Figure
    :return: PNG-encoded bytes of the rendered figure
    """
    figure_buffer = io.BytesIO()
    fig.savefig(figure_buffer, format='png')
    figure_buffer.seek(0)
    figure_string = figure_buffer.getvalue()
    return figure_string


def figure_to_summary(fig, name, step):
    """
    Convert a figure into a TF image summary and write it.

    Must be called within a `tf.summary.SummaryWriter.as_default()` context,
    otherwise `tf.summary.image` has no writer to record into.

    :param fig: PyPlot Figure to log
    :param name: Name/tag for the Tensorboard summary
    :param step: Step (e.g. epoch number) associated with the summary
    :return: Return value of `tf.summary.image` (True if written successfully)
    """
    # Transform figure into PNG buffer:
    figure_string = figure_to_rgb_array(fig)

    # Transform PNG buffer into image tensor:
    figure_tensor = tf.image.decode_png(figure_string, channels=4)
    figure_tensor = tf.expand_dims(figure_tensor, 0)  # adding batch dimension

    # Record the image tensor as a summary:
    figure_summary = tf.summary.image(name, figure_tensor, step)
    return figure_summary


class TensorBoardImageGridCallback(tf.keras.callbacks.Callback):
    """
    Keras callback for generative models, to draw grids of
    input/predicted/target images into Tensorboard every epoch.
    """

    def __init__(self, log_dir, input_images, target_images=None, tag='images',
                 figsize=(10, 10), dpi=300, grayscale=False, transpose=False,
                 preprocess_fn=None):
        """
        Initialize the Callback.
        :param log_dir: Folder to write the image summaries into
        :param input_images: List of input images to use for the grid
        :param target_images: (opt.) List of target images for the grid
        :param tag: Tag to name the Tensorboard summary
        :param figsize: Pyplot figure size for the grid
        :param dpi: Pyplot figure DPI
        :param grayscale: Flag to plot the images as grayscale
        :param transpose: Flag to transpose the image grid
        :param preprocess_fn: (opt.) Function to pre-process the
                              input/predicted/target image lists before plotting
        """
        super().__init__()
        self.summary_writer = tf.summary.create_file_writer(log_dir)

        self.input_images, self.target_images = input_images, target_images
        self.tag = tag
        # (attribute renamed from `postprocess_fn` to match the parameter name)
        self.preprocess_fn = preprocess_fn

        self.image_titles = ['images', 'predicted']
        if self.target_images is not None:
            self.image_titles.append('ground-truth')

        # Initializing the figure (reused every epoch instead of re-allocating):
        self.fig = plt.figure(num=0, figsize=figsize, dpi=dpi)
        self.grayscale = grayscale
        self.transpose = transpose

    def on_epoch_end(self, epoch, logs=None):
        """
        Plot into Tensorboard a grid of image results.
        :param epoch: Epoch num
        :param logs: (unused) Dictionary of loss/metrics value for the epoch
        """
        # (`logs=None` instead of the previous mutable `{}` default — mutable
        # default arguments are shared between calls in Python.)

        # Get predictions with current model:
        predicted_images = self.model.predict_on_batch(self.input_images)
        if self.preprocess_fn is not None:
            input_images, predicted_images, target_images = self.preprocess_fn(
                self.input_images, predicted_images, self.target_images)
        else:
            input_images, target_images = self.input_images, self.target_images

        # Fill figure with images:
        grid_imgs = [input_images, predicted_images]
        if target_images is not None:
            grid_imgs.append(target_images)
        self.fig.clf()
        self.fig = plot_image_grid(grid_imgs, titles=self.image_titles,
                                   figure=self.fig,
                                   grayscale=self.grayscale, transpose=self.transpose)

        # Transform into a summary and log it (the writer context is what makes
        # `tf.summary.image` inside `figure_to_summary` actually record):
        with self.summary_writer.as_default():
            figure_to_summary(self.fig, self.tag, epoch)
            self.summary_writer.flush()

    def on_train_end(self, logs=None):
        """
        Close the resources used to plot the grids.
        :param logs: (unused) Dictionary of loss/metrics value for the epoch
        """
        self.summary_writer.close()
        plt.close(self.fig)
# -

# ### Training and results

# It is now just a matter of launching the training, and monitoring it here or in Tensorboard:

# +
import collections

from keras_custom_callbacks import SimpleLogCallback

model_dir = os.path.join('.', 'models', 'ae_denoising_mnist')

metrics_to_print = collections.OrderedDict([("loss", "loss"),
                                            ("v-loss", "val_loss"),
                                            ("psnr", "psnr"),
                                            ("v-psnr", "val_psnr")])

callbacks = [
    # Callback to interrupt the training if the validation loss/metrics stops improving for some epochs:
    tf.keras.callbacks.EarlyStopping(patience=5, monitor='val_loss', restore_best_weights=True),
    # Callback to log the graph, losses and metrics into TensorBoard:
    tf.keras.callbacks.TensorBoard(log_dir=model_dir, histogram_freq=0, write_graph=True),
    # Callback to simply log metrics at the end of each epoch (saving space compared to verbose=1/2):
    SimpleLogCallback(metrics_to_print, num_epochs=num_epochs, log_frequency=1),
    # Callback to log some validation results as image grids into TensorBoard:
    TensorBoardImageGridCallback(
        log_dir=model_dir, input_images=noisy_samples, target_images=orig_samples,
        tag='ae_results', figsize=(len(noisy_samples) * 3, 3 * 3), grayscale=True, transpose=True,
        preprocess_fn=lambda img, pred, gt: (
            # Squeezing the images from H x W x 1 to H x W, otherwise Pyplot complains:
            np.squeeze(img, -1), np.squeeze(pred, -1), np.squeeze(gt, -1)))
]

autoencoder.compile(optimizer='adam', loss='binary_crossentropy', metrics=[psnr_metric])

# `model.fit_generator()` works quite similarly to `model.fit()`
# used with tf.data inputs (c.f. notebooks for Chapter 4).
# In other words, we need to specify the number of batches ("steps") per epoch,
# for Keras to keep track.
history = autoencoder.fit_generator(
    train_generator, steps_per_epoch=train_steps_per_epoch, epochs=num_epochs,
    validation_data=val_generator, validation_steps=val_steps_per_epoch,
    verbose=0, callbacks=callbacks)

# +
fig, ax = plt.subplots(2, 2, figsize=(10, 5), sharex='col')
ax[0, 0].set_title("loss")
ax[0, 1].set_title("val-loss")
ax[1, 0].set_title("psnr")
ax[1, 1].set_title("val-psnr")

ax[0, 0].plot(history.history['loss'])
ax[0, 1].plot(history.history['val_loss'])
ax[1, 0].plot(history.history['psnr'])
ax[1, 1].plot(history.history['val_psnr'])
# -

# Though we already have results drawn in Tensorboard, let us add one last image grid to this notebook:

# +
predicted_samples = autoencoder.predict_on_batch(noisy_samples)

fig = plot_image_grid([np.squeeze(noisy_samples), np.squeeze(predicted_samples), np.squeeze(orig_samples)],
                      titles=['image', 'predicted', 'ground-truth'],
                      grayscale=True, transpose=True)
fig.show()
# -

# ## Conclusion

# As we can see, though the denoising is not perfect and information is sometimes lost, our simplistic auto-encoder is doing rather well. In the next notebook, we will implement a more advanced, convolutional network for the more complex problem of image super-resolution.

# <a id="ref"></a>
# #### References
# 1. LeCun, Y., Cortes, C., Burges, C.J.C., 2010. MNIST handwritten digit database. AT&T Labs [Online]. Available: http://yann.lecun.com/exdb/mnist
Chapter06/ch6_nb2_denoise_with_autoencoders.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Noisy Dataset Generation
#
# Pipeline: raw mp3 -> clean wav -> noise-corrupted wav -> pickled
# (clean, dirty) spectrogram pairs for denoising experiments.

import os
import numpy as np
import librosa
import soundfile as sf
from pydub import AudioSegment

dataset_root = os.path.abspath('./noise_dataset')
raw_root = f'{dataset_root}/raw'
clean_root = f'{dataset_root}/clean'
dirty_root = f'{dataset_root}/dirty'

# First we need to convert the raw mp3 files into wav files

file_limit = 10000  # cap on the number of files converted in one run
i = 0
for path in os.listdir(raw_root):
    src = f'{raw_root}/{path}'
    dst = f'{clean_root}/{path.replace(".mp3", ".wav")}'
    sound = AudioSegment.from_mp3(src)
    sound.export(dst, format="wav")
    i += 1
    if i % 1000 == 0:
        print('%d...' % i)
    if i >= file_limit:
        break

# Next we will add in some random noise to each of the samples

noise_mean = 0.005
noise_std = 0.004
i = 0
for path in os.listdir(clean_root):
    src = f'{clean_root}/{path}'
    dst = f'{dirty_root}/{path}'
    y, sr = librosa.load(src)
    # One random positive noise amplitude per file, scaling white Gaussian noise:
    sf.write(dst, y + abs(np.random.normal(noise_mean, noise_std)) * np.random.randn(len(y)), sr)
    i += 1
    if i % 1000 == 0:
        print('%d...' % i)

# And finally we need to generate test and training spectrograms in pickle format

import pickle

i = 0
clean_specs = []
dirty_specs = []
for path in os.listdir(clean_root):
    src = f'{clean_root}/{path}'
    y, sr = librosa.load(src)
    D = librosa.amplitude_to_db(np.abs(librosa.stft(y)), ref=np.max)
    clean_specs.append(D)

    srcd = f'{dirty_root}/{path}'
    yd, srd = librosa.load(srcd)
    Dd = librosa.amplitude_to_db(np.abs(librosa.stft(yd)), ref=np.max)
    dirty_specs.append(Dd)

    i += 1
    if i % 100 == 0:
        print('%d...' % i)
    # Fix: removed the unconditional `break` left over from debugging — it made
    # the loop stop after the very first file, so the pickled dataset contained
    # a single spectrogram pair instead of the whole corpus (the %100 progress
    # print above could never even fire).

spec_ds = {
    'clean': clean_specs,
    'dirty': dirty_specs
}

# (file handle renamed from `io` to `f` to avoid shadowing the stdlib module name)
with open(f'{dataset_root}/spectrograms.bin', 'wb') as f:
    pickle.dump(spec_ds, f)
connor/Noisy Dataset Generation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Environment

# +
from __future__ import absolute_import
from __future__ import print_function

import os
import sys
import gym
from gym import spaces, logger
import numpy as np

# we need to import python modules from the $SUMO_HOME/tools directory
if 'SUMO_HOME' in os.environ:
    tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
    sys.path.append(tools)
else:
    sys.exit("please declare environment variable 'SUMO_HOME'")

from sumolib import checkBinary
import traci

# HARDCODE: intersection/road identifiers specific to the PoundSign scenario.
controlled_lights = [{'name': 'gneJ14', 'curr_phase': 0, 'num_phases': 5}]
uncontrolled_lights = [{'name': 'nw', 'curr_phase': 0, 'num_phases': 4},
                       {'name': 'se', 'curr_phase': 0, 'num_phases': 4},
                       {'name': 'sw', 'curr_phase': 0, 'num_phases': 4}]
important_roads = ['gneE16', 'gneE59', 'gneE13']
load_options = ["-c", "PoundSign/PoundSign.sumocfg",
                "--tripinfo-output", "tripinfo.xml",
                '--log', 'log.txt', "-t"]


class SumoEnv(gym.Env):
    """Gym environment wrapping a SUMO traffic simulation via TraCI.

    Actions select the phase of the single controlled traffic light;
    observations are (vehicle count, waiting count) pairs for each
    road in `important_roads`.
    """

    def __init__(self, steps_per_episode, render):
        """
        :param steps_per_episode: Number of simulation steps per episode
        :param render: If True, start the SUMO GUI binary instead of headless sumo
        """
        super(SumoEnv, self).__init__()
        self.steps_per_episode = steps_per_episode
        self.is_done = False
        self.current_step = 0
        self.reward_range = (-float('inf'), float('inf'))
        # HARDCODE: one action per phase of the controlled light.
        self.action_space = spaces.Discrete(5)
        # HARDCODE: 2 features (count, waiting) x 3 important roads = 6.
        self.observation_space = spaces.Box(low=0, high=float('inf'),
                                            shape=np.array([6]), dtype=np.float32)
        # Start connection with sumo:
        self.noguiBinary = checkBinary('sumo')
        self.guiBinary = checkBinary('sumo-gui')
        self.current_binary = self.guiBinary if render else self.noguiBinary
        traci.start([self.current_binary] + load_options)

    def reset(self):
        """Reload the scenario and return the initial observation."""
        traci.load(load_options)
        self.current_step = 0
        self.is_done = False
        return self._next_observation()

    def _next_observation(self):
        """Build the observation vector: [count, waiting] per important road."""
        obs = []
        wait_counts, road_counts = self._get_road_waiting_vehicle_count()
        for lane in important_roads:
            obs.append(road_counts[lane])
            obs.append(wait_counts[lane])
        return np.array(obs)

    def step(self, action):
        """Apply the traffic-light action, advance the simulation one step."""
        self._take_action(action)
        traci.simulationStep()
        self.current_step += 1
        obs = self._next_observation()
        reward = self._get_reward()
        if self.is_done:
            logger.warn("You are calling 'step()' even though this environment has already returned done = True. "
                        "You should always call 'reset()' once you receive 'done = True' "
                        "-- any further steps are undefined behavior.")
            reward = 0.0
        if self.current_step + 1 == self.steps_per_episode:
            self.is_done = True
        return obs, reward, self.is_done, {}

    def _get_reward(self):
        """Negative number of waiting vehicles on the important roads."""
        road_waiting_vehicles_dict, _ = self._get_road_waiting_vehicle_count()
        reward = 0.0
        for (road_id, num_vehicles) in road_waiting_vehicles_dict.items():
            if road_id in important_roads:
                reward -= num_vehicles
        return reward

    def _take_action(self, action):
        """Switch the controlled light to the requested phase (if it changed)."""
        if action != controlled_lights[0]['curr_phase']:
            controlled_lights[0]['curr_phase'] = action
            self._set_tl_phase(controlled_lights[0]['name'], action)

    def _get_road_waiting_vehicle_count(self):
        """Count vehicles (total, and waiting) per important road.

        :return: (wait_counts, road_counts) dicts keyed by road id
        """
        wait_counts = {'gneE16': 0, 'gneE59': 0, 'gneE13': 0}
        road_counts = {'gneE16': 0, 'gneE59': 0, 'gneE13': 0}
        vehicles = traci.vehicle.getIDList()
        for v in vehicles:
            road = traci.vehicle.getRoadID(v)
            if road in wait_counts:
                if traci.vehicle.getWaitingTime(v) > 0:
                    wait_counts[road] += 1
                road_counts[road] += 1
        return wait_counts, road_counts

    def _on_training_end(self):
        # Fix: removed the stray `super(self)` call — `super()` requires a type
        # as its first argument, so that line raised TypeError whenever the
        # environment was closed.
        traci.close()

    def _set_tl_phase(self, intersection_id, phase_id):
        """Set the phase of a traffic light through TraCI."""
        traci.trafficlight.setPhase(intersection_id, phase_id)

    def render(self, mode='human', close=False):
        # Switch to the GUI binary for subsequent restarts.
        self.current_binary = self.guiBinary

    def close(self):
        self._on_training_end()
# -

# Test the environment with an agent

# +
# Remove TF warnings in Stable baselines (may not be safe)
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

import traci
import sys
import gym
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv
from stable_baselines import PPO2, DQN
from env.SumoEnv import SumoEnv
from env.SumoEnv_Parallel import SumoEnv_Parallel
import time

# Fix: `try/finally` instead of a bare `except: traci.close()` — the bare
# except silently swallowed every error (including KeyboardInterrupt); now the
# TraCI connection is always closed AND failures still propagate.
try:
    num_proc = 1
    steps_per_episode = 1000
    num_episodes = 1
    if num_proc == 1:
        env = DummyVecEnv([lambda: SumoEnv(steps_per_episode, False)])
    else:
        # Fix: bind `i` as a default argument — with a plain `lambda: ...` every
        # worker would capture the same (final) value of `i` due to Python's
        # late-binding closures.
        env = SubprocVecEnv([lambda i=i: SumoEnv_Parallel(steps_per_episode, False, i)
                             for i in range(num_proc)],
                            start_method='forkserver')

    model = DQN('MlpPolicy', env, verbose=1)
    start = time.time()
    model.learn(total_timesteps=steps_per_episode * num_episodes)
    print(f'LEARNING TIME: {time.time() - start}')
    model.save('dqn_pound')
    print('done learning')
finally:
    traci.close()
# -

traci.close()

# +
# Test the trained agent
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv
from stable_baselines import PPO2
from env.SumoEnv import SumoEnv

steps_per_episode = 1000
saved_model = "ppo2_pound"

env = DummyVecEnv([lambda: SumoEnv(steps_per_episode, False)])  # wrap it
model = PPO2.load(saved_model)
obs = env.reset()
# while True:
#     action, _states = model.predict(obs, deterministic=True)
#     obs, rewards, dones, info = env.step(action)
#     env.render()

# +
# Launch simulation server (SUMO-gui)
current_binary = checkBinary('sumo-gui')
load_options = ["-c", "PoundSign/PoundSign.sumocfg",
                "--tripinfo-output", "tripinfo.xml",
                '--log', 'log.txt', "-t"]
traci.start([current_binary] + load_options)
traci.load(load_options)

# +
# Perform this code while simulator server (SUMO) is open
# Perform calculations here
traci.simulationStep()  # step simulation in time
# -

traci.close()  # Close TraCI connection to prevent error
individual_code/michael/sumo_gym/PoundSign_michael.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import time
import datetime
import argparse
import random
import os, sys
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torchvision.transforms as T
from torch.utils.data import Dataset
from matplotlib.ticker import NullLocator
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import numpy as np
from tqdm import tqdm
import io
import PIL
import requests
import cv2
import shutil
#from google.colab.patches import cv2_imshow
import os.path
import glob
import warnings


def clean_folder(folder_name):
    """Delete `folder_name` (ignoring errors) and recreate it empty."""
    try:
        shutil.rmtree(folder_name)
    except OSError:
        pass
    os.makedirs(folder_name, exist_ok=True)


# + pycharm={"name": "#%%\n"}
# Load the YOLOv3 pedestrian detector from the detLib sub-project.
yolo_path = "detLib/"
sys.path.append(yolo_path)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

from detLib.models import Darknet
from detLib.utils.utils import *
from detLib.utils.datasets import *


def y_path(p):
    """Resolve a path relative to the detLib folder."""
    return os.path.join(yolo_path, p)


weights = y_path("weights/yolov3_ckpt_current_50.pth")
model_def = y_path("config/yolov3-custom.cfg")
model = Darknet(model_def, img_size=416).to(device)
model.load_state_dict(torch.load(weights, map_location=device))
model.eval()  # inference mode (disables dropout/batch-norm updates)
classes = ['pedestrian']
conf_thres, nms_thres = 0.9, 0.4

# + pycharm={"name": "#%%\n"}
# Single image
output_dir = "output/"
clean_folder(output_dir)
resizeSize = 960  # if zero keep original dimension

img_path = "Frames/MOT16-10-raw/frame1.jpg"
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = transforms.ToTensor()(img)
img, _ = pad_to_square(img, 0)
if resizeSize != 0:
    img = resize(img, resizeSize)
img.unsqueeze_(0)  # add batch axis

with torch.no_grad():
    start_time = time.time()
    detections = model(img)
    detections = non_max_suppression(detections, conf_thres, nms_thres)
    detections = detections[0]
print(f"--- {time.time() - start_time} seconds for {img.shape} ---")

start_time = time.time()
# Reload the original (unpadded, BGR) image so crops can be saved directly with cv2:
img = cv2.imread(img_path)
detections = rescale_boxes(detections, resizeSize, img.shape[:2])
for i, (x1, y1, x2, y2, conf, cls_conf, cls_pred) in enumerate(detections):
    box_w = x2 - x1
    box_h = y2 - y1
    # Keep plausibly person-shaped boxes: taller than wide, at least 10px wide.
    if 0.45 * box_h > box_w > 10:
        pedestrian = img[int(y1):int(y2), int(x1):int(x2), :]
        filename = f'{output_dir}/{i:04}.png'
        cv2.imwrite(filename, pedestrian)
print(f"--- {time.time() - start_time} seconds for saving {i} images ---")

# + pycharm={"name": "#%%\n"}
# Same pipeline, frame by frame over a video.
videoname = 'MOT16-10-raw'
videoframe = cv2.VideoCapture('vids/' + videoname + '.webm')
output_dir = "output/"
clean_folder(output_dir)
resizeSize = 960  # if zero keep original dimension
framenr = 0

if not videoframe.isOpened():
    print("Error opening video stream or file")
while videoframe.isOpened():
    # Capture frame-by-frame; keep the original BGR frame for cropping and run
    # detection on a normalized/padded/resized tensor copy.
    ret, frame = videoframe.read()
    if ret:
        framenr += 1
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        img = transforms.ToTensor()(img)
        img, _ = pad_to_square(img, 0)
        if resizeSize != 0:
            img = resize(img, resizeSize)
        img.unsqueeze_(0)  # add batch axis

        with torch.no_grad():
            start_time = time.time()
            detections = model(img)
            detections = non_max_suppression(detections, conf_thres, nms_thres)
            detections = detections[0]
        print(f"--- {time.time() - start_time} seconds for {img.shape} ---")

        start_time = time.time()
        # Fix: crop from the *current video frame* — the original re-read the
        # static `img_path` still image from the previous cell here, so every
        # saved crop came from frame1.jpg regardless of the video content.
        if detections is not None:  # non_max_suppression may yield None when nothing is detected
            detections = rescale_boxes(detections, resizeSize, frame.shape[:2])
            for i, (x1, y1, x2, y2, conf, cls_conf, cls_pred) in enumerate(detections):
                box_w = x2 - x1
                box_h = y2 - y1
                if 0.45 * box_h > box_w > 10:
                    pedestrian = frame[int(y1):int(y2), int(x1):int(x2), :]
                    filename = f'{output_dir}/{framenr}-{i:04}.png'
                    cv2.imwrite(filename, pedestrian)
            print(f"--- {time.time() - start_time} seconds for saving {i} images ---")
        if framenr > 3:
            break

videoframe.release()
# Closes all the frames
cv2.destroyAllWindows()

# + pycharm={"name": "#%%\n"}
"""" some test to make the model work with full size image without padding, complicate to modify cause model is for square
from torchvision.transforms.functional import pad
s = list(file.size)
to_pad = tuple([int((31-(si-1)%32)/2) for si in s])
print(to_pad)
file = pad(file, padding=to_pad)
print(file.size)
"""
notebooks/tryYolo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "skip"} # # Notebook contents: # # This notebook contains a lecture. The code for generating plots are found at the of the notebook. Links below. # # - [presentation](#Python-plotting) # - [code for plots](#Code-for-plots) # + [markdown] slideshow={"slide_type": "slide"} # # Python plotting # # *<NAME>* # + [markdown] slideshow={"slide_type": "slide"} # # Reminders # - Why so many exercises # - APIs, # - 9am means 9.15, 1pm means 13.15 # *<NAME>* # + [markdown] slideshow={"slide_type": "slide"} endofcell="--" # # Recap # *What have we learned about basic Python and Pandas?* # # - # # - # -- # + [markdown] slideshow={"slide_type": "slide"} # # Agenda # # 1. [Background on plotting](#Understanding plotting) # 2. The [Python toolbox for plotting](#Python-plotting) # 3. Plots for one variable: [numeric](#Plotting-one-numerical-variable) and [categorical](#Plotting-one-categorical-variable) # 4. Plots for two variables: [numeric](#Plots-of-two-numeric-variables) and [categorical](#Plots-with-categorical-variables) # 5. [Advanced exploratory plotting](#Advanced-exploratory-plotting) # + [markdown] slideshow={"slide_type": "slide"} # # Understanding plotting # + [markdown] slideshow={"slide_type": "slide"} # ## What values do A,B,C,D have? # <center><img src='https://raw.githubusercontent.com/abjer/sds2017/master/slides/figures/excel1.png'></center> # + [markdown] slideshow={"slide_type": "slide"} # ## The shocking answer # <center><img src='https://raw.githubusercontent.com/abjer/sds2017/master/slides/figures/excel2.png'></center> # # + [markdown] slideshow={"slide_type": "slide"} # ## Why are you plotting? 
# *Who's the audience?*

# + [markdown] slideshow={"slide_type": "fragment"}
# Others

# + [markdown] slideshow={"slide_type": "-"}
# - **Explanatory** plots: polished figures to convey your message

# + [markdown] slideshow={"slide_type": "fragment"}
# Yourself:

# + [markdown] slideshow={"slide_type": "-"}
# - **Exploratory** plots: fast for understanding data - minimal polishing.

# + [markdown] slideshow={"slide_type": "slide"}
# ## How should you plot (1)
# *What are some tips for making **explanatory** plots in a report?*

# + [markdown] slideshow={"slide_type": "fragment"}
# 1. Self explanatory
#   - Contain axis labels, a title, and footnotes with relevant information.
# 1. Eye candy
#   - Choose the right plot type.
#   - Make sure font size, colors, and line width are appropriate.
# 1. Narratives - should convey key point(s)
#   - If you want to show differences between groups in data, make sure it is easy to distinguish them.
# 1. Keep it simple.
#   - Anything unnecessary should be removed, see [this post](https://www.darkhorseanalytics.com/blog/data-looks-better-naked/).

# + [markdown] slideshow={"slide_type": "slide"}
# ## How should you plot (2)
# *What is some practical advice on making **explanatory** plots?*

# + [markdown] slideshow={"slide_type": "fragment"}
# 1. Try out a few plot types, using exploratory analysis.
# 1. Apply the *"layered grammar of graphics"*.
#   - Start with an empty canvas
#   - Fill in the necessary things (axis, ticks, bars/lines, labels)

# + [markdown] slideshow={"slide_type": "slide"}
# ## How should you plot (3)
# *What are some guidelines on making plots in **general**?*

# + [markdown] slideshow={"slide_type": "fragment"}
# Be aware of *what* you plot
# - numerical vs. non-numeric (categorical)
# - raw data vs.
model results # + [markdown] slideshow={"slide_type": "slide"} # # Python plotting # + [markdown] slideshow={"slide_type": "slide"} # ## Packages for Python plotting (1) # *What is the fundamental tool for making plots in Python?* # + [markdown] slideshow={"slide_type": "fragment"} # **Matplotlib** is the fundamental plotting module # - Can make almost any 2d plot. # - Can build publication ready figures. # - Caveat: # - requires time consuming customization; # - requires practice. # + slideshow={"slide_type": "fragment"} import matplotlib.pyplot as plt # allow printing in notebook # %matplotlib inline # + [markdown] slideshow={"slide_type": "slide"} # ## Packages for Python plotting (2) # *What are good tools for fast, exploratory plots?* # + [markdown] slideshow={"slide_type": "fragment"} # `seaborn` has built-in capabilities to make plots # - Analyzing data, e.g. splitting by subsets # - Make interpolation of data to smooth noise. # # `pandas` can easily convert Series and DataFrames to plots # + slideshow={"slide_type": "-"} import pandas as pd import seaborn as sns # high level plotting library # + [markdown] slideshow={"slide_type": "slide"} # ## Packages for Python plotting (3) # Seaborn comes with some illustrative datasets. We load `iris` and `tips`. # # # + slideshow={"slide_type": "fragment"} iris = sns.load_dataset('iris') tips = sns.load_dataset('tips') # + [markdown] slideshow={"slide_type": "slide"} # # Plotting one numerical variable # + [markdown] slideshow={"slide_type": "slide"} # ## The data # *What does the `tips` data contain?* # + slideshow={"slide_type": "fragment"} print(tips.head(3)) # + [markdown] slideshow={"slide_type": "slide"} # ## Univariate distribution (1) # *How did we count categorical data?* # + [markdown] slideshow={"slide_type": "fragment"} # - Using `value_counts`. # + [markdown] slideshow={"slide_type": "fragment"} # Can we do something similar with numeric data? 
# - # cut into categorical data x = tips.total_bill cuts = np.arange(0, 70, 10) pd.cut(x, cuts).value_counts() # + [markdown] slideshow={"slide_type": "slide"} # ## Univariate distribution (2) # *How do we plot the distribution of numerical variables?* # + [markdown] slideshow={"slide_type": "fragment"} # We often use the histogram. # - Bins data and counts observations # - Example of tips: # + slideshow={"slide_type": "-"} histplot # + [markdown] slideshow={"slide_type": "slide"} # ## Matplotlib and the grammar of graphics (1) # *Where do I start with making a plot?* # + [markdown] slideshow={"slide_type": "fragment"} # We will begin with the fundamental and flexible way. We start with our plotting canvas. # + slideshow={"slide_type": "-"} fig, ax = plt.subplots(figsize = (6, 2.5)) # create placeholder for plot # + [markdown] slideshow={"slide_type": "-"} # - `ax` contains most of the chart elements: the grid axes, labels, shapes we draw etc. # - `fig` the actual plot which is displayed (export to pdf etc.) # + [markdown] slideshow={"slide_type": "slide"} # ## Matplotlib and the grammar of graphics (2) # We can modify our canvas, e.g the axis scaling: # + slideshow={"slide_type": "-"} fig, ax = plt.subplots(figsize = (10, 4.5)) ax.set_xlim([0, 60]) # x-axis cutoffs ax.set_ylim([0, 80]) # y-axis cutoffs # + [markdown] slideshow={"slide_type": "slide"} # ## Matplotlib and the grammar of graphics (3) # We can draw plots on the canvas # + slideshow={"slide_type": "-"} fig, ax = plt.subplots(figsize = (10, 4.5)) ax.set_xlim([0, 60]) ax.set_ylim([0, 80]) ax.hist(x) # make plot # + [markdown] slideshow={"slide_type": "slide"} endofcell="--" # ## Matplotlib and the grammar of graphics (4) # What might we change about our plot? # - # - # # We will try customization in the exercises today. 
# -- # + [markdown] slideshow={"slide_type": "slide"} # ## Matplotlib and the grammar of graphics (5) # *Can we change matplotlib defaults?* # + [markdown] slideshow={"slide_type": "fragment"} # Yes, this may be very useful. For instance plot size. # + slideshow={"slide_type": "-"} plt.style.use('default') # set style (colors, background, size, gridlines etc.) plt.rcParams['figure.figsize'] = 10, 4 # set default size of plots plt.rcParams.update({'font.size': 18}) # + [markdown] slideshow={"slide_type": "slide"} # ## Plotting with pandas # Pandas has a quick and dirty implemention. Let's try the code below. # + slideshow={"slide_type": "-"} x.head() #x.plot.hist() # + [markdown] slideshow={"slide_type": "slide"} # ## Plotting with Seaborn (1) # The module Seaborn is great for fast plots that look good # + slideshow={"slide_type": "fragment"} sns.distplot(x) # histogram for seaborn # + [markdown] slideshow={"slide_type": "fragment"} # Quiz: What is the line? # + [markdown] slideshow={"slide_type": "slide"} # ## Plotting with Seaborn (2) # *Can we use Seaborn for cumulative plots?* # + [markdown] slideshow={"slide_type": "fragment"} # Yes, we specify `cumulative` in the keywords. # + slideshow={"slide_type": "-"} sns.distplot(x, hist_kws={'cumulative': True}, kde_kws={'cumulative': True}) # + [markdown] slideshow={"slide_type": "slide"} # ## Summing up # # + [markdown] slideshow={"slide_type": "fragment"} # Group discussion (2 minutes): # - How did our tools perform? # - Which one seems most adequate for exploratory analysis? Which one for explanatory? # - Which steps could be taken towards improving our histograms? # + [markdown] slideshow={"slide_type": "slide"} # # Plotting one categorical variable # # + [markdown] slideshow={"slide_type": "slide"} # ## Univariate categorical # # + [markdown] slideshow={"slide_type": "-"} # *What is categorical data? 
How can we plot categorical data?* # + [markdown] slideshow={"slide_type": "fragment"} # Pies are possible but of little use. Let's plot this with bars: # + slideshow={"slide_type": "-"} sns.countplot(x='sex', data=tips) # + [markdown] slideshow={"slide_type": "slide"} # # Plotting DataFrames # + [markdown] slideshow={"slide_type": "slide"} # ## Table format # + [markdown] slideshow={"slide_type": "fragment"} # *How did we define a tidy/long table?* # + [markdown] slideshow={"slide_type": "fragment"} # One row for each observation # + [markdown] slideshow={"slide_type": "-"} # <center><img src='https://raw.githubusercontent.com/abjer/sds2017/master/slides/figures/tidy.png'></center> # + [markdown] slideshow={"slide_type": "slide"} # # Plots of two numeric variables # + [markdown] slideshow={"slide_type": "slide"} # ## Two numeric variables (1) # *How do we plot two numeric variables?* # + [markdown] slideshow={"slide_type": "fragment"} # If we have little data we can make a point cloud, i.e. a scatter plot. 
# + slideshow={"slide_type": "-"} plt.scatter(x=tips['total_bill'], y=tips['tip']) # + [markdown] slideshow={"slide_type": "slide"} # ## Two numeric variables (2) # *Quiz: How might we alter the scatter plot?* # + [markdown] slideshow={"slide_type": "fragment"} # We can interpolate the data: # + slideshow={"slide_type": "-"} sns.jointplot(x='total_bill', y='tip', data=tips, kind='hex', size=5) # hex # + [markdown] slideshow={"slide_type": "slide"} # ## Two numeric variables (3) # *What if we want to see the linear relationship?* # + [markdown] slideshow={"slide_type": "fragment"} # We use the linear model plot: # + slideshow={"slide_type": "-"} sns.lmplot(x='total_bill', y='tip', data=tips, size=5, aspect=2) # + [markdown] slideshow={"slide_type": "slide"} # # Plots with categorical variables # + [markdown] slideshow={"slide_type": "slide"} # ## Mixed types - numeric, categorical (1) # + [markdown] slideshow={"slide_type": "fragment"} # *Quiz: What is tidy format?* # + [markdown] slideshow={"slide_type": "fragment"} # - One row per observation # + [markdown] slideshow={"slide_type": "fragment"} # *How might we use categorical variables?* # # + [markdown] slideshow={"slide_type": "fragment"} # - We can split data! # + [markdown] slideshow={"slide_type": "fragment"} # *In which plots might this be useful?* # # + [markdown] slideshow={"slide_type": "fragment"} # - We can compute mean for each categorical variables, the `barplot`. # - We can compute quartiles for each categorical variables, the `boxplot`. 
# + [markdown] slideshow={"slide_type": "slide"} # ## Mixed types - numeric, categorical (2) # # Let's make a plot the mean tips - distinguish by weekday: # + slideshow={"slide_type": "-"} f = sns.barplot(x='day', y='tip', data=tips) # + [markdown] slideshow={"slide_type": "slide"} # ## Mixed types - numeric, categorical (2) # Let's make a plot the tip quartiles - distinguish by sex: # + slideshow={"slide_type": "-"} f = sns.boxplot(x='sex', y='tip', data=tips) # + [markdown] slideshow={"slide_type": "slide"} # # Advanced exploratory plotting # + [markdown] slideshow={"slide_type": "slide"} # ## Plot grids (1) # + [markdown] slideshow={"slide_type": "-"} # *How can we we plot the relationship for more than two variables?* # + slideshow={"slide_type": "-"} # A powerful method: sns.pairplot(tips, size=1.5, aspect=1.6) # + [markdown] slideshow={"slide_type": "slide"} # ## Plot grids (2) # + [markdown] slideshow={"slide_type": "-"} # *Can we split the data to investigate heterogeneous relationships?* # + [markdown] slideshow={"slide_type": "fragment"} # Yes, let's starting building a FacetGrid: # + slideshow={"slide_type": "-"} g = sns.FacetGrid(tips) g = g.map(sns.regplot, 'total_bill', 'tip') # + [markdown] slideshow={"slide_type": "slide"} # ## Plot grids (3) # + [markdown] slideshow={"slide_type": "-"} # Let's try to add distinctive slopes for smoker # + slideshow={"slide_type": "-"} g = sns.FacetGrid(tips, col='smoker') # time g = g.map(sns.regplot, 'total_bill', 'tip') # + [markdown] slideshow={"slide_type": "fragment"} # Can we say anything about smokers tipping behavior? 
# + [markdown] slideshow={"slide_type": "skip"} # # Code for plots # # ### Histogram # + slideshow={"slide_type": "skip"} import warnings import matplotlib.pyplot as plt import pandas as pd import seaborn as sns warnings.filterwarnings("ignore") histplot,ax = plt.subplots(1, 1, figsize=(10,4)) sns.distplot(sns.load_dataset('tips').total_bill, kde=False, ax=ax) ax.set_title('Distribution of total bill') ax.set_xlabel('Total bill, $') ax.title.set_fontsize(20) ax.xaxis.label.set_fontsize(16) for item in ax.get_yticklabels()+ax.get_xticklabels(): item.set_fontsize(12) # + [markdown] slideshow={"slide_type": "slide"} # # The end # # [Return to Agenda](#Agenda)
Test_karl/material/session_5/karl_lecture_5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Curve Fit With or Without Constant # # When fitting an exponential to the early data for COVID-19 hospitalizations in San Diego, what is the effect of including or not including a constant in the fit function? # # + import sys # Install required packages # !{sys.executable} -mpip -q install matplotlib seaborn statsmodels pandas metapack uncertainties sklearn scipy # %matplotlib inline import pandas as pd import numpy as np import metapack as mp import rowgenerators as rg import matplotlib.pyplot as plt import seaborn as sns sns.set(color_codes=True) from sklearn import linear_model #from scipy.stats import weibull_min, lognorm, logistic, norm from scipy.optimize import curve_fit import statsmodels.api as sm import uncertainties.unumpy as unp import uncertainties as unc # - pkg = mp.open_package('http://library.metatab.org/sandiegodata.org-covid19.csv') pkg # + df = pkg.resource('sd_covid_cases').read_csv().fillna(0) df.drop(columns=['notes'], inplace=True) df['date'] = pd.to_datetime(df.date) start_date = df.iloc[0].date start_cases = df.iloc[0].cases df['day'] = (df.date - start_date).dt.days df.rename(columns={'hospitalized': 'hosp'}, inplace=True) # - # # Setup # # First we will create a "true" curve for hospitlaizations from an initial fit to the data. We'll add error to this curve, then fit to it again, to assess the accuracy of the curve fit under different conditions. 
# # # + ## Fit to hospitalizations to get parameters and relative error # Here are our fit functions def func_exp_c(x, a, b, c): """Version of func_exp with a constant""" return a * np.exp(b * x) +c func_exp_c.popt = (6.20811331, 0.17217422, 0) def func_exp(x, a, b): '''Exponential with no constant''' return a * np.exp(b * x) func_exp.popt = (6.20811331, 0.17217422) # Analysis date range start_date = pd.Timestamp('2020-03-17') end_date = pd.Timestamp('2020-03-26') t = df.set_index('date').loc[start_date:end_date] # Create parameters for an initial curve fit. This will # produce a new clean curve that we will use to compare other # fits to fit_func = func_exp_c popt, pcov = curve_fit(fit_func, t.day, t.hosp, p0=fit_func.popt) y_p = fit_func(t.day,*popt).values # Supposing that the curve fit is the true curve err_std = ( ( y_p - t.hosp) / y_p).std() # relative error to the "true" curve # Parameters for the base curve, the "true" values that we'll add error to. # Doesn't really matter what it is exactly, but should be similar to reality curve_params = list(popt) + ([0]*(3-len(popt))) # Ensure there are always three components to the params # Now we have the parameters to generate the "true" curve, and the std dev # to add errors to it. # curve_params, err_std # - # Now we can run build exponential curves with errors, fit them, and access the accuracy. Rather than analyze the errors in the fit parameters, we will predict the y value ( hosptlizations ) at day 60 and compare to the value for the "true" curve. # + # Shift from working with actual dates, to an undated series of values. 
start_day = (start_date - df.date.min()).days
end_day = (end_date - df.date.min()).days

# Work on a copy so the fitted parameters above are not mutated.
cp = curve_params[:]
#cp[0] = 0

# Reset Initial curve fit parameters
func_exp.popt = curve_params[:2]
func_exp_c.popt = curve_params

def bootff(fit_func,start_day=start_day, end_day=end_day, err_std=err_std, curve_params=curve_params, iters=1000):
    """Bootstrap generator: build the "true" curve for fit_func, repeatedly
    add multiplicative Gaussian noise (std err_std), refit, and yield the
    day-60 prediction of each refit. Yields iters values."""

    # The two fit functions require different lengths of parameters
    ff_cp = curve_params[:len(fit_func.popt)]

    # Range of data that we'll fit to
    x_f = np.linspace(start_day, end_day)
    y_f = fit_func(x_f, *ff_cp)

    diff = None

    for i in range(iters):

        # Optimize zero error case
        if diff is not None and err_std == 0:
            yield diff
            continue

        # Add some noise
        y_noise = np.random.normal(0, err_std, size=len(y_f))
        y_e = y_f*(1+y_noise)

        # Refit the noisy curve; maxfev raised so stubborn draws still converge.
        popt, pcov = curve_fit(fit_func, x_f, y_e, p0=fit_func.popt, maxfev = 5000)

        # Take the difference between the predictions at day 60
        diff = fit_func(60, *popt)

        yield diff

# One column per scenario: exp2/exp3 = noisy bootstrap runs of the 2- and
# 3-parameter fits, *_ze = zero-error runs, *_v = the "true" day-60 value.
trials = pd.DataFrame({
    'exp2': list(bootff(func_exp, curve_params=cp)),
    'exp2_ze': list(bootff(func_exp, curve_params=cp, err_std=0)),
    'exp2_v': func_exp(60, *cp[:2]),
    'exp3': list(bootff(func_exp_c, curve_params=cp)),
    'exp3_ze': list(bootff(func_exp_c, curve_params=cp, err_std=0)),
    'exp3_v': func_exp_c(60, *cp),
})

# Signed error of each bootstrap prediction against the true value.
trials['exp2_d'] = trials['exp2_v'] - trials['exp2']
trials['exp3_d'] = trials['exp3_v'] - trials['exp3']

tt =trials.describe()
tt

# +
# Summary table: true value, prediction mean, and a normal-approximation
# 95% confidence interval (mean +/- 1.96 * std) for each fit function.
rows = [
    ['True Value', tt.loc['mean','exp2_v'], np.nan, tt.loc['mean','exp3_v'], np.nan],
    ['95 CI L', tt.loc['mean','exp2_v'] - tt.loc['std','exp2']*1.96, np.nan, tt.loc['mean','exp3_v'] - tt.loc['std','exp3']*1.96, np.nan],
    ['Prediction Mean', tt.loc['mean','exp2'], tt.loc['std','exp2'], tt.loc['mean','exp3'], tt.loc['std','exp3']],
    ['95 CI U', tt.loc['mean','exp2_v'] + tt.loc['std','exp2']*1.96, np.nan, tt.loc['mean','exp3_v'] + tt.loc['std','exp3']*1.96, np.nan]
]

o = pd.DataFrame(rows, columns=['Metric','2 Parameter exp','std','3 parameter exp','std']).set_index('Metric')
o['2 Parameter exp'] = o['2 Parameter exp'].astype(int)
o['3 parameter exp'] = o['3 parameter exp'].astype(int)
o.fillna('')
# -
Notebooks/Curve Fit WO Constant.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import pandas as pd import csv from pathlib import Path import os import sys module_path = os.path.abspath(os.path.join("..")) if module_path not in sys.path: sys.path.append(module_path) DATA_PATH = Path.cwd().parent / "data" TMP_PATH: Path = DATA_PATH / "tmp" OUTPUT_PATH = DATA_PATH / "dataset" / "ejscreen_2019" CENSUS_USA_CSV = DATA_PATH / "census" / "csv" / "us.csv" cbg_usa_df = pd.read_csv( CENSUS_USA_CSV, names=["GEOID10"], dtype={"GEOID10": "string"}, low_memory=False, header=None, ) cbg_usa_df.head() cbg_usa_df.dtypes ejscreen_df = pd.read_csv( OUTPUT_PATH / "usa.csv", dtype={"ID": "string"}, low_memory=False, ) ejscreen_df.rename( columns={"ID": "GEOID10"}, inplace=True, ) ejscreen_df.head() ejscreen_df.dtypes merged_df = cbg_usa_df.merge(ejscreen_df, on="GEOID10", how="left") merged_df.head() merged_df[merged_df["Shape_Area"].isnull()]
data/data-pipeline/data_pipeline/ipython/EJScreen Validate.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: tf_gpu # language: python # name: tf_gpu # --- # # 1.5. Implementing a simple Linear Regression Algorithm # ## Imports # + import numpy as np # conda install numpy import tensorflow as tf import matplotlib.pyplot as plt # conda install matplotlib import pandas as pd # conda install pandas import warnings import seaborn as sb # conda install seaborn warnings.filterwarnings('ignore') print(tf.__version__) # - # ## Load data and take a look at it data = pd.read_csv('airbnb new york.csv').sample(frac=1) data.describe() # ## Data preprocessing # + features = data[['neighbourhood_group', 'room_type', 'minimum_nights', 'number_of_reviews', 'reviews_per_month', 'calculated_host_listings_count', 'availability_365']] #print(features.isna().sum()) features['reviews_per_month'] = features['reviews_per_month'].fillna(0) #print(features.isna().sum()) onehot_neighborhood_group = pd.get_dummies(features['neighbourhood_group']) onehot_room_type = pd.get_dummies(features['room_type']) #print(onehot_room_type) features = features.drop(columns=['neighbourhood_group', 'room_type']) features = pd.concat([features, onehot_neighborhood_group, onehot_room_type], axis=1) #print(features.head()) targets = data['price'] train_size = int(0.7*len(data)) X_train, X_test = features.values[:train_size, :], features.values[train_size:, :] y_train, y_test = targets.values[:train_size], targets.values[train_size:] print(len(X_train[0])) # - # ## Data visualization and analysis # ## The Tensorflow 2 Machine Learning Approaches # ### Linear Regression class LinearModel: def __init__(self): # y_pred = W*X + b self.initializer = tf.keras.initializers.GlorotUniform() def loss(self, y, y_pred): return tf.reduce_mean(tf.abs(y - y_pred)) def train(self, X, y, lr=0.00001, epochs=20, verbose=True): X = np.asarray(X, dtype=np.float32) y = 
np.asarray(y, dtype=np.float32).reshape((-1, 1)) # [1,2,3,4] -> [[1],[2],[3],[4]] self.W = tf.Variable( initial_value=self.initializer(shape=(len(X[0]), 1), dtype='float32')) self.b = tf.Variable( initial_value=self.initializer(shape=(1,), dtype='float32')) def train_step(): with tf.GradientTape() as t: current_loss = self.loss(y, self.predict(X)) dW, db = t.gradient(current_loss, [self.W, self.b]) self.W.assign_sub(lr * dW) # W -= lr * dW self.b.assign_sub(lr * db) return current_loss for epoch in range(epochs): current_loss = train_step() if verbose: print(f'Epoch {epoch}: Loss: {current_loss.numpy()}') # <3 eager execution def predict(self, X): # [a, b] x [b, c] # X -> [n_instances, n_features] [n_features, 1] return tf.matmul(X, self.W) + self.b model = LinearModel() model.train(X_train, y_train, epochs=100) # ## Conclusions
Section 1/Video 1.5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # I have the clean data from the previous step that includes: # # ## X7 # ds: in datetime format <br> # TMAX_7avg: 7 day average of max temperature <br> # TMIN_7avg: 7 day average of min temperature <br> # TOBS_7avg: 7-day average of observed temperature <br> # PDO: pacific decadal oscillation; data given by the month but interpolated to the day <br> # AMO: atlantic decadal oscillation; data measure by the month but interpolated to the day <br> # PRCP: water equivalent (in) or precipitation (if all the snow was melted down to water) <br> # y: 7-day snowfall total # # ## X14 # ds: in datetime format <br> # TMAX_14avg: 14-day average of max temperature <br> # TMIN_14avg: 14-day average of min temperature <br> # TOBS_14avg: 14-day average of observed temperature <br> # PDO: pacific decadal oscillation; data given by the month but interpolated to the day <br> # AMO: atlantic decadal oscillation; data measure by the month but interpolated to the day <br> # PRCP: water equivalent (in) or precipitation (if all the snow was melted down to water) <br> # y: 14-day snowfall total # # ## X30 # ds: in datetime format <br> # TMAX_30avg: 30-day average of max temperature <br> # TMIN_30avg: 30-day average of min temperature <br> # TOBS_30avg: 30-day average of observed temperature <br> # PDO: pacific decadal oscillation; data given by the month but interpolated to the day <br> # AMO: atlantic decadal oscillation; data measure by the month but interpolated to the day <br> # PRCP: water equivalent (in) or precipitation (if all the snow was melted down to water) <br> # y: 30-day snowfall total # # # #load python packages import os import pandas as pd import datetime import seaborn as sns import matplotlib.pyplot as plt import numpy as np from sklearn.preprocessing 
import StandardScaler from mpl_toolkits.mplot3d import Axes3D import ppscore as pps from fbprophet import Prophet import featuretools as ft import pickle path="C:\Springboard\Github\Capstone1_alta\intermediate_data" os.chdir(path) X7 = pickle.load(open("X7_snow_pp.pkl", "rb")) X7.head() X14 = pickle.load(open("X14_snow_pp.pkl", "rb")) X14.head() X30 = pickle.load(open("X30_snow_pp.pkl", "rb")) X30.head() # I was going to run deep feature sythesis on this to create more possible features, but that doesn't seem to work. Instead I will just run FB Prophet and see if I can come back to this step after I finish with more time series modeling. # ## 1. FB Prophet modeling just on the time series - calculate error for 1 year # The first model that I build is just simple without any additional regressors. # slice off the last year of the X7 df to save to calculate error - may need to use boolean logic to extract these rows. year = 2018 # split date between test and train data set split_date = str(year) + '-11-14' # end date of the test set end_date = str(year+1) + '-04-15' train = X7['ds'] <= split_date X7_train = X7.loc[train] # data set from the split until the end of data keeping X7_long = X7.loc[~train] # shorten this data for just the next winter X7_test = X7_long[X7_long['ds'] <= end_date] # let's take a better look at this with FB's Prophet - not considering any other regressors yet from fbprophet import Prophet m_fb = Prophet(yearly_seasonality = True, daily_seasonality=False, weekly_seasonality=False) m_fb.fit(X7_train) future = m_fb.make_future_dataframe(periods=365) forecast = m_fb.predict(future) m_fb.plot_components(forecast) plt.savefig('Prophet_1.jpg') compare = forecast[(forecast['ds'] <= end_date) & (forecast['ds'] > split_date)] # calculate mean squared error from sklearn.metrics import mean_squared_error MSE = mean_squared_error(X7_test['y'],compare['yhat']) print(MSE) np.sqrt(MSE) # for the whole winter November 15 to April 15 # Considering that the mean 
snowfall for 7-day period in the winter is ~17in, that's a very large RMSE # ### Ran this for these same 3 years (2016-2019) without using regressors <br> # Comparing RMSE for model vs. dummy <br> # 2016-17: [[16.725252595117972, 16.27826291591095], <br> # 2017-18: [10.466405773203524, 11.38487691487486], <br> # 2018-19: [14.453343292424275, 14.031903165705913]] <br> # There is barely any difference between the dummy model (heuristic of snowfall has same chance all winter) and the model we built. np.mean([16.725252595117972, 10.466405773203524, 14.453343292424275]), np.mean([16.27826291591095, 11.38487691487486, 14.031903165705913]) # ## 2. FB Prophet time series plus regression for 3 winters of snowfall # I'll add regressors for Temperature, PDO, AMO. We'll look through the last 3 winters of snowfall data and compare # + # let's try to add the regressors # manual loop for calculating error year by year RMSE7 = [] dates = [] X7_dum = pd.DataFrame() RMSE_model = 0 RMSE_dum = 0 RMSE_diff = 0 for yr in range(2016,2019): # split date between test and train data set split_date = str(yr) + '-11-14' # end date of the test set at the end of winter end_date = str(yr+1) + '-04-15' # takes all of the data before the split date X7_train = X7[X7['ds'] <= split_date] # data set from the split through the next winter X7_test = X7[(X7['ds'] <= end_date) & (X7['ds'] > split_date)] # create dummy model X - just a constant average throughout the season X7_dum['ds'] = X7_test['ds'] X7_dum['y'] = X7_train[X7_train['ds'].dt.month.isin(['12','01','02','03','04'])]['y'].mean() # model based on the training data set m_fb = Prophet(yearly_seasonality = True, daily_seasonality=False, weekly_seasonality=False) m_fb.add_regressor('TMAX_7avg') m_fb.add_regressor('TMIN_7avg') m_fb.add_regressor('TOBS_7avg') m_fb.add_regressor('PDO') m_fb.add_regressor('AMO') m_fb.fit(X7_train) # predicts for 1 year based on that model future = m_fb.make_future_dataframe(periods=365) ### must add these regressor 
variables to the future dateframe #### # pull these from the X7_test forecast = m_fb.predict(X7_test) # split out that last winter from the prediction set compare = forecast[(forecast['ds'] <= end_date) & (forecast['ds'] > split_date)] # calculate the mean squared error RMSE = np.sqrt(mean_squared_error(X7_test['y'],compare['yhat'])) #print(yr, compare['yhat'].shape, X7_test['y'].shape) dates.append([compare, X7_test, X7_dum]) #calculate the RMSE for the dummy model RMSE_mean = np.sqrt(mean_squared_error(X7_test['y'], X7_dum['y'])) # append the mean squared error to the list RMSE7.append([RMSE, RMSE_mean, RMSE - RMSE_mean]) # sum of the model error RMSE_model += RMSE # sum of the dummy model error RMSE_dum += RMSE_mean # sum of the differences RMSE_diff += (RMSE - RMSE_mean) # print the resulting MSE print(RMSE7) # - print("Model:", RMSE_model, "Dummy:", RMSE_dum, "Difference:", RMSE_diff) # + # let's plot the results for 2016-17 plt.plot(dates[0][0]['ds'],dates[0][0]['yhat'], color='green', label='Model') # line plot of the model plt.scatter(dates[0][1]['ds'],dates[0][1]['y'], color='red', label='Data') # scatter of the test plt.hlines(dates[0][2]['y'][20682], xmin=dates[0][1]['ds'][20531], xmax=dates[0][1]['ds'][20682], color='blue', label='Mean') # line plot of the dummy model plt.legend() plt.title('2016-17 Winter') plt.show() # let's plot the results for 2017-2018 plt.plot(dates[1][0]['ds'],dates[1][0]['yhat'], color='green', label='Model') # line plot of the model plt.scatter(dates[1][1]['ds'],dates[1][1]['y'], color='red', label='Data') # scatter of the test plt.hlines(dates[1][2]['y'][20682], xmin=dates[1][1]['ds'][20895], xmax=dates[1][1]['ds'][21046], color='blue', label='Mean') # line plot of the dummy model plt.legend() plt.title('2017-18 Winter') plt.show() # let's plot the results for 2018-19 plt.plot(dates[2][0]['ds'],dates[2][0]['yhat'], color='green', label='Model') # line plot of the model plt.scatter(dates[2][1]['ds'],dates[2][1]['y'], 
color='red', label='Data') # scatter of the test plt.hlines(dates[2][2]['y'][20682], xmin=dates[2][1]['ds'][21260], xmax=dates[2][1]['ds'][21411], color='blue', label='Mean') # line plot of the dummy model plt.legend() plt.title('2018-19 Winter') plt.savefig('Prophet_reg1.jpg') plt.show() # - # That looks better than just using the time series. Let's see what happens if we add some more regressors: lag and square features # ## 3. FB Prophet Time Series with regression and more features to regress: lag precipitation, and square of PDO, AMO, and Tmax_avg # + # let's add some regressors # function - takes in dataframe to model and test (November 15 - April 15) # manual loop for calculating error year by year RMSE7 = [] dates = [] X7_dum = pd.DataFrame() RMSE_model = 0 RMSE_dum = 0 RMSE_diff = 0 for yr in range(2016,2019): # split date between test and train data set split_date = str(yr) + '-11-14' # end date of the test set at the end of winter end_date = str(yr+1) + '-04-15' # takes all of the data before the split date X7_train = X7[X7['ds'] <= split_date] # data set from the split through the next winter X7_test = X7[(X7['ds'] <= end_date) & (X7['ds'] > split_date)] # create dummy model X - just a constant average throughout the season X7_dum['ds'] = X7_test['ds'] X7_dum['y'] = X7_train[X7_train['ds'].dt.month.isin(['12','01','02','03','04'])]['y'].mean() # model based on the training data set m_fb = Prophet(yearly_seasonality = True, daily_seasonality=False, weekly_seasonality=False) m_fb.add_regressor('TMAX_7avg') m_fb.add_regressor('TMIN_7avg') m_fb.add_regressor('TOBS_7avg') m_fb.add_regressor('PDO') m_fb.add_regressor('AMO') m_fb.add_regressor('TMAX_7avg_sq') m_fb.add_regressor('PRCP_30') m_fb.add_regressor('PRCP_60') m_fb.add_regressor('PRCP_90') m_fb.add_regressor('PRCP_120') m_fb.add_regressor('AMO_sq') m_fb.add_regressor('PDO-sq') m_fb.fit(X7_train) # predicts for 1 year based on that model future = m_fb.make_future_dataframe(periods=365) forecast = 
m_fb.predict(X7_test) # split out that last winter from the prediction set compare = forecast[(forecast['ds'] <= end_date) & (forecast['ds'] > split_date)] # calculate the mean squared error RMSE = np.sqrt(mean_squared_error(X7_test['y'],compare['yhat'])) #print(yr, compare['yhat'].shape, X7_test['y'].shape) dates.append([compare, X7_test, X7_dum]) #calculate the RMSE for the dummy model RMSE_mean = np.sqrt(mean_squared_error(X7_test['y'], X7_dum['y'])) # append the mean squared error to the list RMSE7.append([RMSE, RMSE_mean, RMSE - RMSE_mean]) # sum of the model error RMSE_model += RMSE # sum of the dummy model error RMSE_dum += RMSE_mean # sum of the differences RMSE_diff += (RMSE - RMSE_mean) # print the resulting MSE print(RMSE7) # - print("Model:", RMSE_model, "Dummy:", RMSE_dum, "Difference:", RMSE_diff) 33.2173695089937/3 # + # let's plot the results for 2016-17 plt.plot(dates[0][0]['ds'],dates[0][0]['yhat'], color='green', label='Model') # line plot of the model plt.scatter(dates[0][1]['ds'],dates[0][1]['y'], color='red', label='Data') # scatter of the test plt.hlines(dates[0][2]['y'][20682], xmin=dates[0][1]['ds'][20531], xmax=dates[0][1]['ds'][20682], color='blue', label='Mean') # line plot of the dummy model plt.legend() plt.title('2016-17 Winter') plt.show() # let's plot the results for 2017-2018 plt.plot(dates[1][0]['ds'],dates[1][0]['yhat'], color='green', label='Model') # line plot of the model plt.scatter(dates[1][1]['ds'],dates[1][1]['y'], color='red', label='Data') # scatter of the test plt.hlines(dates[1][2]['y'][20682], xmin=dates[1][1]['ds'][20895], xmax=dates[1][1]['ds'][21046], color='blue', label='Mean') # line plot of the dummy model plt.legend() plt.title('2017-18 Winter') plt.show() # let's plot the results for 2018-19 plt.plot(dates[2][0]['ds'],dates[2][0]['yhat'], color='green', label='Model') # line plot of the model plt.scatter(dates[2][1]['ds'],dates[2][1]['y'], color='red', label='Data') # scatter of the test 
# (tail of the preceding plotting cell: 2018-19 winter of the
#  temperature-regressor model; `dates` here comes from the loop above this
#  chunk, so its hard-coded row labels are left untouched)
plt.hlines(dates[2][2]['y'][20682], xmin=dates[2][1]['ds'][21260],
           xmax=dates[2][1]['ds'][21411], color='blue', label='Mean')  # dummy-model mean
plt.legend()
plt.title('2018-19 Winter')
plt.savefig('Prophet_reg2.jpg')
plt.show()
# -

# This was barely any improvement over the model with just temperature; these
# additional variables may not add any more value, but let's try to model with
# just those variables (that can be seen further in advance) and drop the
# temperature regressors

# ## 4. FB Prophet Time Series with regressors for just the things that can be
# modeled far in advance (PDO, AMO, and precip lag)


# +
def _winter_split(df, yr):
    """Split `df` into a training set (everything through Nov 14 of `yr`) and
    a test set (Nov 15 of `yr` through Apr 15 of `yr`+1)."""
    split_date = str(yr) + '-11-14'
    end_date = str(yr + 1) + '-04-15'
    train = df[df['ds'] <= split_date]
    test = df[(df['ds'] <= end_date) & (df['ds'] > split_date)]
    return train, test, split_date, end_date


def _winter_dummy(train, test):
    """Constant-mean baseline: the training-set mean snowfall over the ski
    months, repeated across the test winter.

    BUG FIX vs original: built fresh per winter (the original reused one
    DataFrame, so every stored reference aliased/misaligned across years) and
    filtered with integer months — Series.dt.month is int64, so the original
    isin(['12', '01', ...]) never matched anything.
    """
    dum = pd.DataFrame()
    dum['ds'] = test['ds']
    dum['y'] = train[train['ds'].dt.month.isin([12, 1, 2, 3, 4])]['y'].mean()
    return dum


def _plot_winter(result, title, savefig=None):
    """Plot one winter: model line, observed scatter, dummy-mean line.

    `result` is a [forecast-slice, test-frame, dummy-frame] triple.
    Positional indexing replaces the original hard-coded absolute row labels
    (20682, 21260, ...), which only worked for one specific data file.
    """
    compare, test, dum = result
    plt.plot(compare['ds'], compare['yhat'], color='green', label='Model')
    plt.scatter(test['ds'], test['y'], color='red', label='Data')
    plt.hlines(dum['y'].iloc[0], xmin=test['ds'].iloc[0],
               xmax=test['ds'].iloc[-1], color='blue', label='Mean')
    plt.legend()
    plt.title(title)
    if savefig:
        plt.savefig(savefig)
    plt.show()


# manual loop for calculating error year by year
RMSE7 = []
dates = []
RMSE_model = 0
RMSE_dum = 0
RMSE_diff = 0
for yr in range(2016, 2019):
    X7_train, X7_test, split_date, end_date = _winter_split(X7, yr)
    X7_dum = _winter_dummy(X7_train, X7_test)

    # model based on the training data set, with the long-lead regressors
    m_fb = Prophet(yearly_seasonality=True, daily_seasonality=False,
                   weekly_seasonality=False)
    # NOTE(review): 'PDO-sq' breaks the '_sq' naming convention used by
    # 'AMO_sq' -- confirm the column really is named with a hyphen in X7.
    for reg in ('PDO', 'AMO', 'PRCP_30', 'PRCP_60', 'PRCP_90', 'PRCP_120',
                'AMO_sq', 'PDO-sq'):
        m_fb.add_regressor(reg)
    m_fb.fit(X7_train)

    # predict over the test winter (the original also built an unused
    # 365-day `future` frame; removed)
    forecast = m_fb.predict(X7_test)
    compare = forecast[(forecast['ds'] <= end_date) & (forecast['ds'] > split_date)]

    RMSE = np.sqrt(mean_squared_error(X7_test['y'], compare['yhat']))
    dates.append([compare, X7_test, X7_dum])
    RMSE_mean = np.sqrt(mean_squared_error(X7_test['y'], X7_dum['y']))
    RMSE7.append([RMSE, RMSE_mean, RMSE - RMSE_mean])
    RMSE_model += RMSE
    RMSE_dum += RMSE_mean
    RMSE_diff += (RMSE - RMSE_mean)

print(RMSE7)
# -

# Mean model RMSE over the three winters.
# BUG FIX: the original averaged RMSE7[2][1] (the third winter's *dummy*
# RMSE) instead of RMSE7[2][0] (its model RMSE).
(RMSE7[0][0] + RMSE7[1][0] + RMSE7[2][0]) / 3

forecast

print("Model:", RMSE_model, "Dummy:", RMSE_dum, "Difference:", RMSE_diff)

# +
_plot_winter(dates[0], '2016-17 Winter')
_plot_winter(dates[1], '2017-18 Winter', savefig='Prophet_reg3.jpg')
_plot_winter(dates[2], '2018-19 Winter')
# -

# The result is not much better than just taking the dummy model of the mean.
# I wonder if we could run Random Forest on the residuals from the time
# series?  I think that is possible, but requires some extra coding.

# ## 5. FB Prophet for 7-day snowfall total with regressors using Random
# Forest Regressor on the residuals (instead of FB's built-in linear regressor)

# +
from sklearn.ensemble import RandomForestRegressor

# manual loop for calculating error year by year
RMSE7 = []
dates = []
RMSE_model = 0
RMSE_dum = 0
RMSE_diff = 0
for yr in range(2016, 2019):
    X7_train, X7_test, split_date, end_date = _winter_split(X7, yr)
    X7_dum = _winter_dummy(X7_train, X7_test)

    # plain yearly-seasonality Prophet on the training data
    m_fb = Prophet(yearly_seasonality=True, daily_seasonality=False,
                   weekly_seasonality=False)
    m_fb.fit(X7_train)

    # residual of the time-series fit over the training window
    forecast_train = m_fb.predict(X7_train)
    residual = X7_train['y'] - forecast_train['yhat']

    # model the Random Forest on the regressors and the residual
    regr = RandomForestRegressor(max_leaf_nodes=70, random_state=0)
    regr.fit(X7_train.drop(['y', 'ds'], axis=1), residual)

    # combined prediction: Prophet trend/seasonality + RF residual correction
    forecast_test = m_fb.predict(X7_test)
    forecast_rf = regr.predict(X7_test.drop(['ds', 'y'], axis=1))
    forecast = forecast_test.copy()
    forecast['yhat'] = forecast['yhat'] + forecast_rf

    compare = forecast[(forecast['ds'] <= end_date) & (forecast['ds'] > split_date)]
    RMSE = np.sqrt(mean_squared_error(X7_test['y'], compare['yhat']))
    dates.append([compare, X7_test, X7_dum])
    RMSE_mean = np.sqrt(mean_squared_error(X7_test['y'], X7_dum['y']))
    RMSE7.append([RMSE, RMSE_mean, RMSE - RMSE_mean])
    RMSE_model += RMSE
    RMSE_dum += RMSE_mean
    RMSE_diff += (RMSE - RMSE_mean)

print(RMSE7)
# -

print("Model:", RMSE_model, "Dummy:", RMSE_dum, "Difference:", RMSE_diff)

# +
_plot_winter(dates[0], '2016-17 Winter')
_plot_winter(dates[1], '2017-18 Winter', savefig='Prophet_reg4.jpg')
_plot_winter(dates[2], '2018-19 Winter')
# -

# That was the best score we've seen yet, but that looks like some crazy
# overfitting that is going on.  I should probably limit the number of leaves
# on the RF tree and try different versions.

# # 6. Loop to model for multiple hyperparameters for the RF Regressor
# We are looking for a model that is not overfitting the data.  We'll write a
# loop to make this happen.

# ### Write a general loop that tries different values for number of leaves
# and then plots the RMSE that results
#
# We used 70 as the max leaf nodes, but we should really check over a range of
# values, but I don't want to check all 70.
# Instead we are going to randomly select 10 from the range 5 to 70
# We'll store the top 3 that have the least RMSE and pick one from that group
#

# +
# random values to use in the iteration
import random

# NOTE(review): no seed is set here, so each run tunes over a different
# sample of leaf counts; the later section seeds with random.seed(33) --
# consider seeding here too for reproducibility.
n_leaves = random.sample(range(5, 70), 10)

# variables to keep score (the original also kept an unused `dates` list;
# removed)
scores = []
for leaf in n_leaves:
    # loop for calculating error year by year
    RMSE7 = []
    RMSE_model = 0
    RMSE_dum = 0
    RMSE_diff = 0
    for yr in range(2016, 2019):
        # split date between test and train data set
        split_date = str(yr) + '-11-14'
        # end date of the test set at the end of winter
        end_date = str(yr + 1) + '-04-15'
        X7_train = X7[X7['ds'] <= split_date]
        X7_test = X7[(X7['ds'] <= end_date) & (X7['ds'] > split_date)]

        # Dummy model: constant training-set mean over the ski months.
        # BUG FIX: the frame is created fresh each year (reassigning columns
        # on a reused frame misaligns indexes across winters) and the month
        # filter uses integers -- Series.dt.month is int64, so the original
        # isin(['12', '01', ...]) never matched anything.
        X7_dum = pd.DataFrame()
        X7_dum['ds'] = X7_test['ds']
        X7_dum['y'] = X7_train[X7_train['ds'].dt.month.isin([12, 1, 2, 3, 4])]['y'].mean()

        # plain Prophet fit, then a Random Forest on its residuals
        m_fb = Prophet(yearly_seasonality=True, daily_seasonality=False,
                       weekly_seasonality=False)
        m_fb.fit(X7_train)
        forecast_train = m_fb.predict(X7_train)
        residual = X7_train['y'] - forecast_train['yhat']
        regr = RandomForestRegressor(max_leaf_nodes=leaf, random_state=0)
        regr.fit(X7_train.drop(['y', 'ds'], axis=1), residual)

        # combined test prediction: Prophet + RF residual correction
        forecast_test = m_fb.predict(X7_test)
        forecast_rf = regr.predict(X7_test.drop(['ds', 'y'], axis=1))
        forecast = forecast_test.copy()
        forecast['yhat'] = forecast['yhat'] + forecast_rf
        compare = forecast[(forecast['ds'] <= end_date) & (forecast['ds'] > split_date)]

        RMSE = np.sqrt(mean_squared_error(X7_test['y'], compare['yhat']))
        RMSE_mean = np.sqrt(mean_squared_error(X7_test['y'], X7_dum['y']))
        RMSE7.append([RMSE, RMSE_mean, RMSE - RMSE_mean])
        RMSE_model += RMSE
        RMSE_dum += RMSE_mean
        RMSE_diff += (RMSE - RMSE_mean)

    scores.append(RMSE_diff)
    print(leaf, RMSE_diff)
# -

# Results from a previous run (leaf count, summed RMSE difference vs dummy):
# 39 -9.185934399070401 <br>
# 24 -8.996266195405111 <br>
# 11 -8.48080108769526
# 44 -9.195394635418817
# 7 -8.095999210014266
# 64 -9.326012139825865
# 26 -9.028709155655164
# 10 -8.389041246775566
# 23 -8.974287983636987
# 31 -9.15934744223269

# plot the number of leaves versus the RMSE difference
plt.scatter(n_leaves, scores)
plt.xlabel('Number of Leaves')
# TYPO FIX: 'RSME' -> 'RMSE'
plt.ylabel('RMSE below dummy model')
plt.savefig('Prophet_RF_tune1.jpg')
plt.show()

# This is great to see.  I think 31 winds up being the "elbow" of this method.
# Let's use that value and see how things turn out in the model.

# # 7. Model with best hyperparameter for the RF Regressor
# We take the best hyperparameter from the loop (n_leaves = 31) and use that
# in the loop

# +
# manual loop for calculating error year by year.
# Let's use 31 leaves based on the results above.
# BUG FIX: the text settles on 31 leaves (twice), but the original code used
# max_leaf_nodes=39; 31 is used here to match the stated choice.
RMSE7 = []
dates = []
RMSE_model = 0
RMSE_dum = 0
RMSE_diff = 0
for yr in range(2016, 2019):
    # split date between test and train data set
    split_date = str(yr) + '-11-14'
    # end date of the test set at the end of winter
    end_date = str(yr + 1) + '-04-15'
    X7_train = X7[X7['ds'] <= split_date]
    X7_test = X7[(X7['ds'] <= end_date) & (X7['ds'] > split_date)]

    # Dummy model: constant training-set mean over the ski months.
    # BUG FIX: built fresh per winter (the original reused one frame, so all
    # stored references aliased/misaligned) and filtered with integer months
    # (string months never match the int64 Series.dt.month).
    X7_dum = pd.DataFrame()
    X7_dum['ds'] = X7_test['ds']
    X7_dum['y'] = X7_train[X7_train['ds'].dt.month.isin([12, 1, 2, 3, 4])]['y'].mean()

    # plain Prophet fit, then a Random Forest on its residuals
    m_fb = Prophet(yearly_seasonality=True, daily_seasonality=False,
                   weekly_seasonality=False)
    m_fb.fit(X7_train)
    forecast_train = m_fb.predict(X7_train)
    residual = X7_train['y'] - forecast_train['yhat']
    regr = RandomForestRegressor(max_leaf_nodes=31, random_state=0)
    regr.fit(X7_train.drop(['y', 'ds'], axis=1), residual)

    # combined test prediction: Prophet + RF residual correction
    forecast_test = m_fb.predict(X7_test)
    forecast_rf = regr.predict(X7_test.drop(['ds', 'y'], axis=1))
    forecast = forecast_test.copy()
    forecast['yhat'] = forecast['yhat'] + forecast_rf
    compare = forecast[(forecast['ds'] <= end_date) & (forecast['ds'] > split_date)]

    RMSE = np.sqrt(mean_squared_error(X7_test['y'], compare['yhat']))
    dates.append([compare, X7_test, X7_dum])
    RMSE_mean = np.sqrt(mean_squared_error(X7_test['y'], X7_dum['y']))
    RMSE7.append([RMSE, RMSE_mean, RMSE - RMSE_mean])
    RMSE_model += RMSE
    RMSE_dum += RMSE_mean
    RMSE_diff += (RMSE - RMSE_mean)

print(RMSE7)
# -

print("Model:", RMSE_model, "Dummy:", RMSE_dum, "Difference:", RMSE_diff)


def _winter_panel(result, title, fname=None):
    """Plot one winter: model line, observed scatter, optional dummy mean.

    `result` is [forecast-slice, test-frame] or [forecast-slice, test-frame,
    dummy-frame].  Positional indexing replaces the original hard-coded
    absolute row labels (20682, 21260, ...), which only worked for one
    specific data file.
    """
    compare, test = result[0], result[1]
    plt.plot(compare['ds'], compare['yhat'], color='green', label='Model')
    plt.scatter(test['ds'], test['y'], color='red', label='Data')
    if len(result) > 2:
        plt.hlines(result[2]['y'].iloc[0], xmin=test['ds'].iloc[0],
                   xmax=test['ds'].iloc[-1], color='blue', label='Mean')
    plt.legend()
    plt.title(title)
    if fname:
        plt.savefig(fname)
    plt.show()


# +
_winter_panel(dates[0], '2016-17 Winter')
_winter_panel(dates[1], '2017-18 Winter', fname='Prophet_RF_tune2.jpg')
_winter_panel(dates[2], '2018-19 Winter')
# -

# I was excited at first, but that model looks overfit.

X7_train.columns

# # 8. Loop to model for multiple hyperparameters for the RF Regressor
# without short-term predictors
# (Renumbered -- the original reused section number "7".)
# We are looking for a model that is not overfitting the data.  We'll write a
# loop to make this happen. <br>
# We are removing the temperatures since they can't be predicted further than
# the weather. <br>

# +
# random values to use in the iteration
import random

random.seed(33)
n_leaves = random.sample(range(5, 40), 10)

# drop the short-term temperature/precip features once -- this is invariant
# to both loops below (the original recomputed it every iteration)
X7_lg = X7.drop(['TMAX_7avg', 'TMIN_7avg', 'TOBS_7avg', 'TMAX_7avg_sq',
                 'PRCP'], axis=1)

# variables to keep score
scores = []
for leaf in n_leaves:
    # loop for calculating error year by year
    RMSE7 = []
    RMSE_model = 0
    RMSE_dum = 0
    RMSE_diff = 0
    for yr in range(2016, 2019):
        split_date = str(yr) + '-11-14'
        end_date = str(yr + 1) + '-04-15'
        X7_train = X7_lg[X7_lg['ds'] <= split_date]
        # BUG FIX: the second condition originally filtered on X7 instead of
        # X7_lg (same index so it happened to work, but it is fragile).
        X7_test = X7_lg[(X7_lg['ds'] <= end_date) & (X7_lg['ds'] > split_date)]

        # per-year dummy baseline with integer ski months (see fixes above)
        X7_dum = pd.DataFrame()
        X7_dum['ds'] = X7_test['ds']
        X7_dum['y'] = X7_train[X7_train['ds'].dt.month.isin([12, 1, 2, 3, 4])]['y'].mean()

        m_fb = Prophet(yearly_seasonality=True, daily_seasonality=False,
                       weekly_seasonality=False)
        m_fb.fit(X7_train)
        forecast_train = m_fb.predict(X7_train)
        residual = X7_train['y'] - forecast_train['yhat']
        regr = RandomForestRegressor(max_leaf_nodes=leaf, random_state=0)
        regr.fit(X7_train.drop(['ds', 'y'], axis=1), residual)

        forecast_test = m_fb.predict(X7_test)
        forecast_rf = regr.predict(X7_test.drop(['ds', 'y'], axis=1))
        forecast = forecast_test.copy()
        forecast['yhat'] = forecast['yhat'] + forecast_rf
        compare = forecast[(forecast['ds'] <= end_date) & (forecast['ds'] > split_date)]

        RMSE = np.sqrt(mean_squared_error(X7_test['y'], compare['yhat']))
        RMSE_mean = np.sqrt(mean_squared_error(X7_test['y'], X7_dum['y']))
        RMSE7.append([RMSE, RMSE_mean, RMSE - RMSE_mean])
        RMSE_model += RMSE
        RMSE_dum += RMSE_mean
        RMSE_diff += (RMSE - RMSE_mean)

    scores.append(RMSE_diff)
    print(leaf, RMSE_diff)
# -

# plot the number of leaves versus the RMSE difference
plt.scatter(n_leaves, scores)
plt.xlabel('Number of Leaves')
# TYPO FIX: 'RSME' -> 'RMSE'
plt.ylabel('RMSE below dummy model')
# BUG FIX: the original saved this scatter as 'Prophet_RF_tune3.jpg', the
# same name the first winter plot below uses, so one overwrote the other.
plt.savefig('Prophet_RF_tune3_scatter.jpg')
plt.show()

# Let's look at this model versus the actual snowfall for all 3 recent ski
# seasons.

# +
# loop for calculating error year by year with the tuned leaf count
RMSE_tuned = []
dates = []
for yr in range(2016, 2019):
    split_date = str(yr) + '-11-14'
    end_date = str(yr + 1) + '-04-15'
    X7_train = X7_lg[X7_lg['ds'] <= split_date]
    X7_test = X7_lg[(X7_lg['ds'] <= end_date) & (X7_lg['ds'] > split_date)]

    X7_dum = pd.DataFrame()
    X7_dum['ds'] = X7_test['ds']
    X7_dum['y'] = X7_train[X7_train['ds'].dt.month.isin([12, 1, 2, 3, 4])]['y'].mean()

    m_fb = Prophet(yearly_seasonality=True, daily_seasonality=False,
                   weekly_seasonality=False)
    m_fb.fit(X7_train)
    forecast_train = m_fb.predict(X7_train)
    residual = X7_train['y'] - forecast_train['yhat']
    regr = RandomForestRegressor(max_leaf_nodes=33, random_state=0)
    regr.fit(X7_train.drop(['ds', 'y'], axis=1), residual)

    forecast_test = m_fb.predict(X7_test)
    forecast_rf = regr.predict(X7_test.drop(['ds', 'y'], axis=1))
    forecast = forecast_test.copy()
    forecast['yhat'] = forecast['yhat'] + forecast_rf
    compare = forecast[(forecast['ds'] <= end_date) & (forecast['ds'] > split_date)]

    RMSE = np.sqrt(mean_squared_error(X7_test['y'], compare['yhat']))
    dates.append([compare, X7_test])
    RMSE_mean = np.sqrt(mean_squared_error(X7_test['y'], X7_dum['y']))
    RMSE_tuned.append([RMSE, RMSE_mean, RMSE - RMSE_mean])

print(RMSE_tuned)
# -

# +
# print the plots from those years of modeling (no dummy line here -- the
# tuned loop stores only [forecast, test])
_winter_panel(dates[0], '2016-17 Winter', fname='Prophet_RF_tune3.jpg')
_winter_panel(dates[1], '2017-18 Winter', fname='Prophet_RF_tune4.jpg')
_winter_panel(dates[2], '2018-19 Winter', fname='Prophet_RF_tune5.jpg')
# -

# # I will leave the Model Analysis here for now.
# Should I save any of the models?
#
NOTEBOOKS/3.0 - Capstone_Alta_Modeling.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernel_info:
#     name: python3
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Classification Cluster Part 3

# + outputHidden=false inputHidden=false
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

import warnings
warnings.filterwarnings("ignore")

# fix_yahoo_finance is used to fetch data
# NOTE(review): fix_yahoo_finance is the obsolete name of the yfinance
# package; in a current environment use `import yfinance as yf`.
import fix_yahoo_finance as yf
yf.pdr_override()

# + outputHidden=false inputHidden=false
# Ticker symbols keyed by company name.
stocks_dict = {
    'Advanced Micro Devices': 'AMD',
    'Amazon': 'AMZN',
    'Apple': 'AAPL',
    'Walgreen': 'WBA',
    'Northrop Grumman': 'NOC',
    'Boeing': 'BA',
    'Lockheed Martin': 'LMT',
    'McDonalds': 'MCD',
    'Intel': 'INTC',
    'Navistar': 'NAV',
    'IBM': 'IBM',
    'Texas Instruments': 'TXN',
    'MasterCard': 'MA',
    'Microsoft': 'MSFT',
    'General Electrics': 'GE',
    'Symantec': 'SYMC',
    'American Express': 'AXP',
    'Pepsi': 'PEP',
    'Coca Cola': 'KO',
    'Johnson & Johnson': 'JNJ',
    'Toyota': 'TM',
    'Honda': 'HMC',
    'Mistubishi': 'MSBHY',
    'Sony': 'SNE',
    'Exxon': 'XOM',
    'Chevron': 'CVX',
    'Valero Energy': 'VLO',
    'Ford': 'F',
    'Bank of America': 'BAC',
    'Petrobras': 'PBR',
    'Vale': 'VALE'}

# + outputHidden=false inputHidden=false
# Split dict into list of companies and symbols
names, symbols = np.array(list(stocks_dict.items())).T

# + outputHidden=false inputHidden=false
start = '2017-01-01'
end = '2017-12-31'

print("Downloading values for period %s to %s:" % (start, end))
dataset = [yf.download(symbol, start, end) for symbol in symbols]
print("Done!")

# + outputHidden=false inputHidden=false
# BUG FIX: `dataset` is a list, so the original `np.vstack([dataset.index])`
# raised AttributeError; stack the index of each downloaded frame instead.
market_dates = np.vstack([d.index for d in dataset])
# BUG FIX: np.float was deprecated and removed in NumPy 1.24; the builtin
# float is the exact equivalent.
open_price = np.array([p["Open"] for p in dataset]).astype(float)
high_price = np.array([p["High"] for p in dataset]).astype(float)
low_price = np.array([p["Low"] for p in dataset]).astype(float)
close_price = np.array([p["Adj Close"] for p in dataset]).astype(float)
volume_price = np.array([p["Volume"] for p in dataset]).astype(float)

# + outputHidden=false inputHidden=false
# Calculate percent change
X = (close_price - open_price) / open_price

# + outputHidden=false inputHidden=false
# The daily variations of the quotes are what carry most information
variation = close_price - open_price

# + outputHidden=false inputHidden=false
from sklearn import cluster, covariance, manifold

# Learn a graphical structure from the correlations
# NOTE(review): GraphLassoCV was renamed GraphicalLassoCV in scikit-learn
# 0.22; keep the old name only if the pinned environment requires it.
edge_model = covariance.GraphLassoCV()

# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)

# + outputHidden=false inputHidden=false
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()

for i in range(n_labels + 1):
    print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))

# + outputHidden=false inputHidden=false
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
# Locally-linear embedding places each stock on a 2-D plane for plotting.
node_position_model = manifold.LocallyLinearEmbedding(
    n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T

# + outputHidden=false inputHidden=false
# #############################################################################
import matplotlib.cm as cm
# BUG FIX: LineCollection was used below but never imported anywhere in this
# notebook, raising NameError.
from matplotlib.collections import LineCollection

cmap = cm.get_cmap("Spectral")

# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')

# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)

# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels, cmap=cmap)

# Plot the edges
start_idx, end_idx = np.where(non_zero)
# a sequence of (*line0*, *line1*, *line2*), where::
#   linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
            for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments, zorder=0, cmap=plt.cm.hot_r,
                    norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)

# Add a label to each node.  The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
        zip(names, labels, embedding.T)):

    dx = x - embedding[0]
    dx[index] = 1
    dy = y - embedding[1]
    dy[index] = 1
    this_dx = dx[np.argmin(np.abs(dy))]
    this_dy = dy[np.argmin(np.abs(dx))]
    # nudge the label away from the nearest neighbour
    if this_dx > 0:
        horizontalalignment = 'left'
        x = x + .002
    else:
        horizontalalignment = 'right'
        x = x - .002
    if this_dy > 0:
        verticalalignment = 'bottom'
        y = y + .002
    else:
        verticalalignment = 'top'
        y = y - .002
    plt.text(x, y, name, size=10,
             horizontalalignment=horizontalalignment,
             verticalalignment=verticalalignment,
             bbox=dict(facecolor='w',
                       edgecolor=cmap(label / float(n_labels)),
                       alpha=.6))

plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
         embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
         embedding[1].max() + .03 * embedding[1].ptp())

plt.show()
Stock_Algorithms/Classification_Cluster_3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <a href="https://colab.research.google.com/github/jgraving/deepposekit/blob/master/examples/step1_create_annotation_set.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# # DeepPoseKit Step 1 - Create an annotation set
#
# This is step 1 of the example notebooks for using DeepPoseKit. This notebook shows you how to load and sample images from a video, define a keypoint skeleton, and save the data to a file for labelling with keypoints.
#
#
# **NOTE**: If you run into problems, you can help us improve DeepPoseKit by [opening an issue](https://github.com/jgraving/deepposekit/issues/new) or [submitting a pull request](https://help.github.com/en/articles/creating-a-pull-request-from-a-fork)

# If you haven't already installed DeepPoseKit you can run the next cell

import sys
# !{sys.executable} -m pip install -U deepposekit

# +
import numpy as np
import cv2
import h5py
import matplotlib.pyplot as plt

from deepposekit.io import VideoReader, DataGenerator, initialize_dataset
from deepposekit.annotate import KMeansSampler
import tqdm
import glob

import pandas as pd
import os
from os.path import expanduser

# Detect whether we are running inside Google Colab (changes where the
# example data would live).
try:
    import google.colab
    IN_COLAB = True
except:
    IN_COLAB = False
# -

# Use the next cell to download the example data into your home directory

# +
# HOME = expanduser("~") if not IN_COLAB else '.'
# HOME
# -

# Local copy of the annotation data (Windows-style path).
HOME = 'E:\\Work\\github repos\\deepposekit_data_custom'
HOME

# +
# #!git clone https://github.com/jgraving/deepposekit-data {HOME + '/deepposekit-data'}
# -

# # A note on image resolutions
#
# Currently DeepPoseKit only supports image resolutions that can be repeatedly divided by 2. For example, all of these values are valid image resolutions for either height or width:

# +
# Small-prime multiples of powers of two are all valid sizes.
exp = np.arange(1,12)
exp = 2**exp
print(1*exp)
print(3*exp)
print(5*exp)
print(7*exp)
print(11*exp)
# -

# Images that do not match these resolutions must be manually resized, cropped, or padded. We are working to add automated image size adjustments.

# # Open a video
# The `VideoReader` class allows you to load in single video frames or batches of frames from nearly any video format.

# +
# videos = glob.glob(HOME + '/deepposekit-data/datasets/fly/*.avi')
# videos
# -

videos = glob.glob(HOME + '\\human\\*.mp4')
videos

video_file_path = '\\human\\'
video_file_name = 'pose_human_test.mp4'

reader = VideoReader(HOME + video_file_path + video_file_name, gray=True)
frame = reader[0] # read a frame
reader.close()

frame.shape

plt.figure(figsize=(5,5))
plt.imshow(frame[0,...,0])
plt.show()

# # Sample video frames
# This loads batches of 100 frames from the video, and then randomly samples frames from the batches to hold them in memory. You can use any method for sampling frames.

# +
reader = VideoReader(HOME + video_file_path+ video_file_name, batch_size=100, gray=True)

randomly_sampled_frames = []
# NOTE(review): the final batch is skipped (len(reader)-1) -- presumably
# because a short last batch could hold fewer than the 10 frames sampled
# below, which would make np.random.choice(..., replace=False) fail; confirm
# this is intentional.
for idx in tqdm.tqdm(range(len(reader)-1)):
    batch = reader[idx]
    random_sample = batch[np.random.choice(batch.shape[0], 10, replace=False)]
    randomly_sampled_frames.append(random_sample)
reader.close()

randomly_sampled_frames = np.concatenate(randomly_sampled_frames)
randomly_sampled_frames.shape
# -

# # Apply k-means to reduce correlation
#
# This applies the k-means algorithm to the images using `KMeansSampler` to even out sampling across the distribution of images and reduce correlation within the annotation set.
kmeans = KMeansSampler(n_clusters=10, max_iter=1000, n_init=10, batch_size=100, verbose=True) kmeans.fit(randomly_sampled_frames) kmeans.plot_centers(n_rows=2) plt.show() kmeans_sampled_frames, kmeans_cluster_labels = kmeans.sample_data(randomly_sampled_frames, n_samples_per_label=10) kmeans_sampled_frames.shape # # Define a keypoints skeleton file # You must create a .xlsx or .csv file with keypoint names, parent relationships, and swapping relationships for bilaterally symmetric parts (only relevant if using flipping augmentations). If you leave out the `parent` and `swap` columns, then these will simply not be used for annotating data and training the model. # # See example skeleton.csv files for more details # skeleton = pd.read_csv(HOME + video_file_path + 'skeleton.csv') skeleton # # Initialize a new data set for annotations # # You can use any method for sampling images to create a numpy array with the shape (n_images, height, width, channels) and then initialize an annotation set. Check the doc string for more details: # + # initialize_dataset? # - initialize_dataset( images=kmeans_sampled_frames, datapath=HOME + video_file_path + 'example_annotation_set.h5', skeleton=HOME + video_file_path + 'skeleton.csv', # overwrite=True # This overwrites the existing datapath ) # # Create a data generator # This creates a `DataGenerator` for loading annotated data. Indexing the generator returns an image-keypoints pair, which you can then visualize. Right now all the keypoints are set to zero, because they haven't been annotated. # # You can also look at the doc string for more explanation: # + # DataGenerator? 
# + data_generator = DataGenerator(HOME + video_file_path + 'example_annotation_set.h5', mode="full") image, keypoints = data_generator[0] plt.figure(figsize=(5,5)) image = image[0] if image.shape[-1] is 3 else image[0, ..., 0] cmap = None if image.shape[-1] is 3 else 'gray' plt.imshow(image, cmap=cmap, interpolation='none') for idx, jdx in enumerate(data_generator.graph): if jdx > -1: plt.plot( [keypoints[0, idx, 0], keypoints[0, jdx, 0]], [keypoints[0, idx, 1], keypoints[0, jdx, 1]], 'r-' ) plt.scatter(keypoints[0, :, 0], keypoints[0, :, 1], c=np.arange(data_generator.keypoints_shape[0]), s=50, cmap=plt.cm.hsv, zorder=3) plt.show() # -
examples/step1_create_annotation_set.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.7.5 64-bit # name: python37564bitedcdccb018dd4367a14589b436323787 # --- import numpy as np rand = np.random.RandomState(101) data = np.arange(12).reshape(3,4) data.shape data data[:] import pandas as pd series = pd.Series([0.25,0.5,0.75,1.0],index=['a','b','c','d']) series series['c'] series.keys list(series.items()) series['e']=1.25 series['a':'c'] series[1:3] series.loc['a'] series.loc['a':'d'] series.iloc[1] series.iloc[1:3] # + area = pd.Series({'California': 423967, 'Texas': 695662, 'New York': 141297, 'Florida': 170312, 'Illinois': 149995}) pop = pd.Series({'California': 38332521, 'Texas': 26448193, 'New York': 19651127, 'Florida': 19552860, 'Illinois': 12882135}) dataframe = pd.DataFrame({'area':area,'pop':pop}) # - dataframe['pop'] dataframe['density'] = dataframe['pop']/dataframe['area'] dataframe dataframe.values dataframe.T dataframe.iloc[:3,:2] dataframe.loc[:'New York',:'pop'] dataframe.loc[dataframe.density>100,['pop','density']] #it will add another column name (0,2) dataframe[0,2]=90 # we drop that unwanted column and inplace it. 
# Drop the accidental tuple-named column added above (inplace mutates dataframe).
dataframe.drop(columns=[(0, 2)], inplace=True)
# now this is what I really want to do: set a single cell by position
dataframe.iloc[0, 2] = 90

ser = pd.Series(rand.randint(0, 10, 4))
ser
df = pd.DataFrame(rand.randint(0, 10, (3, 4)), columns=['a', 'b', 'c', 'd'])
df

# NumPy ufuncs preserve pandas index/column labels.
np.exp(ser)
np.sin(df * np.pi / 4)

area = pd.Series({'Alaska': 1723337, 'Texas': 695662,
                  'California': 423967}, name='area')
population = pd.Series({'California': 38332521, 'Texas': 26448193,
                        'New York': 19651127}, name='population')
population / area  # indices are aligned; non-matching labels become NaN

# BUG FIX: `|` and `&` between Index objects as set operations were deprecated
# (pandas 0.25) and changed meaning in pandas >= 2.0, where they are elementwise
# logical operators. Use the explicit set methods instead.
population.index.union(area.index)
population.index.intersection(area.index)

A = pd.Series([2, 3, 4], index=[0, 1, 2])
B = pd.Series([3, 4, 5], index=[1, 2, 3])
A + B                   # unmatched labels yield NaN
A.add(B, fill_value=0)  # treat missing entries as 0 instead

A = pd.DataFrame(rand.randint(0, 10, (2, 2)), columns=list('AB'))
A
B = pd.DataFrame(rand.randint(0, 20, (3, 3)), columns=list('BAC'))
B
A + B

# filling the mean value of A
# First stacking row of A and then get the mean of it.
fill = A.stack().mean()
# Apply this fill in fill_values
A.add(B, fill_value=fill)

A = rand.randint(10, size=(3, 4))
A
df = pd.DataFrame(A, columns=list('QRST'))
df

# Row-wise subtraction
df - df.iloc[0]
# Column-wise substraction
df.subtract(df['R'], axis=0)

halfrow = df.iloc[0, ::2]
halfrow
df - halfrow
Code/Day61-ipythonExample.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np from astropy.io import fits from astropy.io.fits import HDUList from astropy.wcs import WCS from numpy import arcsinh import matplotlib.pyplot as plt from astropy.visualization import astropy_mpl_style from reproject import reproject_interp import aplpy from aplpy.rgb import make_rgb_cube import matplotlib.pyplot as pyplot from PIL import Image plt.rcParams.update(plt.rcParamsDefault) # %ls -l data/ fits_files = ['./data/frame-i-000756-2-0427.fits', './data/frame-r-000756-2-0427.fits', './data/frame-g-000756-2-0427.fits'] output = './data/rgb-000756-2-0427.fits' make_rgb_cube(fits_files, output) # + # Make an RGB image aplpy.make_rgb_image('rgb-000756-2-0427.fits', 'rgb-000756-2-0427.png', stretch_r='arcsinh', stretch_g='arcsinh', stretch_b='arcsinh') # Plot the RGB image using the 2d image to indicate the projection f = aplpy.FITSFigure('rgb-000756-2-0427_2d.fits') f.show_rgb('rgb-000756-2-0427.png') # - twod_fits = fits.open('rgb-000756-2-0427_2d.fits') twod_fits[0].header # %ls -l a = fits.open('rgb-000756-2-0427.fits') a_hdu = a[0] a_header = a_hdu.header a_data = a_hdu.data a_header a_header['NAXIS'] = 2 # + #a_data = np.nan_to_num(a_data + a_data.min()) - a_data.min() # a_data[0] = convolve(a_data[0], kernel) # a_data[1] = convolve(a_data[1], kernel) # a_data[2] = convolve(a_data[2], kernel) # - n, x_shape, y_shape = a_data.shape # + def modify_image(data, gains=[0.9,1.1,1.8], gamma=0.01): R_IDX = 0 G_IDX = 1 B_IDX = 2 r = data[:,:,R_IDX].copy() g = data[:,:,G_IDX].copy() b = data[:,:,B_IDX].copy() r = (r * gains[R_IDX]).astype(np.uint8) g = (g * gains[G_IDX]).astype(np.uint8) b = (b * gains[B_IDX]).astype(np.uint8) r += (gamma * (r - g)).astype(np.uint8) b += (gamma * (b - g)).astype(np.uint8) r[r < 0] = 0 r[r > 255] = 255 
g[g < 0] = 0 g[g > 255] = 255 b[b < 0] = 0 b[b > 255] = 255 result = np.empty(data.shape, dtype=np.uint8) result[:,:,0] = r result[:,:,1] = g result[:,:,2] = b return result def scale_rgb(data, sigma=10, min=0, max=1, gains=[0.9,1.1,1.8], gamma=0.1): R_IDX = 0 G_IDX = 1 B_IDX = 2 if min < 0: data = data - min max = max - min min = 0 r = data[R_IDX].copy() g = data[G_IDX].copy() b = data[B_IDX].copy() slope = 255 / arcsinh((max - min)/sigma) mean = (r + g + b)/3 mean[mean < min] = 0 r[mean == 0] = 0 g[mean == 0] = 0 b[mean == 0] = 0 scale = slope * arcsinh((mean - min) / sigma) / mean r = (r * scale).astype(int) g = (g * scale).astype(int) b = (b * scale).astype(int) r = (r * gains[R_IDX]).astype(int) g = (g * gains[G_IDX]).astype(int) b = (b * gains[B_IDX]).astype(int) r += (gamma * (r - g)).astype(int) b += (gamma * (b - g)).astype(int) r[r < 0] = 0 r[r > 255] = 255 g[g < 0] = 0 g[g > 255] = 255 b[b < 0] = 0 b[b > 255] = 255 result = np.empty(data.shape, dtype=np.uint8) result[0] = r result[1] = g result[2] = b return result # - a_data.min() a_data.max() a_data.shape # + nan_data = a_data.copy() nan_data[np.logical_not(np.isnan(nan_data))] = 0 nan_data[np.isnan(nan_data)] = 1 #print(nan_data.shape) output = np.empty((2050,1503,3)) output[:,:,0] = nan_data[0] output[:,:,1] = nan_data[1] output[:,:,2] = nan_data[2] # + #r, g, b = scale_rgb(a_data/a_data.max(), min=0, max=1) # - r = a_data[0] g = a_data[1] b = a_data[2] r.shape rgb_default = make_lupton_rgb(r, g, b, filename='rgb-000756-2-0427.png', Q=8, stretch=0.4) plt.figure(figsize=(20,40)) plt.grid() plt.imshow(rgb_default, origin='lower') # + def cutout(data, wcs, ra, dec, x_size=100, y_size=100): x_centre, y_centre = world2pix(ra, dec, wcs) x_top = int(round(x_centre) - x_size/2) y_top = int(round(y_centre) - y_size/2) x_bottom = x_top + x_size y_bottom = y_top + y_size return data[y_top:y_bottom, x_top:x_bottom] def world2pix(ra, dec, wcs): coords = np.array([[ra, dec]]) location = wcs.wcs_world2pix(coords, 
0, ra_dec_order=True) return location[0][0], location[0][1] # + r_cutout = cutout(r, WCS(a_header), 179.689293428, -0.454379058, x_size=224, y_size=224) g_cutout = cutout(g, WCS(a_header), 179.689293428, -0.454379058, x_size=224, y_size=224) b_cutout = cutout(b, WCS(a_header), 179.689293428, -0.454379058, x_size=224, y_size=224) r_cutout[r_cutout < 0] = 0 g_cutout[g_cutout < 0] = 0 b_cutout[b_cutout < 0] = 0 vmax = max(r.max(), g.max(), b.max()) # - rgb_cutout = make_lupton_rgb(r_cutout, g_cutout, b_cutout, filename='rgb-cutout-000756-2-0427.png', Q=8, stretch=0.5) #plt.figure(figsize=(5,5)) plt.grid(False) plt.imshow(rgb_cutout, origin='lower') # + from astropy.convolution import Gaussian2DKernel from astropy.convolution import convolve kernel = Gaussian2DKernel(x_stddev=0.3) rgb_convoled = rgb_cutout.copy() rgb_convoled[:,:,0] = convolve(rgb_cutout[:,:,0], kernel) rgb_convoled[:,:,1] = convolve(rgb_cutout[:,:,1], kernel) rgb_convoled[:,:,2] = convolve(rgb_cutout[:,:,2], kernel) #plt.figure(figsize=(5,5)) plt.grid(False) plt.imshow(rgb_convoled, origin='lower') # - (rgb_cutout[:,:,1]).shape from astropy.coordinates import SkyCoord import astropy.units as u from astropy.nddata import Cutout2D position = SkyCoord(ra=179.689293428*u.deg, dec=-0.454379058*u.deg, frame='icrs') cutout_size = u.Quantity((25,25), u.arcsec) r_cutout_1 = Cutout2D(r,position=position,size=cutout_size, wcs=WCS(twod_fits[0].header)) g_cutout_1 = Cutout2D(g,position=position,size=cutout_size, wcs=WCS(twod_fits[0].header)) b_cutout_1 = Cutout2D(b,position=position,size=cutout_size, wcs=WCS(twod_fits[0].header)) # + # r_cutout_data = convolve(r_cutout_1.data, kernel) # g_cutout_data = convolve(g_cutout_1.data, kernel) # b_cutout_data = convolve(b_cutout_1.data, kernel) r_cutout_data = r_cutout_1.data g_cutout_data = g_cutout_1.data b_cutout_data = b_cutout_1.data r_cutout_data[r_cutout_data < 0] = 0 g_cutout_data[g_cutout_data < 0] = 0 b_cutout_data[b_cutout_data < 0] = 0 rgb = np.empty((3, 
r_cutout_data.shape[0], r_cutout_data.shape[1])) print(rgb.shape) rgb[0] = r_cutout_data rgb[1] = g_cutout_data rgb[2] = b_cutout_data #vmax = rgb.max() #modified_cutout = scale_rgb(rgb,sigma=1/10,max=vmax) modified_cutout = make_lupton_rgb(rgb[0]/vmax,rgb[1]/vmax,rgb[2]/vmax, filename='rgb-cutout-000756-2-0427.png', Q=0.00001, stretch=0.2) # rgb_cutout = make_lupton_rgb(r_cutout_data/vmax, g_cutout_data/vmax, b_cutout_data/vmax, filename='rgb-cutout-000756-2-0427.png', Q=10, stretch=0.2) # modified_cutout[:,:,0] = convolve(modified_cutout[:,:,0], kernel) # modified_cutout[:,:,1] = convolve(modified_cutout[:,:,1], kernel) # modified_cutout[:,:,2] = convolve(modified_cutout[:,:,2], kernel) plt.figure(figsize=(5,5)) plt.grid(False) plt.imshow(modified_cutout, origin='lower') # + from PIL import Image from PIL import ImageFilter #modified_cutout.dtype rescaled_image = Image.fromarray(modified_cutout).resize((512,512), Image.BICUBIC).transpose(Image.FLIP_TOP_BOTTOM) rescaled_image = rescaled_image.filter(ImageFilter.GaussianBlur()) plt.figure(figsize=(10,10)) plt.grid(False) plt.imshow(rescaled_image) # - rescaled_image.save('rescaled_cutout.jpeg') vmax wcs = WCS(a_header) ra, dec, petroRad = (179.686410, -0.602918, 3.719159) # + import numpy as np petroRad = 3.719159 nRp = 5 # 2 x 3Rp. 
2Rp can still miss some of elliptical galaxies minScaling = 0.9 rotationFactor = np.sqrt(2) angularsize = nRp * rotationFactor / minScaling * petroRad angularsize # - from astropy.nddata import Cutout2D from astropy.coordinates import SkyCoord, ICRS import astropy.units as u from astropy.units import Quantity coords = SkyCoord(ra=ra, dec=dec, frame=ICRS, unit=u.deg) size = Quantity(angularsize, unit=u.arcsec) cutout_r = Cutout2D(r, position=coords, size=size, wcs=wcs).data cutout_g = Cutout2D(g, position=coords, size=size, wcs=wcs).data cutout_b = Cutout2D(b, position=coords, size=size, wcs=wcs).data cutout = np.empty((3, cutout_r.shape[0], cutout_r.shape[1])) cutout[0] = cutout_r cutout[1] = cutout_g cutout[2] = cutout_b # + scaled_data = scale_rgb(cutout, max=np.max(cutout), sigma=1/2.5) plt.imshow(np.moveaxis(scaled_data, 0, -1), interpolation='bicubic', origin='lower') # - def resize(data, size): output = np.empty((3, size, size)) output[0] = np.array(Image.fromarray(data[0]).resize((size, size), Image.BICUBIC)) output[1] = np.array(Image.fromarray(data[1]).resize((size, size), Image.BICUBIC)) output[2] = np.array(Image.fromarray(data[2]).resize((size, size), Image.BICUBIC)) return output img64 = resize(cutout, 64) img64.shape # + scaled_img64 = scale_rgb(img64, max=np.max(img64), sigma=1/2.5) plt.imshow(np.moveaxis(scaled_img64, 0, -1), interpolation='bicubic', origin='lower') # - resultant_cutoutsize = int(cutout_b.shape[0] / rotationFactor) resultant_cutoutsize cutout.shape # + cutout_size = cutout_b.shape[0] start = int((cutout_size - resultant_cutoutsize) / 2) end = start + resultant_cutoutsize imgRes = cutout[:,start:end,start:end] #imgRes = resize(cutout, resultant_cutoutsize) # scale_rgb(cutout, min=np.min(cutout), max=np.max(cutout), sigma=1/2.5) scaled_imgRes = scale_rgb(imgRes, max=np.max(imgRes), sigma=1/2.5) print(np.max(scaled_imgRes)) resized_imgRes = resize(scaled_imgRes/255.0, 224) plt.imshow(np.moveaxis(resized_imgRes, 0, -1), 
interpolation='bicubic', origin='lower') # -
notebooks/Cutout Scratchpad.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Move the cells I used to calculate the tides from "Transport" so just read them in there

# +
import arrow
import datetime
import matplotlib.pyplot as plt
import os
import pandas as pd
import xarray as xr

# %matplotlib inline
# -

basedir = '/results2/SalishSea/nowcast-green.201806/'

# Grid point (y, x) and depth level at which to extract the u velocity.
iY = 344; iX = 276; iZ = 10

year = 2018
start = datetime.datetime(year, 1, 1)
endtime = datetime.datetime(year, 12, 31)
timerange = arrow.Arrow.range('day', start, endtime)

# Build a year-long hourly u-velocity time series at one grid point by
# concatenating the daily result files.
for i, day in enumerate(timerange):
    dir1 = day.format('DDMMMYY').lower()
    ymd = day.format('YYYYMMDD')
    filename = 'SalishSea_1h_' + ymd + '_' + ymd + '_grid_U.nc'
    fullfile = os.path.join(basedir, dir1, filename)
    u_vel = xr.open_dataset(fullfile)
    velocity = u_vel['vozocrtx'].isel(y=iY, x=iX).sel(depthu=iZ, method='nearest')
    u_vel.close()
    if i == 0:
        velocity_year = velocity.copy(deep=True)
        velocity.close()
    else:
        velocity_year = xr.concat([velocity_year, velocity], dim='time_counter')
        velocity.close()
    if i % 10 == 0:
        print(i)  # progress indicator: one line every 10 days

velocity_year.plot()

velocity_year.to_netcdf('velocity_2018.nc')

# ## Now Low Pass Filter the Velocities ##

velocity2015 = xr.open_dataset('velocity_2015.nc')
velocity2016 = xr.open_dataset('velocity_2016.nc')
velocity2017 = xr.open_dataset('velocity_2017.nc')
velocity2018 = xr.open_dataset('velocity_2018.nc')

velocity = xr.concat([velocity2015, velocity2016, velocity2017, velocity2018],
                     dim='time_counter')
velocity

velocity.vozocrtx.plot();

# Squared velocity is proportional to the tidal kinetic energy.
velsquared = velocity * velocity
velsquared

day_avg_tide_vel = velsquared.resample(time_counter='1D').mean()
day_avg_tide_vel.vozocrtx.plot();

day_avg_tide_pd = day_avg_tide_vel.to_dataframe()
# BUG FIX: DataFrame.drop with a positional axis argument (e.g. .drop('depthu', 1))
# was deprecated and removed in pandas 2.0; drop all three coordinate columns
# explicitly by name instead.
day_avg_tide_pd = day_avg_tide_pd.drop(columns=['depthu', 'nav_lat', 'nav_lon'])

day_avg_tide_pd.to_csv('day_avg_tide_pd.csv')

day_avg_tide_pd.plot()

# A 4-day centered rolling mean acts as a simple low-pass filter on the
# daily-averaged tidal signal.
low_pass_tide = day_avg_tide_pd.rolling(4, center=True).mean()
low_pass_tide.to_csv('low_pass_tide.csv')
low_pass_tide.plot()

velocity2015.close()
velocity2016.close()
velocity2017.close()
velocity2018.close()
notebooks/Ariane/CalculateTides.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] tags=[] # ## DATA 245 Fall 2021 Project # Group 2: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # + from mpl_toolkits.mplot3d import Axes3D from sklearn.preprocessing import StandardScaler import skimage from skimage import transform import cv2 import time import os import matplotlib.pyplot as plt import numpy as np import pandas as pd import os from glob import glob import seaborn as sns from PIL import Image from tqdm import tqdm # - # ### Import Data # + # endpoints (Canaan) folder_benign_train = '../245 Project Testin/dataset1/train/benign' folder_malignant_train = '../245 Project Testin/dataset1/train/malignant' folder_benign_test = '../245 Project Testin/dataset1/test/benign' folder_malignant_test = '../245 Project Testin/dataset1/test/malignant' # + active="" # # endpoints (Rick) # folder_benign_train = 'data/train/benign' # folder_malignant_train = 'data/train/malignant' # # folder_benign_test = 'data/test/benign' # folder_malignant_test = 'data/test/malignant' # + img_size = 200 read = lambda imname: np.asarray(Image.open(imname).convert("RGB")) # Load in training pictures ims_benign = [read(os.path.join(folder_benign_train, filename)) for filename in os.listdir(folder_benign_train)] X_benign = np.array(ims_benign, dtype='uint8') ims_malignant = [read(os.path.join(folder_malignant_train, filename)) for filename in os.listdir(folder_malignant_train)] X_malignant = np.array(ims_malignant, dtype='uint8') # Load in testing pictures ims_benign = [read(os.path.join(folder_benign_test, filename)) for filename in os.listdir(folder_benign_test)] X_benign_test = np.array(ims_benign, dtype='uint8') ims_malignant = [read(os.path.join(folder_malignant_test, filename)) for filename in os.listdir(folder_malignant_test)] X_malignant_test 
= np.array(ims_malignant, dtype='uint8') # - # ### Preprocess Images # + img_size = (200, 200) ## Resize function def _resize_image(image, target): image = cv2.resize(image, dsize=(target[0], target[1]), interpolation=cv2.INTER_LINEAR) image = cv2.GaussianBlur(image, (5,5),cv2.BORDER_DEFAULT) #image = cv2.blur(image, (50,50),cv2.BORDER_DEFAULT) return image image = [_resize_image(image=i, target=img_size) for i in X_benign] X_benign = np.stack(image, axis=0) image = [_resize_image(image=i, target=img_size) for i in X_malignant] X_malignant = np.stack(image, axis=0) image = [_resize_image(image=i, target=img_size) for i in X_benign_test] X_benign_test = np.stack(image, axis=0) image = [_resize_image(image=i, target=img_size) for i in X_malignant_test] X_malignant_test = np.stack(image, axis=0) # - X_benign.shape X_benign # ### Split into Train and Test # + # Create labels y_benign = np.zeros(X_benign.shape[0]) y_malignant = np.ones(X_malignant.shape[0]) y_benign_test = np.zeros(X_benign_test.shape[0]) y_malignant_test = np.ones(X_malignant_test.shape[0]) # Merge data X_train = np.concatenate((X_benign, X_malignant), axis = 0) y_train = np.concatenate((y_benign, y_malignant), axis = 0) X_test = np.concatenate((X_benign_test, X_malignant_test), axis = 0) y_test = np.concatenate((y_benign_test, y_malignant_test), axis = 0) # - # ### Explore Data # + # Shuffle data s = np.arange(X_train.shape[0]) np.random.shuffle(s) X_train = X_train[s] y_train = y_train[s] s = np.arange(X_test.shape[0]) np.random.shuffle(s) X_test = X_test[s] y_test = y_test[s] # Display first 15 images of moles, and how they are classified w=40 h=30 fig=plt.figure(figsize=(12, 8)) columns = 5 rows = 3 for i in range(1, columns*rows +1): ax = fig.add_subplot(rows, columns, i) if y_train[i] == 0: ax.title.set_text('Benign') else: ax.title.set_text('Malignant') plt.imshow(X_train[i], interpolation='nearest') plt.show() plt.bar(0, y_train[np.where(y_train == 0)].shape[0], label = 'benign') plt.bar(1, 
y_train[np.where(y_train == 1)].shape[0], label = 'malignant') plt.legend() plt.title("Training Data") plt.show() plt.bar(0, y_test[np.where(y_test == 0)].shape[0], label = 'benign') plt.bar(1, y_test[np.where(y_test == 1)].shape[0], label = 'malignant') plt.legend() plt.title("Test Data") plt.show() X_train = X_train/255. X_test = X_test/255. # - # ### SVM # + # support vector machine classifier #This is only a simple demostration if we apply any models on the training dataset from sklearn.svm import SVC start_SVC = time.time() model = SVC() model.fit(X_train.reshape(X_train.shape[0],-1), y_train) stop_SVC = time.time() train_time_SVC = stop_SVC - start_SVC from sklearn.metrics import accuracy_score, classification_report y_pred = model.predict(X_test.reshape(X_test.shape[0],-1)) svm = accuracy_score(y_test, y_pred) print(f'Accuracy Score: {svm:.3f}') print() print(classification_report(y_test, y_pred)) print(f"Training time: {stop_SVC - start_SVC}s") # - # ### KNN # + # KNN classifier from sklearn.neighbors import KNeighborsClassifier knn_model = KNeighborsClassifier(n_neighbors=3) start_KNN = time.time() knn_model.fit(X_train.reshape(X_train.shape[0],-1), y_train) stop_KNN = time.time() train_time_KNN = stop_KNN - start_KNN from sklearn.metrics import accuracy_score y_pred = knn_model.predict(X_test.reshape(X_test.shape[0],-1)) knn = accuracy_score(y_test, y_pred) print(f'Accuracy Score: {knn:.3f}') print(f"Training time: {stop_KNN - start_KNN}s") # - ## tune best k accus = [] ks = list(range(2,20)) for k in ks: knn_model = KNeighborsClassifier(n_neighbors=k) knn_model.fit(X_train.reshape(X_train.shape[0],-1), y_train) y_pred = knn_model.predict(X_test.reshape(X_test.shape[0],-1)) accus.append(accuracy_score(y_test, y_pred)) fig = plt.figure() plt.plot(ks, accus) plt.xlabel('k in kNN') plt.ylabel('Accuracy') fig.suptitle('kNN hyperparameter (k) tuning', fontsize=20) # ### Import Metrics Libraries from sklearn.metrics import confusion_matrix, 
classification_report from sklearn.metrics import accuracy_score, recall_score from sklearn.metrics import roc_curve from sklearn.metrics import plot_confusion_matrix # ### Confusion Matrix confmat = pd.DataFrame(confusion_matrix(y_test, y_pred), index =['True[0]','True[1]'], columns=['Predict[0]','Predict[1]']) confmat plot_confusion_matrix(knn_model, X_test.reshape(X_test.shape[0],-1), y_test, cmap=plt.cm.Blues) plt.title('Confustion Matrix') plt.show() # ### ROC Curve import scikitplot as skplt skplt.metrics.plot_roc(y_test, knn_model.predict_proba(X_test.reshape(X_test.shape[0],-1)), plot_micro=False) plt.show() target_names = ['benign', 'malignant'] print(classification_report(y_test, y_pred, target_names=target_names)) # ### Decision Tree / Random Forest # + from sklearn.ensemble import RandomForestClassifier forest = RandomForestClassifier(n_estimators=100, random_state=0) start_RF = time.time() forest.fit(X_train.reshape(X_train.shape[0],-1), y_train) stop_RF = time.time() train_time_RF = stop_RF - start_RF y_pred_rf = forest.predict(X_test.reshape(X_test.shape[0],-1)) rf = accuracy_score(y_test, y_pred_rf) print('Accuracy on the test set:{:.3f}'.format(rf)) print(f"Training time: {stop_RF - start_RF}s") # + from sklearn.tree import DecisionTreeClassifier dt = DecisionTreeClassifier(random_state=0) start_DT = time.time() dt.fit(X_train.reshape(X_train.shape[0],-1), y_train) stop_DT = time.time() train_time_DT = stop_DT - start_DT y_pred_dt = dt.predict(X_test.reshape(X_test.shape[0],-1)) tree = accuracy_score(y_test, y_pred_dt) print('Accuracy on the test set:{:.3f}'.format(tree)) print(f"Training time: {stop_DT - start_DT}s") # - # ### Side-by-Side # manual import of CNN value (see Data245ProjCNN.ipynb) cnn = 0.870 print(f'SVM Accuracy : {svm:.3f}') print(f'KNN Accuracy : {knn:.3f}') print(f'Decision Tree Accuracy : {tree:.3f}') print(f'Random Forest Accuracy : {rf:.3f}') print(f'CNN (19 epoch) Accuracy : {cnn:.3f}') accuracy = [cnn, svm, rf, knn, tree] 
# Names for the accuracy bar chart, in the same order as `accuracy`.
acc_name = ["Convolution Neural Network Accuracy",
            "SVM Accuracy",
            "Random Forest Accuracy",
            "KNN Accuracy",
            "Decision Tree Accuracy"]

f, ax = plt.subplots(figsize=(10, 6))
ax = plt.bar(acc_name, accuracy, width=0.6, color="#FF5733", edgecolor="black")
plt.grid(True)
plt.axis(ymin=0.5, ymax=1)  # zoom in: all accuracies are above 0.5
plt.tick_params(axis='x', labelrotation=45)
plt.ylabel("Accuracy")
plt.show()

# ### Training Time

# BUG FIX: these print the training times (in seconds), not accuracies --
# the labels previously said "Accuracy", which was misleading.
print(f'SVM Training Time : {train_time_SVC:.3f}')
print(f'KNN Training Time : {train_time_KNN:.3f}')
print(f'Decision Tree Training Time : {train_time_DT:.3f}')
print(f'Random Forest Training Time : {train_time_RF:.3f}')

# ### Save ML Models

# Import pickle Package
import pickle

# +
# Save each trained model to a file in the current working directory.
Pkl_Filename_SVM = "Pickle_SVM_Model.pkl"

# model (SVM)
with open(Pkl_Filename_SVM, 'wb') as file:
    pickle.dump(model, file)
# +
# knn_model (KNN)
Pkl_Filename_KNN = "Pickle_KNN_Model.pkl"
with open(Pkl_Filename_KNN, 'wb') as file:
    pickle.dump(knn_model, file)
# +
# forest (Random Forest)
Pkl_Filename_RF = "Pickle_FOREST_Model.pkl"
with open(Pkl_Filename_RF, 'wb') as file:
    pickle.dump(forest, file)
# +
# dt (Decision Tree)
Pkl_Filename_DT = "Pickle_TREE_Model.pkl"
with open(Pkl_Filename_DT, 'wb') as file:
    pickle.dump(dt, file)
# -

# ### Check pickle file on a model

# +
# Load the Model back from file
with open(Pkl_Filename_SVM, 'rb') as file:
    Pickled_SVM_Model = pickle.load(file)

Pickled_SVM_Model
# +
# Use the Reloaded Model to
# Calculate the accuracy score and predict target values

# Calculate the Score
score = Pickled_SVM_Model.score(X_test.reshape(X_test.shape[0], -1), y_test)

# Print the Score
print("Test score: {0:.2f} %".format(100 * score))
# +
# Predict the Labels using the reloaded Model
Ypredict = Pickled_SVM_Model.predict(X_test.reshape(X_test.shape[0], -1))
Ypredict
# -
245_Project_Code_Blur.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Q# # language: qsharp # name: iqsharp # --- # # Key Distribution Kata # # **Key Distribution** quantum kata is a series of exercises designed to get you familiar with # the BB84 protocol for quantum key distribution. This protocol allows two parties, Alice and Bob, to share a random secret key. # # * You can find a description of the BB84 protocol [here](https://en.wikipedia.org/wiki/BB84). # * [Short animated video introducing BB84 protocol](https://www.youtube.com/watch?v=UVzRbU6y7Ks). # # Each task is wrapped in one operation preceded by the description of the task. # Your goal is to fill in the blank (marked with the `// ...` comments) # with some Q# code that solves the task. To verify your answer, run the cell using Ctrl/⌘+Enter. # To begin, first prepare this notebook for execution (if you skip this step, you'll get "Syntax does not match any known patterns" error when you try to execute Q# code in the next cells): %package Microsoft.Quantum.Katas::0.10.1911.307 # > The package versions in the output of the cell above should always match. If you are running the Notebooks locally and the versions do not match, please install the IQ# version that matches the version of the `Microsoft.Quantum.Katas` package. # > <details> # > <summary><u>How to install the right IQ# version</u></summary> # > For example, if the version of `Microsoft.Quantum.Katas` package above is 0.1.2.3, the installation steps are as follows: # > # > 1. Stop the kernel. # > 2. Uninstall the existing version of IQ#: # > dotnet tool uninstall microsoft.quantum.iqsharp -g # > 3. Install the matching version: # > dotnet tool install microsoft.quantum.iqsharp -g --version 0.1.2.3 # > 4. Reinstall the kernel: # > dotnet iqsharp install # > 5. Restart the Notebook. # > </details> # # ## Part I. 
Preparation # ### Task 1.1. Diagonal polarization # # **Input:** $N$ qubits (stored in an array of length $N$). Each qubit is either in $|0\rangle$ or in $|1\rangle$ state. # # **Goal:** Convert the qubits to diagonal polarization: # * if `qs[i]` was in state $|0\rangle$, it should be transformed to $|+\rangle = \frac{1}{\sqrt2}(|0\rangle + |1\rangle)$, # * if `qs[i]` was in state $|1\rangle$, it should be transformed to $|-\rangle = \frac{1}{\sqrt2}(|0\rangle - |1\rangle)$. # + %kata T11_DiagonalPolarization_Test operation DiagonalPolarization (qs : Qubit[]) : Unit { // ... } # - # ### Task 1.2. Equal superposition # # **Input**: A qubit in the $|0\rangle$ state. # # **Goal**: Change the qubit state to a superposition state that has equal probabilities of measuring 0 and 1. # # > Note that this is not the same as keeping the qubit in the $|0\rangle$ state with 50% probability and converting it to the $|1\rangle$ state with 50% probability! # + %kata T12_EqualSuperposition_Test operation EqualSuperposition (q : Qubit) : Unit { // ... } # - # ## Part II. BB84 Protocol # ### Task 2.1. Generate random array # # **Input:** An integer $N$. # # **Output** : A `Bool` array of length N, where each element is chosen at random. # # > This will be used by both Alice and Bob to choose either the sequence of bits to send or the sequence of bases (`false` indicates $|0\rangle$ / $|1\rangle$ basis, and `true` indicates $|+\rangle$ / $|-\rangle$ basis) to use when encoding/measuring the bits. # + %kata T21_RandomArray_Test operation RandomArray (N : Int) : Bool[] { // ... return new Bool[N]; } # - # ### Task 2.2. Prepare Alice's qubits # # **Inputs:** # # 1. `qs`: an array of $N$ qubits in the $|0\rangle$ states, # 2. `bases`: a `Bool` array of length $N$; # `bases[i]` indicates the basis to prepare the i-th qubit in: # * `false`: use $|0\rangle$ / $|1\rangle$ (computational) basis, # * `true`: use $|+\rangle$ / $|-\rangle$ (Hadamard/diagonal) basis. # 3. 
`bits`: a `Bool` array of length $N$; # `bits[i]` indicates the bit to encode in the i-th qubit: `false` = 0, `true` = 1. # # **Goal:** Prepare the qubits in the described state. # + %kata T22_PrepareAlicesQubits_Test operation PrepareAlicesQubits (qs : Qubit[], bases : Bool[], bits : Bool[]) : Unit { // ... } # - # ### Task 2.3. Measure Bob's qubits # # **Inputs:** # # 1. `qs`: an array of $N$ qubits; # each qubit is in one of the following states: $|0\rangle$, $|1\rangle$, $|+\rangle$, $|-\rangle$. # 2. `bases`: a `Bool` array of length $N$; # `bases[i]` indicates the basis used to prepare the i-th qubit: # * `false`: $|0\rangle$ / $|1\rangle$ (computational) basis, # * `true`: $|+\rangle$ / $|-\rangle$ (Hadamard/diagonal) basis. # # **Output:** Measure each qubit in the corresponding basis and return an array of results # (encoding measurement result `Zero` as `false` and `One` as `true`). # The state of the qubits at the end of the operation does not matter. # + %kata T23_MeasureBobsQubits_Test operation MeasureBobsQubits (qs : Qubit[], bases : Bool[]) : Bool[] { // ... return new Bool[0]; } # - # ### Task 2.4. Generate the shared key! # # **Inputs:** # # 1. `basesAlice` and `basesBob`: `Bool` arrays of length $N$ # describing Alice's and Bobs's choice of bases, respectively; # 2. `measurementsBob`: a `Bool` array of length $N$ describing Bob's measurement results. # # **Output:** a `Bool` array representing the shared key generated by the protocol. # # > Note that you don't need to know both Alice's and Bob's bits to figure out the shared key! # + %kata T24_GenerateSharedKey_Test function GenerateSharedKey (basesAlice : Bool[], basesBob : Bool[], measurementsBob : Bool[]) : Bool[] { // ... return new Bool[0]; } # - # ### Task 2.5. Was communication secure? # # **Inputs:** # # 1. `keyAlice` and `keyBob`: `Bool` arrays of equal length $N$ describing # the versions of the shared key obtained by Alice and Bob, respectively. # 2. 
`threshold`: an integer between 50 and 100 - the percentage of the key bits that have to match. # # **Output:** `true` if the percentage of matching bits is greater than or equal to the threshold, and `false` otherwise. # + %kata T25_CheckKeysMatch_Test function CheckKeysMatch (keyAlice : Bool[], keyBob : Bool[], threshold : Int) : Bool { // The following lines enforce the constraints on the input that you are given. // You don't need to modify them. Feel free to remove them, this won't cause your code to fail. Fact(Length(keyAlice) == Length(keyBob), "Input arrays should have the same length"); // ... return false; } # - # ### Task 2.6. Putting it all together # # **Goal:** Implement the entire BB84 protocol using tasks 2.1 - 2.5 # and following the comments in the operation template. # # > This is an open-ended task, and is not covered by a unit test. To run the code, execute the cell with the definition of the `Run_BB84Protocol` operation first; if it compiled successfully without any errors, you can run the operation by executing the next cell (`%simulate Run_BB84Protocol`). operation Run_BB84Protocol () : Unit { // 1. Alice chooses a random set of bits to encode in her qubits // and a random set of bases to prepare her qubits in. // ... // 2. Alice allocates qubits, encodes them using her choices and sends them to Bob. // (Note that you can not reflect "sending the qubits to Bob" in Q#) // ... // 3. Bob chooses a random set of bases to measure Alice's qubits in. // ... // 4. Bob measures Alice's qubits in his chosen bases. // ... // 5. Alice and Bob compare their chosen bases and use the bits in the matching positions to create a shared key. // ... // 6. Alice and Bob check to make sure nobody eavesdropped by comparing a subset of their keys // and verifying that more than a certain percentage of the bits match. 
// For this step, you can check the percentage of matching bits using the entire key // (in practice only a subset of indices is chosen to minimize the number of discarded bits). // ... // If you've done everything correctly, the generated keys will always match, since there is no eavesdropping going on. // In the next section you will explore the effects introduced by eavesdropping. } %simulate Run_BB84Protocol # ## Part III. Eavesdropping # ### Task 3.1. Eavesdrop! # # In this task you will try to implement an eavesdropper, Eve. # # Eve will intercept a qubit from the quantum channel that Alice and Bob are using. # She will measure it in either the $|0\rangle$ / $|1\rangle$ basis or the $|+\rangle$ / $|-\rangle$ basis, # reconstruct the qubit into the original state and send it back to the channel. # Eve hopes that if she properly reconstructs the qubit after measurement she won't be caught! # # **Inputs:** # # 1. `q`: a qubit in one of the following states: $|0\rangle$, $|1\rangle$, $|+\rangle$, $|-\rangle$. # 2. `basis`: Eve's guess of the basis she should use for measuring. # Recall that `false` indicates $|0\rangle$ / $|1\rangle$ basis and `true` indicates $|+\rangle$ / $|-\rangle$ basis. # # **Output:** the bit encoded in the qubit (`false` for $|0\rangle$ / $|+\rangle$ states, `true` for $|1\rangle$ / $|-\rangle$ states). # # In this task you are guaranteed that the basis you're given matches the one # in which the qubit is encoded, that is, if you are given a qubit in state # $|0\rangle$ or $|1\rangle$, you will be given `basis = false`, and if you are given a qubit in state # $|+\rangle$ or $|-\rangle$, you will be given `basis = true`. This is different from a real # eavesdropping scenario, in which you have to guess the basis yourself. # + %kata T31_Eavesdrop_Test operation Eavesdrop (q : Qubit, basis : Bool) : Bool { // ... return false; } # - # ### Task 3.2. Catch the eavesdropper # # Add an eavesdropper into the BB84 protocol from task 2.6. 
#
# Note that now we should be able to detect Eve and therefore we have to discard some of our keys!
#
# > Similar to task 2.6, this is an open-ended task, and is not covered by a unit test. To run the code, execute the cell with the definition of the `Run_BB84ProtocolWithEavesdropper` operation first; if it compiled successfully without any errors, you can run the operation by executing the next cell (`%simulate Run_BB84ProtocolWithEavesdropper`).

operation Run_BB84ProtocolWithEavesdropper () : Unit {
    // Suggested outline, mirroring the BB84 flow from task 2.6 with Eve inserted
    // between steps 2 and 3 (see tasks 2.1 - 2.5 and 3.1 for the building blocks):
    // 1. Alice chooses random bits and random bases (task 2.1 helpers).
    // 2. Alice allocates qubits and encodes them with PrepareAlicesQubits (task 2.2).
    // 3. Eve intercepts each qubit and measures it via Eavesdrop (task 3.1);
    //    NOTE(review): unlike task 3.1, here Eve does not know Alice's bases,
    //    so she has to pick a basis for each qubit herself (e.g. at random).
    // 4. Bob measures the (possibly disturbed) qubits with MeasureBobsQubits (task 2.3).
    // 5. Alice and Bob build their keys with GenerateSharedKey (task 2.4).
    // 6. They compare keys with CheckKeysMatch (task 2.5); a match rate below the
    //    threshold reveals the eavesdropper, and the key should be discarded.
    // ...
}

%simulate Run_BB84ProtocolWithEavesdropper
KeyDistribution_BB84/KeyDistribution_BB84.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
# ---

# URL: http://bokeh.pydata.org/en/latest/docs/gallery/dot.html

import holoviews as hv

hv.extension('bokeh')

# ## Declare data

# +
# Dot plot data: one score per category, rendered as a stem (spike)
# with a dot marker on top.
labels = ["a", "b", "c", "d", "e", "f", "g", "h"]
scores = [50, 40, 65, 10, 25, 37, 80, 60]

dots = hv.Scatter((labels, scores))
stems = hv.Spikes(dots)

# Categorical heatmap data shown alongside the dot plot.
hm_x = ["foo", "foo", "foo", "bar", "bar", "bar", "baz", "baz", "baz"]
hm_y = ["foo", "bar", "baz", "foo", "bar", "baz", "foo", "bar", "baz"]
hm_z = [0, 1, 2, 3, 4, 5, 6, 7, 8]

grid = hv.HeatMap((hm_x, hm_y, hm_z))
# -

# ## Plot

# +
# %%opts Layout [shared_axes=False]
# Plot options: unlabelled axes, horizontal orientation, no color mapping.
stem_plot = {'labelled': [], 'invert_axes': True, 'color_index': None}
stem_style = {'color': 'green', 'line_width': 4}
dot_style = {'size': 15, 'fill_color': "orange", 'line_color': "green"}

grid + stems(plot=stem_plot, style=stem_style) * dots(style=dot_style)
examples/gallery/demos/bokeh/dot_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- # + [markdown] colab_type="text" id="H4OxaKTdoTEM" # # Pipeline Example with BERT and TensorFlow Extended (TFX) # ![](img/tfx-overview.png) # # Based on the following gist: https://gist.github.com/hanneshapke/e2dd30ece4c778d335e7d3fafd6ce4ff # + [markdown] colab_type="text" id="o18JmOsdRTEw" # ## Motivation # # Instead of converting the input to a tranformer model into token ids on the client side, the model exported from this pipeline will allow the conversion on the server side. # # The pipeline takes advantage of the broad TensorFlow Eco system, including: # * Loading the IMDB dataset via **TensorFlow Datasets** # * Loading a pre-trained model via **tf.hub** # * Manipulating the raw input data with **tf.text** # * Building a simple model architecture with **Keras** # * Composing the model pipeline with **TensorFlow Extended (TFX)**, e.g. 
TensorFlow Transform, TensorFlow Data Validation and then consuming the tf.Keras model with the latest Trainer component from TFX # # # ### Outline # # * Load the training data set # * Create the TFX Pipeline # * Export the trained Model # * Test the exported Model # + [markdown] colab_type="text" id="Rinax0YJ_otk" # # Project Setup # # ## Install Required Packages # - # !pip install -q pip --upgrade # !pip install -q wrapt --upgrade --ignore-installed # !pip install -q tensorflow==2.1.0 # !pip install -q transformers==2.8.0 # + colab={"base_uri": "https://localhost:8080/", "height": 829} colab_type="code" id="Sjjgiv0bM0hi" outputId="a33dfe60-c206-4bb2-8c3f-0ba4fd90e898" # !pip install -Uq tfx==0.21.4 # !pip install -Uq tensorflow-text==2.1.1 # the tf-text version needs to match the tf version # !pip install -Uq tensorflow-model-analysis==0.22.1 # !pip install -Uq tensorflow-data-validation==0.22.0 # !pip install -Uq tensorflow-transform==0.22.0 # !pip install -Uq tensorflow_hub==0.8.0 # !pip install -Uq tensorflow_datasets==3.2.1 # - # # _Ignore ERRORs ^^ ABOVE ^^_ # + # Restart the kernel to pick up pip installed libraries from IPython.core.display import HTML HTML("<script>Jupyter.notebook.kernel.restart()</script>") # + colab={} colab_type="code" id="oThi-x8xLlv-" import glob import os import pprint import re import tempfile from shutil import rmtree import numpy as np import pandas as pd import tensorflow as tf import tensorflow_data_validation as tfdv import tensorflow_hub as hub import tensorflow_model_analysis as tfma import tensorflow_transform as tft import tensorflow_transform.beam as tft_beam from tensorflow_transform.beam.tft_beam_io import transform_fn_io from tensorflow_transform.saved import saved_transform_io from tensorflow_transform.tf_metadata import dataset_metadata, dataset_schema, metadata_io, schema_utils from tfx.components import ( Evaluator, ExampleValidator, ImportExampleGen, ModelValidator, Pusher, ResolverNode, SchemaGen, 
StatisticsGen, Trainer, Transform, ) from tfx.components.base import executor_spec from tfx.components.trainer.executor import GenericExecutor from tfx.dsl.experimental import latest_blessed_model_resolver from tfx.proto import evaluator_pb2, example_gen_pb2, pusher_pb2, trainer_pb2 from tfx.types import Channel from tfx.types.standard_artifacts import Model, ModelBlessing from tfx.utils.dsl_utils import external_input import tensorflow_datasets as tfds import tensorflow_model_analysis as tfma import tensorflow_text as text from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext # %load_ext tfx.orchestration.experimental.interactive.notebook_extensions.skip # + [markdown] colab_type="text" id="q4OMItPSLnDj" # ## Download the IMDB Dataset from TensorFlow Datasets # # For our demo example, we are using the [IMDB data set](https://www.kaggle.com/lakshmi25npathi/imdb-dataset-of-50k-movie-reviews) to train a sentiment model based on the pre-trained BERT model. The data set is provided through [TensorFlow Datasets](https://www.tensorflow.org/datasets). Our ML pipeline can read TFRecords, however it expects only TFRecord files in the data folder. That is the reason why we need to delete the additional files provided by TFDS. 
# - # !mkdir -p ./content/tfds/ # + colab={} colab_type="code" id="KjWjnzPGKjIk" def clean_before_download(base_data_dir): rmtree(base_data_dir) def delete_unnecessary_files(base_path): os.remove(base_path + "dataset_info.json") os.remove(base_path + "label.labels.txt") counter = 2 for f in glob.glob(base_path + "imdb_reviews-unsupervised.*"): os.remove(f) counter += 1 print(f"Deleted {counter} files") def get_dataset(name="imdb_reviews", version="1.0.0"): base_data_dir = "./content/tfds/" config = "plain_text" version = "1.0.0" clean_before_download(base_data_dir) tfds.disable_progress_bar() builder = tfds.text.IMDBReviews(data_dir=base_data_dir, config=config, version=version) download_config = tfds.download.DownloadConfig(download_mode=tfds.GenerateMode.FORCE_REDOWNLOAD) builder.download_and_prepare(download_config=download_config) base_tfrecords_filename = os.path.join(base_data_dir, "imdb_reviews", config, version, "") train_tfrecords_filename = base_tfrecords_filename + "imdb_reviews-train*" test_tfrecords_filename = base_tfrecords_filename + "imdb_reviews-test*" label_filename = os.path.join(base_tfrecords_filename, "label.labels.txt") labels = [label.rstrip("\n") for label in open(label_filename)] delete_unnecessary_files(base_tfrecords_filename) return (train_tfrecords_filename, test_tfrecords_filename), labels tfrecords_filenames, labels = get_dataset() # + [markdown] colab_type="text" id="RDtPNfOwriT8" # ## Helper function to load the BERT model as Keras layer # # In our pipeline components, we are reusing the BERT Layer from tf.hub in two places # * in the model architecture when we define our Keras model # * in our preprocessing function when we extract the BERT settings (casing and vocab file path) to reuse the settings during the tokenization # + colab={} colab_type="code" id="Tre_oQu0rlrU" # %%skip_for_export # %%writefile bert.py import tensorflow_hub as hub BERT_TFHUB_URL = "https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2" def 
load_bert_layer(model_url=BERT_TFHUB_URL): # Load the pre-trained BERT model as layer in Keras bert_layer = hub.KerasLayer(handle=model_url, trainable=True) return bert_layer # + [markdown] colab_type="text" id="k-5QGnm_lFJD" # # TFX Pipeline # # The TensorFlow Extended Pipeline is more or less following the example setup shown here. We'll only note deviations from the original setup. # + [markdown] colab_type="text" id="arPCEBYEFqEr" # ## Initializing the Interactive TFX Pipeline # + colab={} colab_type="code" id="sBO0T3D5kkOt" context = InteractiveContext() # + [markdown] colab_type="text" id="qo2Q-c_ynL2x" # ## Loading the dataset # + colab={} colab_type="code" id="W8GqUHwAKm6j" output = example_gen_pb2.Output( split_config=example_gen_pb2.SplitConfig( splits=[ example_gen_pb2.SplitConfig.Split(name="train", hash_buckets=45), example_gen_pb2.SplitConfig.Split(name="eval", hash_buckets=5), ] ) ) # Load the data from our prepared TFDS folder examples = external_input("./content/tfds/imdb_reviews/plain_text/1.0.0") example_gen = ImportExampleGen(input=examples, output_config=output) context.run(example_gen) # + colab={} colab_type="code" id="iu2ejZTWK-E5" # %%skip_for_export for artifact in example_gen.outputs["examples"].get(): print(artifact.uri) # + [markdown] colab_type="text" id="nE9VL-pmF6L_" # ## TensorFlow Data Validation # + colab={} colab_type="code" id="EglytaKVLQzr" # %%skip_for_export statistics_gen = StatisticsGen(examples=example_gen.outputs["examples"]) context.run(statistics_gen) context.show(statistics_gen.outputs["statistics"]) # + colab={} colab_type="code" id="IBYoEPhBeQUi" # %%skip_for_export schema_gen = SchemaGen(statistics=statistics_gen.outputs["statistics"], infer_feature_shape=True) context.run(schema_gen) context.show(schema_gen.outputs["schema"]) # + colab={} colab_type="code" id="bl2gkqytjr0w" # %%skip_for_export # check the data schema for the type of input tensors tfdv.load_schema_text(schema_gen.outputs["schema"].get()[0].uri + 
"/schema.pbtxt") # + colab={} colab_type="code" id="IWRswNYye6So" # %%skip_for_export example_validator = ExampleValidator( statistics=statistics_gen.outputs["statistics"], schema=schema_gen.outputs["schema"] ) context.run(example_validator) context.show(example_validator.outputs["anomalies"]) # + [markdown] colab_type="text" id="_zqjzTx2s5HS" # ## TensorFlow Transform # # This is where we perform the BERT processing. # # + colab={} colab_type="code" id="K91irJq7q6vC" # %%skip_for_export # %%writefile transform.py import tensorflow as tf import tensorflow_text as text from bert import load_bert_layer MAX_SEQ_LEN = 64 # max number is 512 do_lower_case = load_bert_layer().resolved_object.do_lower_case.numpy() def preprocessing_fn(inputs): """Preprocess input column of text into transformed columns of. * input token ids * input mask * input type ids """ CLS_ID = tf.constant(101, dtype=tf.int64) SEP_ID = tf.constant(102, dtype=tf.int64) PAD_ID = tf.constant(0, dtype=tf.int64) vocab_file_path = load_bert_layer().resolved_object.vocab_file.asset_path bert_tokenizer = text.BertTokenizer( vocab_lookup_table=vocab_file_path, token_out_type=tf.int64, lower_case=do_lower_case ) def tokenize_text(text, sequence_length=MAX_SEQ_LEN): """ Perform the BERT preprocessing from text -> input token ids """ # convert text into token ids tokens = bert_tokenizer.tokenize(text) # flatten the output ragged tensors tokens = tokens.merge_dims(1, 2)[:, :sequence_length] # Add start and end token ids to the id sequence start_tokens = tf.fill([tf.shape(text)[0], 1], CLS_ID) end_tokens = tf.fill([tf.shape(text)[0], 1], SEP_ID) tokens = tokens[:, : sequence_length - 2] tokens = tf.concat([start_tokens, tokens, end_tokens], axis=1) # truncate sequences greater than MAX_SEQ_LEN tokens = tokens[:, :sequence_length] # pad shorter sequences with the pad token id tokens = tokens.to_tensor(default_value=PAD_ID) pad = sequence_length - tf.shape(tokens)[1] tokens = tf.pad(tokens, [[0, 0], [0, pad]], 
constant_values=PAD_ID) # and finally reshape the word token ids to fit the output # data structure of TFT return tf.reshape(tokens, [-1, sequence_length]) def preprocess_bert_input(text): """ Convert input text into the input_word_ids, input_mask, input_type_ids """ input_word_ids = tokenize_text(text) input_mask = tf.cast(input_word_ids > 0, tf.int64) input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) zeros_dims = tf.stack(tf.shape(input_mask)) input_type_ids = tf.fill(zeros_dims, 0) input_type_ids = tf.cast(input_type_ids, tf.int64) return (input_word_ids, input_mask, input_type_ids) input_word_ids, input_mask, input_type_ids = preprocess_bert_input(tf.squeeze(inputs["text"], axis=1)) return { "input_word_ids": input_word_ids, "input_mask": input_mask, "input_type_ids": input_type_ids, "label": inputs["label"], } # + colab={} colab_type="code" id="Sz5cevHYrR6M" transform = Transform( examples=example_gen.outputs["examples"], schema=schema_gen.outputs["schema"], module_file=os.path.abspath("transform.py"), ) context.run(transform) # + [markdown] colab_type="text" id="yCcNJxWSKPPv" # #### Check the Output Data Struture of the TF Transform Operation # + colab={} colab_type="code" id="PcUEGmuhtmGi" from tfx_bsl.coders.example_coder import ExampleToNumpyDict pp = pprint.PrettyPrinter() # Get the URI of the output artifact representing the transformed examples, which is a directory train_uri = transform.outputs["transformed_examples"].get()[0].uri print(train_uri) # Get the list of files in this directory (all compressed TFRecord files) tfrecord_folders = [os.path.join(train_uri, name) for name in os.listdir(train_uri)] tfrecord_filenames = [] for tfrecord_folder in tfrecord_folders: for name in os.listdir(tfrecord_folder): tfrecord_filenames.append(os.path.join(tfrecord_folder, name)) # Create a TFRecordDataset to read these files dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP") for tfrecord in dataset.take(1): serialized_example = 
tfrecord.numpy() example = ExampleToNumpyDict(serialized_example) pp.pprint(example) # + [markdown] colab_type="text" id="MdsXSG52kVoL" # ## Training of the Keras Model # + colab={} colab_type="code" id="ywjksr-vtxrX" # %%skip_for_export # %%writefile trainer.py import tensorflow as tf import tensorflow_hub as hub import tensorflow_model_analysis as tfma import tensorflow_transform as tft from tensorflow_transform.tf_metadata import schema_utils from typing import Text import absl import tensorflow as tf from tensorflow import keras import tensorflow_transform as tft from tfx.components.trainer.executor import TrainerFnArgs _LABEL_KEY = "label" BERT_TFHUB_URL = "https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2" def _gzip_reader_fn(filenames): """Small utility returning a record reader that can read gzip'ed files.""" return tf.data.TFRecordDataset(filenames, compression_type="GZIP") def load_bert_layer(model_url=BERT_TFHUB_URL): # Load the pre-trained BERT model as layer in Keras bert_layer = hub.KerasLayer(handle=model_url, trainable=False) # model can be fine-tuned return bert_layer def get_model(tf_transform_output, max_seq_length=64, num_labels=2): # dynamically create inputs for all outputs of our transform graph feature_spec = tf_transform_output.transformed_feature_spec() feature_spec.pop(_LABEL_KEY) inputs = { key: tf.keras.layers.Input(shape=(max_seq_length), name=key, dtype=tf.int64) for key in feature_spec.keys() } input_word_ids = tf.cast(inputs["input_word_ids"], dtype=tf.int32) input_mask = tf.cast(inputs["input_mask"], dtype=tf.int32) input_type_ids = tf.cast(inputs["input_type_ids"], dtype=tf.int32) bert_layer = load_bert_layer() pooled_output, _ = bert_layer([input_word_ids, input_mask, input_type_ids]) # Add additional layers depending on your problem x = tf.keras.layers.Dense(256, activation="relu")(pooled_output) dense = tf.keras.layers.Dense(64, activation="relu")(x) pred = tf.keras.layers.Dense(1, activation="sigmoid")(dense) 
keras_model = tf.keras.Model( inputs=[inputs["input_word_ids"], inputs["input_mask"], inputs["input_type_ids"]], outputs=pred ) keras_model.compile( loss="binary_crossentropy", optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), metrics=["accuracy"] ) return keras_model def _get_serve_tf_examples_fn(model, tf_transform_output): """Returns a function that parses a serialized tf.Example and applies TFT.""" model.tft_layer = tf_transform_output.transform_features_layer() @tf.function def serve_tf_examples_fn(serialized_tf_examples): """Returns the output to be used in the serving signature.""" feature_spec = tf_transform_output.raw_feature_spec() feature_spec.pop(_LABEL_KEY) parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec) transformed_features = model.tft_layer(parsed_features) outputs = model(transformed_features) return {"outputs": outputs} return serve_tf_examples_fn def _input_fn(file_pattern: Text, tf_transform_output: tft.TFTransformOutput, batch_size: int = 32) -> tf.data.Dataset: """Generates features and label for tuning/training. Args: file_pattern: input tfrecord file pattern. tf_transform_output: A TFTransformOutput. batch_size: representing the number of consecutive elements of returned dataset to combine in a single batch Returns: A dataset that contains (features, indices) tuple where features is a dictionary of Tensors, and indices is a single Tensor of label indices. """ transformed_feature_spec = tf_transform_output.transformed_feature_spec().copy() dataset = tf.data.experimental.make_batched_features_dataset( file_pattern=file_pattern, batch_size=batch_size, features=transformed_feature_spec, reader=_gzip_reader_fn, label_key=_LABEL_KEY, ) return dataset # TFX Trainer will call this function. def run_fn(fn_args: TrainerFnArgs): """Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. 
""" tf_transform_output = tft.TFTransformOutput(fn_args.transform_output) train_dataset = _input_fn(fn_args.train_files, tf_transform_output, 32) eval_dataset = _input_fn(fn_args.eval_files, tf_transform_output, 32) mirrored_strategy = tf.distribute.MirroredStrategy() with mirrored_strategy.scope(): model = get_model(tf_transform_output=tf_transform_output) model.fit( train_dataset, steps_per_epoch=fn_args.train_steps, validation_data=eval_dataset, validation_steps=fn_args.eval_steps, ) signatures = { "serving_default": _get_serve_tf_examples_fn(model, tf_transform_output).get_concrete_function( tf.TensorSpec(shape=[None], dtype=tf.string, name="examples") ), } model.save(fn_args.serving_model_dir, save_format="tf", signatures=signatures) # + colab={} colab_type="code" id="b4n7fkCbnvHW" # NOTE: Adjust the number of training and evaluation steps TRAINING_STEPS = 50 EVALUATION_STEPS = 50 trainer = Trainer( module_file=os.path.abspath("trainer.py"), custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor), examples=transform.outputs["transformed_examples"], transform_graph=transform.outputs["transform_graph"], schema=schema_gen.outputs["schema"], train_args=trainer_pb2.TrainArgs(num_steps=TRAINING_STEPS), eval_args=trainer_pb2.EvalArgs(num_steps=EVALUATION_STEPS), ) context.run(trainer) # + colab={} colab_type="code" id="LD2kK5XQenDL" model_resolver = ResolverNode( instance_name="latest_blessed_model_resolver", resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver, model=Channel(type=Model), model_blessing=Channel(type=ModelBlessing), ) context.run(model_resolver) # + [markdown] colab_type="text" id="SgH50dYW5C2T" # ## TensorFlow Model Evaluation # + colab={} colab_type="code" id="NVOPbS9Te6MW" eval_config = tfma.EvalConfig( model_specs=[tfma.ModelSpec(label_key="label")], metrics_specs=[ tfma.MetricsSpec( metrics=[tfma.MetricConfig(class_name="ExampleCount")], thresholds={ "binary_accuracy": tfma.MetricThreshold( 
value_threshold=tfma.GenericValueThreshold(lower_bound={"value": 0.5}), change_threshold=tfma.GenericChangeThreshold( direction=tfma.MetricDirection.HIGHER_IS_BETTER, absolute={"value": -1e-10} ), ) }, ) ], slicing_specs=[ # An empty slice spec means the overall slice, i.e. the whole dataset. tfma.SlicingSpec(), ], ) evaluator = Evaluator( examples=example_gen.outputs["examples"], model=trainer.outputs["model"], baseline_model=model_resolver.outputs["model"], eval_config=eval_config, ) context.run(evaluator) # + colab={} colab_type="code" id="CD3Q8gnznAnT" # Check the blessing # !ls {evaluator.outputs['blessing'].get()[0].uri} # + [markdown] colab_type="text" id="5f4Z0vJWOIyk" # ## Model Export for Serving # + colab={} colab_type="code" id="CxxXrsdebY63" # !mkdir ./content/serving_model_dir serving_model_dir = "./content/serving_model_dir" pusher = Pusher( model=trainer.outputs["model"], model_blessing=evaluator.outputs["blessing"], push_destination=pusher_pb2.PushDestination( filesystem=pusher_pb2.PushDestination.Filesystem(base_directory=serving_model_dir) ), ) context.run(pusher) # + [markdown] colab_type="text" id="WWni3fVVafDa" # ## Test your Exported Model # + colab={} colab_type="code" id="bTi19Ojrbumq" def _bytes_feature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) push_uri = pusher.outputs.model_push.get()[0].uri latest_version = max(os.listdir(push_uri)) latest_version_path = os.path.join(push_uri, latest_version) loaded_model = tf.saved_model.load(latest_version_path) example_str = b"This is the finest show ever produced for TV. Each episode is a triumph. The casting, the writing, the timing are all second to none. This cast performs miracles." 
example = tf.train.Example(features=tf.train.Features(feature={"text": _bytes_feature(example_str)})) serialized_example = example.SerializeToString() f = loaded_model.signatures["serving_default"] print(f(tf.constant([serialized_example]))) # + colab={} colab_type="code" id="Y-Yr3qVov33c" def _bytes_feature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) push_uri = pusher.outputs.model_push.get()[0].uri latest_version = max(os.listdir(push_uri)) latest_version_path = os.path.join(push_uri, latest_version) loaded_model = tf.saved_model.load(latest_version_path) example_str = b"I loved it!" example = tf.train.Example(features=tf.train.Features(feature={"text": _bytes_feature(example_str)})) serialized_example = example.SerializeToString() f = loaded_model.signatures["serving_default"] print(f(tf.constant([serialized_example]))) # + colab={} colab_type="code" id="mRpBI4Ojw_hT" def _bytes_feature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) push_uri = pusher.outputs.model_push.get()[0].uri latest_version = max(os.listdir(push_uri)) latest_version_path = os.path.join(push_uri, latest_version) loaded_model = tf.saved_model.load(latest_version_path) example_str = b"It's OK." example = tf.train.Example(features=tf.train.Features(feature={"text": _bytes_feature(example_str)})) serialized_example = example.SerializeToString() f = loaded_model.signatures["serving_default"] print(f(tf.constant([serialized_example]))) # + colab={} colab_type="code" id="Fxy84A2sxAba" def _bytes_feature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) push_uri = pusher.outputs.model_push.get()[0].uri latest_version = max(os.listdir(push_uri)) latest_version_path = os.path.join(push_uri, latest_version) loaded_model = tf.saved_model.load(latest_version_path) example_str = b"The worst product ever." 
example = tf.train.Example(features=tf.train.Features(feature={"text": _bytes_feature(example_str)})) serialized_example = example.SerializeToString() f = loaded_model.signatures["serving_default"] print(f(tf.constant([serialized_example]))) # - print("Model has been exported to {}".format(pusher.outputs.model_push.get()[0].uri)) for path in os.walk("{}/".format(pusher.outputs.model_push.get()[0].uri)): print(path[0]) # + language="javascript" # Jupyter.notebook.save_checkpoint() # Jupyter.notebook.session.delete(); # -
10_pipeline/tfx/01_Create_Pipeline_Train_and_Deploy_Reviews_BERT_TensorFlow_TFX.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Penguin Random House Dataset # The data were collected in January 2022 from [Penguin Random House](https://www.penguinrandomhouse.com/) via [API](https://developer.penguinrandomhouse.com/page). # # The collected data were parsed and saved [in this folder](data_interm) (see details [here](downloading.ipynb)). # + tags=[] import os import sys # Append sys.path with the project root path sys.path.append(os.path.dirname(os.path.abspath(''))) # + pycharm={"name": "#%%\n"} import warnings import matplotlib.pyplot as plt import pandas as pd import seaborn as sns from IPython.display import Image from bs4 import BeautifulSoup from treelib import Tree from utils.preprocessing import get_weighted_rating # Suppress warnings warnings.filterwarnings('ignore', category=UserWarning, module='bs4') # To print many rows pd.set_option('display.max_rows', 100) # To wrap long values pd.set_option('display.max_colwidth', 78) # To use seaborn style for plots sns.set_style('darkgrid', {'axes.facecolor': '.9'}) sns.set_context('talk') sns_palette = sns.color_palette(plt.cm.get_cmap('tab20c').colors) # + [markdown] tags=[] # ## Categories # + [markdown] pycharm={"name": "#%% md\n"} # Books are assigned to one or more categories identified by a numeric `cat_id`. Each category is part of a larger category set and may be arranged in relation to other categories in the set to form a hierarchy. 
# + pycharm={"name": "#%%\n"} # Load categories info path_cats = os.path.join('data_interm', 'categories.txt') # Read data categories = pd.read_json(path_cats, lines=True) # Set the proper types categories['parent'] = categories['parent'].astype('Int64') categories['cat_id'] = categories['cat_id'].astype('Int64') # Set index categories.set_index('cat_id', inplace=True) # Change the name of the top category categories.loc[2000000000, 'menu_text'] = 'Consumer Category' # Show categories.head(10) # - # Each category has the following [properties](https://developer.penguinrandomhouse.com/docs/read/enhanced_prh_api/concepts/Categories): # - `cat_id`, a number that uniquely identifies this category # - `menu_text`, a brief description of the category, suitable for use in a navigational menu # - `description`, the description of the category, suitable for display # - `parent`, it is a `cat_id` of the parent category # + pycharm={"name": "#%%\n"} # If there are NaNs mask = categories.isna().any(axis=1) categories[mask] # This is ok, because the highest level doesn't have a parent # + # Visualize the hierarchy tree = Tree() for index, row in categories.iterrows(): # If this category is the top if pd.isna(row.parent): tree.create_node(row.menu_text, index) continue # Child categories tree.create_node(row.menu_text, index, parent=row.parent) tree.show() # + [markdown] pycharm={"name": "#%% md\n"} tags=[] # ## Contributor Roles # + [markdown] pycharm={"name": "#%% md\n"} # In PRH, every ISBN has up to five people mapped to it and each mapping is qualified with a contributor role code that identifies the type of contribution that person made to the ISBN. # # Beside of the role code and description, there is a sequence value that [is used](https://developer.penguinrandomhouse.com/docs/read/enhanced_prh_api/concepts/Contributor_role) to prioritize certain roles over others. This sequencing is used to order contributors within a work or ISBN. 
# + pycharm={"name": "#%%\n"} # Load roles path_roles = os.path.join('data_interm', 'contributor_roles.txt') roles = pd.read_json(path_roles, lines=True) # Rename code to `role_id` roles.rename(columns={'code': 'role_id'}, inplace=True) roles.set_index('role_id', inplace=True) roles.head(10) # + [markdown] tags=[] toc-hr-collapsed=true # ## Books # + # Load books info path_books = os.path.join('data_interm', 'books.txt') books = pd.read_json(path_books, lines=True) # Set `isbn` column as index books.set_index('isbn', inplace=True) books.head(2) # - books.dtypes # Let's analyze each column individually. # + [markdown] tags=[] # ### Onsale # - # Values of the column `onsale` are dates when books began to be sold. So, convert them to the appropriate type: # If there are NaNs books['onsale'].isna().any() # + # Convert to datetime books['onsale'] = pd.to_datetime(books['onsale']) # Rename books.rename(columns={'onsale': 'onsale_date'}, inplace=True) # - # Show the distribution _, ax = plt.subplots(figsize=(10, 8)) sns.histplot(x=books['onsale_date'], palette=sns_palette, ax=ax, bins=100) ax.set_ylabel('Number of books') ax.set_xlabel('Year of starting selling') ax.set_title('Distribution of dates when books started to be sold') plt.show() # + # Show the newest and the oldest books books_sorted_onsale = books.sort_values('onsale_date') # The oldest books books_sorted_onsale[['title', 'onsale_date']].head(5) # - # The newest (their selling will start soon) books_sorted_onsale[['title', 'onsale_date']].tail(5) # + [markdown] tags=[] # ### Language # - # The column `language` contains book languages. # There are books in different languages: books['language'].value_counts(dropna=False) # The majority of books, of course, in English. 
# Convert the language codes to their full names:

# +
languages = {
    'E': 'English',
    'SP': 'Spanish',
    'FR': 'French',
    'JA': 'Japanese',
    'CH': 'Chinese',
    'IT': 'Italian',
    'LA': 'Latin',
}

books['language'].replace(languages, inplace=True)

# + [markdown] tags=[]
# ### Cover
# -

# These values are just images of book covers:

# Look at a random image
Image(url=books.loc[9780375806131, 'cover'])

# + [markdown] tags=[]
# ### Title
# -

# Book titles. Check if there are missing titles:

books['title'].isna().any()

# + [markdown] tags=[]
# ### Price
# -

# The dataset contains `price` information in different currencies:

books['price'].head(5)

# Each cell of `price` holds a list of dicts; explode to one dict per row
book_price = books[['price']].explode('price')

# However, there are some text NaNs:
book_price['price'].isna().any()

# Drop nans and convert from dict to data frame
book_price = book_price.dropna().reset_index()
book_price = book_price.drop('price', axis=1)\
    .join(pd.DataFrame(book_price['price'].tolist()))
book_price.head(10)

# Empty column
book_price['pricingType'].isna().all()

# Get pivot table with prices in different currencies
book_price = book_price.pivot(index='isbn', columns='currencyCode',
                              values='amount')
book_price.head(5)

# All USD values are present
book_price['USD'].isna().any()

# +
# Join prices with the main dataset
# Keep only USD
books = books.join(book_price[['USD']])

# Drop the old column with price
books.drop(columns=['price'], inplace=True)

# Rename USD to price
books.rename(columns={'USD': 'price'}, inplace=True)
# -

# Show the distribution of the prices
books['price'].describe()

# Show the distribution of the prices
_, ax = plt.subplots(figsize=(10, 8))
sns.histplot(x=books['price'], bins=100, palette=sns_palette, ax=ax)
ax.set_xlim(0, 75)
ax.set_ylabel('Number of books')
ax.set_xlabel('Price')
ax.set_title('Distribution of book prices in USD')
plt.show()

# Show the most expensive books
books_price_sorted = books.sort_values(['price'], ascending=False)
books_price_sorted[['title', 'price']].head(10)

# + [markdown] tags=[]
# ### Format
# -

# Here, there are several book formats:

books['format_family'].value_counts(dropna=False)

# They are already well written. There is nothing to improve here, just to rename:

books.rename(columns={'format_family': 'format'}, inplace=True)

# + [markdown] tags=[]
# ### Pages and Minutes
# -

# There are two columns considering length of book: pages and
# projected_minutes (for audiobooks):

books[['pages', 'projected_minutes', 'format']].head()

audio_mask = books['format'] == 'Audio'
audio_books = books.loc[audio_mask, ['title', 'pages', 'projected_minutes']]
nonaudio_books = books.loc[~audio_mask,
                           ['title', 'pages', 'projected_minutes']]

# There are NaNs
audio_books['projected_minutes'].isna().any()

# There are NaNs
nonaudio_books['pages'].isna().any()

# Prove that audiobooks have no pages
audio_books['pages'].isna().all()

# Prove that non-audio books have no minutes
nonaudio_books['projected_minutes'].isna().all()

# Show the longest non-audio books
nonaudio_books.sort_values('pages', ascending=False).head(10)

# Show the longest audiobooks
audio_books.sort_values('projected_minutes', ascending=False).head(10)

# + [markdown] tags=[]
# ### Flapcopy and Excerpt
# -

# There are several columns with text description:
#
# - `flapcopy`, the brief summary that often appears on the inside of a
#   hardcover book's dust jacket.
# - `excerpt`, a fragment from the book.

books[['flapcopy', 'excerpt']].head(5)

# The values contain a lot of HTML tags. Let's remove them:

# +
def clean_text(text: str) -> str:
    """Clean text from HTML tags."""
    # getText() strips markup; \xa0 is the non-breaking space HTML leaves behind
    return BeautifulSoup(text, 'lxml').getText()\
        .replace(u'\xa0', ' ').strip()


for col in ['flapcopy', 'excerpt']:
    nans = books[col].isna()
    books.loc[~nans, col] = books.loc[~nans, col].apply(clean_text)
# -

# Show how many descriptions are absent
for col in ['flapcopy', 'excerpt']:
    nans_count = books[col].isna().sum()
    print(f'{round(100 * nans_count / len(books))}% of column {col} are absent.')

# + [markdown] tags=[]
# ### Publisher
# -

# The column contains book publishers.

books['publisher'].head(5)

# Parse dicts
book_publisher = books[['publisher']].dropna().reset_index()
book_publisher = book_publisher.drop('publisher', axis=1)\
    .join(pd.DataFrame(book_publisher['publisher'].tolist()))
book_publisher.set_index('isbn', inplace=True)
book_publisher.head(5)

# If there are any NaNs
book_publisher.isna().any()

# The most frequent publishers
book_publisher['description'].value_counts().head(10)

# +
# Join parsed publishers with the main dataframe
# Keep only publisher's code
books = books.join(book_publisher[['code']])

# Drop the old column with publishers
books.drop(columns=['publisher'], inplace=True)

# Rename publisher code to publisher_id
books.rename(columns={'code': 'publisher_id'}, inplace=True)
# -

# Move publisher data to a separate dataframe
publishers = book_publisher.drop_duplicates('code')\
    .rename(columns={'code': 'publisher_id', 'description': 'name'})\
    .set_index('publisher_id')
publishers.head(5)

# + [markdown] tags=[]
# ### Series
# -

# Let's extract the series information. Each row contains a `series_number`
# and info about `series` itself.

# +
# Most of the books are not in any series
no_series = books['series_number'].isna().sum()
print(f'{no_series} of {len(books)} books are not part of any series.')

# Convert series_number to int data type (nullable Int32 keeps the NaNs)
books['series_number'] = books['series_number'].astype('Int32')
# -

books[['series', 'series_number']].head(5)

# Move information about series in a separate dataframe
book_series = books[['series']].explode('series').dropna().reset_index()
book_series = book_series.drop('series', axis=1)\
    .join(pd.DataFrame(book_series['series'].tolist()))
book_series.set_index('isbn', inplace=True)
book_series.head(5)

# Drop HTML tags from description
description_nans = book_series['description'].isna()
book_series.loc[~description_nans, 'description'] = \
    book_series.loc[~description_nans, 'description'].apply(clean_text)

# +
# Save only `series_id` in the main dataset
books = books.join(book_series[['series_id']])

# Drop series column
books.drop(columns=['series'], inplace=True)

# Rename
books.rename(columns={'series_number': 'number_in_series'}, inplace=True)

# +
# Move series data to a separate dataframe
series = book_series.drop_duplicates('series_id').set_index('series_id')
series.head(5)
# -

# How many unique series exists
len(series)

# Show series with the largest number of books
series.sort_values('series_count', ascending=False).head(10)

# + [markdown] pycharm={"name": "#%% md\n"} tags=[] toc-hr-collapsed=true
# ### Contributors
# -

# The column `authors` may contain illustrators, photographer in addition to
# the main author.

books['authors'].head(5)

# Move information about authors in a separate dataframe
book_authors = books[['authors']].explode('authors').dropna().reset_index()
book_authors = book_authors.drop('authors', axis=1)\
    .join(pd.DataFrame(book_authors['authors'].tolist()))
book_authors.set_index('isbn', inplace=True)
book_authors.head(5)

# Since a book may have several authors, we need a dataframe for mapping
# book-authors and another one for author information.

# Dataframe for authors info
authors = book_authors[['author_id', 'first_name', 'last_name', 'company',
                        'client_source_id']]\
    .drop_duplicates('author_id').set_index('author_id')
authors.head(5)

# Dataframe for mapping books and authors
book_authors = book_authors[['author_id', 'role']].reset_index()
book_authors.rename(columns={'role': 'role_id'}, inplace=True)
book_authors.head(5)

# Drop authors from books
books.drop(columns='authors', inplace=True)

# + [markdown] pycharm={"name": "#%% md\n"}
# In some cases, we can see the same information about authors with
# different ids. For example:

# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
authors.loc[[159086, 20334]]

# + [markdown] pycharm={"name": "#%% md\n"}
# Here is the rephrased explanation from
# [the PRH site](https://developer.penguinrandomhouse.com/docs/read/enhanced_prh_api/concepts/Multiple_authors):
# In cases where an author contributes to books by both a core PRH division
# and by one of our distribution clients, MyHouse (content management system)
# creates a separate author ID so that our distribution clients have the
# opportunity to attach their own content to that ID. This is why there are
# two author records in our system.
#
# In this way, distribution authors, whose IDs were created within the
# content management system as copies of core authors, have
# `client_source_id` as a reference to the `author_id` of the core author.
# We can see from the values on the 159086 Daniel Mendelsohn record that it
# has a reference to the 20334 record.
# -

# Here, we can assume that the persons themselves are more important than
# the company where they work. So, we can use only one ID per author and
# skip the company info.

external_authors = authors[authors['client_source_id'] > 0]
rh_authors = authors[authors['client_source_id'] == 0]

# +
works_rh = external_authors['client_source_id'].isin(rh_authors.index)

# If author works in RH, change to the main `source_id`
rh_converted = external_authors.loc[works_rh, 'client_source_id'].to_dict()
authors.rename(index=rh_converted, inplace=True)
book_authors['author_id'].replace(rh_converted, inplace=True)

# If author does not work in RH, set `client_source_id` as main
nrh_converted = external_authors.loc[~works_rh, 'client_source_id'].to_dict()
authors.rename(index=nrh_converted, inplace=True)
book_authors['author_id'].replace(nrh_converted, inplace=True)

# Drop duplicates and unused columns
authors.drop(columns=['client_source_id', 'company'], inplace=True)
authors = authors[~authors.index.duplicated(keep='first')]

# Show
authors.head()
# -

# + [markdown] tags=[]
# ### Works
# -

# Here is the explanation of work from the PRH: Work ID is a Penguin Random
# House internally managed number that serves to group different formats of
# a title together. The object identified by the work ID is called the Work,
# and it represents the collection of all the different ISBNs together as a
# single object. Even though a Work is just a collection of ISBNs and does
# not have data of its own, certain attributes are computed at load time
# based on rules derived from the available ISBNs. The onsale date attribute
# is taken from the ISBN in the work with the oldest on-sale date. The
# title, subtitle and author (the display field) are derived a little
# differently. For each field, the ISBN values are sorted alphabetically and
# the first value is used as the work attribute. All other data is derived
# for a Work based on the ISBNs it has (more details
# [here](https://developer.penguinrandomhouse.com/docs/read/enhanced_prh_api/concepts/Works_and_ISBNs)).

books['works'].head(5)

# Parse dicts
book_work = books[['works']].explode('works').dropna().reset_index()
book_work = book_work.drop('works', axis=1)\
    .join(pd.DataFrame(book_work['works'].tolist()))
book_work.set_index('isbn', inplace=True)
book_work.head(5)

# Each book have only one `work_id`
book_work.index.duplicated().any()

# +
# Save only `work_id` in the main dataset
books = books.join(book_work[['work_id']])

# Drop works column
books.drop(columns=['works'], inplace=True)
# -

# To avoid the duplication between book data and work data, we can find and
# save only one isbn per work that will present its information. Let's use
# the same steps to find the front ISBN as it is written in the description
# from PRH.

# +
# Use only `work_id`
works = book_work.loc[:, ['work_id']]
works.reset_index(inplace=True)

# Determine front ISBN (alphabetically first ISBN of each work)
works.sort_values('isbn', inplace=True)
works.drop_duplicates('work_id', keep='first', inplace=True)
works.set_index('work_id', inplace=True)
works.rename(columns={'isbn': 'front_isbn'}, inplace=True)

# Show
works.head(5)

# + [markdown] tags=[]
# ### Categories
# -

# Categories are sequenced such that the first category should be considered
# more relevant or important than the last.

books['categories'].head(5)

# Move information about categories in a separate dataframe
book_cats = books[['categories']].explode('categories').dropna().reset_index()
book_cats = book_cats.drop('categories', axis=1)\
    .join(pd.DataFrame(book_cats['categories'].tolist()))
book_cats.head(5)

# Drop category column from the main dataset
books.drop(columns='categories', inplace=True)

# There are a lot of category sets
# [in the PRH](https://developer.penguinrandomhouse.com/docs/read/enhanced_prh_api/concepts/Categories).
# However, we use only "CN" categories here. Thus, we need to remove
# unnecessary ones.

book_cats = book_cats[book_cats['category_id'].isin(categories.index)]
book_cats.head(5)

# ## Ratings

# Load all ratings
path_ratings = os.path.join('data_interm', 'ratings_joined.csv')
ratings = pd.read_csv(path_ratings)
init_rating_count = len(ratings)
print(f'Number of all ratings: {init_rating_count}')

# Next, we need to remove the ratings of books that were not downloaded
# from PRH.

ratings = ratings[ratings['isbn13'].isin(books.index)]
print(f'Number of book ratings that were downloaded from PRH: '
      f'{len(ratings)} that is {len(ratings) * 100 / init_rating_count:.2f}% '
      f'from all ratings.')
ratings.head(5)

# Since people most often rate the book content rather than a particular
# edition of the book, we will build recommendations using `work_id`
# instead of `isbn13`.

ratings = pd.merge(ratings, books[['work_id']], left_on='isbn13',
                   right_index=True)
ratings.head(5)

# Some users may have rated different editions (ISBNs) of the same work.
# Let's average their ratings so that there is only one rating per work
# from each user:

# +
ratings_per_work = ratings[['work_id', 'user_id', 'rating']]\
    .groupby(['work_id', 'user_id'], observed=True).mean()

# Drop duplicated ratings
work_ratings = ratings.drop_duplicates(['user_id', 'work_id'], keep='first')
work_ratings = work_ratings[['user_id', 'work_id']]\
    .merge(ratings_per_work, left_on=['user_id', 'work_id'],
           right_on=['user_id', 'work_id'], how='left')
work_ratings.head(5)
# -

# Show the distribution of ratings per works
_, ax = plt.subplots(figsize=(10, 8))
sns.histplot(work_ratings['rating'], ax=ax, bins=10)
ax.set_ylabel('Counts')
ax.set_xlabel('Ratings')
ax.set_title('Work rating distribution');

by_work = work_ratings[['work_id', 'rating']]\
    .groupby('work_id', observed=True)
by_work_count = by_work[['rating']].count()

# +
work_rated_count = len(by_work_count)
work_5times_rated_count = (by_work_count['rating'] >= 5).sum()

print(f'Number of works which have ratings: {work_rated_count} '
      f'that is {work_rated_count * 100 / len(works):.4f}%')
print(f'Number of works which have at least 5 ratings: '
      f'{work_5times_rated_count} that is '
      f'{work_5times_rated_count * 100 / len(works):.4f}%')
# -

# Let's calculate weighted ratings of books. To use usual rating is not the
# best idea, because a book with a rating of 9 from 10 voters will be
# considered "better" than a book with a rating of 8.9 from 10,000 voters.
# Thus, it is better to use "weighted rating". See more details
# [in the Datacamp article](https://www.datacamp.com/community/tutorials/recommender-systems-python).

# Calculate weighted rating
min_work_rate_count = by_work_count['rating'].quantile(0.99)
mean_work_rate = work_ratings['rating'].mean()

work_ids = by_work_count[by_work_count['rating'] >= min_work_rate_count].index
by_work_weighted = work_ratings\
    .loc[work_ratings['work_id'].isin(work_ids), ['work_id', 'rating']]\
    .groupby('work_id', observed=True).apply(get_weighted_rating,
                                             min_rate_count=min_work_rate_count,
                                             mean_rate=mean_work_rate)\
    .to_frame(name='weighted_rating')

# Get work info
works_data = pd.merge(works[['front_isbn']], books[['title']],
                      left_on='front_isbn', right_index=True)

# +
_, ax = plt.subplots(figsize=(12, 10))

# Most rated books
high_rated_works_top = by_work_weighted\
    .sort_values(by='weighted_rating', ascending=False).head(20)
high_rated_works_top = pd.merge(high_rated_works_top, works_data[['title']],
                                how='left', right_index=True, left_index=True)

sns.barplot(x=high_rated_works_top['weighted_rating'],
            y=high_rated_works_top['title'],
            palette=sns_palette, ax=ax, ci=None)
ax.set_xlim(8.5, 9)
ax.set_ylabel('Work titles')
ax.set_xlabel('Rating')
ax.set_title('Top 20 most rated books')
plt.show()
# -

# ## Save

books.to_csv(os.path.join('data_prep', 'books.csv'), index=True)
categories.to_csv(os.path.join('data_prep', 'categories.csv'), index=True)
roles.to_csv(os.path.join('data_prep', 'contributor_roles.csv'), index=True)
publishers.to_csv(os.path.join('data_prep', 'publishers.csv'), index=True)
series.to_csv(os.path.join('data_prep', 'series.csv'), index=True)
authors.to_csv(os.path.join('data_prep', 'contributors.csv'), index=True)
works.to_csv(os.path.join('data_prep', 'works.csv'), index=True)

book_cats.to_csv(os.path.join('data_prep', 'book_categories.csv'))
book_authors.to_csv(os.path.join('data_prep', 'book_contributors.csv'))
work_ratings.to_csv(os.path.join('data_prep', 'work_ratings.csv'))
penguin_random_house/preprocessing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Finding the Largest or Smallest N Items

# ## Problem
#
# - You want to make a list of the largest or smallest N items in a collection.

# ## Solution
# - The __`heapq`__ module has two functions—__`nlargest()`__ and __`nsmallest()`__

# ## Example 1

import heapq

nums = [1, 8, 2, 23, 7, -4, 18, 23, 42, 37, 2]
print(heapq.nlargest(3, nums))   # Prints [42, 37, 23]
print(heapq.nsmallest(3, nums))  # Prints [-4, 1, 2]

# ## Example 2
# - Use with key parameter

# +
portfolio = [
    {'name': 'IBM', 'shares': 100, 'price': 91.1},
    {'name': 'AAPL', 'shares': 50, 'price': 543.22},
    {'name': 'FB', 'shares': 200, 'price': 21.09},
    {'name': 'HPQ', 'shares': 35, 'price': 31.75},
    {'name': 'YHOO', 'shares': 45, 'price': 16.35},
    {'name': 'ACME', 'shares': 75, 'price': 115.65}
]

# key= selects the attribute to rank by, just like sorted()
cheap = heapq.nsmallest(3, portfolio, key=lambda s: s['price'])
expensive = heapq.nlargest(3, portfolio, key=lambda s: s['price'])

print(cheap)
print(expensive)
# -

# ## Understand heap
# - heap converts data into an ordered list

nums = [1, 8, 2, 23, 7, -4, 18, 23, 42, 37, 2]
heap = list(nums)  # heapify works in place, so keep nums untouched
heapq.heapify(heap)
heap

# - `heap[0]` is always the smallest item

print(heapq.heappop(heap))
print(heapq.heappop(heap))
print(heapq.heappop(heap))

# ## Sorted
# - You can also use `sorted()` function

sorted(nums)[:3]
notebooks/ch01/04_finding_the_largest_or_smallest_n_items.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="XNRtE_8qsdx2"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# + id="Nt51Kxjbsdx8"
# Load the raw train/test splits. .copy() guards against chained-assignment
# warnings when the frames are mutated below.
train = pd.read_csv('/home/train_file.csv').copy()
test = pd.read_csv('/home/test_file.csv').copy()

# +
print(train.shape)
train.head(3)

# +
test.shape

# +
train.columns

# +
train.info()

# +
train.isna().sum()

# +
# Share of missing values per column in the test split
test.isna().sum() / len(test)

# +
# Tag each split so the rows can be separated again after a joint transform:
# sample == 1 -> train, sample == 0 -> test
train['sample'] = 1
test['sample'] = 0
train.head(3)

# +
# Stack train and test so categorical encodings are built on the union of
# both splits (avoids unseen-category problems at predict time).
df = pd.concat([train, test])

# +
print(df.shape)
df.head(3)

# +
df.reset_index(inplace=True, drop=True)  # changes must be permanent

# +
print(df.shape)
df.head(3)

# +
df['Greater_Risk_Question'].value_counts()  # the unique questions

# +
# Which locations come without coordinates?
df[df['GeoLocation'].isna()].LocationDesc.value_counts()

# +
df[df['LocationDesc'] == 'United States'].GeoLocation.value_counts()

# +
# Back-fill the missing coordinates with hand-looked-up values, one per
# location. Same rows and values as the original six per-index loops,
# without the copy-paste.
geo_fixes = {
    'United States': '(39.7837304, -100.4458825)',
    'Navajo': '(35.1819775, -110.3549451)',
    'New Orleans, LA': '(29.9499323, -90.0701156)',
    'Shelby County, TN': '(35.1782557, -89.8739775)',
    'Cherokee Nation': '(36.1342565, -94.9477014208174)',
    'Nez Perce': '(46.3959861, -116.8072307)',
}
for desc, geo in geo_fixes.items():
    df.loc[df['LocationDesc'] == desc, 'GeoLocation'] = geo

# +
# Verify that no coordinates are missing any more
df[df['GeoLocation'].isna()].LocationDesc.value_counts()

# +
df.isna().sum()

# +
df['Greater_Risk_Question'].value_counts()

# +
df['Grade'].value_counts()

# +
df['QuestionCode'].value_counts()

# +
df['Race'].value_counts()

# +
df['Sex'].value_counts()

# +
df['StratID1'].value_counts()

# +
df['StratificationType'].value_counts()

# +
df['Subtopic'].value_counts()

# +
df['YEAR'].value_counts()

# +
data = df.copy()

# +
data.shape

# +
# Scratch cell: check how to strip the parentheses off a GeoLocation string
jk = '(29.9499323, -90.0701156)'
jk = jk[1:]
print(jk)

# +
jk = '(29.9499323, -90.0701156)'
jk = jk[1:]
print(jk.split(','))


# +
def location(x):
    """Split a '(lat, lon)' string into a [lat, lon] list of strings.

    Strips the surrounding parentheses and splits on the comma; the
    longitude part keeps its leading space.
    """
    x = x[1:-1]
    return x.split(',')


# +
GeoLocation = data.GeoLocation.apply(location)

# +
data.head(3)

# +
GeoLocation.values.tolist()[:5]

# +
pd.DataFrame(GeoLocation.values.tolist(),
             columns=['latitute', 'longitude']).head(3)

# +
print(data.shape)
data.head(3)

# +
pd.get_dummies(data.StratificationType).head(3)

# +
# One-hot encode the categorical columns and append the split coordinates.
# FIX: the coordinate strings are cast to float here; without the cast they
# were object-dtype columns that select_dtypes(exclude='object') below
# silently dropped, so the model never saw them.
# ('latitute' [sic] keeps the original column name.)
coords = pd.DataFrame(GeoLocation.tolist(),
                      columns=['latitute', 'longitude']).astype(float)
data = pd.concat([data,
                  pd.get_dummies(data.Greater_Risk_Question),
                  pd.get_dummies(data.QuestionCode),
                  pd.get_dummies(data.Race),
                  pd.get_dummies(data.Sex),
                  pd.get_dummies(data.StratificationType),
                  coords],
                 axis=1)

# +
print(data.shape)
data.head(3)

# +
# Keep only numeric columns (drops the raw text/categorical originals)
data = data.select_dtypes(exclude='object')

# +
print(data.shape)
data.head(3)

# +
# Split back into the original train/test rows using the `sample` flag
train = data[data['sample'] == 1]
test = data[data['sample'] == 0]
test.reset_index(inplace=True, drop=True)
train = train.drop(['sample'], axis=1)
test = test.drop(['sample', 'Greater_Risk_Probability'], axis=1)

# +
print(train.shape, test.shape)

# +
train.head(3)

# +
train_y = train['Greater_Risk_Probability']
train_x = train.drop(['Greater_Risk_Probability'], axis=1)

# +
train_y.head(3)

# +
from sklearn import tree

clf = tree.DecisionTreeRegressor()
clf = clf.fit(train_x, train_y)

# +
pred = clf.predict(test)

# +
pred

# +
# Build the submission file
sub = pd.DataFrame()
sub['Patient_ID'] = test['Patient_ID']
sub['Greater_Risk_Probability'] = pred

# +
sub.head(3)

# +
sub.to_csv('/home/submit.csv', index=False)
Drug_prediction/Drug_prediction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
from matplotlib import pyplot as plt
import matplotlib

# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
# -

# # Test Correlations in the Hammersley Generator
# ---
# The Hammerlsey sequence should produce random numbers on (0,1) that are
# less correlated than the basic random generator:

# +
from distgen.dist import random_generator

rands = random_generator((4, 1000), sequence='hammersley')
cov = np.cov(rands)
print('Covariance of Hammersley samples:')
print(cov)
plt.plot(rands[0, :], rands[1, :], '.');

# +
rands = random_generator((4, 1000), sequence='pseudo')
cov = np.cov(rands)
print('Covariance of Rand samples:')
print(cov)
plt.plot(rands[0, :], rands[1, :], '.')
# -

# # Radial Distributions
# ---
#
# Test correlation in sin(theta)cos(theta)

import math

# FIX: the original wrote np.cos(2*math.pi)*rands[1,:] * np.sin(2*math.pi)*rands[1,:],
# multiplying by the constants cos(2*pi)~1 and sin(2*pi)~0, which is
# identically ~0 regardless of the samples. The intended quantity is
# sum(cos(2*pi*u) * sin(2*pi*u)) over the sampled angles u.
rands = random_generator((2, 10000), sequence='hammersley')
np.sum(np.cos(2*math.pi*rands[1, :]) * np.sin(2*math.pi*rands[1, :]))

rands = random_generator((2, 10000), sequence='pseudo')
np.sum(np.cos(2*math.pi*rands[1, :]) * np.sin(2*math.pi*rands[1, :]))

# # Generate x,y for uniform dist and check correlation

# +
R = 1
N = 100000

rands1 = random_generator((2, N), sequence='hammersley')
rands2 = random_generator((2, N), sequence='pseudo')
rands3 = np.linspace(0, 1, N)

# sqrt of the uniform radial variate gives a uniform areal density
rs = R*np.sqrt(rands1[0, :])

xs1 = rs*np.cos(2*math.pi*rands1[1, :])
ys1 = rs*np.sin(2*math.pi*rands1[1, :])

xs2 = rs*np.cos(2*math.pi*rands2[1, :])
ys2 = rs*np.sin(2*math.pi*rands2[1, :])

xs3 = rs*np.cos(2*math.pi*rands3)
ys3 = rs*np.sin(2*math.pi*rands3)

plt.plot(xs1, ys1, '.');
# -

print(np.mean((xs1-xs1.mean())*(ys1-ys1.mean())))
print(np.mean((xs2-xs2.mean())*(ys2-ys2.mean())))
print(np.mean((xs3-xs3.mean())*(ys3-ys3.mean())))

# +
# Decorrelate the samples by rotating into the eigenbasis of the covariance
sigma = np.cov(rands1)
v, V = np.linalg.eig(sigma)

#np.matmul(np.matmul(V.T, sigma), V)
randsp = np.matmul(V.T, rands1)
np.cov(randsp)

rs = R*np.sqrt(randsp[0, :])
xsp = rs*np.cos(2*math.pi*randsp[1, :])
ysp = rs*np.sin(2*math.pi*randsp[1, :])
print(np.mean((xsp-xsp.mean())*(ysp-ysp.mean())))

# +
# NOTE(review): the YAML indentation below was reconstructed from the key
# structure; confirm against the distgen radial_uniform example.
input_str = """
n_particle: 100000
random_type: hammersley
total_charge:
  value: 1
  units: pC
start:
  type: time
r_dist:
  max_r:
    units: m
    value: 2
  min_r:
    units: m
    value: 0
  type: radial_uniform
"""

import yaml
yaml.safe_load(input_str)

# +
from distgen import Generator

gen = Generator(input_str)
gen.run()

xs = gen.particles['x']
ys = gen.particles['y']

gen.particles.cov('x', 'y')
# -
regression_tests/correlations.ipynb
# define function to clean tokens
def clean_tokens(tokens):
    """Normalize a list of word tokens for bag-of-words analysis.

    Steps: strip punctuation characters from each token, lowercase
    everything, then drop NLTK stopwords.  Matches the original call
    to ``stopwords.words()`` with no language argument (all languages).

    Parameters
    ----------
    tokens : list of str
        Raw tokens, e.g. the output of ``word_tokenize``.

    Returns
    -------
    list of str
        Cleaned tokens with stopwords removed (may contain '' entries
        for tokens that were pure punctuation, as in the original).
    """
    # remove punctuation from each word
    table = str.maketrans('', '', string.punctuation)
    tokens = [w.translate(table) for w in tokens]
    # make all words lowercase
    tokens = [token.lower() for token in tokens]
    # PERFORMANCE FIX: the original evaluated stopwords.words() (which
    # re-reads the stopword corpus for every language) once PER TOKEN
    # inside the comprehension.  Build the set once; membership tests
    # then cost O(1) and the result is unchanged.
    stop_words = set(stopwords.words())
    return [word for word in tokens if word not in stop_words]
# defining bag of words functions
def bow(words):
    """Build a corpus-wide bag-of-words Counter from token lists.

    Parameters
    ----------
    words : iterable of list of str
        One token list per review (the ``filtered_tokens`` column).

    Returns
    -------
    collections.Counter
        Token -> frequency over ALL reviews.

    BUG FIX: the original re-created ``word_counter = Counter()`` inside
    the loop and the ``bow.append(word_counter)`` that would have kept
    the per-review counters was commented out, so it silently returned
    counts for the LAST review only.  The callers (``fake_freq_w``,
    ``real_freq_w``) feed the result to word clouds and clearly expect
    corpus-wide frequencies, so accumulate across every token list.
    """
    word_counter = Counter()
    for item in words:
        # Counter.update adds the counts of one review's tokens.
        word_counter.update(item)
    return word_counter


def bow2(words):
    """Bag-of-words Counter over a flat list of tokens.

    Parameters
    ----------
    words : iterable of str
        Individual tokens (already flattened across reviews).

    Returns
    -------
    collections.Counter
        Token -> frequency.
    """
    # Counter(iterable) counts elements directly; the hand-rolled loop
    # and the unused ``bow = []`` local in the original added nothing.
    return Counter(words)
background_color ='white', stopwords = stopword, min_font_size =5).generate_from_frequencies(fake_freq_w2) #plot the WordCloud image plot.figure(figsize = (8, 8), facecolor = None) plot.imshow(wcloud) plot.axis("off") plot.tight_layout(pad = 0) # + #fake reviews stopwords = set(STOPWORDS) wcloud = WC(width = 800, height = 800, background_color ='white', stopwords = stopwords, min_font_size =5).generate_from_frequencies(fake_freq_w) #plot the WordCloud image plot.figure(figsize = (8, 8), facecolor = None) plot.imshow(wcloud) plot.axis("off") plot.tight_layout(pad = 0) # - new_real_words=[i for i in real_words2 if i not in common_stop] real_freq_w2=bow2(new_real_words) # + #real reviews WITHOUT common words stopwords = set(common_stop) wcloud = WC(width = 800, height = 800, background_color ='white', stopwords = stopwords, min_font_size =5).generate_from_frequencies(real_freq_w2) #plot the WordCloud image plot.figure(figsize = (8, 8), facecolor = None) plot.imshow(wcloud) plot.axis("off") plot.tight_layout(pad = 0) # + #real reviews stopwords = set(STOPWORDS) wcloud = WC(width = 800, height = 800, background_color ='white', stopwords = stopwords, min_font_size =5).generate_from_frequencies(real_freq_w) # plot the WordCloud image plot.figure(figsize = (8, 8), facecolor = None) plot.imshow(wcloud) plot.axis("off") plot.tight_layout(pad = 0)
notebooks/gh1408/1003ProjectGH_Exploration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8 - AzureML # language: python # name: python38-azureml # --- # # Entity Explorer - Linux Host # <details> # <summary>&nbsp;<u>Details...</u></summary> # # **Notebook Version:** 1.1<br> # **Python Version:** Python 3.6 (including Python 3.6 - AzureML)<br> # **Required Packages**: kqlmagic, msticpy, pandas, pandas_bokeh, numpy, matplotlib, networkx, seaborn, datetime, ipywidgets, ipython, dnspython, ipwhois, folium, maxminddb_geolite2<br> # # **Data Sources Required**: # - Log Analytics/Azure Sentinel - Syslog, Secuirty Alerts, Auditd, Azure Network Analytics. # - (Optional) - AlienVault OTX (requires account and API key) # </details> # # This Notebooks brings together a series of tools and techniques to enable threat hunting within the context of a singular Linux host. The notebook utilizes a range of data sources to achieve this but in order to support the widest possible range of scenarios this Notebook prioritizes using common Syslog data. If there is detailed auditd data available for a host you may wish to edit the Notebook to rely primarily on this dataset, as it currently stands auditd is used when available to provide insight not otherwise available via Syslog. 
# + [markdown] toc=true # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#Notebook-initialization" data-toc-modified-id="Notebook-initialization-0.1"><span class="toc-item-num">0.1&nbsp;&nbsp;</span>Notebook initialization</a></span></li><li><span><a href="#Get-WorkspaceId-and-Authenticate-to-Log-Analytics" data-toc-modified-id="Get-WorkspaceId-and-Authenticate-to-Log-Analytics-0.2"><span class="toc-item-num">0.2&nbsp;&nbsp;</span>Get WorkspaceId and Authenticate to Log Analytics</a></span></li></ul></li><li><span><a href="#Set-Hunting-Time-Frame" data-toc-modified-id="Set-Hunting-Time-Frame-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Set Hunting Time Frame</a></span><ul class="toc-item"><li><span><a href="#Select-Host-to-Investigate" data-toc-modified-id="Select-Host-to-Investigate-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Select Host to Investigate</a></span></li></ul></li><li><span><a href="#Host-Summary" data-toc-modified-id="Host-Summary-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Host Summary</a></span><ul class="toc-item"><li><span><a href="#Host-Alerts" data-toc-modified-id="Host-Alerts-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Host Alerts</a></span></li></ul></li><li><span><a href="#Re-scope-Hunting-Time-Frame" data-toc-modified-id="Re-scope-Hunting-Time-Frame-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Re-scope Hunting Time Frame</a></span></li><li><span><a href="#How-to-use-this-Notebook" data-toc-modified-id="How-to-use-this-Notebook-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>How to use this Notebook</a></span></li><li><span><a href="#Host-Logon-Events" data-toc-modified-id="Host-Logon-Events-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>Host Logon Events</a></span><ul class="toc-item"><li><span><a href="#Logon-Sessions" data-toc-modified-id="Logon-Sessions-5.1"><span 
class="toc-item-num">5.1&nbsp;&nbsp;</span>Logon Sessions</a></span><ul class="toc-item"><li><span><a href="#Session-Details" data-toc-modified-id="Session-Details-5.1.1"><span class="toc-item-num">5.1.1&nbsp;&nbsp;</span>Session Details</a></span></li><li><span><a href="#Raw-data-from-user-session" data-toc-modified-id="Raw-data-from-user-session-5.1.2"><span class="toc-item-num">5.1.2&nbsp;&nbsp;</span>Raw data from user session</a></span></li></ul></li><li><span><a href="#Process-Tree-from-session" data-toc-modified-id="Process-Tree-from-session-5.2"><span class="toc-item-num">5.2&nbsp;&nbsp;</span>Process Tree from session</a></span></li><li><span><a href="#Sudo-Session-Investigation" data-toc-modified-id="Sudo-Session-Investigation-5.3"><span class="toc-item-num">5.3&nbsp;&nbsp;</span>Sudo Session Investigation</a></span></li></ul></li><li><span><a href="#User-Activity" data-toc-modified-id="User-Activity-6"><span class="toc-item-num">6&nbsp;&nbsp;</span>User Activity</a></span></li><li><span><a href="#Application-Activity" data-toc-modified-id="Application-Activity-7"><span class="toc-item-num">7&nbsp;&nbsp;</span>Application Activity</a></span><ul class="toc-item"><li><span><a href="#Display-process-tree" data-toc-modified-id="Display-process-tree-7.1"><span class="toc-item-num">7.1&nbsp;&nbsp;</span>Display process tree</a></span></li><li><span><a href="#Application-Logs-with-associated-Threat-Intelligence" data-toc-modified-id="Application-Logs-with-associated-Threat-Intelligence-7.2"><span class="toc-item-num">7.2&nbsp;&nbsp;</span>Application Logs with associated Threat Intelligence</a></span></li></ul></li><li><span><a href="#Network-Activity" data-toc-modified-id="Network-Activity-8"><span class="toc-item-num">8&nbsp;&nbsp;</span>Network Activity</a></span><ul class="toc-item"><li><span><a href="#Choose-ASNs/IPs-to-Check-for-Threat-Intel-Reports" data-toc-modified-id="Choose-ASNs/IPs-to-Check-for-Threat-Intel-Reports-8.1"><span 
class="toc-item-num">8.1&nbsp;&nbsp;</span>Choose ASNs/IPs to Check for Threat Intel Reports</a></span></li></ul></li><li><span><a href="#Configuration" data-toc-modified-id="Configuration-9"><span class="toc-item-num">9&nbsp;&nbsp;</span>Configuration</a></span><ul class="toc-item"><li><span><a href="#msticpyconfig.yaml-configuration-File" data-toc-modified-id="msticpyconfig.yaml-configuration-File-9.1"><span class="toc-item-num">9.1&nbsp;&nbsp;</span><code>msticpyconfig.yaml</code> configuration File</a></span></li></ul></li></ul></div> # - # # Hunting Hypothesis: # Our broad initial hunting hypothesis is that a particular Linux host in our environment has been compromised, we will need to hunt from a range of different positions to validate or disprove this hypothesis. # # --- # ### Notebook initialization # The next cell: # - Checks for the correct Python version # - Checks versions and optionally installs required packages # - Imports the required packages into the notebook # - Sets a number of configuration options. # # This should complete without errors. If you encounter errors or warnings look at the following two notebooks: # - [TroubleShootingNotebooks](https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/TroubleShootingNotebooks.ipynb) # - [ConfiguringNotebookEnvironment](https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/ConfiguringNotebookEnvironment.ipynb) # # If you are running in the Azure Sentinel Notebooks environment (Azure Notebooks or Azure ML) you can run live versions of these notebooks: # - [Run TroubleShootingNotebooks](./TroubleShootingNotebooks.ipynb) # - [Run ConfiguringNotebookEnvironment](./ConfiguringNotebookEnvironment.ipynb) # # You may also need to do some additional configuration to successfully use functions such as Threat Intelligence service lookup and Geo IP lookup. 
# There are more details about this in the `ConfiguringNotebookEnvironment` notebook and in these documents: # - [msticpy configuration](https://msticpy.readthedocs.io/en/latest/getting_started/msticpyconfig.html) # - [Threat intelligence provider configuration](https://msticpy.readthedocs.io/en/latest/data_acquisition/TIProviders.html#configuration-file) # + from pathlib import Path from IPython.display import display, HTML REQ_PYTHON_VER=(3, 6) REQ_MSTICPY_VER=(1, 0, 0) REQ_MP_EXTRAS = ["ml"] update_nbcheck = ( "<p style='color: orange; text-align=left'>" "<b>Warning: we needed to update '<i>utils/nb_check.py</i>'</b><br>" "Please restart the kernel and re-run this cell." "</p>" ) display(HTML("<h3>Starting Notebook setup...</h3>")) if Path("./utils/nb_check.py").is_file(): try: from utils.nb_check import check_versions except ImportError as err: # %xmode Minimal # !curl https://raw.githubusercontent.com/Azure/Azure-Sentinel-Notebooks/master/utils/nb_check.py > ./utils/nb_check.py 2>/dev/null display(HTML(update_nbcheck)) if "check_versions" not in globals(): raise ImportError("Old version of nb_check.py detected - see instructions below.") # %xmode Verbose check_versions(REQ_PYTHON_VER, REQ_MSTICPY_VER, REQ_MP_EXTRAS) # If the installation fails try to manually install using # # !pip install --upgrade msticpy from msticpy.nbtools import nbinit additional_packages = [ "oauthlib", "pyvis", "python-whois", "seaborn" ] nbinit.init_notebook( namespace=globals(), additional_packages=additional_packages, extra_imports=extra_imports, ); from bokeh.models import ColumnDataSource, FactorRange from bokeh.palettes import viridis from bokeh.plotting import show, Row, figure from bokeh.transform import factor_cmap, cumsum from dns import reversename, resolver from functools import lru_cache from ipaddress import ip_address from ipwhois import IPWhois from math import pi from msticpy.common.exceptions import MsticpyException from msticpy.nbtools import observationlist from 
msticpy.nbtools.foliummap import get_map_center from msticpy.sectools import auditdextract from msticpy.sectools.cmd_line import risky_cmd_line from msticpy.sectools.ip_utils import convert_to_ip_entities from msticpy.sectools.syslog_utils import create_host_record, cluster_syslog_logons_df, risky_sudo_sessions from pyvis.network import Network import datetime as dt import re # - # ### Get WorkspaceId and Authenticate to Log Analytics # <details> # <summary> <u>Details...</u></summary> # If you are using user/device authentication, run the following cell. # - Click the 'Copy code to clipboard and authenticate' button. # - This will pop up an Azure Active Directory authentication dialog (in a new tab or browser window). The device code will have been copied to the clipboard. # - Select the text box and paste (Ctrl-V/Cmd-V) the copied value. # - You should then be redirected to a user authentication page where you should authenticate with a user account that has permission to query your Log Analytics workspace. # # Use the following syntax if you are authenticating using an Azure Active Directory AppId and Secret: # ``` # # %kql loganalytics://tenant(aad_tenant).workspace(WORKSPACE_ID).clientid(client_id).clientsecret(client_secret) # ``` # instead of # ``` # # %kql loganalytics://code().workspace(WORKSPACE_ID) # ``` # # Note: you may occasionally see a JavaScript error displayed at the end of the authentication - you can safely ignore this.<br> # On successful authentication you should see a ```popup schema``` button. # To find your Workspace Id go to [Log Analytics](https://ms.portal.azure.com/#blade/HubsExtension/Resources/resourceType/Microsoft.OperationalInsights%2Fworkspaces). Look at the workspace properties to find the ID. # </details> # + # See if we have an Azure Sentinel Workspace defined in our config file. 
# If not, let the user specify Workspace and Tenant IDs ws_config = WorkspaceConfig() if not ws_config.config_loaded: ws_config.prompt_for_ws() qry_prov = QueryProvider(data_environment="AzureSentinel") print("done") # - # Authenticate to Azure Sentinel workspace qry_prov.connect(ws_config) # ## Set Hunting Time Frame # To begin the hunt we need to et the time frame in which you wish to test your compromised host hunting hypothesis within. Use the widget below to select your start and end time for the hunt. query_times = nbwidgets.QueryTime(units='day', max_before=14, max_after=1, before=1) query_times.display() # ### Select Host to Investigate # Select the host you want to test your hunting hypothesis against, only hosts with Syslog data within the time frame you specified are available. If the host you wish to select is not present try adjusting your time frame. #Get a list of hosts with syslog data in our hunting timegframe to provide easy selection syslog_query = f"""Syslog | where TimeGenerated between (datetime({query_times.start}) .. datetime({query_times.end})) | summarize by Computer""" md("Collecting avaliable host details...") hosts_list = qry_prov._query_provider.query(query=syslog_query) if isinstance(hosts_list, pd.DataFrame) and not hosts_list.empty: hosts = hosts_list["Computer"].unique().tolist() host_text = nbwidgets.SelectItem(description='Select host to investigate: ', item_list=hosts, width='75%', auto_display=True) else: display(md("There are no hosts with syslog data in this time period to investigate")) # ## Host Summary # Below is a overview of the selected host based on available data sources. hostname=host_text.value az_net_df = None # Collect data on the host all_syslog_query = f"Syslog | where TimeGenerated between (datetime({query_times.start}) .. 
datetime({query_times.end})) | where Computer =~ '{hostname}'""" all_syslog_data = qry_prov.exec_query(all_syslog_query) if isinstance(all_syslog_data, pd.DataFrame) and not all_syslog_data.empty: heartbeat_query = f"""Heartbeat | where TimeGenerated >= datetime({query_times.start}) | where TimeGenerated <= datetime({query_times.end})| where Computer == '{hostname}' | top 1 by TimeGenerated desc nulls last""" if "AzureNetworkAnalytics_CL" in qry_prov.schema: aznet_query = f"""AzureNetworkAnalytics_CL | where TimeGenerated >= datetime({query_times.start}) | where TimeGenerated <= datetime({query_times.end}) | where VirtualMachine_s has '{hostname}' | where ResourceType == 'NetworkInterface' | top 1 by TimeGenerated desc | project PrivateIPAddresses = PrivateIPAddresses_s, PublicIPAddresses = PublicIPAddresses_s""" print("Getting network data...") az_net_df = qry_prov.exec_query(query=aznet_query) print("Getting host data...") host_hb = qry_prov.exec_query(query=heartbeat_query) # Create host entity record, with Azure network data if any is avaliable if az_net_df is not None and isinstance(az_net_df, pd.DataFrame) and not az_net_df.empty: host_entity = create_host_record(syslog_df=all_syslog_data, heartbeat_df=host_hb, az_net_df=az_net_df) else: host_entity = create_host_record(syslog_df=all_syslog_data, heartbeat_df=host_hb) md( "<b>Host Details</b><br>" f"<b>Hostname</b>: {host_entity.computer}<br>" f"<b>OS</b>: {host_entity.OSType} {host_entity.OSName}<br>" f"<b>IP Address</b>: {host_entity.IPAddress.Address}<br>" f"<b>Location</b>: {host_entity.IPAddress.Location.CountryName}<br>" f"<b>Installed Applications</b>: {host_entity.Applications}<br>" ) else: md_warn("No Syslog data found, check hostname and timeframe.") md("The data query may be timing out, consider reducing the timeframe size.") # ### Host Alerts & Bookmarks # This section provides an overview of any security alerts or Hunting Bookmarks in Azure Sentinel related to this host, this will help scope and 
def print_related_alerts(alertDict, entityType, entityName):
    """Render a markdown summary of the alert types seen for one entity.

    alertDict maps alert name -> count of occurrences; entityType and
    entityName identify what the alerts relate to (e.g. 'host', hostname).
    """
    if not alertDict:
        # Nothing matched this entity in the hunting window.
        md(f"No alerts for {entityType} entity \'{entityName}\'")
        return
    md(f"Found {len(alertDict)} different alert types related to this {entityType} (\'{entityName}\')")
    for alert_name, alert_count in alertDict.items():
        md(f"- {alert_name}, Count of alerts: {alert_count}")
# + if rel_alert_select is None or rel_alert_select.selected_alert is None: start = query_times.start else: start = rel_alert_select.selected_alert['TimeGenerated'] # Set new investigation time windows based on the selected alert invest_times = nbwidgets.QueryTime( units='day', max_before=24, max_after=12, before=1, after=1, origin_time=start) invest_times.display() # - # ## How to use this Notebook # Whilst this notebook is linear in layout it doesn't need to be linear in usage. We have selected our host to investigate and set an initial hunting time-frame to work within. We can now start to test more specific hunting hypothesis with the aim of validating our broader initial hunting hypothesis. To do this we can start by looking at: # - <a>Host Logon Events</a> # - <a>User Activity</a> # - <a>Application Activity</a> # - <a>Network Activity</a> # # You can choose to start below with a hunt in host logon events or choose to jump to one of the other sections listed above. The order in which you choose to run each of these major sections doesn't matter, they are each self contained. You may also choose to rerun sections based on your findings from running other sections. # This notebook uses external threat intelligence sources to enrich data. The next cell loads the TILookup class. 
# > **Note**: to use TILookup you will need configuration settings in your msticpyconfig.yaml # > <br>see [TIProviders documenation](https://msticpy.readthedocs.io/en/latest/TIProviders.html) # > <br>and [Configuring Notebook Environment notebook](./ConfiguringNotebookEnvironment.ipynb) # > <br>or [ConfiguringNotebookEnvironment (GitHub static view)](https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/ConfiguringNotebookEnvironment.ipynb) tilookup = TILookup() md("Threat intelligence provider loading complete.") # ## Host Logon Events # **Hypothesis:** That an attacker has gained legitimate access to the host via compromised credentials and has logged into the host to conduct malicious activity. # # This section provides an overview of logon activity for the host within our hunting time frame, the purpose of this is to allow for the identification of anomalous logons or attempted logons. # + # Collect logon events for this, seperate them into sucessful and unsucessful and cluster sucessful one into sessions logon_events = qry_prov.LinuxSyslog.user_logon(start=invest_times.start, end=invest_times.end, host_name=hostname) remote_logons = None failed_logons = None if isinstance(logon_events, pd.DataFrame) and not logon_events.empty: remote_logons = (logon_events[logon_events['LogonResult'] == 'Success']) failed_logons = (logon_events[logon_events['LogonResult'] == 'Failure']) else: print("No logon events in this timeframe") if (isinstance(remote_logons, pd.DataFrame) and not remote_logons.empty) or (isinstance(failed_logons, pd.DataFrame) and not failed_logons.empty): #Provide a timeline of sucessful and failed logon attempts to aid identification of potential brute force attacks display(Markdown('### Timeline of sucessful host logons.')) tooltip_cols = ['User', 'ProcessName', 'SourceIP'] if rel_alert_select is not None: logon_timeline = nbdisplay.display_timeline(data=remote_logons, overlay_data=failed_logons, source_columns=tooltip_cols, height=200, 
overlay_color="red", alert = rel_alert_select.selected_alert) else: logon_timeline = nbdisplay.display_timeline(data=remote_logons, overlay_data=failed_logons, source_columns=tooltip_cols, height=200, overlay_color="red") display(Markdown('<b>Key:</b><p style="color:darkblue">Sucessful logons </p><p style="color:Red">Failed Logon Attempts (via su)</p>')) all_df = pd.DataFrame(dict(successful= remote_logons['ProcessName'].value_counts(), failed = failed_logons['ProcessName'].value_counts())).fillna(0) fail_data = pd.value_counts(failed_logons['User'].values, sort=True).head(10).reset_index(name='value').rename(columns={'User':'Count'}) fail_data['angle'] = fail_data['value']/fail_data['value'].sum() * 2*pi fail_data['color'] = viridis(len(fail_data)) fp = figure(plot_height=350, plot_width=450, title="Relative Frequencies of Failed Logons by Account", toolbar_location=None, tools="hover", tooltips="@index: @value") fp.wedge(x=0, y=1, radius=0.5, start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'), line_color="white", fill_color='color', legend='index', source=fail_data) sucess_data = pd.value_counts(remote_logons['User'].values, sort=False).reset_index(name='value').rename(columns={'User':'Count'}) sucess_data['angle'] = sucess_data['value']/sucess_data['value'].sum() * 2*pi sucess_data['color'] = viridis(len(sucess_data)) sp = figure(plot_height=350, width=450, title="Relative Frequencies of Sucessful Logons by Account", toolbar_location=None, tools="hover", tooltips="@index: @value") sp.wedge(x=0, y=1, radius=0.5, start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'), line_color="white", fill_color='color', legend='index', source=sucess_data) fp.axis.axis_label=None fp.axis.visible=False fp.grid.grid_line_color = None sp.axis.axis_label=None sp.axis.visible=False sp.grid.grid_line_color = None processes = all_df.index.values.tolist() results = all_df.columns.values.tolist() fail_sucess_data = {'processes' :processes, 
'sucess' : all_df['successful'].values.tolist(), 'failure': all_df['failed'].values.tolist()} palette = viridis(2) x = [ (process, result) for process in processes for result in results ] counts = sum(zip(fail_sucess_data['sucess'], fail_sucess_data['failure']), ()) source = ColumnDataSource(data=dict(x=x, counts=counts)) b = figure(x_range=FactorRange(*x), plot_height=350, plot_width=450, title="Failed and Sucessful logon attempts by process", toolbar_location=None, tools="", y_minor_ticks=2) b.vbar(x='x', top='counts', width=0.9, source=source, line_color="white", fill_color=factor_cmap('x', palette=palette, factors=results, start=1, end=2)) b.y_range.start = 0 b.x_range.range_padding = 0.1 b.xaxis.major_label_orientation = 1 b.xgrid.grid_line_color = None show(Row(sp,fp,b)) ip_list = [convert_to_ip_entities(i, ip_col="SourceIP")[0] for i in remote_logons['SourceIP'].unique() if i != ""] ip_fail_list = [convert_to_ip_entities(i)[0] for i in failed_logons['SourceIP'].unique() if i != ""] location = get_map_center(ip_list + ip_fail_list) folium_map = FoliumMap(location = location, zoom_start=1.4) #Map logon locations to allow for identification of anomolous locations if len(ip_fail_list) > 0: md('<h3>Map of Originating Location of Logon Attempts</h3>') icon_props = {'color': 'red'} folium_map.add_ip_cluster(ip_entities=ip_fail_list, **icon_props) if len(ip_list) > 0: icon_props = {'color': 'green'} folium_map.add_ip_cluster(ip_entities=ip_list, **icon_props) display(folium_map.folium_map) md('<p style="color:red">Warning: the folium mapping library ' 'does not display correctly in some browsers.</p><br>' 'If you see a blank image please retry with a different browser.') # - # ### Logon Sessions # Based on the detail above if you wish to focus your hunt on a particular user jump to the [User Activity](#user) section. Alternatively to further further refine our hunt we need to select a logon session to view in more detail. 
def color_cells(val):
    """Return a CSS style string for pandas Styler cell highlighting.

    Strings equal to "Yes" and floats greater than 0.5 are flagged in
    yellow; every other value keeps a white background.
    """
    highlight = False
    if isinstance(val, str):
        highlight = val == "Yes"
    elif isinstance(val, float):
        highlight = val > 0.5
    # Non-str, non-float values fall through with highlight == False.
    return 'background-color: %s' % ('yellow' if highlight else 'white')
.style.applymap(color_cells).hide_index()) logon_items = ( logon_sessions_df[['User','Start (UTC)', 'End (UTC)']] .to_string(header=False, index=False, index_names=False) .split('\n') ) logon_sessions_df["Key"] = logon_items logon_sessions_df.set_index('Key', inplace=True) logon_dict = logon_sessions_df[['User','Start (UTC)', 'End (UTC)']].to_dict('index') logon_selection = nbwidgets.SelectItem(description='Select logon session to investigate: ', item_dict=logon_dict , width='80%', auto_display=True) else: md("No logon sessions during this timeframe") # - # #### Session Details # + def view_syslog(selected_facility): return [syslog_events.query('Facility == @selected_facility')] # Produce a summary of user modification actions taken if "Add" in x: return len(add_events.replace("", np.nan).dropna(subset=['User'])['User'].unique().tolist()) elif "Modify" in x: return len(mod_events.replace("", np.nan).dropna(subset=['User'])['User'].unique().tolist()) elif "Delete" in x: return len(del_events.replace("", np.nan).dropna(subset=['User'])['User'].unique().tolist()) else: return "" crn_tl_data = {} user_tl_data = {} sudo_tl_data = {} sudo_sessions = None tooltip_cols = ['SyslogMessage'] if logon_sessions_df is not None: #Collect data based on the session selected for investigation invest_sess = {'StartTimeUtc': logon_selection.value.get('Start (UTC)'), 'EndTimeUtc': logon_selection.value.get( 'End (UTC)'), 'Account': logon_selection.value.get('User'), 'Host': hostname} session = entities.HostLogonSession(invest_sess) syslog_events = qry_prov.LinuxSyslog.all_syslog( start=session.StartTimeUtc, end=session.EndTimeUtc, host_name=session.Host) sudo_events = qry_prov.LinuxSyslog.sudo_activity( start=session.StartTimeUtc, end=session.EndTimeUtc, host_name=session.Host, user=session.Account) if isinstance(sudo_events, pd.DataFrame) and not sudo_events.empty: try: sudo_sessions = cluster_syslog_logons_df(logon_events=sudo_events) except MsticpyException: pass # Display summary 
of cron activity in session cron_events = qry_prov.LinuxSyslog.cron_activity( start=session.StartTimeUtc, end=session.EndTimeUtc, host_name=session.Host) if not isinstance(cron_events, pd.DataFrame) or cron_events.empty: md(f'<h3> No Cron activity for {session.Host} between {session.StartTimeUtc} and {session.EndTimeUtc}</h3>') else: cron_events['CMD'].replace('', np.nan, inplace=True) crn_tl_data = {"Cron Exections": {"data": cron_events[['TimeGenerated', 'CMD', 'CronUser', 'SyslogMessage']].dropna(), "source_columns": tooltip_cols, "color": "Blue"}, "Cron Edits": {"data": cron_events.loc[cron_events['SyslogMessage'].str.contains('EDIT')], "source_columns": tooltip_cols, "color": "Green"}} md('<h2> Most common commands run by cron:</h2>') md('This shows how often each cron job was exected within the specified time window') cron_commands = (cron_events[['EventTime', 'CMD']] .groupby(['CMD']).count() .dropna() .style .set_table_attributes('width=900px, text-align=center') .background_gradient(cmap='Reds', low=0.5, high=1) .format("{0:0>1.0f}")) display(cron_commands) # Display summary of user and group creations, deletions and modifications during the session user_activity = qry_prov.LinuxSyslog.user_group_activity( start=session.StartTimeUtc, end=session.EndTimeUtc, host_name=session.Host) if not isinstance(user_activity, pd.DataFrame) or user_activity.empty: md(f'<h3>No user or group moidifcations for {session.Host} between {session.StartTimeUtc} and {session.EndTimeUtc}></h3>') else: add_events = user_activity[user_activity['UserGroupAction'].str.contains( 'Add')] del_events = user_activity[user_activity['UserGroupAction'].str.contains( 'Delete')] mod_events = user_activity[user_activity['UserGroupAction'].str.contains( 'Modify')] user_activity['Count'] = user_activity.groupby('UserGroupAction')['UserGroupAction'].transform('count') if add_events.empty and del_events.empty and mod_events.empty: md('<h2> Users and groups added or deleted:</h2<>') md(f'No users or 
groups were added or deleted on {host_entity.HostName} between {query_times.start} and {query_times.end}') user_tl_data = {} else: md("<h2>Users added, modified or deleted</h2>") display(user_activity[['UserGroupAction','Count']].drop_duplicates().style.hide_index()) account_actions = pd.DataFrame({"User Additions": [add_events.replace("", np.nan).dropna(subset=['User'])['User'].unique().tolist()], "User Modifications": [mod_events.replace("", np.nan).dropna(subset=['User'])['User'].unique().tolist()], "User Deletions": [del_events.replace("", np.nan).dropna(subset=['User'])['User'].unique().tolist()]}) display(account_actions.style.hide_index()) user_tl_data = {"User adds": {"data": add_events, "source_columns": tooltip_cols, "color": "Orange"}, "User deletes": {"data": del_events, "source_columns": tooltip_cols, "color": "Red"}, "User modfications": {"data": mod_events, "source_columns": tooltip_cols, "color": "Grey"}} # Display sudo activity during session if not isinstance(sudo_sessions, pd.DataFrame) or sudo_sessions.empty: md(f"<h3>No Sudo sessions for {session.Host} between {logon_selection.value.get('Start (UTC)')} and {logon_selection.value.get('End (UTC)')}</h3>") sudo_tl_data = {} else: sudo_start = sudo_events[sudo_events["SyslogMessage"].str.contains( "pam_unix.+session opened")].rename(columns={"Sudoer": "User"}) sudo_tl_data = {"Host logons": {"data": remote_logons, "source_columns": tooltip_cols, "color": "Cyan"}, "Sudo sessions": {"data": sudo_start, "source_columns": tooltip_cols, "color": "Purple"}} try: risky_actions = cmd_line.risky_cmd_line(events=sudo_events, log_type="Syslog") suspicious_events = cmd_speed( cmd_events=sudo_events, time=60, events=2, cmd_field="Command") except: risky_actions = None suspicious_events = None if risky_actions is None and suspicious_events is None: pass else: risky_sessions = risky_sudo_sessions( risky_actions=risky_actions, sudo_sessions=sudo_sessions, suspicious_actions=suspicious_events) for key in 
risky_sessions: if key in sudo_sessions: sudo_sessions[f"{key} - {risky_sessions[key]}"] = sudo_sessions.pop( key) if isinstance(sudo_events, pd.DataFrame): sudo_events_val = sudo_events[['EventTime', 'CommandCall']][sudo_events['CommandCall']!=""].dropna(how='any', subset=['CommandCall']) if sudo_events_val.empty: md(f"No sucessful sudo activity for {hostname} between {logon_selection.value.get('Start (UTC)')} and {logon_selection.value.get('End (UTC)')}") else: sudo_events.replace("", np.nan, inplace=True) md('<h2> Frequency of sudo commands</h2>') md('This shows how many times each command has been run with sudo. /bin/bash is usally associated with the use of "sudo -i"') sudo_commands = (sudo_events[['EventTime', 'CommandCall']] .groupby(['CommandCall']) .count() .dropna() .style .set_table_attributes('width=900px, text-align=center') .background_gradient(cmap='Reds', low=.5, high=1) .format("{0:0>3.0f}")) display(sudo_commands) else: md(f"No sucessful sudo activity for {hostname} between {logon_selection.value.get('Start (UTC)')} and {logon_selection.value.get('End (UTC)')}") # Display a timeline of all activity during session crn_tl_data.update(user_tl_data) crn_tl_data.update(sudo_tl_data) if crn_tl_data: md('<h2> Session Timeline.</h2>') nbdisplay.display_timeline( data=crn_tl_data, title='Session Timeline', height=300) else: md("No logon sessions during this timeframe") # - # #### Raw data from user session # Use this syslog message data to further investigate suspicous activity during the session if isinstance(logon_sessions_df, pd.DataFrame) and not logon_sessions_df.empty: #Return syslog data and present it to the use for investigation session_syslog = qry_prov.LinuxSyslog.all_syslog( start=session.StartTimeUtc, end=session.EndTimeUtc, host_name=session.Host) if session_syslog.empty: display(HTML( f' No syslog for {session.Host} between {session.StartTimeUtc} and {session.EndTimeUtc}')) def view_sudo(selected_cmd): return [sudo_events.query('CommandCall 
== @selected_cmd')[ ['TimeGenerated', 'SyslogMessage', 'Sudoer', 'SudoTo', 'Command', 'CommandCall']]] # Show syslog messages associated with selected sudo command items = sudo_events['CommandCall'].dropna().unique().tolist() if items: md("<h3>View all messages associated with a sudo command</h3>") display(nbwidgets.SelectItem(item_list=items, action=view_sudo)) else: md("No logon sessions during this timeframe") if isinstance(logon_sessions_df, pd.DataFrame) and not logon_sessions_df.empty: # Display syslog messages from the session witht he facility selected items = syslog_events['Facility'].dropna().unique().tolist() md("<h3>View all messages associated with a syslog facility</h3>") display(nbwidgets.SelectItem(item_list=items, action=view_syslog)) else: md("No logon sessions during this timeframe") # ### Process Tree from session if isinstance(logon_sessions_df, pd.DataFrame) and not logon_sessions_df.empty: display(HTML("<h3>Process Trees from session</h3>")) print("Building process tree, this may take some time...") # Find the table with auditd data in regex = '.*audit.*\_cl?' 
matches = ((re.match(regex, key, re.IGNORECASE)) for key in qry_prov.schema) for match in matches: if match != None: audit_table = match.group(0) else: audit_table = None # Retrieve auditd data if audit_table: audit_data = qry_prov.LinuxAudit.auditd_all( start=session.StartTimeUtc, end=session.EndTimeUtc, host_name=hostname ) if isinstance(audit_data, pd.DataFrame) and not audit_data.empty: audit_events = auditdextract.extract_events_to_df( data=audit_data ) process_tree = auditdextract.generate_process_tree(audit_data=audit_events) process_tree.mp_process_tree.plot() else: display(HTML("No auditd data avaliable to build process tree")) else: display(HTML("No auditd data avaliable to build process tree")) else: md("No logon sessions during this timeframe") # Click [here](#app) to start a process/application focused hunt or continue with session based hunt below by selecting a sudo session to investigate. # ### Sudo Session Investigation # Sudo activity is often required by an attacker to conduct actions on target, and more granular data is avalibale for sudo sessions allowing for deeper level hunting within these sesions. 
if logon_sessions_df is not None and sudo_sessions is not None: sudo_items = sudo_sessions[['User','Start', 'End']].to_string(header=False, index=False, index_names=False).split('\n') sudo_sessions["Key"] = sudo_items sudo_sessions.set_index('Key', inplace=True) sudo_dict = sudo_sessions[['User','Start', 'End']].to_dict('index') sudo_selection = nbwidgets.SelectItem(description='Select sudo session to investigate: ', item_dict=sudo_dict, width='100%', height='300px', auto_display=True) else: sudo_selection = None md("No logon sessions during this timeframe") # + #Collect data associated with the sudo session selected sudo_events = None from msticpy.sectools.tiproviders.ti_provider_base import TISeverity def ti_check_sev(severity, threshold): severity = TISeverity.parse(severity) threshold = TISeverity.parse(threshold) return severity.value >= threshold.value if sudo_selection: sudo_sess = {'StartTimeUtc': sudo_selection.value.get('Start'), 'EndTimeUtc': sudo_selection.value.get( 'End'), 'Account': sudo_selection.value.get('User'), 'Host': hostname} sudo_session = entities.HostLogonSession(sudo_sess) sudo_events = qry_prov.LinuxSyslog.sudo_activity(start=sudo_session.StartTimeUtc.round( '-1s') - pd.Timedelta(seconds=1), end=(sudo_session.EndTimeUtc.round('1s')+ pd.Timedelta(seconds=1)), host_name=sudo_session.Host) if isinstance(sudo_events, pd.DataFrame) and not sudo_events.empty: display(sudo_events.replace('', np.nan).dropna(axis=0, subset=['Command'])[ ['TimeGenerated', 'Command', 'CommandCall', 'SyslogMessage']]) # Extract IOCs from the data ioc_extractor = iocextract.IoCExtract() os_family = host_entity.OSType if host_entity.OSType else 'Linux' print('Extracting IoCs.......') ioc_df = ioc_extractor.extract(data=sudo_events, columns=['SyslogMessage'], os_family=os_family, ioc_types=['ipv4', 'ipv6', 'dns', 'url', 'md5_hash', 'sha1_hash', 'sha256_hash']) if len(ioc_df) > 0: ioc_count = len( ioc_df[["IoCType", "Observable"]].drop_duplicates()) md(f"Found 
{ioc_count} IOCs") #Lookup the extracted IOCs in TI feed ti_resps = tilookup.lookup_iocs(data=ioc_df[["IoCType", "Observable"]].drop_duplicates( ).reset_index(), obs_col='Observable', ioc_type_col='IoCType') i = 0 ti_hits = [] ti_resps.reset_index(drop=True, inplace=True) while i < len(ti_resps): if ti_resps['Result'][i] == True and ti_check_sev(ti_resps['Severity'][i], 1): ti_hits.append(ti_resps['Ioc'][i]) i += 1 else: i += 1 md(f"Found {len(ti_hits)} IoCs in Threat Intelligence") for ioc in ti_hits: md(f"Messages containing IoC found in TI feed: {ioc}") display(sudo_events[sudo_events['SyslogMessage'].str.contains( ioc)][['TimeGenerated', 'SyslogMessage']]) else: md("No IoC patterns found in Syslog Messages.") else: md('No sudo messages for this session') else: md("No Sudo session to investigate") # - # Jump to: # - <a>Host Logon Events</a> # - <a>Application Activity</a> # - <a>Network Activity</a> # <a></a> # ## User Activity # **Hypothesis:** That an attacker has gained access to the host and is using a user account to conduct actions on the host. # # This section provides an overview of activity by user within our hunting time frame, the purpose of this is to allow for the identification of anomalous activity by a user. This hunt can be driven be investigation of suspected users or as a hunt across all users seen on the host. 
# + # Get list of users with logon or sudo sessions on host logon_events = qry_prov.LinuxSyslog.user_logon(query_times, host_name=hostname) users = logon_events['User'].replace('', np.nan).dropna().unique().tolist() all_users = list(users) if isinstance(sudo_events, pd.DataFrame) and not sudo_events.empty: sudoers = sudo_events['Sudoer'].replace( '', np.nan).dropna().unique().tolist() all_users.extend(x for x in sudoers if x not in all_users) # Pick Users if not logon_events.empty: user_select = nbwidgets.SelectItem(description='Select user to investigate: ', item_list=all_users, width='75%', auto_display=True) else: md("There was no user activity in the timeframe specified.") user_select = None # + folium_user_map = FoliumMap() def view_sudo(cmd): return [user_sudo_hold.query('CommandCall == @cmd')[ ['TimeGenerated', 'HostName', 'Command', 'CommandCall', 'SyslogMessage']]] user_sudo_hold = None if user_select is not None: # Get all syslog relating to these users username = user_select.value user_events = all_syslog_data[all_syslog_data['SyslogMessage'].str.contains(username)] logon_sessions = cluster_syslog_logons_df(logon_events) # Display all logons associated with the user md(f"<h1> User Logon Activity for {username}</h1>") user_logon_events = logon_events[logon_events['User'] == username] try: user_logon_sessions = cluster_syslog_logons_df(user_logon_events) except: user_logon_sessions = None user_remote_logons = ( user_logon_events[user_logon_events['LogonResult'] == 'Success'] ) user_failed_logons = ( user_logon_events[user_logon_events['LogonResult'] == 'Failure'] ) if not user_remote_logons.empty: for _, row in logon_sessions_df.iterrows(): end = row['End'] user_sudo_events = qry_prov.LinuxSyslog.sudo_activity(start=user_remote_logons.sort_values( by='TimeGenerated')['TimeGenerated'].iloc[0], end=end, host_name=hostname, user=username) else: user_sudo_events = None if user_logon_sessions is None and user_remote_logons.empty and user_failed_logons.empty: 
pass else: display(HTML( f"{len(user_remote_logons)} sucessfull logons and {len(user_failed_logons)} failed logons for {username}")) display(Markdown('### Timeline of host logon attempts.')) tooltip_cols = ['SyslogMessage'] dfs = {"User Logons" :user_remote_logons, "Failed Logons": user_failed_logons, "Sudo Events" :user_sudo_events} user_tl_data = {} for k,v in dfs.items(): if v is not None and not v.empty: user_tl_data.update({k :{"data":v,"source_columns":tooltip_cols}}) nbdisplay.display_timeline( data=user_tl_data, title="User logon timeline", height=300) all_user_df = pd.DataFrame(dict(successful= user_remote_logons['ProcessName'].value_counts(), failed = user_failed_logons['ProcessName'].value_counts())).fillna(0) processes = all_user_df.index.values.tolist() results = all_user_df.columns.values.tolist() user_fail_sucess_data = {'processes' :processes, 'sucess' : all_user_df['successful'].values.tolist(), 'failure': all_user_df['failed'].values.tolist()} palette = viridis(2) x = [ (process, result) for process in processes for result in results ] counts = sum(zip(user_fail_sucess_data['sucess'], fail_sucess_data['failure']), ()) source = ColumnDataSource(data=dict(x=x, counts=counts)) b = figure(x_range=FactorRange(*x), plot_height=350, plot_width=450, title="Failed and Sucessful logon attempts by process", toolbar_location=None, tools="", y_minor_ticks=2) b.vbar(x='x', top='counts', width=0.9, source=source, line_color="white", fill_color=factor_cmap('x', palette=palette, factors=results, start=1, end=2)) b.y_range.start = 0 b.x_range.range_padding = 0.1 b.xaxis.major_label_orientation = 1 b.xgrid.grid_line_color = None user_logons = pd.DataFrame({"Sucessful Logons" : [int(all_user_df['successful'].sum())], "Failed Logons" : [int(all_user_df['failed'].sum())]}).T user_logon_data = pd.value_counts(user_logon_events['LogonResult'].values, sort=True).head(10).reset_index(name='value').rename(columns={'User':'Count'}) user_logon_data = 
user_logon_data[user_logon_data['index']!="Unknown"].copy() user_logon_data['angle'] = user_logon_data['value']/user_logon_data['value'].sum() * 2*pi user_logon_data['color'] = viridis(len(user_logon_data)) p = figure(plot_height=350, plot_width=450, title="Relative Frequencies of Failed Logons by Account", toolbar_location=None, tools="hover", tooltips="@index: @value") p.axis.visible = False p.xgrid.visible = False p.ygrid.visible = False p.wedge(x=0, y=1, radius=0.5, start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'), line_color="white", fill_color='color', legend='index', source=user_logon_data) show(Row(p,b)) user_ip_list = [convert_to_ip_entities(i)[0] for i in user_remote_logons['SourceIP']] user_ip_fail_list = [convert_to_ip_entities(i)[0] for i in user_failed_logons['SourceIP']] user_location = get_map_center(ip_list + ip_fail_list) user_folium_map = FoliumMap(location = location, zoom_start=1.4) #Map logon locations to allow for identification of anomolous locations if len(ip_fail_list) > 0: md('<h3>Map of Originating Location of Logon Attempts</h3>') icon_props = {'color': 'red'} user_folium_map.add_ip_cluster(ip_entities=user_ip_fail_list, **icon_props) if len(ip_list) > 0: icon_props = {'color': 'green'} user_folium_map.add_ip_cluster(ip_entities=user_ip_list, **icon_props) display(user_folium_map.folium_map) md('<p style="color:red">Warning: the folium mapping library ' 'does not display correctly in some browsers.</p><br>' 'If you see a blank image please retry with a different browser.') #Display sudo activity of the user if not isinstance(user_sudo_events, pd.DataFrame) or user_sudo_events.empty: md(f"<h3>No sucessful sudo activity for {username}</h3>") else: user_sudo_hold = user_sudo_events user_sudo_commands = (user_sudo_events[['EventTime', 'CommandCall']].replace('', np.nan).groupby(['CommandCall']).count().dropna().style.set_table_attributes('width=900px, text-align=center').background_gradient(cmap='Reds', low=.5, 
high=1).format("{0:0>3.0f}")) display(user_sudo_commands) md("Select a sudo command to investigate in more detail") display(nbwidgets.SelectItem(item_list=items, action=view_sudo)) else: md("No user session selected") # - # If the user has sudo activity extract and IOCs from the logs and look them up in TI feeds if not isinstance(user_sudo_hold, pd.DataFrame) or user_sudo_hold.empty: md(f"No sudo messages data") else: # Extract IOCs ioc_extractor = iocextract.IoCExtract() os_family = host_entity.OSType if host_entity.OSType else 'Linux' print('Extracting IoCs.......') ioc_df = ioc_extractor.extract(data=user_sudo_hold, columns=['SyslogMessage'], ioc_types=['ipv4', 'ipv6', 'dns', 'url', 'md5_hash', 'sha1_hash', 'sha256_hash']) if len(ioc_df) > 0: ioc_count = len(ioc_df[["IoCType", "Observable"]].drop_duplicates()) md(f"Found {ioc_count} IOCs") ti_resps = tilookup.lookup_iocs(data=ioc_df[["IoCType", "Observable"]].drop_duplicates( ).reset_index(), obs_col='Observable', ioc_type_col='IoCType') i = 0 ti_hits = [] ti_resps.reset_index(drop=True, inplace=True) while i < len(ti_resps): if ti_resps['Result'][i] == True and ti_check_sev(ti_resps['Severity'][i], 1): ti_hits.append(ti_resps['Ioc'][i]) i += 1 else: i += 1 md(f"Found {len(ti_hits)} IoCs in Threat Intelligence") for ioc in ti_hits: md(f"Messages containing IoC found in TI feed: {ioc}") display(user_sudo_hold[user_sudo_hold['SyslogMessage'].str.contains( ioc)][['TimeGenerated', 'SyslogMessage']]) else: md("No IoC patterns found in Syslog Message.") # Jump to: # - <a>Host Logon Events</a> # - <a>User Activity</a> # - <a>Network Activity</a> # <a></a> # ## Application Activity # # **Hypothesis:** That an attacker has compromised an application running on the host and is using the applications process to conduct actions on the host. 
# # This section provides an overview of activity by application within our hunting time frame, the purpose of this is to allow for the identification of anomalous activity by an application. This hunt can be driven be investigation of suspected applications or as a hunt across all users seen on the host. # Get list of Applications apps = all_syslog_data['ProcessName'].replace('', np.nan).dropna().unique().tolist() system_apps = ['sudo', 'CRON', 'systemd-resolved', 'snapd', '50-motd-news', 'systemd-logind', 'dbus-deamon', 'crontab'] if len(host_entity.Applications) > 0: installed_apps = [] installed_apps.extend(x for x in apps if x not in system_apps) # Pick Applications app_select = nbwidgets.SelectItem(description='Select sudo session to investigate: ', item_list=installed_apps, width='75%', auto_display=True) else: display(HTML("No applications other than stand OS applications present")) # + # Get all syslog relating to these Applications app = app_select.value app_data = all_syslog_data[all_syslog_data['ProcessName'] == app].copy() # App log volume over time if isinstance(app_data, pd.DataFrame) and not app_data.empty: app_data_volume = app_data.set_index( "TimeGenerated").resample('5T').count() app_data_volume.reset_index(level=0, inplace=True) app_data_volume.rename(columns={"TenantId" : "NoOfLogMessages"}, inplace=True) nbdisplay.display_timeline_values(data=app_data_volume, y='NoOfLogMessages', source_columns=['NoOfLogMessages'], title=f"{app} log volume over time") app_high_sev = app_data[app_data['SeverityLevel'].isin( ['emerg', 'alert', 'crit', 'err', 'warning'])] if isinstance(app_high_sev, pd.DataFrame) and not app_high_sev.empty: app_hs_volume = app_high_sev.set_index( "TimeGenerated").resample('5T').count() app_hs_volume.reset_index(level=0, inplace=True) app_hs_volume.rename(columns={"TenantId" : "NoOfLogMessages"}, inplace=True) nbdisplay.display_timeline_values(data=app_hs_volume, y='NoOfLogMessages', source_columns=['NoOfLogMessages'], 
title=f"{app} high severity log volume over time") risky_messages = risky_cmd_line(events=app_data, log_type="Syslog", cmd_field="SyslogMessage") if risky_messages: print(risky_messages) # - # ### Display process tree # Due to the large volume of data involved you may wish to make you query window smaller # + if rel_alert_select is None or rel_alert_select.selected_alert is None: start = query_times.start else: start = rel_alert_select.selected_alert['TimeGenerated'] # Set new investigation time windows based on the selected alert proc_invest_times = nbwidgets.QueryTime(units='hours', max_before=6, max_after=3, before=2, origin_time=start) proc_invest_times.display() # + audit_table = None app_audit_data = None app = app_select.value process_tree_data = None regex = '.*audit.*\_cl?' # Find the table with auditd data in and collect the data matches = ((re.match(regex, key, re.IGNORECASE)) for key in qry_prov.schema) for match in matches: if match != None: audit_table = match.group(0) #Check if the amount of data expected to be returned is a reasonable size, if not prompt before continuing if audit_table != None: if isinstance(app_audit_data, pd.DataFrame): pass else: print('Collecting audit data, please wait this may take some time....') app_audit_query_count = f"""{audit_table} | where TimeGenerated >= datetime({proc_invest_times.start}) | where TimeGenerated <= datetime({proc_invest_times.end}) | where Computer == '{hostname}' | summarize count() """ count_check = qry_prov.exec_query(query=app_audit_query_count) if count_check['count_'].iloc[0] > 100000 and not count_check.empty: size = count_check['count_'].iloc[0] print( f"You are returning a very large dataset ({size} rows).", "It is reccomended that you consider scoping the size\n", "of your query down.\n", "Are you sure you want to proceed?" 
) response = (input("Y/N") or "N") if ( (count_check['count_'].iloc[0] < 100000) or (count_check['count_'].iloc[0] > 100000 and response.casefold().startswith("y")) ): print("querying audit data...") audit_data = qry_prov.LinuxAudit.auditd_all( start=proc_invest_times.start, end=proc_invest_times.end, host_name=hostname ) if isinstance(audit_data, pd.DataFrame) and not audit_data.empty: print("building process tree...") audit_events = auditdextract.extract_events_to_df( data=audit_data ) process_tree_data = auditdextract.generate_process_tree(audit_data=audit_events) plot_lim = 1000 if len(process_tree) > plot_lim: md_warn(f"More than {plot_lim} processes to plot, limiting to top {plot_lim}.") process_tree[:plot_lim].mp_process_tree.plot(legend_col="exe") else: process_tree.mp_process_tree.plot(legend_col="exe") size = audit_events.size print(f"Collected {size} rows of data") else: md("No audit events avalaible") else: print("Resize query window") else: md("No audit events avalaible") # - md(f"<h3>Process tree for {app}</h3>") if process_tree_data is not None: process_tree_df = process_tree_data[process_tree_data["exe"].str.contains(app, na=False)].copy() if not process_tree_df.empty: app_roots = process_tree_data.apply(lambda x: ptree.get_root(process_tree_data, x), axis=1) trees = [] for root in app_roots["source_index"].unique(): trees.append(process_tree_data[process_tree_data["path"].str.startswith(root)]) app_proc_trees = pd.concat(trees) app_proc_trees.mp_process_tree.plot(legend_col="exe", show_table=True) else: display(f"No process tree data avaliable for {app}") process_tree = None else: md("No data avaliable to build process tree") # ### Application Logs with associated Threat Intelligence # These logs are associated with the process being investigated and include IOCs that appear in our TI feeds. 
# + # Extract IOCs from syslog assocated with the selected process ioc_extractor = iocextract.IoCExtract() os_family = host_entity.OSType if host_entity.OSType else 'Linux' md('Extracting IoCs...') ioc_df = ioc_extractor.extract(data=app_data, columns=['SyslogMessage'], ioc_types=['ipv4', 'ipv6', 'dns', 'url', 'md5_hash', 'sha1_hash', 'sha256_hash']) if process_tree_data is not None and not process_tree_data.empty: app_process_tree = app_proc_trees.dropna(subset=['cmdline']) audit_ioc_df = ioc_extractor.extract(data=app_process_tree, columns=['cmdline'], ioc_types=['ipv4', 'ipv6', 'dns', 'url', 'md5_hash', 'sha1_hash', 'sha256_hash']) ioc_df = ioc_df.append(audit_ioc_df) # Look up IOCs in TI feeds if len(ioc_df) > 0: ioc_count = len(ioc_df[["IoCType", "Observable"]].drop_duplicates()) md(f"Found {ioc_count} IOCs") md("Looking up threat intel...") ti_resps = tilookup.lookup_iocs(data=ioc_df[[ "IoCType", "Observable"]].drop_duplicates().reset_index(drop=True), obs_col='Observable') i = 0 ti_hits = [] ti_resps.reset_index(drop=True, inplace=True) while i < len(ti_resps): if ti_resps['Result'][i] == True and ti_check_sev(ti_resps['Severity'][i], 1): ti_hits.append(ti_resps['Ioc'][i]) i += 1 else: i += 1 display(HTML(f"Found {len(ti_hits)} IoCs in Threat Intelligence")) for ioc in ti_hits: display(HTML(f"Messages containing IoC found in TI feed: {ioc}")) display(app_data[app_data['SyslogMessage'].str.contains( ioc)][['TimeGenerated', 'SyslogMessage']]) else: md("<h3>No IoC patterns found in Syslog Message.</h3>") # - # Jump to: # - <a>Host Logon Events</a> # - <a>User Activity</a> # - <a>Application Activity</a> # ## Network Activity # **Hypothesis:** That an attacker is remotely communicating with the host in order to compromise the host or for C2 or data exfiltration purposes after compromising the host. 
# # This section provides an overview of network activity to and from the host during hunting time frame, the purpose of this is to allow for the identification of anomalous network traffic. If you wish to investigate a specific IP in detail it is recommended that you use the IP Explorer Notebook (include link). # + # Get list of IPs from Syslog and Azure Network Data ioc_extractor = iocextract.IoCExtract() os_family = host_entity.OSType if host_entity.OSType else 'Linux' print('Finding IP Addresses this may take a few minutes.......') syslog_ips = ioc_extractor.extract(data=all_syslog_data, columns=['SyslogMessage'], ioc_types=['ipv4', 'ipv6']) if 'AzureNetworkAnalytics_CL' not in qry_prov.schema: az_net_comms_df = None az_ips = None else: if hasattr(host_entity, 'private_ips') and hasattr(host_entity, 'public_ips'): all_host_ips = host_entity.private_ips + \ host_entity.public_ips + [host_entity.IPAddress] else: all_host_ips = [host_entity.IPAddress] host_ips = {'\'{}\''.format(i.Address) for i in all_host_ips} host_ip_list = ','.join(host_ips) az_ip_where = f"""| where (VMIPAddress in ("{host_ip_list}") or SrcIP in ("{host_ip_list}") or DestIP in ("{host_ip_list}")) and (AllowedOutFlows > 0 or AllowedInFlows > 0)""" az_net_comms_df = qry_prov.AzureNetwork.az_net_analytics( start=query_times.start, end=query_times.end, host_name=hostname, where_clause=az_ip_where) if isinstance(az_net_comms_df, pd.DataFrame) and not az_net_comms_df.empty: az_ips = az_net_comms_df.query("PublicIPs != @host_entity.IPAddress") else: az_ips = None if len(syslog_ips): IPs = syslog_ips[['IoCType', 'Observable']].drop_duplicates('Observable') display(f"Found {len(IPs)} IP Addresses assoicated with the host") else: md("### No IoC patterns found in Syslog Message.") if az_ips is not None: ips = az_ips['PublicIps'].drop_duplicates( ) + syslog_ips['Observable'].drop_duplicates() else: ips = syslog_ips['Observable'].drop_duplicates() if isinstance(az_net_comms_df, pd.DataFrame) and not 
az_net_comms_df.empty: import warnings with warnings.catch_warnings(): warnings.simplefilter("ignore") az_net_comms_df['TotalAllowedFlows'] = az_net_comms_df['AllowedOutFlows'] + \ az_net_comms_df['AllowedInFlows'] sns.catplot(x="L7Protocol", y="TotalAllowedFlows", col="FlowDirection", data=az_net_comms_df) sns.relplot(x="FlowStartTime", y="TotalAllowedFlows", col="FlowDirection", kind="line", hue="L7Protocol", data=az_net_comms_df).set_xticklabels(rotation=50) nbdisplay.display_timeline(data=az_net_comms_df.query('AllowedOutFlows > 0'), overlay_data=az_net_comms_df.query( 'AllowedInFlows > 0'), title='Network Flows (out=blue, in=green)', time_column='FlowStartTime', source_columns=[ 'FlowType', 'AllExtIPs', 'L7Protocol', 'FlowDirection'], height=300) else: md('<h3>No Azure network data for specified time range.</h3>') # - # ### Choose ASNs/IPs to Check for Threat Intel Reports # Choose from the list of Selected ASNs for the IPs you wish to check on. Then select the IP(s) that you wish to check against Threat Intelligence data. # The Source list is populated with all ASNs found in the syslog and network flow data. 
# +
# Look up each IP in whois data and extract the ASN description.
# Results are memoised so repeated lookups of the same IP are free;
# note that `progress` is part of the cache key.
@lru_cache(maxsize=1024)
def whois_desc(ip_lookup, progress=False):
    """Return the ASN description for `ip_lookup`.

    Non-routable input short-circuits without a network call:
    "Not an IP Address" for unparseable strings, "private address"
    for private ranges and "other address" for any other non-global
    address. When `progress` is True a dot is printed per live lookup.
    """
    try:
        ip = ip_address(ip_lookup)
    except ValueError:
        return "Not an IP Address"
    if ip.is_private:
        return "private address"
    if not ip.is_global:
        return "other address"
    whois = IPWhois(ip)
    whois_result = whois.lookup_whois()
    if progress:
        print(".", end="")
    return whois_result["asn_description"]


# Summarise network data by ASN.
ASN_List = []
print("WhoIs Lookups")
ASNs = ips.apply(lambda x: whois_desc(x, True))
IP_ASN = pd.DataFrame(dict(IPs=ips, ASN=ASNs)).reset_index()
x = IP_ASN.groupby(["ASN"]).count().drop(
    'index', axis=1).sort_values('IPs', ascending=False)
display(x)
ASN_List = x.index

# Select an ASN to investigate in more detail.
selection = widgets.SelectMultiple(
    options=ASN_List,
    width=900,
    description='Select ASN to investigate',
    disabled=False
)
display(selection)

# +
# For every IP associated with the selected ASN(s), look them up in TI feeds.
ip_invest_list = None
ip_selection = None
for ASN in selection.value:
    ips_for_asn = IP_ASN[IP_ASN["ASN"] == ASN]['IPs'].tolist()
    if ip_invest_list is None:
        ip_invest_list = ips_for_asn
    else:
        # BUGFIX: the original evaluated `ip_invest_list + [...]` and threw
        # the result away, so only the first selected ASN was investigated.
        ip_invest_list += ips_for_asn

if ip_invest_list is not None:
    ioc_ip_list = []
    if len(ip_invest_list) > 0:
        ti_resps = tilookup.lookup_iocs(data=ip_invest_list, providers=["OTX"])
        # Keep only the IoCs that at least one OTX pulse mentions.
        ti_hits = [
            ti_resps['Ioc'][i]
            for i in range(len(ti_resps))
            if ti_resps['Details'][i]['pulse_count'] > 0
        ]
        display(HTML(f"Found {len(ti_hits)} IoCs in Threat Intelligence"))
        for ioc in ti_hits:
            ioc_ip_list.append(ioc)

    # Show IPs found in TI feeds for further investigation.
    if len(ioc_ip_list) > 0:
        # BUGFIX: corrected "whcih" typo in the user-facing message.
        display(HTML("Select an IP which appeared in TI to investigate further"))
        ip_selection = nbwidgets.SelectItem(
            description='Select IP Address to investigate: ',
            item_list=ioc_ip_list, width='95%', auto_display=True)
    else:
        md("No IPs to investigate")
# -

# Get all syslog for the selected IP.
if ip_selection is not None:
    display(HTML("Syslog data associated with this IP Address"))
    sys_hits = all_syslog_data[all_syslog_data['SyslogMessage'].str.contains(
        ip_selection.value)]
    display(sys_hits)
    os_family = host_entity.OSType if host_entity.OSType else 'Linux'
    display(HTML("TI result for this IP Address"))
    display(ti_resps[ti_resps['Ioc'] == ip_selection.value])
else:
    md("No IP address selected")

# ## Configuration
#
# ### `msticpyconfig.yaml` configuration File
# You can configure primary and secondary TI providers and any required parameters in the `msticpyconfig.yaml` file. This is read from the current directory or you can set an environment variable (`MSTICPYCONFIG`) pointing to its location.
#
# To configure this file see the [ConfigureNotebookEnvironment notebook](https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/ConfiguringNotebookEnvironment.ipynb)
Entity Explorer - Linux Host.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # #!/usr/bin/env python # -*- coding: utf-8 -*- # common import os import os.path as op # DEV: override installed teslakit import sys sys.path.insert(0, op.join(os.path.abspath(''), '..', '..', '..', '..')) # teslakit from teslakit.database import Database # - # ### Generate database - climate change: intermediate SLR scenario (S2, +1m) and future ENSO probability # # + # -------------------------------------- # Teslakit database p_data = r'/media/administrador/HD/Dropbox/Guam/teslakit/data' # p_data=r'/Users/laurac/Dropbox/Guam/teslakit/data' db = Database(p_data) # make new database for climate change - S4 db.MakeNewSite('GUAM_CC_S5') # -
notebooks/GUAM/GUAM/03_ClimateChange/S5_SLR_ENSO/00_Generate_Database.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import time
import pickle

# Root of all raw ACS/census extracts used below.
DATA_ROOT = "/home/henry/Insight/Yogee/Datasets"

# Years of 5-year ACS releases merged into the demographics table.
YEARS = [2011, 2012, 2013, 2014, 2015, 2016]

# Pickled output of the (incrementally built) demographics frame.
PICKLE_PATH = DATA_ROOT + '/Demographics_dataset/DemographicsDf.pckl'


def load_acs_tables(dataset_dir, table):
    """Load one ACS table for every year in YEARS.

    Returns {year: DataFrame} for files laid out as
    <DATA_ROOT>/<dataset_dir>/ACS_<yy>_5YR_<table>/ACS_<yy>_5YR_<table>_with_ann.csv
    (Replaces six copy-pasted per-year read_csv calls per dataset.)
    """
    tables = {}
    for year in YEARS:
        stem = "ACS_{:02d}_5YR_{}".format(year % 100, table)
        path = "{}/{}/{}/{}_with_ann.csv".format(DATA_ROOT, dataset_dir, stem, stem)
        tables[year] = pd.read_csv(path)
    return tables


# Load population dataset (ACS table B01003) for all years.
population_dfs = load_acs_tables("NY_Population_dataset", "B01003")

# +
# Build the long-format demographics frame: one row per (zipcode, year).

# Zip codes to use (taken from the 2011 population table).
zipcodes = population_dfs[2011].loc[:, 'GEO.id2']
n_zip = np.shape(zipcodes)[0]
n_rows = n_zip * len(YEARS)

# Initialise zip / year / population columns with NaN.
DemographicsDf = pd.DataFrame(np.full([n_rows, 3], np.nan),
                              columns=['zip', 'year', 'population'])

# Fill in the population for every year.
# (Replaces six copy-pasted per-year loops from the original notebook.)
for block, year in enumerate(YEARS):
    pop_df = population_dfs[year]
    for i in range(n_zip):
        match = pop_df[pop_df['GEO.id2'] == zipcodes[i]]
        if match.shape[0] > 0:
            index = i + n_zip * block
            DemographicsDf.loc[index, 'zip'] = match['GEO.id2'].iloc[0]
            DemographicsDf.loc[index, 'year'] = year
            DemographicsDf.loc[index, 'population'] = match['HD01_VD01'].iloc[0]
# -

# Load gender dataset (ACS table S0101) for all years.
gender_dfs = load_acs_tables("NY_Gender_dataset", "S0101")

# +
# Add the female ratio (females per 100 males) to the demographics frame.
# NOTE: the broken checkpoint/restore lines of the original
# (`DemographicsDf = DemographicsDf2` with `DemographicsDf2` never assigned,
# a NameError when run top to bottom) have been removed.
DemographicsDf = pd.concat(
    [DemographicsDf,
     pd.DataFrame(np.full([n_rows, 1], np.nan), columns=['FemaleRatio'])],
    axis=1)

for i in range(n_rows):
    row = DemographicsDf.iloc[i, :]
    if np.isnan(row['year']):
        # Zip code missing from that year's ACS extract; leave NaN.
        continue
    gender_df = gender_dfs[int(row['year'])]
    gender_row = gender_df[gender_df['GEO.id2'] == row['zip']]
    # HC01_EST_VC36 is "males per 100 females" as a string; skip
    # non-numeric placeholder values.
    male_ratio = gender_row['HC01_EST_VC36'].iloc[0]
    if male_ratio.replace('.', '', 1).isdigit():
        # females per 100 males = 10000 / (males per 100 females)
        DemographicsDf.iloc[i, 3] = 10000 / np.float32(male_ratio)
# -

# Load income dataset (ACS table S1901) for all years.
income_dfs = load_acs_tables("NY_Income_dataset", "S1901")

# +
# Add median household income to the demographics frame.
DemographicsDf = pd.concat(
    [DemographicsDf,
     pd.DataFrame(np.full([n_rows, 1], np.nan), columns=['Income'])],
    axis=1)

for i in range(n_rows):
    row = DemographicsDf.iloc[i, :]
    if np.isnan(row['year']):
        continue
    income_df = income_dfs[int(row['year'])]
    income_row = income_df[income_df['GEO.id2'] == row['zip']]
    # HC01_EST_VC13 is the median household income as a string; skip
    # non-numeric placeholders.
    income = income_row['HC01_EST_VC13'].iloc[0]
    if income.replace('.', '', 1).isdigit():
        # BUGFIX: the original stored the raw string here, turning a float
        # column into mixed dtype; convert like the gender column above.
        DemographicsDf.iloc[i, 4] = np.float32(income)
# -

DemographicsDf.tail(5)

# +
# Checkpoint the partially-built frame to disk.
with open(PICKLE_PATH, 'wb') as f:
    pickle.dump(DemographicsDf, f)

# +
with open(PICKLE_PATH, 'rb') as f:
    DemographicsDf = pickle.load(f)
# -

# Load land area dataset (2010 census, one area per zip code).
LandAreaFileLoc = DATA_ROOT + "/NY_Land_Area/NY_Land_Area.csv"
LandArea2010Df = pd.read_csv(LandAreaFileLoc)

# +
# Add land area and population density to the demographics frame.
DemographicsDf = pd.concat(
    [DemographicsDf,
     pd.DataFrame(np.full([n_rows, 2], np.nan), columns=['LandArea', 'PopDensity'])],
    axis=1)

for i in range(n_rows):
    zipcode = DemographicsDf.iloc[i, :]['zip']
    area_row = LandArea2010Df[LandArea2010Df['GEO.id2'] == zipcode]
    land_area = area_row['Land_area'].iloc[0]
    # Only integer-typed areas are trusted; other rows stay NaN.
    if isinstance(land_area, (int, np.integer)):
        DemographicsDf.iloc[i, 5] = land_area
        DemographicsDf.iloc[i, 6] = DemographicsDf.iloc[i, 2] / land_area
# -

# +
with open(PICKLE_PATH, 'wb') as f:
    pickle.dump(DemographicsDf, f)

# +
with open(PICKLE_PATH, 'rb') as f:
    DemographicsDf = pickle.load(f)
# -

# BUGFIX: the original displayed `LandAreaDf.head(6)` but no `LandAreaDf`
# was ever defined; the land-area frame is `LandArea2010Df`.
LandArea2010Df.head(6)
Demographics dataframe.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
# ---

# # Gradient filters
# [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/YoniChechik/AI_is_Math/blob/master/c_03_edge_detection/gradient_filters.ipynb)
#
#

# +
# When running in Google Colab, fetch the test image from the course repo.
import sys
if 'google.colab' in sys.modules:
    import subprocess
    subprocess.call('apt-get install subversion'.split())
    subprocess.call('svn export https://github.com/YoniChechik/AI_is_Math/trunk/c_03_edge_detection/Bikesgray.jpg'.split())

# +
import numpy as np
import cv2
from matplotlib import pyplot as plt

# Default figure size for the single-image plots below.
figsize = (10,10)
# -

# ## Original image
#

# +
img = cv2.imread("Bikesgray.jpg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

plt.figure(figsize=figsize)
plt.imshow(img, cmap='gray', vmin=0, vmax=255)
plt.title('Original image')
# -

# ## X absolute grad filter
#

# +
img = img.astype(float)  # uint8 cannot represent the negative filter responses

# 1. cv2.filter2D computes correlation rather than convolution,
#    so there is no need to flip the kernel.
# 2. Note that the kernel is a 2D array — a 1D array would be treated as a
#    column vector and give a vertical-derivative convolution instead.
kernel = np.array([[-1, 0, +1]])
dst = cv2.filter2D(img, -1, kernel)

plt.figure(figsize=figsize)
plt.imshow(np.abs(dst), cmap='gray')
plt.colorbar()
plt.title('$f\'_x$: image filtered with '+str(kernel))
print("kernel shape is "+str(kernel.shape))
# -

# ## Y grad filter (no abs)
#

# +
# Transposing the row kernel gives the vertical derivative.
kernel = np.array([[-1, 0, +1]]).T
dst = cv2.filter2D(img, -1, kernel)

plt.figure(figsize=figsize)
plt.imshow(dst, cmap='gray')
plt.colorbar()
plt.title('$f\'_y$: image filtered with\n '+str(kernel))
# -

# ## Comparison of x gradient filters
#

# +
plt.rcParams['figure.figsize'] = [20, 20]

plt.subplot(4, 2, 1)
plt.imshow(img, cmap='gray', vmin=0, vmax=255)
plt.title('original image')

#######################################
# Central-difference (symmetric) derivative.
kernel = 1/2*np.array([[-1, 0, +1]])
dst_sym = cv2.filter2D(img, -1, kernel)

plt.subplot(4, 2, 2)
plt.imshow(dst_sym, cmap='gray')
plt.title('$f\'_x$: image filtered with symmetric derivative')

#######################################
# Prewitt: symmetric derivative averaged over 3 rows.
kernel = 1/6*np.array([
    [-1, 0, +1],
    [-1, 0, +1],
    [-1, 0, +1]])
dst_prewitt = cv2.filter2D(img, -1, kernel)

plt.subplot(4, 2, 3)
plt.imshow(dst_prewitt, cmap='gray')
plt.title('$f\'_x$: image filtered with Prewitt')

#######################################
# Sobel: like Prewitt but with the centre row weighted double.
# cv2.Sobel() also exists (used below).
kernel = 1/8*np.array([
    [-1, 0, +1],
    [-2, 0, +2],
    [-1, 0, +1]])
dst_sobel = cv2.filter2D(img, -1, kernel)

plt.subplot(4, 2, 4)
plt.imshow(dst_sobel, cmap='gray')
plt.title('$f\'_x$: image filtered with Sobel')

#######################################
dst_cv2_sobel = cv2.Sobel(img, -1, 1, 0)  #cv2.Sobel(img,ddepth,x_size,y_size)

plt.subplot(4, 2, 5)
plt.imshow(dst_cv2_sobel, cmap='gray')
plt.colorbar()
plt.title('cv2.Sobel X')

#######################################
# Pairwise differences between the filters — visible mainly at edges.
plt.subplot(4, 2, 6)
plt.imshow(np.abs(dst_sobel-dst_sym))
plt.colorbar()
plt.title('|sobel-symmetric|')

#######################################
plt.subplot(4, 2, 7)
plt.imshow(np.abs(dst_sobel-dst_prewitt))
plt.colorbar()
plt.title('|sobel-prewitt|')

#######################################
plt.subplot(4, 2, 8)
plt.imshow(np.abs(dst_sym-dst_prewitt))
plt.colorbar()
plt.title('|symmetric-prewitt|')
# -

# ## Filtering common errors
# ### kernel dimension error
# Note that you need to use a 2D array for a horizontal kernel!
#

# +
# A 1D kernel is interpreted as a column vector, so this computes a
# vertical derivative instead of the intended horizontal one.
kernel = np.array([-1, 0, +1])
dst = cv2.filter2D(img, -1, kernel)

plt.figure(figsize=figsize)
plt.imshow(np.abs(dst), cmap='gray')
plt.colorbar()
plt.title('wrong kernel dim: '+str(kernel))
print("kernel shape is "+str(kernel.shape))
# -

# ### uint8 errors
# Wrong filtering when keeping uint8 instead of float, because uint8 doesn't have negative numbers...
#

# +
uint8_img = np.zeros((500, 500), dtype=np.uint8)
uint8_img[200:300, 200:300] = 1

kernel = np.array([[-1, 0, +1]])
# Filtering a uint8 image loses the negative responses (they cannot be
# represented), so one of the two edges disappears in the result.
dst = cv2.filter2D(uint8_img, -1, kernel)

fig, axs = plt.subplots(1, 2, figsize=(20,20))
axs[0].imshow(uint8_img, cmap='gray')
axs[0].title.set_text('original image')
axs[1].imshow(dst, cmap='gray')
axs[1].title.set_text('uint8 WRONG filtering')
c_03_edge_detection/gradient_filters.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Code for **"Prior effect"** figure from supmat.

# # Import libs

# +
from __future__ import print_function
import matplotlib.pyplot as plt
# %matplotlib inline

import argparse
import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '1'

import numpy as np
from models import *

import torch
import torch.optim

from skimage.measure import compare_psnr
from models.downsampler import Downsampler
from utils.sr_utils import *

# cuDNN autotuning is safe here: input sizes are fixed within a run.
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark =True
dtype = torch.cuda.FloatTensor  # all tensors live on the GPU

imsize =-1   # -1: keep the native image size
factor = 4   # super-resolution upscaling factor
enforse_div32 = 'CROP' # we usually need the dimensions to be divisible by a power of two
PLOT = True
# -

# # Load image

# +
fname = 'data/sr/zebra_crop.png'

# Load the HR image and derive the LR version (downscaled by `factor`).
imgs = load_LR_HR_imgs_sr(fname, imsize, factor, enforse_div32)

if PLOT:
    # Baselines: bicubic, sharpened bicubic and nearest-neighbour upsampling.
    imgs['bicubic_np'], imgs['sharp_np'], imgs['nearest_np'] = get_baselines(imgs['LR_pil'], imgs['HR_pil'])
    plot_image_grid([imgs['HR_np'], imgs['bicubic_np'], imgs['sharp_np'], imgs['nearest_np']], 4,12);
    print ('PSNR bicubic: %.4f PSNR nearest: %.4f' % (
        compare_psnr(imgs['HR_np'], imgs['bicubic_np']),
        compare_psnr(imgs['HR_np'], imgs['nearest_np'])))
# -

def closure():
    """One optimisation step: forward, downsample, loss, backward, logging.

    Relies on module-level globals set by each experiment: net, net_input,
    downsampler, mse, img_LR_var, tv_weight, reg_noise_std,
    net_input_saved/noise (when reg_noise_std > 0), psnr_history and the
    iteration counter `i`.
    """
    global i

    if reg_noise_std > 0:
        # Perturb the input with fresh noise every step (regularisation).
        net_input.data = net_input_saved + (noise.normal_() * reg_noise_std)

    out_HR = net(net_input)
    out_LR = downsampler(out_HR)

    # Data term on the LR image plus (optional) TV prior on the HR output.
    total_loss = mse(out_LR, img_LR_var) + tv_weight * tv_loss(out_HR)
    total_loss.backward()

    # Log
    psnr_LR = compare_psnr(imgs['LR_np'], var_to_np(out_LR))
    psnr_HR = compare_psnr(imgs['HR_np'], var_to_np(out_HR))
    print ('Iteration %05d PSNR_LR %.3f PSNR_HR %.3f' % (i, psnr_LR, psnr_HR), '\r', end='')

    # History
    psnr_history.append([psnr_LR, psnr_HR])

    if PLOT and i % 500 == 0:
        out_HR_np = var_to_np(out_HR)
        plot_image_grid([imgs['HR_np'], np.clip(out_HR_np, 0, 1)], factor=8, nrow=2, interpolation='lanczos')

    i += 1

    return total_loss

# # Experiment 1: no prior, optimize over pixels

# +
input_depth = 3

INPUT = 'noise'
pad = 'reflection'
OPT_OVER = 'input'   # optimise the input pixels directly (identity net)
KERNEL_TYPE='lanczos2'

LR = 0.01
tv_weight = 0.0

OPTIMIZER = 'adam'

num_iter = 2000
reg_noise_std = 0.0
# +
# Identity mapping network, optimize over `net_input`
net = nn.Sequential()

net_input = get_noise(input_depth, INPUT, (imgs['HR_pil'].size[1], imgs['HR_pil'].size[0])).type(dtype).detach()

# Fixed (non-learned) Lanczos downsampler linking HR output to LR data term.
downsampler = Downsampler(n_planes=3, factor=factor, kernel_type='lanczos2', phase=0.5, preserve_size=True).type(dtype)

# Loss
mse = torch.nn.MSELoss().type(dtype)

img_LR_var = np_to_var(imgs['LR_np']).type(dtype)
# +
psnr_history = []
i = 0

p = get_params(OPT_OVER, net, net_input)
optimize(OPTIMIZER, p, closure, LR, num_iter)
# +
out_HR_np = np.clip(var_to_np(net(net_input)), 0, 1)
result_no_prior = put_in_center(out_HR_np, imgs['orig_np'].shape[1:])

psnr_history_direct = psnr_history
# -

# # Experiment 2: using TV loss

# +
tv_weight = 1e-7

net_input = get_noise(input_depth, INPUT, (imgs['HR_pil'].size[1], imgs['HR_pil'].size[0])).type(dtype).detach()

psnr_history = []
i = 0

p = get_params(OPT_OVER, net, net_input)
optimize(OPTIMIZER, p, closure, LR, num_iter)
# +
out_HR_np = np.clip(var_to_np(net(net_input)), 0, 1)
result_tv_prior = put_in_center(out_HR_np, imgs['orig_np'].shape[1:])

psnr_history_tv = psnr_history
# -

# # Experiment 3: using deep prior
# Same setting, but use parametrization.

# +
OPT_OVER = 'net'   # now optimise the network weights instead of the input
reg_noise_std = 1./30. # This parameter probably should be set to a lower value for this example
tv_weight = 0.0

# Encoder-decoder with skip connections (the "deep image prior" network).
net = skip(input_depth, 3,
           num_channels_down = [128, 128, 128, 128, 128],
           num_channels_up   = [128, 128, 128, 128, 128],
           num_channels_skip = [4, 4, 4, 4, 4],
           upsample_mode='bilinear',
           need_sigmoid=True, need_bias=True, pad=pad, act_fun='LeakyReLU').type(dtype)

net_input = get_noise(input_depth, INPUT, (imgs['HR_pil'].size[1], imgs['HR_pil'].size[0])).type(dtype).detach()

# Compute number of parameters
s  = sum([np.prod(list(p.size())) for p in net.parameters()]);
print ('Number of params: %d' % s)
# +
psnr_history = []

# Snapshots used by closure() for the per-step input-noise regularisation.
net_input_saved = net_input.data.clone()
noise = net_input.data.clone()

i = 0
p = get_params(OPT_OVER, net, net_input)
optimize(OPTIMIZER, p, closure, LR, num_iter)
# +
out_HR_np = np.clip(var_to_np(net(net_input)), 0, 1)
result_deep_prior = put_in_center(out_HR_np, imgs['orig_np'].shape[1:])

psnr_history_deep_prior = psnr_history
# -

# # Comparison

plot_image_grid([imgs['HR_np'], result_no_prior, result_tv_prior, result_deep_prior], factor=8, nrow=2, interpolation='lanczos');
0-newbooks/deep-image-prior/sr_prior_effect.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.6 64-bit
#     metadata:
#       interpreter:
#         hash: 0cf725079c7d16f2cba0a185a776402bb287255802a557bed9d05e4eed5bfa43
#     name: python3
# ---

# # Search for products by tile

# `eodag` allows searching for products by geometric features that match a *location query*, see the [API user guide](../api_user_guide/4_search.ipynb#locations-search) for an introduction to this concept.
#
# In this tutorial we will use a shapefile that represents the Sentinel 2 tiling grid to search for *Sentinel 2 Level-1C* products with *PEPS* **at a specific tile**. In this shapefile each tile is defined by its centroid and a `tile_id` attribute (e.g. *29PMT*). This shapefile was created by downloading first the Sentinel 2 tiling grid (MGRS) provided [by ESA as a KML file](https://web.archive.org/web/20200907072744/https://sentinel.esa.int/web/sentinel/missions/sentinel-2/data-products). It was then converted to a shapefile and processed to compute the centroids. We use the tile's centroid here as `eodag` returns products that intersect the user-defined search area. Since tiles overlap with each other, using the polygons instead of the centroids would return more tiles than just the one we target.

# +
import os
from zipfile import ZipFile

# Interactive mapping
import folium
from folium.plugins import TimestampedGeoJson

# pyshp: to read shapefiles
import shapefile

from eodag import EODataAccessGateway
from eodag import setup_logging
# -

# ## Setup

# A workspace directory is created to store the files that will be generated.

workspace = "eodag_workspace_locations_tiles"
if not os.path.isdir(workspace):
    os.mkdir(workspace)

# You should have an `auxdata` folder next to this tutorial's file. It contains a shapefile that is needed to run this tutorial correctly.

sentinel2_grid_zip = os.path.join("auxdata", "sentinel2_tiling_grid_centroids.zip")
if not os.path.isfile(sentinel2_grid_zip):
    raise FileNotFoundError("Auxdata not found, please check your configuration.")

# We unzip the archived shapefile.

with ZipFile(sentinel2_grid_zip, "r") as fzip:
    fzip.extractall("auxdata")

# In this tutorial products will just be searched for, not downloaded. We don't need to set up PEPS credentials to search for products. If you wish to download them, you should set the credentials beforehand, using these two environment variables for instance.

# +
# os.environ["EODAG__PEPS__AUTH__CREDENTIALS__USERNAME"] = "PLEASE_CHANGE_ME"
# os.environ["EODAG__PEPS__AUTH__CREDENTIALS__PASSWORD"] = "<PASSWORD>"
# -

# Logging is activated to better inspect what `eodag` does internally.

setup_logging(2)  # INFO level

# The default search criteria consist of a time period in June 2018 and `eodag`'s product type identifier for *Sentinel 2 Level-1C* products.

default_search_criteria = dict(
    productType="S2_MSI_L1C",
    start="2018-06-01",
    end="2018-06-15"
)

# ## Add a locations configuration

# We check and store the content of this shapefile.

sentinel2_shp = os.path.join('auxdata', 'sentinel2_tiling_grid_centroids.shp')
with shapefile.Reader(sentinel2_shp) as shp:
    print(shp, "\n")
    print("fields:", shp.fields)
    shaperecs = shp.shapeRecords()

# It has about 57 000 tiles/polygons and a field `tile_id`.
#
# We create a YAML file to configure this new location selector; we will refer to it with `s2_tile_centroid`.

# +
# Save the locations configuration file.
# Locations configuration: expose the shapefile's `tile_id` attribute as
# the `s2_tile_centroid` location selector.
locations_yaml_content = """
shapefiles:
  - name: s2_tile_centroid
    path: {}
    attr: tile_id
""".format(os.path.abspath(sentinel2_shp))

locations_filepath = os.path.abspath(os.path.join(workspace, "custom_locations.yml"))

with open(locations_filepath, "w") as f_yml:
    f_yml.write(locations_yaml_content.strip())
# -

# An instance of an [EODataAccessGateway](../../api_reference/core.rst#eodag.api.core.EODataAccessGateway) class is created, it makes use of this location configuration file.

dag = EODataAccessGateway(locations_conf_path=locations_filepath)

# We want to look for *Sentinel 2 Level-1C* products. We can check whether this product type is offered by *PEPS* (as configured in `eodag`). If so, *PEPS* is set as the provider used to search for products.

"peps" in dag.available_providers("S2_MSI_L1C")

dag.set_preferred_provider("peps")

# ## Search

# ### A single tile

# Our target tile is `31TFK` and is located in the South-East of France. Its feature is retrieved from the shapefile to be displayed later on an interactive map.

# +
targeted_tile_name = "31TFK"

# Get the targeted tile feature.
# CONSISTENCY FIX: use `targeted_tile_name` instead of repeating the
# "31TFK" literal, so changing the target tile needs exactly one edit.
targeted_tile = [
    sr
    for sr in shaperecs
    if sr.record["tile_id"] == targeted_tile_name
][0]
# -

# We search for all the products that intersect with the centroid of this tile.

products = dag.search_all(
    locations=dict(s2_tile_centroid=targeted_tile_name),
    **default_search_criteria
)
print(f"{len(products)} were found given the above search criteria")

# The products found are displayed on an interactive map along with the centroid of the targeted tile. A time player allows to see when the products were sensed.
# + # The GeoJSON representation has to be slightly adapted for the time slider adapted_prods = products.as_geojson_object() for feature in adapted_prods["features"]: feature["properties"]["time"] = feature["properties"]["startTimeFromAscendingNode"] # Create a map zoomed over the search area fmap = folium.Map([44.5, 5], zoom_start=8) # Add a layer that map the tile's centroid folium.GeoJson( data=targeted_tile, tooltip = targeted_tile_name, ).add_to(fmap) # Add layer that temporally maps the products found TimestampedGeoJson( adapted_prods, transition_time=50, # Transition duration in ms period="PT3H", # Array of times, here every 3 hours duration="PT12H", # Feature display duragion, here 6 hours time_slider_drag_update=True, # Update the map when the slider is dragged auto_play=False, # Don't auto play the animation ).add_to(fmap) fmap # - # <div class="alert alert-info"> # # Note # # Instead of using the tile's centroid we could have directly used its extent and filter the returned products to keep only those fully contained within the tile. Check out the section dedicated to [filtering products](../api_user_guide/6_crunch.ipynb) in the API user guide. # # </div> # # # ### Multiple tiles # We can search for products that overlap with several tiles using a **regular expression**. We use the expression `"31T[CDE][MLK]"` to look for products over 9 different tiles (*31TCM*, *31TCL*, *31TCK*, *31TDM*, etc.) over France. products = dag.search_all( locations=dict(s2_tile_centroid="31T[CDE][MLK]"), **default_search_criteria ) print(f"{len(products)} were found given the above search criteria") # The products are displayed on an interactive map. By hovering over them you can observe that the MGRS number of the tiles match with the regular expressions we used. 
# Create a map zoomed over the search area fmap = folium.Map([44.5, 1.5], zoom_start=6) # Create a layer that maps the products found folium.GeoJson( data=products, tooltip=folium.GeoJsonTooltip( fields=[ "title", # The product's title "mgrs", # The tile number on the MGRS grid ] ), ).add_to(fmap) fmap # This example has demonstrated the possibilities offered by `eodag` to easily select products from a tile grid by using regular expressions over their identifier.
docs/notebooks/tutos/tuto_search_location_tile.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import sonnet as snt
import tensorflow as tf
import copy
import os
import matplotlib.pyplot as plt
from tqdm import tqdm


def load_celeba(data_dir, restricted_degree, label_type, print_ratio=False):
    """Return CelebA as (train_data, train_labels, test_data, test_labels).

    Data is float32 in [0, 1]; labels are one-hot for 'gender' or a 0/1
    attribute matrix for 'subattr'.
    NOTE(review): despite the original shape note of (162770, ...), only
    the FIRST 2000 training instances are returned (see the final return
    statement) — presumably to keep experiments fast; confirm intended.

    Args:
        data_dir: directory holding the celeba .npy/.npz dumps.
        restricted_degree: only keep instances with at least this many of
            the "important" attributes set.
        label_type: 'gender' -> two columns [not-male, male] (one-hot);
            'subattr' -> all attribute columns.
        print_ratio: if True, print the male/not-male class balance.
    """
    train_data = np.load(os.path.join(data_dir, 'celeba_train_imgs.npy')).astype('float32') / 255.0
    test_data = np.load(os.path.join(data_dir, 'celeba_test_imgs.npy')).astype('float32') / 255.0

    info_pak = np.load(os.path.join(data_dir, 'celeba_attr.npz'))
    train_idxs = info_pak['train_idxs']
    val_idxs = info_pak['val_idxs']
    test_idxs = info_pak['test_idxs']
    attribute_names = info_pak['attribute_names']
    attributes = info_pak['attributes']
    male_attr_idx = 20  # index of the 'Male' attribute in the attribute vector

    def get_label(data, idxs):
        def count_indicators(attr):
            # Attributes counted to decide whether an instance is kept
            # (see restricted_degree).
            important_attributes_idx = [0, 1, 4, 9, 16, 18, 22, 24, 29, 30, 34, 36, 37, 38]
            x = np.array([0] * attr.shape[0])
            for i in important_attributes_idx:
                x = x + attr[:, i]
            return x

        label = attributes[idxs]
        sig = count_indicators(label) >= restricted_degree
        label = label[sig]
        data = data[sig]
        if label_type == 'gender':
            # Column 0: not-male, column 1: male (one-hot).
            label = 1-label[:, male_attr_idx].reshape([-1, 1])
            label = np.append(label, 1 - label, 1)
        elif label_type == 'subattr':
            # Earlier experiments used subsets of attributes (kept for reference):
            # decission_tree_attr_idx = [1, 6, 34, 35, 36]
            # decission_tree_attr_idx = [0, 1, 6, 7, 8, 9, 12, 18, 19, male_attr_idx, 24, 34, 36, 38, 39]
            decission_tree_attr_idx = [i for i in range(label.shape[1])]
            sub_attributes_idx = np.array(decission_tree_attr_idx)
            label = label[:, sub_attributes_idx]
        return data, label

    train_data, train_label = get_label(train_data, train_idxs)
    test_data, test_label = get_label(test_data, test_idxs)

    if print_ratio:
        print('\nCelebA restricted degree: {}'.format(restricted_degree))
        train_ratio = sum(train_label[:, 1]) / train_label.shape[0]
        test_ratio = sum(test_label[:, 1]) / test_label.shape[0]
        print('Training set - Male: {:.2f}% ({}/{}), Not male: {:.2f}%'.format(train_ratio * 100, sum(train_label[:, 1]), train_label.shape[0], 100 - train_ratio * 100))
        print('Testing set - Male: {:.2f}% ({}/{}), Not male: {:.2f}%'.format(test_ratio * 100, sum(test_label[:, 1]), test_label.shape[0], 100 - test_ratio * 100))

    # Only the first 2000 training instances are returned.
    return train_data[:2000], train_label[:2000], test_data, test_label


# +
train_data, train_labels, test_data, test_labels = load_celeba(
    'H:\\CodeRange\\CelebA\\npy\\', restricted_degree=0, print_ratio=False, label_type='gender')
_, train_latent_labels, _, test_latent_labels = load_celeba(
    'H:\\CodeRange\\CelebA\\npy\\', restricted_degree=0, print_ratio=False, label_type='subattr')
# -

# Let GPU memory grow on demand instead of grabbing it all up front.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)

# Restore the exported SavedModel into the default graph.
model=tf.saved_model.loader.load(session, [tf.saved_model.tag_constants.SERVING], './saved_model')


# +
def get_variables(model_meta):
    """Resolve the SavedModel 'serving_default' signature tensors by name.

    Prints every signature entry and returns {signature_key: tf.Tensor}
    for both inputs and outputs.
    """
    graph = tf.get_default_graph()
    sig_vars = copy.deepcopy(model_meta.signature_def['serving_default'])
    sig_inputs = sig_vars.inputs
    sig_outputs = sig_vars.outputs
    output = dict()
    for k in sig_inputs.keys():
        print('{:20}, {}'.format(k,sig_inputs[k].name))
        output[k] = graph.get_tensor_by_name(sig_inputs[k].name)
    for k in sig_outputs.keys():
        print('{:20}, {}'.format(k,sig_outputs[k].name))
        output[k] = graph.get_tensor_by_name(sig_outputs[k].name)
    return output

tensors = get_variables(model)
# Handles used by the evaluation cells below.
t_x = tensors['x']
t_x_latent = tensors['x_latent']
t_latent_labels = tensors['latent_labels']
t_output = tensors['output']

# +
# Evaluation runs in fixed-size batches; the trailing remainder of the test
# set (test_data_len % epoch_size instances) is dropped.
epoch_size = 32
test_data_len = test_data.shape[0]
epoch_num = test_data_len // epoch_size
# epoch_num = 60
instance_num = epoch_num * epoch_size
latent_labels = []
for i in tqdm(range(epoch_num)):
    # [epoch_beg, epoch_end) slice of the test split for this batch.
    epoch_beg = i*epoch_size
    epoch_end = (i+1)*epoch_size
    # print(epoch_beg, epoch_end)
    outputs = session.run([t_output, t_latent_labels], feed_dict={t_x:test_data[epoch_beg:epoch_end], t_x_latent:test_latent_labels[epoch_beg:epoch_end]})
    latent_labels.append(outputs[1])
# Threshold the raw predictions at 0 to get boolean attribute predictions.
latent_labels = np.concatenate(latent_labels)>0
correct = latent_labels==(test_latent_labels[:instance_num])
# Built-in sum() over a 2-D boolean array sums down the rows, so `accu` is a
# vector with one accuracy value per attribute column.
accu = sum(correct)/instance_num
# -

attributes_names = np.load('./gender-classification/data/toy_celeba/celeba_attr.npz')['attribute_names']
# Per-attribute accuracy table.
for i,ac in enumerate(accu):
    print('{:4}: {:20}, {:.4f}'.format(i, attributes_names[i], ac))

# +
# Example
# Run the model on the first training batch and visualize one instance.
idx = 20
outputs = session.run([t_output, t_latent_labels], feed_dict={t_x:train_data[:32], t_x_latent:train_latent_labels[:32]})
plt.imshow(train_data[idx])
plt.show()
y = outputs[0]
# NOTE(review): assumes t_output is rank >= 3 and y[0, idx, :] is an
# image-like 2-D slice suitable for imshow — TODO confirm the output shape.
temp = y[0, idx, :]
plt.imshow(temp)
plt.show()
# Attribute indices (per the labels printed here): 20=Male, 22=Mustache,
# 24=No_Beard.
print('Male:', train_latent_labels[idx][20])
print('Mustache:', train_latent_labels[idx][22])
print('No_Beard:', train_latent_labels[idx][24])
# -
read_model_and_analyze.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import requests import pickle import json from bs4 import BeautifulSoup from datetime import datetime import datetime import pandas as pd import os def load_comments_from_file(fname): with open(fname,'rb') as f: return pickle.load(f) def get_unic_count(elements): return len(set([ e[u'commenter'][u'name']+str(e[u'createdDate'])+e[u'message'] for e in elements ] )) def delete_repeated(elements): def get_key(e): return e[u'message']+str(e[u'createdDate'])+e[u'commenter'][u'name'] new_elements =[] key_set = set() for e in elements: k = get_key(e) if k not in key_set: key_set.add(k) new_elements.append(e) return new_elements # + def get_time(time): return datetime.datetime.fromtimestamp(0) + datetime.timedelta(minutes=float(time) /1000/60) def initials(name): new_name = u''.join( [ s[0] for s in name.split(',')[0].split()[:3] ] ).upper() new_name = u''.join([ n for n in new_name if n.isupper() ]) return new_name # - #load data elements = [] file_names = [ 'data/'+s for s in os.listdir("data") ] for file_name in file_names: elements.extend(load_comments_from_file(file_name)) elements = delete_repeated(elements) #sort by data elements.sort(key = lambda x: x[u'createdDate']) # + #add nested unfolded_elements = [] next_id = 1 def extract_data(e,parent_id = 0): global next_id element = { u'Id':next_id, u'name':e[u'commenter'][u'name'], u'initial':initials(e[u'commenter'][u'name']), u'date':get_time(e[u'createdDate']), u'message':e[u'message'], u'parent_id':parent_id} next_id = next_id + 1 return element for e in elements: unfolded_elements.append(extract_data(e)) parent_id = unfolded_elements[-1]['Id'] unfolded_elements.extend([ extract_data(ne, parent_id) for ne in e[u'nestedComments']['elements'] ]) # - p = pd.DataFrame(unfolded_elements) 
# Write the flattened comment table to Excel and show it as cell output.
p.to_excel('demo.xlsx',index=False)
p
.ipynb_checkpoints/Untitled-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/vusal2014/data/blob/master/TestChart.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="rsKa28A4ZJyi" colab_type="code" outputId="8e9871ed-b310-446d-e7f6-9cca5a5bcf87" colab={"base_uri": "https://localhost:8080/", "height": 368} import altair as alt from vega_datasets import data source = data.movies.url alt.Chart(source).mark_bar().encode( alt.X("IMDB_Rating:Q", bin=True), y='count()', )
TestChart.ipynb
/ --- / jupyter: / jupytext: / text_representation: / extension: .q / format_name: light / format_version: '1.5' / jupytext_version: 1.14.4 / --- / + cell_id="00000-398da09d-c532-4886-9d4d-8b1248e7e9f2" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=6 execution_start=1625502033948 source_hash="8ad57f32" tags=[] # Start writing code here... / + [markdown] cell_id="00001-9b04a0a3-9504-41f9-a712-005ae7a3815b" deepnote_cell_type="text-cell-h3" tags=[] / ### Bootstrap technique / + cell_id="00002-3587db4b-8e2d-43c8-9f22-63d322de65a0" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=1450 execution_start=1625502033962 source_hash="24b26b86" tags=[] import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns / + cell_id="00002-fd89c6a3-9513-4fd3-82cf-dfea846abb9b" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=12 execution_start=1625502035425 source_hash="594ffa7d" tags=[] Data=[22.3,27.1,24.3,25.3,26.0] Data / + cell_id="00004-a552d5e1-9b84-4805-a4d2-7e5a7b057a07" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=16 execution_start=1625502035437 source_hash="6bc1ac84" tags=[] np.mean(Data) / + cell_id="00005-e84806e1-d912-49c8-a39f-91d53e6cabe6" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=2 execution_start=1625502035450 source_hash="8e639365" tags=[] np.random.choice([1,2,3,4,5],size=5) / + [markdown] cell_id="00006-77f2cf5b-8323-44c7-a980-67e44a0448bd" deepnote_cell_type="markdown" tags=[] / computation of bootstrap replicate / + cell_id="00006-b83fd520-d815-4d1e-9fdc-1e94598b8870" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=204 execution_start=1625502035498 source_hash="a99b02f8" tags=[] bs_sample=np.random.choice() / + cell_id="00008-97a38e77-20aa-46f9-b5e4-8275f40d85b7" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=140 
execution_start=1625502041266 source_hash="b0361f03" tags=[] # loading data df_us=pd.read_csv("https://raw.githubusercontent.com/reddyprasade/Data-Analysis-with-Python/main/Statistics/Data/2008_swing_states.csv") / + cell_id="00009-b74d5b0a-044f-43c4-8ddf-855b0e2db3cb" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=11 execution_start=1625502041734 source_hash="2de65fbb" tags=[] mean=df_us['total_votes'].mean() mean / + cell_id="00010-ed30a0ab-03f8-4f53-b566-77028769da63" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=310 execution_start=1625502042100 source_hash="c96255cd" tags=[] df_us['total_votes'].hist() / + cell_id="00011-bffb4b72-60ac-4fbe-967e-26c24070a720" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=17 execution_start=1625502044527 source_hash="76b8c816" tags=[] df_us.info() / + cell_id="00012-73c1c0c3-f513-4711-befb-c37ecb8d5b7d" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=7 execution_start=1625502181503 source_hash="a009b768" tags=[] np.random.choice([1,2,3,4,5,6,7,8,9],9) / + cell_id="00012-c97c2065-544e-4b30-9bb8-05df769fd090" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=1 execution_start=1625502046354 source_hash="d68e18ea" tags=[] def Bootstrap_replicator(data,fun): """ returns a value of applied function value to the random choice taken in given data data: 1-D array """ sample=np.random.choice(data,size=len(data)) value=fun(sample) return value / + cell_id="00013-bf185f0c-ad64-4567-8424-03c2cc214ec4" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=4 execution_start=1625502215106 source_hash="a62b9f85" tags=[] Bootstrap_replicator(df_us['total_votes'],np.mean) / + cell_id="00014-e8237dc9-2841-44a3-b08f-a5c33cd89bc6" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=0 execution_start=1625502270942 source_hash="139e8566" tags=[] us_sample=np.zeros(100) / + 
cell_id="00015-6afc68f9-a7a2-4355-844b-c3641d50f43b" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=4 execution_start=1625502278495 source_hash="f4daa50a" tags=[] for i in range(100): us_sample[i]=Bootstrap_replicator(df_us['total_votes'],np.mean) / + cell_id="00016-862c2001-5742-44b0-971a-5cd4fb576165" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=0 execution_start=1625502305616 source_hash="3f40e4c3" tags=[] us_total_votes_data=pd.DataFrame(us_sample,columns=['total_votes']) / + cell_id="00017-3e08b09b-ee9f-4ad4-b9e5-ebea22c523ee" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=21 execution_start=1625502311474 source_hash="ac7b1be6" tags=[] us_total_votes_data / + cell_id="00018-0bd08b74-4c05-4723-a2f8-5c7c829dc346" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=167 execution_start=1625502326787 source_hash="87d0c097" tags=[] us_total_votes_data.hist() / + cell_id="00019-36a4c013-8490-4ccb-a7a9-b69f4a1200ef" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=3 execution_start=1625502337634 source_hash="841c0f97" tags=[] conf_int=np.percentile(us_total_votes_data,[2.5,97.5]) / + cell_id="00020-9cb8497b-b5d8-4fe8-a65f-7dda47c719d6" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=4 execution_start=1625502340298 source_hash="15ec456e" tags=[] conf_int / + [markdown] cell_id="00020-28d006ff-1428-4a24-bd46-2946495d5eaf" deepnote_cell_type="text-cell-h1" tags=[] / # Non-Parametric Inference / + [markdown] cell_id="00020-1b252b42-9606-4490-a32f-fcc4b448dfe6" deepnote_cell_type="markdown" tags=[] / ### only applicable for large data / + cell_id="00020-02174b19-9cf6-4b5d-bbf4-875397ec423c" deepnote_cell_type="code" deepnote_to_be_reexecuted=true execution_millis=10 execution_start=1625450781469 source_hash="6ab75c4a" tags=[] df = 
pd.read_csv("https://raw.githubusercontent.com/reddyprasade/Data-Analysis-with-Python/main/Statistics/Data/2008_swing_states.csv") df.head() / + cell_id="00024-a34ad68a-8e16-4085-b86b-ae5227636a1f" deepnote_cell_type="code" deepnote_to_be_reexecuted=true execution_millis=7 execution_start=1625450823098 source_hash="de1e323c" tags=[] df.info() / + cell_id="00025-77d2839d-77b0-4ba0-a2d1-f9a688e9bddd" deepnote_cell_type="code" deepnote_to_be_reexecuted=true execution_millis=1 execution_start=1625451137593 source_hash="8d9100c9" tags=[] swing =df.copy() / + cell_id="00026-bb863da6-5617-4791-ab9e-daca3d24602b" deepnote_cell_type="code" deepnote_to_be_reexecuted=true source_hash="b623e53d" tags=[] / + [markdown] created_in_deepnote_cell=true deepnote_cell_type="markdown" tags=[] / <a style='text-decoration:none;line-height:16px;display:flex;color:#5B5B62;padding:10px;justify-content:end;' href='https://deepnote.com?utm_source=created-in-deepnote-cell&projectId=fcf2399d-084b-4173-af36-20a4a45218a8' target="_blank"> / <img alt='Created in deepnote.com' style='display:inline;max-height:16px;margin:0px;margin-right:7.5px;' src='data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iODBweCIgaGVpZ2h0PSI4MHB4IiB2aWV<KEY> > </img> / Created in <span style='font-weight:600;margin-left:4px;'>Deepnote</span></a>
Statistics/Bootstraping _technique1-7-2021.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/adeepH/Multimodal-Machine-Translation/blob/main/Text/indicTrans.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} id="rE4MO-8bDtwD" outputId="715f7b89-92d0-4d2e-f820-34c5bf3aa22d" # create a seperate folder to store everything # !mkdir finetuning # %cd finetuning # + colab={"base_uri": "https://localhost:8080/"} id="-2Rs6_WkD_gF" outputId="cc3acfe7-ea10-445b-871e-1a48a5cf022c" # clone the repo for running finetuning # !git clone https://github.com/AI4Bharat/indicTrans.git # %cd indicTrans # clone requirements repositories # !git clone https://github.com/anoopkunchukuttan/indic_nlp_library.git # !git clone https://github.com/anoopkunchukuttan/indic_nlp_resources.git # !git clone https://github.com/rsennrich/subword-nmt.git # %cd .. # + colab={"base_uri": "https://localhost:8080/"} id="duwTvJ9xEBJ1" outputId="080b4179-be70-408c-b91c-722c5243c59e" # ! sudo apt install tree # Install the necessary libraries # !pip install sacremoses pandas mock sacrebleu tensorboardX pyarrow indic-nlp-library # Install fairseq from source # !git clone https://github.com/pytorch/fairseq.git # %cd fairseq # # !git checkout da9eaba12d82b9bfc1442f0e2c6fc1b895f4d35d # !pip install --editable ./ # %cd .. 
# + colab={"base_uri": "https://localhost:8080/"} id="oD2EHQdqEH70" outputId="10b90f71-837a-49a9-b7fe-d854e981fd80"
# download the indictrans model
# downloading the en-indic model
# this will contain:
# en-indic/
# ├── final_bin  # contains fairseq dictionaries (we will use this to binarize the new finetuning data)
# │   ├── dict.SRC.txt
# │   └── dict.TGT.txt
# ├── model      # contains model checkpoint(s)
# │   └── checkpoint_best.pt
# └── vocab      # contains bpes for src and tgt (since we train separate vocabularies) generated with subword_nmt
#     ├── bpe_codes.32k.SRC
#     ├── bpe_codes.32k.TGT
#     ├── vocab.SRC
#     └── vocab.TGT

# #!wget https://akpublicdata.blob.core.windows.net/indicnlp/indictrans/inidctrans-en-indic-v0.2.zip
# !wget https://storage.googleapis.com/samanantar-public/V0.2/models/en-indic.zip
# !unzip en-indic.zip

# if you want to finetune indic-en models, use the link below
# # !wget https://akpublicdata.blob.core.windows.net/indicnlp/indictrans/indictrans-indic-en-v0.2.zip
# # !unzip indictrans-indic-en-v0.2.zip

# + colab={"base_uri": "https://localhost:8080/"} id="XF4GladJ1W9e" outputId="957fce02-1844-4a06-8eab-4124a23bb8a0"
# !sudo apt-get install p7zip-full

# + id="A7J6l8FF2LgE"
import pandas as pd

# Three English source files and the corresponding Google-translated Tamil
# files. (BUG FIX: in the original cell the lists read from the *.en files
# were bound to names ta_1..ta_3 and the *.google.ta lists to en_1..en_3;
# the two mix-ups cancelled out when the DataFrames were built, but the
# names were misleading and the six file handles were never closed. Read
# with context managers and name the lists after what they actually hold.
# The resulting Tam / Eng DataFrames are identical to before.)
en_files = ['/content/train.lc.norm.tok.1.en',
            '/content/train.lc.norm.tok.2.en',
            '/content/train.lc.norm.tok.3.en']
ta_files = ['/content/train.lc.norm.tok.1.google.ta',
            '/content/train.lc.norm.tok.2.google.ta',
            '/content/train.lc.norm.tok.3.google.ta']


def _read_lines(path):
    # One sentence per line; handle is closed on exit.
    with open(path, 'r') as fh:
        return fh.read().splitlines()


en = []  # English sentences
for path in en_files:
    en.extend(_read_lines(path))

ta = []  # Tamil sentences
for path in ta_files:
    ta.extend(_read_lines(path))

# Parallel corpus columns; train.ta / train.en are written out from these
# DataFrames in a later cell.
Tam = pd.DataFrame()
Eng = pd.DataFrame()
Tam['ta'] = ta
Eng['en'] = en

# + id="iA5lKoAa3a6D" 
outputId="2702ba98-8f2a-4e59-d360-c382946e6335" colab={"base_uri": "https://localhost:8080/"} # cd /content/ # + id="aLZaQps_3QMD" outputId="f30272d3-d1f6-45e5-c192-622ea70056b2" colab={"base_uri": "https://localhost:8080/"} Tam.values # + id="iTavA3xc2X-4" import numpy as np np.savetxt('train.ta', Tam['ta'].values, fmt='%s') np.savetxt('train.en', Eng['en'].values, fmt='%s') #Tam.to_csv('Train.ta', sep='\t') #Eng.to_csv('Train.en', sep='\t') # + id="Z1WkK7RQyK5G" outputId="8ba31b53-2bf4-43e0-e89b-c400483ae51e" colab={"base_uri": "https://localhost:8080/"} # !mkdir /content/finetuning/dataset # !mkdir /content/finetuning/dataset/train # !mkdir /content/finetuning/dataset/dev # !mkdir /content/finetuning/dataset/test # !mkdir /content/finetuning/dataset/train/en-ta # + colab={"base_uri": "https://localhost:8080/"} id="-1KlcCvHeyvZ" outputId="2d77c23c-037c-4f70-e1d6-27f6347bfa7e" # %cd /content/ # + colab={"base_uri": "https://localhost:8080/"} id="1AOrU9KHz2SS" outputId="6332377b-b93e-4deb-e5f1-c3eb31e8ca35" !7z x /content/en_mr.zip #password <PASSWORD> # + id="ZKYGc9Nk2Sbg" # !cp /content/dev.en /content/finetuning/dataset/dev # !cp /content/dev.ta /content/finetuning/dataset/dev # + id="PXfTb2Zu2pE1" # !mv /content/dev.en /content/finetuning/dataset/test/ # !mv /content/dev.ta /content/finetuning/dataset/test/ # + id="YzyPcs4O3bDc" outputId="d4f7bbfa-31ea-4bfd-8463-fe48a1e48052" colab={"base_uri": "https://localhost:8080/"} # !mv /content/train.en /content/finetuning/dataset/train/en-ta # !mv /content/train.ta /content/finetuning/dataset/train/en-ta # + colab={"base_uri": "https://localhost:8080/"} id="e2jAutJB4Dqg" outputId="b73e1bea-2f4f-4a54-d945-9343d07a970b" # %cd /content/finetuning/indicTrans # + colab={"base_uri": "https://localhost:8080/"} id="8yPTbM_clKfI" outputId="d4ca2bd6-0e16-4d43-dbf1-caa38c9f20c7" # %%shell exp_dir=../dataset src_lang=en tgt_lang=indic # change this to indic-en, if you have downloaded the indic-en dir download_dir=../en-indic 
train_data_dir=$exp_dir/train dev_data_dir=$exp_dir/dev test_data_dir=$exp_dir/test # echo $exp_dir # + colab={"base_uri": "https://localhost:8080/"} id="NhwUXyYVXrOY" outputId="f22a3eac-0154-4bb7-e0bc-b47b17698053" # all the data preparation happens in this cell # %%shell exp_dir=../dataset src_lang=en tgt_lang=indic # change this to indic-en, if you have downloaded the indic-en dir download_dir=../en-indic train_data_dir=$exp_dir/train dev_data_dir=$exp_dir/dev test_data_dir=$exp_dir/test # echo "Running experiment ${exp_dir} on ${src_lang} to ${tgt_lang}" train_processed_dir=$exp_dir/data devtest_processed_dir=$exp_dir/data out_data_dir=$exp_dir/final_bin # mkdir -p $train_processed_dir # mkdir -p $devtest_processed_dir # mkdir -p $out_data_dir # indic languages. # cvit-pib corpus does not have as (assamese) and kn (kannada), hence its not part of this list langs=( ta ) for lang in ${langs[@]};do if [ $src_lang == en ]; then tgt_lang=$lang else src_lang=$lang fi train_norm_dir=$exp_dir/norm/$src_lang-$tgt_lang devtest_norm_dir=$exp_dir/norm/$src_lang-$tgt_lang mkdir -p $train_norm_dir mkdir -p $devtest_norm_dir # preprocessing pretokenizes the input (we use moses tokenizer for en and indicnlp lib for indic languages) # after pretokenization, we use indicnlp to transliterate all the indic data to devnagiri script # train preprocessing train_infname_src=$train_data_dir/en-${lang}/train.$src_lang train_infname_tgt=$train_data_dir/en-${lang}/train.$tgt_lang train_outfname_src=$train_norm_dir/train.$src_lang train_outfname_tgt=$train_norm_dir/train.$tgt_lang echo "Applying normalization and script conversion for train $lang" input_size=`python scripts/preprocess_translate.py $train_infname_src $train_outfname_src $src_lang true` input_size=`python scripts/preprocess_translate.py $train_infname_tgt $train_outfname_tgt $tgt_lang true` echo "Number of sentences in train $lang: $input_size" # dev preprocessing dev_infname_src=$dev_data_dir/dev.$src_lang 
dev_infname_tgt=$dev_data_dir/dev.$tgt_lang dev_outfname_src=$devtest_norm_dir/dev.$src_lang dev_outfname_tgt=$devtest_norm_dir/dev.$tgt_lang echo "Applying normalization and script conversion for dev $lang" input_size=`python scripts/preprocess_translate.py $dev_infname_src $dev_outfname_src $src_lang true` input_size=`python scripts/preprocess_translate.py $dev_infname_tgt $dev_outfname_tgt $tgt_lang true` echo "Number of sentences in dev $lang: $input_size" # test preprocessing test_infname_src=$test_data_dir/test.$src_lang test_infname_tgt=$test_data_dir/test.$tgt_lang test_outfname_src=$devtest_norm_dir/test.$src_lang test_outfname_tgt=$devtest_norm_dir/test.$tgt_lang echo "Applying normalization and script conversion for test $lang" input_size=`python scripts/preprocess_translate.py $test_infname_src $test_outfname_src $src_lang true` input_size=`python scripts/preprocess_translate.py $test_infname_tgt $test_outfname_tgt $tgt_lang true` echo "Number of sentences in test $lang: $input_size" done # Now that we have preprocessed all the data, we can now merge these different text files into one # ie. 
for en-as, we have train.en and corresponding train.as, similarly for en-bn, we have train.en and corresponding train.bn # now we will concatenate all this into en-X where train.SRC will have all the en (src) training data and train.TGT will have all the concatenated indic lang data python scripts/concat_joint_data.py $exp_dir/norm $exp_dir/data $src_lang $tgt_lang 'train' python scripts/concat_joint_data.py $exp_dir/norm $exp_dir/data $src_lang $tgt_lang 'dev' python scripts/concat_joint_data.py $exp_dir/norm $exp_dir/data $src_lang $tgt_lang 'test' # use the vocab from downloaded dir # cp -r $download_dir/vocab $exp_dir # echo "Applying bpe to the new finetuning data" bash apply_single_bpe_traindevtest_notag.sh $exp_dir # mkdir -p $exp_dir/final # We also add special tags to indicate the source and target language in the inputs # Eg: to translate a sentence from english to hindi , the input would be __src__en__ __tgt__hi__ <en bpe tokens> # echo "Adding language tags" python scripts/add_joint_tags_translate.py $exp_dir 'train' python scripts/add_joint_tags_translate.py $exp_dir 'dev' python scripts/add_joint_tags_translate.py $exp_dir 'test' data_dir=$exp_dir/final out_data_dir=$exp_dir/final_bin # rm -rf $out_data_dir # binarizing the new data (train, dev and test) using dictionary from the download dir num_workers=`python -c "import multiprocessing; print(multiprocessing.cpu_count())"` data_dir=$exp_dir/final out_data_dir=$exp_dir/final_bin # # rm -rf $out_data_dir # echo "Binarizing data. 
This will take some time depending on the size of finetuning data" fairseq-preprocess --source-lang SRC --target-lang TGT \ --trainpref $data_dir/train --validpref $data_dir/dev --testpref $data_dir/test \ --destdir $out_data_dir --workers $num_workers \ --srcdict $download_dir/final_bin/dict.SRC.txt --tgtdict $download_dir/final_bin/dict.TGT.txt --thresholdtgt 5 --thresholdsrc 5 # + colab={"base_uri": "https://localhost:8080/"} id="iz6tzbe2tcs7" outputId="435932e2-4c35-49de-b342-5926235b2dc4" # Finetuning the model # pls refer to fairseq documentaion to know more about each of these options (https://fairseq.readthedocs.io/en/latest/command_line_tools.html) # some notable args: # --max-update=1000 -> for this example, to demonstrate how to finetune we are only training for 1000 steps, incrase this if needed # --arch=transformer_4x -> we use a custom transformer model and name it transformer_4x (4 times the parameter size of transformer base) # --user_dir -> we define the custom transformer arch in model_configs folder and pass it as an argument to user_dir for fairseq to register this architechture # --lr -> learning rate. From our limited experiments, we find that lower learning rates like 3e-5 works best for finetuning. # --restore-file -> reload the pretrained checkpoint and start training from here (change this path for indic-en. 
Currently its is set to en-indic) # --reset-* -> reset and not use lr scheduler, dataloader, optimizer etc of the older checkpoint # --max_tokns -> this is max tokens per batch !( fairseq-train ../dataset/final_bin \ --max-source-positions=210 \ --max-target-positions=210 \ --max-update=100000 \ --save-interval=1 \ --arch=transformer_4x \ --criterion=label_smoothed_cross_entropy \ --source-lang=SRC \ --max-epoch=3 \ --lr-scheduler=inverse_sqrt \ --target-lang=TGT \ --label-smoothing=0.1 \ --optimizer adam \ --adam-betas "(0.9, 0.98)" \ --clip-norm 1.0 \ --warmup-init-lr 1e-07 \ --warmup-updates 4000 \ --dropout 0.2 \ --tensorboard-logdir ../dataset/tensorboard-wandb \ --save-dir ../dataset/model \ --keep-last-epochs 5 \ --patience 5 \ --skip-invalid-size-inputs-valid-test \ --fp16 \ --no-last-checkpoints \ --user-dir model_configs \ --update-freq=2 \ --distributed-world-size 1 \ --max-tokens 4096 \ --lr 3e-5 \ --restore-file ../en-indic/model/checkpoint_best.pt \ --reset-lr-scheduler \ --reset-meters \ --reset-dataloader \ --reset-optimizer) # + id="tpPsT1e7vuO9" colab={"base_uri": "https://localhost:8080/"} outputId="c201316c-09bd-4896-e6b7-59d55816ea4e" # To test the models after training, you can use joint_translate.sh # joint_translate takes src_file, output_fname, src_lang, tgt_lang, model_folder as inputs # src_file -> input text file to be translated # output_fname -> name of the output file (will get created) containing the model predictions # src_lang -> source lang code of the input text ( in this case we are using en-indic model and hence src_lang would be 'en') # tgt_lang -> target lang code of the input text ( tgt lang for en-indic model would be any of the 11 indic langs we trained on: # as, bn, hi, gu, kn, ml, mr, or, pa, ta, te) # supported languages are: # as - assamese, bn - bengali, gu - gujarathi, hi - hindi, kn - kannada, # ml - malayalam, mr - marathi, or - oriya, pa - punjabi, ta - tamil, te - telugu # model_dir -> the directory containing 
the model and the vocab files # Note: if the translation is taking a lot of time, please tune the buffer_size and batch_size parameter for fairseq-interactive defined inside this joint_translate script # here we are translating the english sentences to hindi # !bash /content/finetuning/indicTrans/joint_translate.sh /content/finetuning/dataset/test/test.en en_mr_outputs.txt 'en' 'ta' /content/finetuning/dataset # + colab={"base_uri": "https://localhost:8080/"} id="_yEoqL1OlxZ1" outputId="b58defa7-fe24-47a0-e05a-2ed7d7241cb5" # !cat en_mr_outputs.txt # + id="bPqneByPxilN" colab={"base_uri": "https://localhost:8080/"} outputId="555d7925-d0e1-4baf-cb32-965be0f5d2a0" # to compute bleu scores for the predicitions with a reference file, use the following command # arguments: # pred_fname: file that contains model predictions # ref_fname: file that contains references # src_lang and tgt_lang : the source and target language # !bash /content/finetuning/indicTrans/compute_bleu.sh en_mr_outputs.txt /content/finetuning/dataset/test/test.ta 'en' 'ta' # + id="7etV5Lf7mbge"
Text/indicTrans.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:nlp-dl-env]
#     language: python
#     name: conda-env-nlp-dl-env-py
# ---

# +
import os
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
import pytorch_lightning as pl
from torch.utils.data import DataLoader, Dataset, TensorDataset, random_split
from pytorch_lightning.callbacks import EarlyStopping
# -

# ## Two-class classification model
# (The heading previously said "three-class", but the output layer has 2 units.)


class SimpleClassifier(pl.LightningModule):
    """MLP over 4 input features with two output classes.

    forward() returns log-probabilities, so NLL loss below is equivalent to
    cross-entropy on the raw logits.
    """

    def __init__(self):
        super().__init__()
        # Fully-connected stack: 4 -> 32 -> 64 -> 32 -> 2.
        self.layer_1 = torch.nn.Linear(4, 32)
        self.layer_2 = torch.nn.Linear(32, 64)
        self.layer_3 = torch.nn.Linear(64, 32)
        self.layer_4 = torch.nn.Linear(32, 2)

    def forward(self, x):
        # Hidden layers with ReLU activations.
        x = torch.relu(self.layer_1(x))
        x = torch.relu(self.layer_2(x))
        x = torch.relu(self.layer_3(x))
        # BUG FIX: the final layer produces class logits and must NOT pass
        # through ReLU (that clamps every logit to >= 0 and cripples the
        # class scores); feed it straight into log_softmax.
        x = self.layer_4(x)
        # Log-probability distribution over the two labels.
        return torch.log_softmax(x, dim=1)

    def cross_entropy_loss(self, logits, labels):
        # NLL on log-probabilities == cross-entropy on logits.
        return F.nll_loss(logits, labels)

    def training_step(self, train_batch, batch_idx):
        x, y = train_batch
        logits = self.forward(x)
        loss = self.cross_entropy_loss(logits, y)
        self.log('train_loss', loss)
        return loss

    def validation_step(self, val_batch, batch_idx):
        x, y = val_batch
        logits = self.forward(x)
        loss = self.cross_entropy_loss(logits, y)
        self.log('val_loss', loss)

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=1e-2)
        return optimizer


class MyDataModule(pl.LightningDataModule):
    """Wraps (X, y) arrays: first `train_size` rows train, the rest validate."""

    def __init__(self, X, y, train_size):
        super().__init__()
        self.X = X
        self.y = y
        self.train_size = train_size

    def setup(self, stage):
        # Plain positional split; labels cast to long for nll_loss.
        X_train = torch.tensor(self.X[:self.train_size])
        y_train = torch.tensor(self.y[:self.train_size], dtype=torch.long)
        X_test = torch.tensor(self.X[self.train_size:])
        y_test = torch.tensor(self.y[self.train_size:], dtype=torch.long)
        self.train = TensorDataset(X_train, y_train)
        self.test = TensorDataset(X_test, y_test)

    def train_dataloader(self):
        return DataLoader(self.train, batch_size=16)

    def val_dataloader(self):
        return DataLoader(self.test, batch_size=16)


# +
# Tiny synthetic dataset: 6 distinct rows repeated 200x; label 1 marks the
# all-negative rows.
X = np.array([
    [1, 1, 1, 1],
    [2, 4, 6, 24],
    [-1, -2, -1, -192],
    [-191, -3, -2, -7],
    [102, 12, 16, 200],
    [7, 9, 13, 177],
] * 200, dtype='float32')

y = np.array([
    0, 0, 1, 1, 0, 0
] * 200, dtype='int32')
# -

data_module = MyDataModule(X, y, train_size=16)

# +
# Stop when val_loss has not improved for 20 epochs.
early_stopping = EarlyStopping('val_loss', patience=20)

model = SimpleClassifier()
trainer = pl.Trainer(max_epochs=1000, callbacks=[early_stopping])
trainer.fit(model, data_module)
# -

# Predicted class index for one unseen positive-looking row.
model(torch.tensor([[1, 1, 1, 79]], dtype=torch.float)).argmax()
PyTorch Lightning/00_-_PyTorch_Lightning_Basics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# NumPy basics: array creation, argmax/argmin, slicing and reshaping.

import numpy as np

# Start from a plain Python list holding 0..7 and wrap it in an ndarray.
my_list = list(range(8))
my_array = np.array(my_list)

print(my_array)

# Index of the largest element (last one here, value 7).
np.argmax(my_array)

# Index of the smallest element (first one here, value 0).
np.argmin(my_array)

print(my_array)

# Basic slicing: prefix, suffix, and an interior window.
print(my_array[:4])
print(my_array[4:])
print(my_array[4:6])

# 1-D shape: a single axis of length 8.
print(my_array.shape)

# Reshape to 2 rows x 4 columns...
reshaped_array = my_array.reshape(2, 4)
print(reshaped_array)

# ...then to 4 rows x 2 columns.
reshaped_array = my_array.reshape(4, 2)
print(reshaped_array)
E2E Android ML with tf.Keras & TFLite/L3_Numpy Basics.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # ## 'rvest' packages require(dplyr) require(rvest) url <- 'https://sports.news.naver.com/esports/news/read.nhn?oid=347&aid=0000127317' read_html(url) read_html(url) %>% html_nodes("h4") read_html(url) %>% html_nodes("h4[class = 'title']") read_html(url) %>% html_nodes("h4") %>% html_attr('class') read_html(url) %>% html_nodes("h4.title") ### attr이 id이면 #으로, class이면 . read_html(url) %>% html_nodes("h4") %>% html_text # ## 'httr' packages require(httr) GET(url) GET(url) %>% content() GET('http://httpbin.org/') ### httpbin.org는 연습용 사이트 GET('http://httpbin.org/get') %>% content ah <- httr::add_headers('User-Agent' = "chulhongsung's test") GET('http://httpbin.org/get', ah) %>% content
R/rvest_and_httr.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Matrix representation of operators # # We saw briefly that we can represent finite difference operators as matrices. Let's look at this in a bit more detail. # # To do this, we need to provide an ordering of all of the degrees of freedom (dofs) in our finite difference discretisation. In one dimension, we order the points in the domain from left to right and use a single index: # # $$ # x_0 < x_1 < \dots < x_{n-1} # $$ # # and so we have a single index for all the points $i = [0, 1, \dots, n-1]$. We can therefore represent our function $u(x)$ discretised at the points $\{x_i\}$ as a vector in $\mathbb{R}^n$ # # $$ # U = \begin{bmatrix} u_0 \\ u_1 \\ \vdots \\ u_{n-1} \end{bmatrix} # $$ # # and similarly with the right hand side $f(x)$. The differencing operators *combine* entries from $U$ linearly to produce a new vector $D U$. Since this operation is linear, we can represent it as a matrix # # $$ # D : \mathbb{R}^n \to \mathbb{R}^n # $$ # # which takes in a vector $U$ and spits out a new vector representing the action of the differencing operator on $U$. # # For example, the left-looking operator $D_- u_i = \frac{u_i - u_{i-1}}{h}$ uses, at each point $i$ values from points $i$ and $i-1$. On a grid with 4 points, this can be represented as the matrix # # $$ # D_- = \frac{1}{h} # \begin{bmatrix} # 1 & 0 & 0 & 0\\ # -1 & 1 & 0 & 0\\ # 0 & -1 & 1 & 0\\ # 0 & 0 & -1 & 1 # \end{bmatrix}. # $$ # # Similarly, the centered difference approximation of $\frac{\text{d}^2}{\text{d} x^2}$, $D^2 u_i = \frac{u_{i+1} - 2u_i + u_{i-1}}{h^2}$ can be written # # $$ # D^2 = \frac{1}{h^2} # \begin{bmatrix} # -2 & 1 & 0 & 0\\ # 1 & -2 & 1 & 0\\ # 0 & 1 & -2 & 1\\ # 0 & 0 & 1 & -2 # \end{bmatrix}. 
# $$ # # ### "Matrix-free" implementation # # If we only never need to apply the differencing operator, it might make sense (memory or efficiency, for example) to just provide a function which computes the matrix-vector multiplication without storing the matrix. Let's see this in action. # + # %matplotlib notebook from matplotlib import pyplot import numpy pyplot.style.use('ggplot') def dminus(u, h): n, = u.shape du = numpy.zeros_like(u) for i in range(n): if i == 0: du[i] = 1/h * u[i] else: du[i] = 1/h * (u[i] - u[i-1]) return du def dminusop(u, h): n, = u.shape D = numpy.eye(n) - numpy.diag(numpy.full(n-1, 1), k=-1) D *= 1/h return D # - n = 10 u = numpy.random.rand(n) h = 1/n dminus(u, h) D = dminusop(u, h) D @ u numpy.allclose(D@u, dminus(u, h)) # Which one is faster? Let's have a go with a bigger grid. We can use notebook "magic" `%%timeit` to time the execution of a cell. n = 10000 u = numpy.random.rand(n) h = 1/n # %%timeit dminus(u, h) D = dminusop(u, h) # %%timeit D @ u # Perhaps surprisingly, the python loops are faster than the numpy matrix-vector product. This is likely because the numpy matrix is 10000 x 10000 and dense (and we do a lot of work multiplying by zero). We should probably use a *sparse* matrix (see below). # We can also attempt to speed up the loop by using the Python JIT compiler [numba](https://numba.pydata.org) (available via `pip install numba`). # + import numba @numba.jit def dminus_compiled(u, h): n, = u.shape du = numpy.zeros_like(u) for i in range(n): if i == 0: du[i] = 1/h * u[i] else: du[i] = 1/h * (u[i] - u[i-1]) return du # - # %%timeit dminus_compiled(u, h) # Nearly a 500x speedup. This doesn't work for all functions, but if you have code with loops and numpy arrays, it's probably worth a shot. # ## 2D finite differences # # Now, finally, let's look at finite differences in 2D. We remind ourselves of the differential operators we might encounter. 
Rather than just a derivative in the $x$ direction, we can take derivatives of a function in both $x$ and $y$. # # $$ # \begin{aligned} # \partial_x u &= \frac{\partial u(x, y)}{\partial x}\\ # \partial_y u &= \frac{\partial u(x, y)}{\partial y} # \end{aligned}. # $$ # # Often we see vector-calculus operators. # # ### Gradient # # For a scalar $u(x, y)$ the 2D gradient is a vector # # $$ # \nabla u(x, y) := \begin{bmatrix} \partial_x u \\ \partial_y u \end{bmatrix}. # $$ # # ### Divergence # # For a vector $\vec{w}(x, y) = \begin{bmatrix} w_0 \\ w_1 \end{bmatrix}$, the divergence is a scalar # # $$ # \nabla \cdot \vec{w} = \partial_x w_0 + \partial_y w_1. # $$ # # ### Laplacian # # For a scalar $u(x, y)$ the Laplacian is a scalar # # $$ # \nabla^2 u(x, y) := \nabla \cdot \nabla u(x, y) = \partial_x^2 u + \partial_y^2 u. # $$ # # ### Finite difference operators # # As usual, we need some domain $\Omega$ in which we will solve the problem. Given some domain, we need to choose a way of specifying it, and ordering the degrees of freedom. This is very fiddly for anything other than coordinate aligned rectangular domains (one of the major disadvantages of finite differences). As a result, all of the problems we will solve will be on squares and rectangles. # # Lets choose $\Omega = (0, W) \times (0, H)$. We'll pick $N_x$ points in the x-direction, and $N_y$ in the y-direction. We'll choose a typewriter ordering of degrees of freedom (bottom-to-top, left-to-right), so given an index $i$ in the x-direction and an index $j$ in the y-direction it represents the point # # $$ # (x, y) = (i h_x, j h_y) # $$ # # where # # $$ # \begin{aligned} # h_x &= \frac{W}{N_x - 1}\\ # h_y &= \frac{H}{N_y - 1}\\ # \end{aligned} # $$ # # and $i \in \{0, \dots, N_x - 1\}$, $j \in \{0, \dots, N_y - 1\}$. # # We will again represent our solution vectors as 1D vectors (remembering that we should plot them in 2D). # # Let's write some code to encapsulate a domain and draw vectors. 
# + from collections import namedtuple Point = namedtuple("Point", ("x", "y")) class Grid(object): def __init__(self, Nx, Ny, P0=Point(0,0), P1=Point(1,1)): X0, Y0 = P0 X1, Y1 = P1 self.W = X1 - X0 self.H = Y1 - Y0 self.Nx = Nx self.Ny = Ny x = numpy.linspace(X0, X1, self.Nx) y = numpy.linspace(Y0, Y1, self.Ny) self.XY = numpy.meshgrid(x, y, indexing="ij") @property def ndof(self): return self.Nx*self.Ny @property def hx(self): return self.W/(self.Nx - 1) @property def hy(self): return self.H/(self.Ny - 1) def alpha(self, i, j): return i*self.Ny + j def new_vector(self, components=1): vec = numpy.zeros(self.Nx*self.Ny*components, dtype=float) shape = (self.Nx, self.Ny) if components > 1: shape = shape + (components, ) return vec.reshape(shape) def contourf(self, u, levels=11): U = u.reshape(self.Nx, self.Ny) pyplot.figure() pyplot.contourf(*self.XY, U, levels) pyplot.colorbar() def quiver(self, u, colour=None): U = u.reshape(self.Nx, self.Ny, 2) pyplot.figure() if colour is None: pyplot.quiver(*self.XY, U[..., 0], U[..., 1]) else: pyplot.quiver(*self.XY, U[..., 0], U[..., 1], colour) # - grid = Grid(17, 15, P0=Point(-2, -1), P1=Point(1, 1)) X, Y = grid.XY u = grid.new_vector(components=2) u[..., 0] = -Y u[..., 1] = X grid.quiver(u); # Notice how we return vectors that we can index with two indices (or three if we have a vector). For 2D indexing of a vector, I'll write (using roman indices): # # $$ # U_{i, j} # $$ # # to indicate the value at $(i h_x, j h_y)$. # # We can translate these 2D indices into a 1D index to a flat vector. I'll use greek letters for these flat indices. # # $$ # \alpha(i, j) := i N_y + j # $$ # Now let's think about solving an equation, we'll start by solving the 2D Laplacian with Dirichlet conditions. # # $$ # \begin{aligned} # -\nabla^2 u &= f && \text{ on }\Omega = (0, 1) \times (0, 1)\\ # u &= g && \text{ on }\partial\Omega # \end{aligned} # $$ # # We'll pick $f = 8\pi^2\sin(2\pi x)\sin(2\pi y)$ and set $g = 0$. 
# # Since we're only doing things on axis-aligned domains, the derivatives decompose into directional derivatives, and so the 2D stencil is just the "sum" of the two 1D stencils for $\partial_x^2$ and $\partial_y^2$. Note that we must be careful to use the correct $h_x$ or $h_y$. # # So we have # # $$ # -\nabla^2 = \frac{1}{h_x^2} \begin{bmatrix} & & \\ -1 & 2 & -1 \\ & & \end{bmatrix} + \frac{1}{h_y^2} \begin{bmatrix} & -1 & \\ & 2 & \\ & -1 & \end{bmatrix}. # $$ # # Where this stencil notation is to be understood as being laid over the 2D grid. We will come to the indexing in a moment. def laplacian(grid, f, g): ndof = grid.ndof A = numpy.zeros((ndof, ndof)) X, Y = grid.XY u0 = g(X, Y) rhs = f(X, Y) stencilx = 1/grid.hx**2 * numpy.array([-1, 0, 2, 0, -1]) stencily = 1/grid.hy**2 * numpy.array([0, -1, 2, -1, 0]) stencil = stencilx + stencily for i in range(grid.Nx): for j in range(grid.Ny): row = grid.alpha(i, j) if i in (0, grid.Nx - 1) or j in {0, grid.Ny - 1}: # Dirichlet bc A[row, row] = 1 rhs[i, j] = u0[i, j] else: cols = [grid.alpha(*ij) for ij in [(i-1, j), (i, j-1), (i, j), (i, j+1), (i+1, j)]] A[row, cols] = stencil return A, rhs grid = Grid(41, 41) f = lambda x, y: 8*numpy.pi**2*numpy.sin(2*numpy.pi*x)*numpy.sin(2*numpy.pi*y) g = lambda x, y: numpy.zeros_like(x) A, rhs = laplacian(grid, f, g) pyplot.figure() pyplot.spy(A); x = numpy.linalg.solve(A, rhs.flatten()) grid.contourf(x, levels=20) # Let's look at convergence. We conveniently picked a problem for which the exact solution is easy to compute # # $$ # u^*(x, y) = \sin(2\pi x)\sin(2\pi y). 
# $$ def mms_errors(ncell, f, g): errors = [] for n in ncell: grid = Grid(n + 1, n + 1) A, rhs = laplacian(grid, f, g) u = numpy.linalg.solve(A, rhs.flatten()) X, Y = grid.XY uexact = numpy.sin(2*numpy.pi*X)*numpy.sin(2*numpy.pi*Y) u = u.reshape(uexact.shape) error = u - uexact error = numpy.sqrt(grid.hx*grid.hy)*numpy.linalg.norm(error) errors.append(error) return errors ncell = numpy.geomspace(4, 64, num=5, dtype=int) errors = mms_errors(ncell, f, g) pyplot.figure() pyplot.loglog(1/ncell, errors, "o", label="Numeric error") pyplot.loglog(1/ncell, 1/ncell, label="$h^{-1}$") pyplot.loglog(1/ncell, 1/ncell**2, label="$h^{-2}$") pyplot.xlabel("$h$") pyplot.ylabel("$\|u - u^*\|_2$") pyplot.legend(); # ### Sparse matrices # # We have 2nd order convergence. Notice that in 2D the grid-function norm is # # $$ # \|u\|_p = \left(h_x h_y \sum_i |u_i|^p\right)^{\frac{1}{p}} # $$ # # since we're a approximating a two-dimensional integral, and each little piece has area $h_x h_y$. # # We'd like to try on some bigger grids, but we run into a problem. The matrices we're making take a tremendously long time to invert. Let's see. import cProfile, pstats profiles = [] ncell = [40, 50, 60, 70, 80, 90, 100] for n in ncell: prof = cProfile.Profile() prof.enable() grid = Grid(n+1, n+1) A, rhs = laplacian(grid, f, g) u = numpy.linalg.solve(A, rhs.flatten()) prof.disable() profiles.append(prof) for n, p in zip(ncell, profiles): print(f"***** Profile for {n}x{n} grid *****") pstats.Stats(p).sort_stats(pstats.SortKey.TIME).print_stats(3); # Let's look at the *sparsity* of the operator again, and some statistics. 
A, _ = laplacian(Grid(11, 11), f, g)
pyplot.figure()
pyplot.spy(A);

# The inverse of the (sparse) Laplacian is dense.  An earlier revision
# commented the next three lines out but left the print below referring to
# `Ainv`, which raised a NameError; restore the computation.
Ainv = numpy.linalg.inv(A)
pyplot.figure()
pyplot.spy(Ainv);

# count_nonzero(Ainv) is equivalent to the previous
# numpy.prod(Ainv[numpy.nonzero(Ainv)].shape), just clearer.
print(f"Percentage of nonzeros: {100*numpy.count_nonzero(Ainv) / numpy.prod(A.shape):0.2f}%")

# ### Speeding up the solve
#
# So we're doing a lot of work storing lots of redundant zeros, and potentially lots of redundant work solving the equation.
#
# Instead, we can use a *sparse* matrix, provided by scipy.

# +
import scipy.sparse as sp
import scipy.sparse.linalg

def laplacian_sparse(grid, f, g):
    """Assemble the 2D Dirichlet Laplacian on `grid` as a sparse matrix.

    :arg grid: the Grid to discretise on.
    :arg f: callable (X, Y) -> right hand side values.
    :arg g: callable (X, Y) -> Dirichlet boundary values.
    :returns: a pair (A, rhs) with A in CSR format and rhs shaped
        (Nx, Ny); Dirichlet contributions are lifted onto rhs so that
        interior rows contain only interior columns.
    """
    ndof = numpy.asarray(grid.ndof, dtype=int)
    # LIL format is cheap to fill incrementally; converted to CSR at the end.
    A = sp.lil_matrix((ndof, ndof))
    X, Y = grid.XY
    u0 = g(X, Y)
    rhs = f(X, Y)
    # mask is 1 on interior points, 0 on the Dirichlet boundary.
    mask = numpy.zeros_like(X, dtype=int)
    mask[1:-1,1:-1] = 1
    mask = mask.flatten()
    stencilx = 1/grid.hx**2 * numpy.array([-1, 0, 2, 0, -1])
    stencily = 1/grid.hy**2 * numpy.array([0, -1, 2, -1, 0])
    stencil = stencilx + stencily
    for i in range(grid.Nx):
        for j in range(grid.Ny):
            row = grid.alpha(i, j)
            if mask[row] == 0:
                # Dirichlet bc
                A[row, row] = 1
                rhs[i, j] = u0[i, j]
            else:
                stencili = numpy.asarray([grid.alpha(*ij) for ij in [(i-1, j), (i, j-1), (i, j), (i, j+1), (i+1, j)]])
                smask = mask[stencili]
                cols = stencili[smask == 1]
                A[row, cols] = stencil[smask == 1]
                # Lift boundary contribution to RHS
                bdycols = stencili[smask == 0]
                rhs[i, j] -= stencil[smask == 0] @ u0.reshape(-1)[bdycols]
    return A.tocsr(), rhs
# -

grid = Grid(41, 41)
A, rhs = laplacian_sparse(grid, f, g)
u = sp.linalg.spsolve(A, rhs.flatten(), use_umfpack=True)
grid.contourf(u, levels=20);

# ### Exercise
#
# Check this solution actually converges at second order.
import cProfile, pstats
profiles = []
# The sequence previously repeated 80 and skipped 90; restore the intended
# monotone grid sizes (matching the earlier dense-profiling cell).
ncell = [40, 50, 60, 70, 80, 90, 100]
for n in ncell:
    prof = cProfile.Profile()
    prof.enable()
    grid = Grid(n+1, n+1)
    A, rhs = laplacian_sparse(grid, f, g)
    u = sp.linalg.spsolve(A, rhs.flatten())
    prof.disable()
    profiles.append(prof)

for n, p in zip(ncell, profiles):
    print(f"***** Profile for {n}x{n} grid *****")
    pstats.Stats(p).sort_stats(pstats.SortKey.TIME).print_stats(3);

# #### A more efficient implementation
#
# OK, so now the creation of the matrix is the most expensive bit. We can try and fix this by directly creating a CSR matrix (rather than this linked-list thing), and jitting it with numba. We need to refactor the code a little (to move the jittable region into a separate function).
#
# For a CSR matrix we need to guess how big our data structures will be. Since we will have at most 5 entries per row, the value and colidx arrays are five times the number of degrees of freedom (rows).

# +
@numba.jit(nopython=True) # warn if we couldn't compile
def make_csr(stencil, mask, hx, hy, Nx, Ny, u0, rhs):
    """Build the CSR arrays (ai, aj, av) for the masked 5-point stencil.

    :arg stencil: the 5 stencil weights, ordered (i-1,j), (i,j-1), (i,j),
        (i,j+1), (i+1,j).
    :arg mask: flat array, 0 on Dirichlet (boundary) rows, 1 on interior rows.
    :arg hx, hy: grid spacings; accepted for interface compatibility but
        unused here (the stencil is already scaled by the caller).
    :arg Nx, Ny: number of points in each direction.
    :arg u0: boundary values (flattened internally).
    :arg rhs: right hand side, modified in place to lift boundary values.
    :returns: the CSR triple (ai, aj, av).
    """
    ndof = Nx * Ny
    ai = numpy.zeros(ndof+1, dtype=numpy.int32)
    # At most 5 entries per row, so these over-allocate for boundary rows.
    aj = numpy.zeros(ndof*5, dtype=numpy.int32)
    av = numpy.zeros(ndof*5, dtype=numpy.float64)
    ajptr = 0
    u0 = u0.flatten()
    for i in range(Nx):
        for j in range(Ny):
            row = i*Ny + j
            if mask[row] == 0:
                # Dirichlet row: identity row, boundary value on the RHS.
                ai[row+1] = 1 + ai[row]
                aj[ajptr] = row
                av[ajptr] = 1
                ajptr += 1
                rhs[i, j] = u0[row]
            else:
                stencili = numpy.asarray([i_*Ny + j_ for (i_, j_) in [(i-1, j), (i, j-1), (i, j), (i, j+1), (i+1, j)]])
                smask = mask[stencili]
                # Interior neighbours go into the matrix...
                cols = stencili[smask == 1]
                ncol = len(cols)
                ai[row+1] = len(cols) + ai[row]
                aj[ajptr:ajptr+ncol] = cols
                av[ajptr:ajptr+ncol] = stencil[smask == 1]
                ajptr += ncol
                # ...boundary neighbours are lifted to the RHS.
                bdycols = stencili[smask == 0]
                rhs[i, j] -= stencil[smask == 0] @ u0[bdycols]
    return ai, aj, av

def laplacian_sparse_csr(grid, f, g):
    """Assemble the 2D Dirichlet Laplacian directly in CSR format.

    Same contract as `laplacian_sparse`, but the assembly loop lives in the
    numba-jitted `make_csr` helper for speed.
    """
    ndof = numpy.asarray(grid.ndof, dtype=int)
    X, Y = grid.XY
    u0 = g(X, Y)
    rhs = f(X, Y)
    mask = numpy.zeros_like(X, dtype=int)
    mask[1:-1,1:-1] = 1
    mask = mask.flatten()
    stencilx = 1/grid.hx**2 * numpy.array([-1, 0, 2, 0, -1])
    stencily = 1/grid.hy**2 * numpy.array([0, -1, 2, -1, 0])
    stencil = stencilx + stencily
    ai, aj, av = make_csr(stencil, mask, grid.hx, grid.hy, grid.Nx, grid.Ny, u0, rhs)
    return sp.csr_matrix((av, aj, ai), shape=(ndof, ndof)), rhs
# -

import cProfile, pstats
profiles = []
# Same duplicated-80 typo fixed here.
ncell = [40, 50, 60, 70, 80, 90, 100]
for n in ncell:
    prof = cProfile.Profile()
    prof.enable()
    grid = Grid(n+1, n+1)
    A, rhs = laplacian_sparse_csr(grid, f, g)
    u = sp.linalg.spsolve(A, rhs.flatten())
    prof.disable()
    profiles.append(prof)

for n, p in zip(ncell, profiles):
    print(f"***** Profile for {n}x{n} grid *****")
    pstats.Stats(p).sort_stats(pstats.SortKey.TIME).print_stats(3);

# ### Algorithmic performance of sparse direct solvers
#
# Now, finally, the solve is back at the top of our profile. Let's see how it scales with the number of dofs.
#
# We split the work into three phases
#
# 1. Assembly of the operator (matrix $A$)
# 2. Factoring the matrix into sparse $LU$ form
# 3. Solving the problem by forward-backward substitution.
#
# For these sparse operators and the sparse direct solver we expect complexity
#
# 1. Assembly $\mathcal{O}(n)$
# 2. Factoring $\mathcal{O}(n^{3/2})$
# 3. Solve $\mathcal{O}(n \log n)$

# Reminder: Add LU forwards-backward substitution here
import time
ns = numpy.geomspace(16, 1024, num=7, dtype=numpy.int32)
factor_times = []
lu_solve_times = []
assemble_times = []
for n in ns:
    grid = Grid(n+1,n+1)
    start = time.time()
    A, rhs = laplacian_sparse_csr(grid, f, g)
    end = time.time()
    assemble_times.append(end - start)
    print(f"Assemble on {n}x{n} grid took {assemble_times[-1]:.2f}s")
    start = time.time()
    lu = sp.linalg.splu(A.tocsc())
    end = time.time()
    factor_times.append(end - start)
    print(f"Factor on {n}x{n} grid took {factor_times[-1]:.2f}s")
    start = time.time()
    u = lu.solve(rhs.flatten())
    end = time.time()
    lu_solve_times.append(end - start)
    print(f"Solve on {n}x{n} grid took {lu_solve_times[-1]:.2f}s")

grid.contourf(u, levels=20);

ndof = (ns+1)**2
pyplot.figure()
pyplot.loglog(ndof, assemble_times, "o", label="Assembly")
pyplot.loglog(ndof, factor_times, "o", label="Factor")
pyplot.loglog(ndof, lu_solve_times, "o", label="Solve")
ndof = ndof[3:]
# Raw strings so the TeX backslashes are not (mis)read as escape sequences.
pyplot.loglog(ndof, ndof/1e6, label=r"$\mathcal{O}(n)$")
pyplot.loglog(ndof, ndof**(3/2)/1e8, label=r"$\mathcal{O}(n^{3/2})$")
pyplot.loglog(ndof, ndof*numpy.log(ndof)/1e7, label=r"$\mathcal{O}(n \log n)$")
pyplot.xlabel("Number of dofs")
pyplot.ylabel("Time (s)")
pyplot.legend();

# So the factoring is clearly $n^{3/2}$, assembly looks like it's $n$. The solve itself is rather hard to judge, I suspect it's $n\log n$, but we're observing probably cache effects for the smaller problems.
#
# Having now solved stationary problems (with no time derivative) we will move on to 2D time-dependent problems, using the *heat equation* as a first example.

# ## For interest only, faster solves with multigrid
#
# This section is just to see what we get if we use a (close to) optimal algorithm.
#
# We might ask ourselves, can we do any better? The answer is yes, and if we had another 10 lectures, we'd see how. Instead, I will show two examples, the first generally applicable (for Laplacians certainly) using a solver technique called multigrid, the second exploits some special property of our problem.

import pyamg
import time
ns = numpy.geomspace(16, 2048, num=8, dtype=numpy.int32)
amg_times = []
assemble_times = []
amg_solve_times = []
for n in ns:
    grid = Grid(n+1,n+1)
    start = time.time()
    A, rhs = laplacian_sparse_csr(grid, f, g)
    end = time.time()
    assemble_times.append(end - start)
    print(f"Assemble on {n}x{n} grid took {assemble_times[-1]:.2f}s")
    start = time.time()
    null = numpy.ones((A.shape[0],1))
    amg = pyamg.smoothed_aggregation_solver(A, null, max_levels=25, max_coarse=20)
    end = time.time()
    amg_times.append(end - start)
    print(f"Build AMG on {n}x{n} grid took {amg_times[-1]:.2f}s")
    start = time.time()
    u = amg.solve(rhs.flatten(), tol=1e-10, accel="cg", cycle="F")
    end = time.time()
    amg_solve_times.append(end - start)
    print(f"Solve on {n}x{n} grid took {amg_solve_times[-1]:.2f}s")

ndof = (ns+1)**2
pyplot.figure()
pyplot.loglog(ndof, assemble_times, "o", label="Assembly")
pyplot.loglog(ndof, amg_times, "o", label="Build AMG")
pyplot.loglog(ndof, amg_solve_times, "o", label="Solve")
pyplot.loglog(ndof[3:], ndof[3:]/5e5, label=r"$\mathcal{O}(n)$")
pyplot.loglog(ndof[3:], ndof[3:]*numpy.log(ndof[3:])/3e6, label=r"$\mathcal{O}(n\log n)$")
pyplot.xlabel("Number of dofs")
pyplot.ylabel("Time (s)")
pyplot.legend();

# Here we see that assembly is still linear, but now the solve also looks to be linear, and the building of the AMG object is $n \log n$, an improvement over the previous setting.
#
# This solver exploits a natural hierarchy in the problem to build a fast solution using coarse (low-resolution) approximations of the same problem.
#
# Finally our fastest solver, though not generally applicable (it exploits a particular property of the right hand side that isn't always true).
import pyamg
import time
ns = numpy.geomspace(16, 2048, num=8, dtype=numpy.int32)
cg_solve_times = []
assemble_times = []
for n in ns:
    grid = Grid(n+1,n+1)
    start = time.time()
    A, rhs = laplacian_sparse_csr(grid, f, g)
    end = time.time()
    assemble_times.append(end - start)
    print(f"Assemble on {n}x{n} grid took {assemble_times[-1]:.2f}s")
    # Previously the timer was started twice in a row here; once is enough.
    start = time.time()
    # cg returns a (solution, info) pair; unpack it so `u` is the solution
    # vector (info != 0 would signal non-convergence).
    u, info = sp.linalg.cg(A, rhs.flatten(), tol=1e-10)
    end = time.time()
    cg_solve_times.append(end - start)
    print(f"Solve on {n}x{n} grid took {cg_solve_times[-1]:.2f}s")

ndof = (ns+1)**2
pyplot.figure()
pyplot.loglog(ndof, assemble_times, "o", label="Assembly")
pyplot.loglog(ndof, cg_solve_times, "o", label="Solve")
# Raw strings so the TeX backslashes are not (mis)read as escape sequences.
pyplot.loglog(ndof[3:], ndof[3:]/5e5, label=r"$\mathcal{O}(n)$")
pyplot.loglog(ndof[3:], ndof[3:]/3e7, label=r"$\mathcal{O}(n)$")
pyplot.xlabel("Number of dofs")
pyplot.ylabel("Time (s)")
pyplot.legend();

# This solve takes almost no time at all! In fact, all it has to do is one matrix-vector multiplication. So we could have done this in an entirely matrix-free way (avoiding the need to assemble the matrix).
material/finite-difference-II.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Implementing Linear Regression
# Quadratic Model
# y = c1*x^2 + c2*x + c3

# +
# Imports
import pandas as pd
import numpy as np
import sklearn
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn import metrics

# Plotting
import matplotlib.pyplot as plt
from matplotlib import style
style.use('fivethirtyeight')
import seaborn as sns
# %matplotlib inline
# -

# Importing dataset
dt = pd.read_csv("../input/q_1.csv")

dt.describe()

sns.lmplot(x="x",y="y",data=dt)

# Fit y against the feature column only.  Previously this was
# `reg.fit(dt, dt["y"])`, which passed the whole frame -- including the
# target column `y` itself -- as the design matrix (target leakage: the
# model would trivially learn y from y).
reg = LinearRegression()
reg.fit(dt[["x"]], dt["y"])
15XW62 - Machine Learning/0/q_1/q_1.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 0.6.0 # language: julia # name: julia-0.6 # --- # + using DifferentialEquations, ParameterizedFunctions, ODE, ODEInterfaceDiffEq, LSODA f(t) = 0.25*sin(t)^2 g = @ode_def RigidBody begin dy1 = I₁*y2*y3 dy2 = I₂*y1*y3 dy3 = I₃*y1*y2 + f(t) end I₁ I₂ I₃ p = [-2.0,1.25,-0.5] prob = ODEProblem(g,[1.0;0.0;0.9],(0.0,10.0),p) abstols = 1./10.^(6:13) reltols = 1./10.^(3:10); sol = solve(prob,Vern7(),abstol=1/10^14,reltol=1/10^14) test_sol = TestSolution(sol) using Plots; gr() # - sol = solve(prob,Vern7()) plot(sol) setups = [Dict(:alg=>DP5()) #Dict(:alg=>ode45()) # fails Dict(:alg=>dopri5()) Dict(:alg=>Tsit5()) Dict(:alg=>Vern6()) ] wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,save_everystep=true,numruns=1000,maxiters=10000) plot(wp) # The DifferentialEquations.jl algorithms once again pull ahead. This is the first benchmark we've ran where `ode45` doesn't fail. However, it still doesn't do as well as `Tsit5`. One reason why it does so well is that the maximum norm that ODE.jl uses (as opposed to the L2 norm of Sundials, DifferentialEquations, and ODEInterface) seems to do really well on this problem. `dopri5` does surprisingly bad in this test. 
# ## Higher Order setups = [Dict(:alg=>DP8()) #Dict(:alg=>ode78()) # fails Dict(:alg=>Vern7()) Dict(:alg=>Vern8()) Dict(:alg=>dop853()) Dict(:alg=>Vern6()) ] wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,numruns=1000,maxiters=1000) plot(wp) setups = [Dict(:alg=>Vern7()) Dict(:alg=>Vern8()) Dict(:alg=>odex()) Dict(:alg=>CVODE_Adams()) Dict(:alg=>lsoda()) Dict(:alg=>ddeabm()) Dict(:alg=>ARKODE(Sundials.Explicit(),order=6)) ] wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,numruns=1000,maxiters=1000) plot(wp) # ### Conclusion # # Once again, the OrdinaryDiffEq.jl pull far ahead in terms of speed and accuracy.
NonStiffODE/RigidBody Work-Precision Diagrams.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # MNIST Image Classification with TensorFlow on Cloud ML Engine # # This notebook demonstrates how to implement different image models on MNIST using Estimator. # # Note the MODEL_TYPE; change it to try out different models # + import os PROJECT = "cloud-training-demos" # REPLACE WITH YOUR PROJECT ID BUCKET = "cloud-training-demos-ml" # REPLACE WITH YOUR BUCKET NAME REGION = "us-central1" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1 MODEL_TYPE = "dnn" # "linear", "dnn", "dnn_dropout", or "cnn" # Do not change these os.environ["PROJECT"] = PROJECT os.environ["BUCKET"] = BUCKET os.environ["REGION"] = REGION os.environ["MODEL_TYPE"] = MODEL_TYPE os.environ["TFVERSION"] = "1.13" # Tensorflow version # + language="bash" # gcloud config set project $PROJECT # gcloud config set compute/region $REGION # - # ## Run as a Python module # # In the previous notebook (mnist_linear.ipynb) we ran our code directly from the notebook. # # Now since we want to run our code on Cloud ML Engine, we've packaged it as a python module. # # The `model.py` and `task.py` containing the model code is in <a href="mnistmodel/trainer">mnistmodel/trainer</a> # # **Complete the TODOs in `model.py` before proceeding!** # # Once you've completed the TODOs, set MODEL_TYPE and run it locally for a few steps to test the code. # + language="bash" # rm -rf mnistmodel.tar.gz mnist_trained # gcloud ai-platform local train \ # --module-name=trainer.task \ # --package-path=${PWD}/mnistmodel/trainer \ # -- \ # --output_dir=${PWD}/mnist_trained \ # --train_steps=100 \ # --learning_rate=0.01 \ # --model=$MODEL_TYPE # - # **Now, let's do it on Cloud ML Engine so we can train on GPU:** `--scale-tier=BASIC_GPU` # # Note the GPU speed up depends on the model type. 
You'll notice the more complex CNN model trains significantly faster on GPU, however the speed up on the simpler models is not as pronounced. # + language="bash" # OUTDIR=gs://${BUCKET}/mnist/trained_${MODEL_TYPE} # JOBNAME=mnist_${MODEL_TYPE}_$(date -u +%y%m%d_%H%M%S) # echo $OUTDIR $REGION $JOBNAME # gsutil -m rm -rf $OUTDIR # gcloud ai-platform jobs submit training $JOBNAME \ # --region=$REGION \ # --module-name=trainer.task \ # --package-path=${PWD}/mnistmodel/trainer \ # --job-dir=$OUTDIR \ # --staging-bucket=gs://$BUCKET \ # --scale-tier=BASIC_GPU \ # --runtime-version=$TFVERSION \ # -- \ # --output_dir=$OUTDIR \ # --train_steps=10000 --learning_rate=0.01 --train_batch_size=512 \ # --model=$MODEL_TYPE --batch_norm # - # ## Monitor training with TensorBoard # # To activate TensorBoard within the JupyterLab UI navigate to "<b>File</b>" - "<b>New Launcher</b>". Then double-click the 'Tensorboard' icon on the bottom row. # # TensorBoard 1 will appear in the new tab. Navigate through the three tabs to see the active TensorBoard. The 'Graphs' and 'Projector' tabs offer very interesting information including the ability to replay the tests. # # You may close the TensorBoard tab when you are finished exploring. # ## Deploying and predicting with model # # Deploy the model: # + language="bash" # MODEL_NAME="mnist" # MODEL_VERSION=${MODEL_TYPE} # MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/mnist/trained_${MODEL_TYPE}/export/exporter | tail -1) # echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... this will take a few minutes" # #gcloud ai-platform versions delete ${MODEL_VERSION} --model ${MODEL_NAME} # #gcloud ai-platform models delete ${MODEL_NAME} # gcloud ai-platform models create ${MODEL_NAME} --regions $REGION # gcloud ai-platform versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version=$TFVERSION # - # To predict with the model, let's take one of the example images. 
# + import json, codecs import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data HEIGHT = 28 WIDTH = 28 mnist = input_data.read_data_sets("mnist/data", one_hot = True, reshape = False) IMGNO = 5 #CHANGE THIS to get different images jsondata = {"image": mnist.test.images[IMGNO].reshape(HEIGHT, WIDTH).tolist()} json.dump(jsondata, codecs.open("test.json", "w", encoding = "utf-8")) plt.imshow(mnist.test.images[IMGNO].reshape(HEIGHT, WIDTH)); # - # Send it to the prediction service # + language="bash" # gcloud ai-platform predict \ # --model=mnist \ # --version=${MODEL_TYPE} \ # --json-instances=./test.json # - # <pre> # # Copyright 2017 Google Inc. All Rights Reserved. # # # # Licensed under the Apache License, Version 2.0 (the "License"); # # you may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # # limitations under the License. # </pre>
courses/machine_learning/deepdive/08_image/labs/mnist_models.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Demo Notebook for Copying a Component # Start by importing QisKit Metal: import qiskit_metal as metal from qiskit_metal import designs, draw from qiskit_metal import MetalGUI, Dict #, open_docs # Then let's fire up the GUI: design = designs.DesignPlanar() gui = MetalGUI(design) # Now we'll put one transmon at the origin: # Let's start by putting a transmon at the origin: from qiskit_metal.qlibrary.qubits.transmon_pocket import TransmonPocket design.overwrite_enabled = True q1 = TransmonPocket(design, 'Q1') gui.rebuild() gui.autoscale() # First, we'll copy the component and then manually modify the coordinates to be at (1,0) instead of (0,0): # Now let's copy the transmon at the origin and put it at position (0,1): q1_copy = design.copy_qcomponent(q1, 'Q1_copy') q1_copy.options['pos_x']='1.0mm' gui.rebuild() gui.autoscale() # Now, we'll copy the original component and in the same step we'll move the copy to (-1,0) by passing a dictionary to the "copy_qcomponent" command: # Now let's copy the transmon at the origin and put it at position (0,-1): q1_anothercopy = design.copy_qcomponent(q1,'Q1_another_copy', dict(pos_x='-1.0mm')) gui.rebuild() gui.autoscale() # We can copy multiple components at once using the "copy_multiple_qcomponents" command. 
Here's an example that copies the three components we've just created (Q1 and the two copies: Q1_copy and Q1_another_copy) and moves them up by 2.0mm each: # Now let's copy all three components at the same time, moving them up by +2mm in the y-direction: newcopies = design.copy_multiple_qcomponents([q1, q1_copy, q1_anothercopy], ['Q3', 'Q4', 'Q5'], [dict(pos_y='1.0mm'), dict(pos_y='2.0mm'), dict(pos_y='3.0mm')]) gui.rebuild() gui.autoscale() # Note that we can also copy multiple qcomponents without passing dictionaries. In this case, the copied qcomponents will sit on top of of the original qcomponents in the layout: # Example of copying without giving any dictionary values; qcomponents will sit on top of the originals! newcopies2 = design.copy_multiple_qcomponents([q1, q1_copy, q1_anothercopy], ['Q6', 'Q7', 'Q8']) gui.rebuild() gui.autoscale() # Note also that we can copy multiple qcomponents but only give a dictionary to one of them. The other two dictionaries still need to exist but can be empty: # Copy the three original components but only give a dictionary for the first one; other two dictionaries still need to exist but can be empty: newcopies3 = design.copy_multiple_qcomponents([q1, q1_copy, q1_anothercopy], ['Q9', 'Q10', 'Q11'], [dict(pos_y='-1.0mm'), dict(), dict()]) gui.rebuild() gui.autoscale()
tutorials/2 Front End User/2.1 My first custom QComponent/Copying_Component.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Numpy.Random Analysis # This assignment relates to the numpy.random package in Python. The following Jupyter Notebook is designed to provide an analysis of the package. # ### Purpose of the package # #### Numpy.random # Numpy is a library package for the Python programming language to support the computation of large multi-dimensional arrays and matrices. # # The purpose of the numpy.random submodule is to pseudogenerate random variables for computational purposes within arrays and matrices. With Numpy.random it is to easily perform statistical distributions using python. Statistical distributions and their interpretation can be fundamental to the quality of any piece of analysis. # #### Why use Numpy.random instead of Random.random? # Numpy.random is therefore a random generator. But a random generator already exists in python by way of the random.random function. Why would we need another? # # The random.random libary is simply considered a little more lightweight and thus the numpy.random submodule has become popular given its wider range of option and probability distributions. # # Both random libraries utilise the Mersenne twister sequence to generate their random numbers and are deterministic, so the numbers generated are pseudorandom, not truly random. # + import numpy.random as np x = np.rand(3,5) y = np.rand(1000) y # %matplotlib inline import matplotlib.pyplot as plt plt.hist(y) plt.show() # - # ### "Simple random data" and "Permutations" functions # This section will introduce and explain the "Simple random data" and "Permutations" functions. # # #### Simple random data # The simple random data functions are a set of random function that return different types of random numbers depending on what outputs are required. 
# # The basic rand() function will return values in a specific range, eg [0,1) whereas the randn() function will return values within a standard normal distribution which is a distribution of integers with a mean of 0 and a standard deviation of 1. # + import numpy.random as np import matplotlib.pyplot as plt # %matplotlib inline np.randn(2,2) # - # Expanding beyond this we can look at sampling functions where contained within a continuous uniform distribution (where probability of any number is constant) we can generate random floats and of predetermined shape and perform operations on them. # + import numpy.random as np import matplotlib.pyplot as plt # %matplotlib inline 5 * np.random_sample(5) # - # The purpose of Simple random data functions is to produce outputs that are single values or arrays of data that are randomly (or pseudorandomly) chosen. # #### Permutations # The role of permutations functions within the numpy.random package is to take randomly generated values or arrays and manipulated their order randomly. The random range or sequence is then said to be permuted randomly. The key is that the original random data is copied before the permutation takes place. Below we can show how an array can be passed into the arange() function in the standard numpy library, subsequently reshaped, then randomised in the permutation() function utilising the numpy.random library. np.permutation([7,8,4]) import numpy arr_1 = numpy.arange(16) arr_2 = numpy.arange(16).reshape((4,4)) numpy.arange(16), np.permutation(arr_2) # ### Discussion of distribution functions # This section will explain the key differences between 5 different distributions provided for in the package. # # #### Standard normal distribution # The standard normal distribution is a distribution of integers whose mean is 0 and whose standard deviation is 1. A standard normal distribution might be used in industry to ensure data is representative of a wider population. 
# # Plotting the distribution below we can validate that the standard normal distribution looks as expected (like a bell curve) and also that the mean of 0 and standard deviation of 1 are similarly as expected. # + # %matplotlib inline import numpy.random as np import matplotlib.pyplot as plt x = np.standard_normal(size=(10000,2)) plt.hist(x) # - import numpy x = np.standard_normal(size=(10000,2)) y = numpy.std(x) z = numpy.mean(x) y, z # #### Uniform distribution # The uniform distribution or rectangular distribution is a distribution where all intervals across the distribution have equal probability. A real world example of a uniform distribution is the first ball drawn in the lottery, the first card drawn from a deck of cards or a simple toss of a coin. # # Plotting the uniform distribution below we can see that each interval is approximately equal and has approximately the same rate of occurence. # + import numpy.random as np import matplotlib.pyplot as plt # %matplotlib inline x = np.uniform(1,2,10000) plt.hist(x) # - # #### Poisson Distribution # # The best explanation of the Poisson distribution is that is a distribution that has the following properties: # # "It is a measurement of outcomes that can be classified as successes or failures. # The average number of successes (μ) that occurs in a specified region is known. # The probability that a success will occur is proportional to the size of the region. # The probability that a success will occur in an extremely small region is virtually zero" # # A real world example is the number of bankruptcies filed in one year. The histogram below is based on the idea that the lamda or interval is 5 and that our sample size is 2000 small companies. It shows us that bankruptcies can be forecasted for up to 14 in the given year, albeit with remote probability. The more likely outcome will be from 3 to 8 filings. 
# + import numpy.random as np import matplotlib.pyplot as plt # %matplotlib inline x = np.poisson(5,2000) plt.hist(x) # - # #### Rayleigh distribution # # The Rayleigh distribution is "a special case of the Weibull distribution with a scale parameter of 2. When a Rayleigh is set with a shape parameter (σ) of 1, it is equal to a chi square distribution with 2 degrees of freedom" # # A real world example of the Rayleigh distribution is the measurement of wave heights. If the mode value of the wave height was determined, the Rayleigh distribution could be used to calculate the probability of the waves being 2 times larger for instance. # that approximately 1000 of these waves are determined to be greater than 10m - 2 times the mode. # + import numpy.random as np import matplotlib.pyplot as plt # %matplotlib inline x = np.rayleigh(5,10000) plt.hist(x) # - # #### Geometric distribution # The geometric distribution is defined as "a discrete Random Sampling Distribution. The sampling is from a series of independent trials each of which may have outcomes which fall into one of two classifications. The Geometric Distribution generates the probability of X-trials required until the first occurrence is obtained. The probability of occurrence of each classification remains constant for each trial. The outcomes of each successive trial also remains independent. This sampling situation is sometimes referred to as a Bernoulli Trial or Process" # # A real world example of the geometric distribution can be how many customers will walk into a store before a sale is made. As the probability of occurence for each classification remains constant for each trial but the probability we are looking for is the first occurence or first sale. Outcomes from one customer to the next are independent. # # Below is a histogram of a random geometric distribution where the probability of a sale is assumed to be 0.2 and the number of trials is 50. 
The distribution shows that the event (sales) has quite a high chance of occurence within 5 trials (visits) # + import numpy.random as np import matplotlib.pyplot as plt # %matplotlib inline x = np.geometric(0.2,50) plt.hist(x) # - # ### Seeds in pseudorandom numbers # # #### Random numbers and their uses # # # # Pseudorandom numbers come from the concept of random numbers. The necessity of random numbers in the modern world is only increasing and have many everyday applications from encryption and tokenisation to machine learning and computer security applications. Given however that computers cannot generate genuinely random numbers the best alternative is psuedorandom numbers. # # Psuedorandom numbers are numbers that appear random and exhibit statistical randomness but were generated using a deterministic process. These numbers can also be stored or recalled later which makes them particularly useful and advantageous for encyption and tokenisation. This is done using the seeds. # #### The use of seeds # Seeds are the beginning numbers or labels to start or initialise pseudorandom number generation. Defining a seed is important as the an array of random numbers will be stored and associated with that seed until it is reset. If no seed is specified the seed will be automatically determined by the state of the computer system, eg the clock. # # Psuedorandom numbers are generated in sequences. These sequences are deterministic and seeded with an initial number. This is essentially a labelling system that associates your generation of random numbers under the seed with the seed itself. This means if you recall the seed you recall the random number. # # We can see below that the random numbers associated with the seed label "10" can be easily recalled and reproduced in an entirely deterministic fashion by reaffirming our seed label and asking for 5 more random numbers. They are the same. 
# + import numpy.random as np import matplotlib.pyplot as plt # %matplotlib inline np.seed(10) np.rand(5) # + import numpy.random as np import matplotlib.pyplot as plt # %matplotlib inline np.seed(10) np.rand(5) # - # #### Where does the data come from? # The data for the pseudorandom numbers likely comes from an internal repository or pool of data in the computer system. User keystrokes or the movements of input/output devices are converted into numbers and replenish the pool of data constantly. A new seed will draw a specified number of pseudorandom numbers from the pool and will be associated with that same explicit set of numbers until it is reset. # #### Is pseudorandom number generation secure? # According to a recent article in the BBC https://www.bbc.com/news/technology-33839925 Encryption packages based on pseudorandom number generation may not be as secure as once thought. # # The issue lies chiefly in the data pool described above where a low entropy environment is present in the computer system. In this case the frequency of the data stream may be quite low and so prove more predictable and less secure than an equivalent high entropy system. The article cites this environment as more susceptible to a brute force attack as seeds for new numbers would be generated far less regularly.
Numpy-Random.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import warnings warnings.filterwarnings('ignore') # %matplotlib inline # + import numpy as np import scipy.stats as stats import seaborn as sns import matplotlib.pyplot as plt import pandas as pd import random import patsy import sklearn.linear_model as linear sns.set(style="whitegrid") import sys sys.path.append('../resources') import fundamentals.models as models # - # # Transformations # # At several points in the previous discussions we have hinted at the possibility that transformations might improve the performance and or interpretability of a linear model. In this section, we'll talk about a variety of transformations that accomplish these goals. # ## Scaling # # Let's review the two jobs that a single $\beta$ does. # # Imagine we want to predict some $y$ that has a range of 27 to 89 and we want to model it with an $x$ with the range 345 to 763. Our first observation is that if we have an equation of the form $y = \beta_0 + \beta_1 x$ then $\beta_1$ must *at least* be on the order of 0.# (1/10th) because $y$ and $x$ differ in magnitude by that much. Therefore, before before $\beta_1$ does anything else, it must *scale* the value of $x$ to be of the same magnitude of $y$. # # It follows that if we start adding features of different magnitudes, some of them in the 000's and some in the 0.000's, then the $\beta$'s must all adjust to the scales of their respective features. This makes it difficult to determine the relative contributions of each predictor (as measured by $\beta_i$). # # Additionally, we have the problem of interpreting $\beta_0$, the intercept, when all $x_i$ are zero. It doesn't make any sense for someone to have zero IQ, zero height, zero weight, etc. 
# # Therefore, in order to make coefficients more interpreterable, it is often desirable to transform the variables ahead of time to a common scale. There are two such transformations: # # 1. subtract each value $x_i$ from its mean $\bar{x_i}$. This creates mean *centered* feature. # 2. Take mean centered data and divide through by $x_i$'s standard deviation, $\sigma_{x_i}$. This creates a mean *scaled* feature. # # We only apply this transformation to *numerical* features and not dummy variables. # # Mean scaling accomplishes two things: # # 1. Zero is a meaningful value for each $x_i$, the mean value of $x_i$ and thus the intercept $\beta_0$ is interpretable as the value when all $x_i$ are at their mean. # 2. Each variable is projected into the standard normal distribution (mean of 0 and standard deviation of 1) so that they're all on the same scale. A unit change is a change of 1 standard deviation in the standard normal distribution. # # Mean *centering* accomplishes only the first thing. # # However, if you use mean scaling for a *logistic* regression, then "+1" is an entire standard deviation of data. This means that "Divide by 4" won't work and you'll need to figure out what "+1" in real units is in standard deviations. Often it is better to just use mean *centered* data for logistic regression. # # Finally, mean scaling puts the units in "standard units" and mean centering leaves the units in the "natural" units such as feet, square feet, IQ points, etc. This is often desirable. # + def mean_scale( df, variable): x_bar = df[ variable].mean() std = df[ variable].std() scaled_variable = (df[ variable] - x_bar) / (2.0 * std) # suggested by Gelman df[ variable + "_scaled"] = scaled_variable def mean_center( df, variable): x_bar = df[ variable].mean() scaled_variable = df[ variable] - x_bar df[ variable + "_centered"] = scaled_variable # - # Back to child IQs... 
child_iq = pd.read_csv( "../resources/data/child_iq.tsv", sep="\t") mean_scale(child_iq, "child_iq") mean_scale(child_iq, "mom_iq") mean_scale(child_iq, "mom_age") child_iq.head() # Here's the unscaled model: model = "child_iq ~ mom_iq + mom_age" result = models.bootstrap_linear_regression(model, data=child_iq) models.describe_bootstrap_lr(result) # Although mom_iq is in the same exact units as child_iq, mom_age and child_iq do not share the same units and they are even of slightly different magnitudes. We know that neither mom_iq nor mom_age can be zero so what is $\beta_0$? This is the same model and the same problem as before. # # Here's a completely scaled model: model = "child_iq_scaled ~ mom_iq_scaled + mom_age_scaled" result = models.bootstrap_linear_regression(model, data=child_iq) models.describe_bootstrap_lr(result) # How do we interpret this model? At the mean value of mom_iq (=0) and mom_age (=0), child_iq is at *its* mean. If $\beta_0$ had been 6.39 then we would say, "at the mean values of mom_iq and mom_age, we add 6.39 to mean of the child IQ". This gets are the heart of what we've been saying all along: linear models are about estimating means. # # One of the strange side effects of mean scaled (and mean centered) models is that you need to calculate and keep all the means around so you know what they are. # # # It might make a bit more sense to *not* scale the target variable: model = "child_iq ~ mom_iq_scaled + mom_age_scaled" result = models.bootstrap_linear_regression(model, data=child_iq) models.describe_bootstrap_lr(result) # This is easier to interpret. When mom_iq and mom_age are at their mean values, the mean child IQ is 86.8. Nice. # # It's worth noting that this doesn't improve the *performance* of the model, only the interpretability. This is because we're only considering linear transformations of $x_i$. More about *that* later. 
# ## Natural Baselines # # Sometimes it's more natural to center the data against a natural baseline instead of the mean. That is, instead of subtracting the data from the mean, we subtract from a baseline. For example, in the case of IQ, there already exists a baseline: the average IQ is defined as an IQ of 100. You might want to define your coefficients in terms of a baseline IQ of 100. Similarly, if 30 MPG were a Federal mandate or goal, you might want to define a regression dealing with gas mileage in terms of 30 MPG by subtracting it from all the values in the data set. # # Let's see. child_iq["child_iq100"] = child_iq.child_iq - 100 child_iq["mom_iq100"] = child_iq.mom_iq - 100 model = "child_iq100 ~ mom_iq100 + mom_age_scaled" result = models.bootstrap_linear_regression(model, data=child_iq) models.describe_bootstrap_lr(result) # How do we interpret *this* model? If mom_iq is at the baseline (mom_iq = 0) and mom_age is at the mean (mom_age = 0), then child_iq will be 13.20 points *below* the baseline: # # $100 - 13.20 = 86.8$ # # which is the same number we've gotten before. Again, this just changes the interpretability. # ## Save the Parameters # # There is one caveat with creating interpretable models using these methods. If you create a regression with transformed variables, the coefficients are now defined in terms of *transformed* variables and you must transform any variables you use the regression equation on. For example, if a new mother came up and wanted us to predict the IQ of her child, we'd need to scale her IQ of, say, 112, with the same mean and standard deviation we used to build the model. Whenever you do any kind of transformation that involves parameters, you should save those parameter values because you will need to use them to make predictions from future data. # # ## Transformations of Variables # # There are other transformations that can be applied to the raw data that don't just improve interpretability: they improve performance. 
We have already seen the problems we might discover by looking at the residuals of a linear regression. Let's see how we might fix them. # # Remember our noiseless synthetic data from the Residuals discussion: np.random.seed(6734745) # When making the synthetic data, we leave out the intermediate calculation of $x_1^2$ to emphasize that we don't know that it exists. x1 = np.array([10*np.random.random() for _ in range( 100)]) ys = 1.00 + 2.5 * x1 ** 2 + stats.norm.rvs(0, 30, 100) data = pd.DataFrame({"y": ys, "x1": x1}) result = models.bootstrap_linear_regression("y ~ x1", data) models.describe_bootstrap_lr(result) # The $R^2$ is decent (64%) but the error is high. Let's plot the data: # + figure = plt.figure(figsize=(10,6)) axes = figure.add_subplot(2, 1, 1) axes.scatter(data.x1, data.y, color="dimgray", alpha=0.5) beta = result["coefficients"] axes.plot(data.x1, [beta[ 0] + beta[ 1] * x for x in data.x1], '-', color="firebrick") axes.set_title(result[ "formula"]) axes.set_xlim((0, 10)) axes.set_xlabel( "x_1") axes = figure.add_subplot(2, 1, 2) keyed_values = sorted(zip(data.x1, result["residuals"]), key=lambda x: x[ 0]) residuals = [x[ 1][ 0] for x in keyed_values] axes.plot(list(range(0, result[ "n"])), residuals, '.', color="dimgray", alpha=0.75) axes.axhline(y=0.0, xmin=0, xmax=result[ "n"], c="black", alpha=0.5) axes.set_title( "x_1 v. residuals") axes.set_xlabel( "x_1 sorted") axes.set_ylim((-60.0, 60.0)) plt.show() plt.close() # - # There's definitely a "bend" to the residuals which indicates that we're over underestimating at low and high values of $x_1$ and overestimating at medium values of $x_1$. This suggests that a transformation is in order...*which* transformation comes from experience of looking at residuals and at histograms of features. 
# # Since we know what the right answer is, let's see what happens when we create a new feature $x_2 = x_1^2$ and use it in our model: data["x2"] = data.x1 ** 2 result = models.bootstrap_linear_regression("y ~ x2", data) models.describe_bootstrap_lr(result) # This model is much better. The error is smaller, the $R^2$ is larger, and the $\beta_1$ coefficient is almost the "true" value. Let's plot the data and residuals: # + figure = plt.figure(figsize=(10,6)) axes = figure.add_subplot(2, 1, 1) axes.scatter(data.x2, data.y, color="dimgray", alpha=0.5) beta = result["coefficients"] axes.plot(data.x2, [beta[ 0] + beta[ 1] * x for x in data.x2], '-', color="firebrick") axes.set_title(result[ "formula"]) axes.set_xlim((0, 10)) axes = figure.add_subplot(2, 1, 2) keyed_values = sorted(zip(data.x2, result["residuals"]), key=lambda x: x[ 0]) residuals = [x[ 1][ 0] for x in keyed_values] axes.plot(list(range(0, result[ "n"])), residuals, '.', color="dimgray", alpha=0.75) axes.axhline(y=0.0, xmin=0, xmax=result[ "n"], c="black", alpha=0.5) axes.set_title( "x_2 v. residuals") axes.set_xlabel( "x_2 sorted") axes.set_ylim((-60.0, 60.0)) plt.show() plt.close() # - # Perhaps the most surprising thing here is that the model is still linear. "Linear" doesn't mean that you can't have higher degree polynomials: # # $y = \beta_0 + \beta_1 x_1^2 + \beta_2 log(x_2) + \beta_3 \sqrt{x_3} + \epsilon$ # # It means you can't have: # # $y = \beta_0 + \beta_1^{x_1}$ # # And even the last one isn't completely impossible as we will see. 
To drive the point home, let's show the model in $y-x_1$-space: # + figure = plt.figure(figsize=(10,6)) axes = figure.add_subplot(1, 1, 1) axes.scatter(data.x1, data.y, color="dimgray", alpha=0.5) beta = result["coefficients"] data.sort_values("x1", inplace=True) axes.plot(data.x1, [beta[ 0] + beta[ 1] * x for x in data.x2], '-', color="firebrick") axes.set_title(result[ "formula"]) axes.set_xlim((0, 10)) axes.set_xlabel( "x_1") plt.show() plt.close() # - # The challenge will be determining what transformation to use. There are really a fairly limited number and they tend to be related to the mathematical distribution of the data or the process under consideration. Think about what it might mean that the effect is on the *square* of the variable, say, age. Similarly, because earnings or income are often exponentially distributed, it is common to use the square-root, reciprocal, or log of earnings instead of raw earnings. The kind of transformation you try may tie back to your EDA of the variables. # # Say the coefficient, $\beta_{age}$, is 2. We start at $age = 0$ and move to $age = 1$. This means $age^2$ moves from 0 to 1 as well. The change in $y$ is $2 * (1 - 0) = 2$. What happens when $age$ moves from 1 to 2? $age^2$ moves from 1 to 4. This is a change of 3 this time. The change in $y$ is $2 * (4 - 1) = 6$. But the *average unit change* is $2/3$. Put differently, it took a change in age of 1 to increase $y$ by 2 the first time but it took a change in $age$ of 3 to increase $y$ by 2 the second time. It's really happening in terms of $age$ but we model the phenomena in terms of $age^2$. This phenomena is known as *decreasing* returns. Of course, there may be *increasing* returns. For this you would need a square *root* transformation. # ## Numerical to Categorical # # When it comes to transformations between numeric and categorical variables, the suggested practice is to deal with numeric variables if possible because you can extract more information from them. 
This is often at the level of measurement though (when you are recording data) rather than at the point of data transformation. For example, it is more informative and your model may be better if you measure handedness on a scale from 0 to 10 rather than as a binary "left or right". It is usually better to model something as percent correct/successful/voted than pass/fail. # # However, there are times when only a categorical variable (or factor) will do. If you coded the US States and the District of Columbia as an integer from 1 to 51, what would a coefficient for this variable even mean? And even if you have a continuous variable sometimes a discretization permits you to model non-linearities. # # One exception is that you may need to discretize a variable in order to handle some non-linearities. For example, imagine something depends in a non-linear way on `age`: younger and older people are "for" something ($\beta_{age} > 0$) but middle aged people are against it ($\beta_{age} < 0$). # # If you used numeric `age` in your model, you would get a single value for the coefficient...perhaps even something near zero. If you used categorical age variables (a dummy for each category), you could get a positive coefficient on `age19-29`, a negative one on `age29-49` and a positive one on `age49+`. # ## Interaction Terms # # We have already mentioned the possibility of including interaction terms in models. When you do you include them? The usual answer is domain knowledge. Sometimes interaction terms fall naturally out of the problem such as when you have height, width, and length. The interaction of these terms is volume. # # Otherwise, you can identify variables with "large" effects and then include interaction terms. Although domain theory may indicate that terms with smaller effects interact with each other, it's unlikely that you'll be able to model it. An interaction effect can't be stronger than the main effect. 
Although it *might* make sense to model only the interaction effect. # # Categorical variables are also a source of interaction terms but you need to be aware of the underlying support for the interaction. If you don't have enough observations of that particular--more specific--case, the estimate of the coefficient will probably not be any "good". # ## Linear Models of Non-Linear Data # # Linear models also imply a additivity/linearity assumption has been violated. In these cases, we can often perform a transformation that results in data that satisfies the assumptions. Taking logarithms generally permit you to model non-linear relationships. # # Consider the following: # # $log( y) = \beta_0 + \beta_1 x_1 + \dots + \beta_n x_n$ # # if you exponentiate this equation you get: # # $y = e^{\beta_0 + \beta_1 x_1 + \dots + \beta_n x_n}$ # # $y = B_0 + B_1^{X_1} + ... B_n^{X_n}$ # # where $B_0 = e^{\beta_0}$. In this case, each $B_i$ can be interpreted (if you use natural log) as the percent difference in $log( y)$ between groups that differ by one unit of $x_i$. # # If you start the other way: # # $y = \beta_1^{x_1} \times \dots \times \beta_n^{x_n}$ # # then taking the log of both sides yields: # # $log( y) = log( x_0) + \beta_1 log( x_1) + \dots + \beta_n log( x_n)$. # # In this case, the various $\beta_i$ are interpretable as *elasticities* or the percent change in y that results from a 1% change in $x_i$. # # ## Building Linear Models # # Where does this all leave us? What are the steps to building a linear model? # # 1. Start with all features as suggested by domain knowledge, but... # 2. Analyze features for correlations and, of those groups, consider picking the best variable, an average of the variables, or some other transformation (sum, min, max, etc). # 3. Transform all categorical variables into one hot encodings but leave one encoding out of the model for each variable. The intercept $\beta_0$ represent all the outcomes that are excluded explicitly. 
Which one you leave out might depend on the number of observations for each and what you want to do with the model. # 4. Examine the residuals and EDA of the features and refer back to domain knowledge to see if any transformations are warranted including converting numerical variables into discrete, categorical variables. # # # ## Step-wise Regression and Data Dredging # # As we already mentioned, finding the best subset of features out of a set of variables is NP-hard. Nevertheless, there are heuristics for so-called "Step-wise Regression". To a certain extent, we have been describing a heuristic driven form of manual Step-wise regression that starts with a "all variables in" initial model and then seeks to improve the model by # # Step-wise regression is generally frowned up the statistical *cognoscenti* and is associated with "data dredging" and "p-value hacking". The concerns are these: # # 1. You have collected a bunch of data from an experiment. Your *theory* should tell you the specification for your model. If that model doesn't work, you shouldn't try to tweak it, looking for *some* relationship that fits your theory. # 2. As you iterate over models, you are constantly doing statistical significance testing. It's not clear what any of the statistical significance tests for your final model really mean. # # We've dodged the last point by using Bayesian inference. We don't have a "multiple comparisons" problem. The evidence supports what the evidence supports to the degree it supports it, based on your prior. As long as you don't go hog wild and use domain knowledge as a guide, you should be alright within the standards of commerce if not science. # # And that's really germane to the first point. This is data science but it's not really science *per se*. We're not trying to build and validate a large body of knowledge. We want to know what our customer life time value is. This goes to the Blomberg and Gelman discussion. 
# # It's up to you to not mislead yourself or organization by making tenuous connections in your data, leading to the loss of revenue at best and lives at worst. It behooves all of us to use the tools wisely or someone will take them away from us. # ## Assumptions of the Linear Model # # We've talked about the assumptions of the linear model but let's bring them all into one place. Classical linear regression has a number of assumptions. We follow Gelman's advice and list them in their order of importance: # # 1. Validity # 2. Additivity and Linearity # 3. Independence of errors # 4. Equal variance of errors # 5. Normality of errors # # *Validity* is the most important factor. Reasonably, it is the most important factor in any model building. Since it is possible to find spurious correlations between any number of variables so each variable should have some reason for being included in the model. # # Validity is slightly broader than this, however. It also means that if we want to apply our model to underprivileged youths, the original model should not be constructed from data for all youths. Similarly, if we want to apply a model of health and fitness to the general population, it should probably not be built upon data from patients at risk for heart disease. # # *Additivity* is important because, well, we are using an additive model. The same is true of *linearity*. But we can apply transformations to get us back into compliance. # # *Independence of errors* assumes that errors in prediction are not correlated. We have mentioned this before, what we are saying is that our data should be either independently and identically distributed or, a slightly weaker condition, they are exchangeable. It's easy to think how this might be violated in our child IQ problem: what if more than one child from the same mother is included? # # *Equal variance of errors* is the assumption that the variance, $\epsilon_i \sim N(0, \sigma)$, is the same for all i. 
The technical term for this is *homoskedasticity*. The opposite (and undesirable) condition is *heteroskedasticity*. We saw this in the Residuals section. Gelman dismisses this as not a very big problem because it does not affect the estimation of $\beta$.
#
# *Normality of errors* is something that people often wring their hands about and Gelman says this is the least important. While it might affect our ability to predict (and here it might actually be important to someone), it doesn't affect our ability to form a model, to discover the effects that are $\beta$.
fundamentals_2018.9/linear/transformations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="vk6pUeHeF3Tm" colab_type="code" colab={} # google colab does not come with torch installed. And also, in course we are using torch 0.4. # so following snippet of code installs the relevant version from os.path import exists from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag()) # cuda_output = !ldconfig -p|grep cudart.so|sed -e 's/.*\.\([0-9]*\)\.\([0-9]*\)$/cu\1\2/' accelerator = cuda_output[0] if exists('/dev/nvidia0') else 'cpu' # !pip install -q http://download.pytorch.org/whl/{accelerator}/torch-0.4.1-{platform}-linux_x86_64.whl torchvision # + id="kTAWQzZRGEwZ" colab_type="code" colab={} import torch import torchvision # + id="34oiXrzCHUqE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a9e735d6-00af-4b8b-8e3c-0cbe93f27d4f" executionInfo={"status": "ok", "timestamp": 1545334546371, "user_tz": -300, "elapsed": 1003, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00142952581255680645"}} print(torch.__version__) # + id="vkYfRuiRHV7C" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5b13840e-725f-4b6b-8d10-be35e3c4885a" executionInfo={"status": "ok", "timestamp": 1545334551353, "user_tz": -300, "elapsed": 789, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00142952581255680645"}} print(torchvision.__version__)
setting-up-colab/Installing-PyTorch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + [markdown] slideshow={"slide_type": "slide"} # # The Relational Model # # - The relational model is a mathematical *abstraction* used to describe # how data can be structured, queried and updated. # # - It is based on *set theory*. # # - It can be *implemented* in many different ways. # # - When it is implemented by storing relations on disk files, we have a *relational database*. # # - Functional programming languages such as Python naturally express many aspects of the relational model. # # - This is one of the reasons they are very useful for data science. # + [markdown] slideshow={"slide_type": "slide"} # # Overview # # 1. The formal definition of the relational model # # 2. Representing relations in Python using collections of tuples # # 3. Querying relational data using Python set comprehensions # - # # An Example Relational Dataset # # The following slides use relations to describe: # # - students, # - the courses they are taking # - the prerequisites of courses # - their grades # - which department they are in import pandas as pd # + [markdown] slideshow={"slide_type": "slide"} # # Concepts # # - In a relational model, the data is a collection of *relations*. # # - Informally, a relation resembles a table of values. # # - When relations are stored on disk, they are called tables. # # - Each row represents a *fact* about a real-world entity or relationship. # # - The table name and column names are used to help interpret the meaning of the values. 
# # - A relational model is defined formally in terms of: # - tuples # - attributes # - relations # - domains # + [markdown] slideshow={"slide_type": "slide"} # # Tuples # # A tuple is a mathematical abstraction which: # # - contains several other values # # - has a well-defined ordering over the values # # - can contain duplicate values # # - can contain values of different types # # - can contain the special value `None` or `Null` # # - is immutable; the values contained in the tuple cannot change over time # + [markdown] slideshow={"slide_type": "slide"} # # The size of a tuple # # - We often restrict attention to tuples of a particular size or *degree*. # # - An $n-$tuple contains $n$ values. # + [markdown] slideshow={"slide_type": "slide"} # # Attributes # # - An attribute refers to the value in a particular index of a tuple. # + [markdown] slideshow={"slide_type": "slide"} # # Atomic values # # - Atomic values are values which are not stored in collections. # # - Atomic values cannot be further decomposed into other values. # # - A tuple is therefore *not* atomic. # # - A tuple that contains only atomic values is called a *flat tuple*. # + [markdown] slideshow={"slide_type": "slide"} # # Domain # # - A *domain* $D_i$ is a set of atomic values. # # - Each attribute within a relation has the *same* domain. # # - Intuitively, a domain specifies the allowable values in a column $i$. # # - Examples: # # $D_1 = \mathbb{Z}$ # # $D_2 = \{ 15, 16, \ldots, 80 \}$ # # $D_3 = \{ "CS", \; "ECON", \; "PHYS" \}$ # + [markdown] slideshow={"slide_type": "slide"} # # Relation schema # # - A *relation schema* is denoted by $R(A_1, A_2, \ldots, A_n)$. # # - Each *attribute* $A_i$ is the name of a role played by some domain $D_i$ in $R$. # # - $D_i$ is the *domain* of $A_i$ and is denoted by $\operatorname{dom}(A_i)$. # # - The *degree* or *arity* of a relation is the number of attributes $n$. 
# + [markdown] slideshow={"slide_type": "slide"} # # Example # # - What is the arity of the following relation schema? # # ~~~ # STUDENT(Name, Ssn, Home_phone, Address, Office_phone, Age, Gpa) # ~~~ # # - Answer: 7 # # - What is the name of the relation? # # - Answer: `STUDENT` # + [markdown] slideshow={"slide_type": "slide"} # # Example of a Domain # # $\operatorname{dom}(Gpa) = [0, 4]$ # + [markdown] slideshow={"slide_type": "slide"} # # Relations # # - The schema represents the structure of a *relation*. # # - A relation contains the actual data. # # - It is sometimes called the *relation state*, *relation intension* or *relation extension*. # # - Let $r(R)$ denote the relation $r$ of a relation schema $R$. # # - The relation $r$ consists of a set of $n$-tuples $r = \{t_1, t_2, \ldots, t_m\}$. # # - The $i^{th}$ value in tuple $t$ corresponds to the attribute $A_i$ # and is denoted $t.A_i$ or $t[i]$. # + [markdown] slideshow={"slide_type": "slide"} # # Constraints # # - Domain constraints # # - Key constraints # # - NULL values # + [markdown] slideshow={"slide_type": "slide"} # # Relational Datasets # # - So far we have discussed single relations. # # - A typical data-set will comprise many relations. # # - A relational dataset schema $(S, IC)$ comprises: # # - a set of relation schemas $S = \{ R_1, R_2, \ldots, R_k \}$ # # - a set of integrity constraints $IC$ # # - A relational dataset state $DB$ is a set of relation states $DB = \{ r_1, r_2, \ldots r_m \}$ # # - such that every $r_i$ satisfies every constraint in $IC$. # # + [markdown] slideshow={"slide_type": "slide"} # # # Data definition language # # - The data definition language (DDL) provides a concrete syntax and semantics for describing a relational schema. # # - Most commonly we use *SQL* - Structured Query Language. # + [markdown] slideshow={"slide_type": "slide"} # # Data query language # # - The data query language provides a concrete syntax and semantics for querying the relational dataset. 
# # - Formally a query is a function mapping from existing relation states to new relations. # # - That is, we map from one set of tuples to another set of tuples. # # - Typically the mapping involves some combination of set-theoretic functions, e.g. # # - subset of tuples that satisfy a predicate $p$ # - $\{ x: x \in X \wedge p(x) \}$ # - set union $X \cup Y$, difference $X - Y$, intersection $X \cap Y$ # - Cartesian product $X \times Y$ # # - The most common data query language for relational databases is again SQL. # # . . . # # - Mathematically, there is nothing stopping us from using e.g. Python as a query language. # + [markdown] slideshow={"slide_type": "slide"} # # Tuples in Python # # - Tuples in Python can be written by writing a sequence of values separated by # commas and surrounded by round brackets. For example: # + slideshow={"slide_type": "slide"} tuple1 = (50, 6.5) tuple2 = (1, 2, 'hello') professor = ('Steve', 'Phelps', 'S6.18') student = ('John', 'Doe', None) # + [markdown] slideshow={"slide_type": "-"} # - The individual values contain within a tuple can be obtained by indexing # their position (counting from zero). To find the office number of the professor: # + slideshow={"slide_type": "-"} professor[2] # + [markdown] slideshow={"slide_type": "slide"} # - Tuples are a very flexible way to represent single pieces of data. # # - We only allow *flat tuples*. The following is not allowed in a relational model: # + slideshow={"slide_type": "-"} this_is_not_allowed = (1, 3, (50, 6.5)) # + [markdown] slideshow={"slide_type": "slide"} # # Sets of Tuples # # - How can we use tuples to represent data-*sets* and relations? # # - We can use collections of tuples, e.g. a set of tuples. 
# # - So now we can represent one or more students: # + # Student tuples smith = ('Smith', 17, 1, 'CS') brown = ('Brown', 8, 2, 'CS') # The student relation students = {smith, brown} # + [markdown] slideshow={"slide_type": "slide"} # # Relational attributes in Python # # - Attributes are names for particular positions within a tuple. # # - We can use Python functions to represent relational attributes: # + # The attributes of a student def student_name(s): return s[0] def student_student_number(s): return s[1] # + [markdown] slideshow={"slide_type": "slide"} # # - Note that different relations can have the same attribute. # # - Therefore we need some convention to distinguish attributes from different relations. # # - In the above code, `student_student_number` refers to the `student_number` attribute of the `student` relation. # # - # # Queries in Python # # - We need some way to extract data from our data-set; i.e. to *query* the data. # # - A query will e.g.: # # - Take a subset of the tuples of a relation that satisfy a predicate. # # - *Join* two or more relations using a Cartesian product. # # - Take the intersection of tuples from two or more relations. # # - Take the union of tuples from two or more relations. # # # - Python list comprehensions or set comprehensions provide all of this functionality. 
# + [markdown] slideshow={"slide_type": "slide"} # # Relational queries in Python # - # - The set of students whose name is "Smith": {s for s in students if student_name(s) == 'Smith'} # This is equivalent to the SQL query: # # ~~~SQL # SELECT * FROM students WHERE students.name = "SMITH"; # ~~~ # + [markdown] slideshow={"slide_type": "slide"} # # Joining relations # # - Now let's create another relation called `grades` which has tuples of the form `(ssn, course-name, mark)`: # - grades = { (17, 'python', 'A'), (17, 'algebra', 'B'), (17, 'algebra', 'A')} # and a function to return the mark for a given grade tuple: def grade_mark(g): return g[2] # Now we can join the two relations using a Cartesian product: {(student_name(s), grade_mark(g)) for s in students for g in grades} # this is equivalent to the following SQL: # # ~~~SQL # SELECT students.name, grades.mark FROM students, grades; # ~~~ # - We can also combine this with a predicate: {(student_name(s), grade_mark(g)) for s in students for g in grades if student_name(s) == 'Smith'} # ~~~SQL # SELECT students.name, grades.mark FROM students, grades WHERE students.name = "Smith"; # ~~~
src/main/ipynb/relational-python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- x = 5 print(x) # ### this next cell is an example of a string variable # # * bullet point 1 # * bullet point 2 # # $$y = mx + b$$ # # # $$ \sum_{r \in R} P(R=r|U) $$ # # y = 'hello' type(y) # + print(y) x # - type(x) 5 +6 # + x = 5 y = 6 x*y # - y x a = True b = False type(a), type(b) if x < y: print('true!') def myotherfunc(): print('I got into the other function') def myfunc(x): print("I'm in the function!") myotherfunc() return x+2, 'hello!', x+5 r1, r2, r3 = myfunc(8) r1 r2 r3 def func2(y, x=5, z=6): return y+x*z func2(3,z=7) # + mylist = [1,2,3,4,'boise','state',['e1','e2']] print(mylist[5]) mylist.append(10) mylist # - mydict = {'key1':'value1', 'key2':'value2', 'key3':[3,2]} print(mydict['key3']) mydict['key4'] = [1,2,'three'] mydict # + #for item in mylist: # print(item) for item in mydict: print(item, mydict[item]) # - # ### it would be better not to do this: def func3(x='hello', j=[]): j.append(x) print(j) func3('ahoy there')
introds/classes/L2-PythonJupyter/python_intro.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Homework 8 # # ## CSCI E-82A # # # In the a previous homework assignments, you used two different dynamic programming algorithms and Monte Carlo reinforcement learning to solve a robot navigation problem by finding optimal paths to a goal in a simplified warehouse environment. Now you will use time differencing reinforcement learning to find optimal paths in the same environment. # The configuration of the warehouse environment is illustrated in the figure below. # # <img src="GridWorldFactory.JPG" alt="Drawing" style="width:200px; height:200px"/> # <center> **Grid World for Factory Navigation Example** </center> # # The goal is for the robot to deliver some material to position (state) 12, shown in blue. Since there is a goal state or **terminal state** this an **episodic task**. # # There are some barriers comprised of the states $\{ 6, 7, 8 \}$ and $\{ 16, 17, 18 \}$, shown with hash marks. In a real warehouse, these positions might be occupied by shelving or equipment. We do not want the robot to hit these barriers. Thus, we say that transitioning to these barrier states is **taboo**. # # As before, we do not want the robot to hit the edges of the grid world, which represent the outer walls of the warehouse. # # ## Representation # # You are, no doubt, familiar with the representation for this problem by now. # # As with many such problems, the starting place is creating the **representation**. In the cell below encode your representation for the possible action-state transitions. From each state there are 4 possible actions: # - up, u # - down, d, # - left, l # - right, r # # There are a few special cases you need to consider: # - Any action transitioning state off the grid or into a barrier should keep the state unchanged. 
# - Any action in the goal state keeps the state unchanged. # - Any transition within the taboo (barrier) states can keep the state unchanged. If you experiment, you will see that other encodings work as well since the value of a barrier states are always zero and there are no actions transitioning into these states. # # > **Hint:** It may help you create a pencil and paper sketch of the transitions, rewards, and probabilities or policy. This can help you to keep the bookkeeping correct. # + ## import numpy for latter import numpy as np import numpy.random as nr # - neighbors = {0:{'u':0, 'd':5, 'l':0, 'r':1}, 1:{'u':1, 'd':1, 'l':0, 'r':2}, 2:{'u':2, 'd':2, 'l':1, 'r':3}, 3:{'u':3, 'd':3, 'l':2, 'r':4}, 4:{'u':4, 'd':9, 'l':3, 'r':4}, 5:{'u':0, 'd':10, 'l':5, 'r':5}, 6:{'u':6, 'd':6, 'l':6, 'r':6},###barrier 7:{'u':7, 'd':7, 'l':7, 'r':7},###barrier 8:{'u':8, 'd':8, 'l':8, 'r':8},###barrier 9:{'u':4, 'd':14, 'l':9, 'r':9}, 10:{'u':5, 'd':15, 'l':10, 'r':11}, 11:{'u':11, 'd':11, 'l':10, 'r':12}, 12:{'u':12, 'd':12, 'l':12, 'r':12},#goal 13:{'u':13, 'd':13, 'l':12, 'r':14}, 14:{'u':9, 'd':19, 'l':13, 'r':14}, 15:{'u':10, 'd':20, 'l':15, 'r':15}, 16:{'u':16, 'd':16, 'l':16, 'r':16},###barrier 17:{'u':17, 'd':17, 'l':17, 'r':17},###barrier 18:{'u':18, 'd':18, 'l':18, 'r':18},###barrier 19:{'u':14, 'd':24, 'l':19, 'r':19}, 20:{'u':15, 'd':20, 'l':20, 'r':21}, 21:{'u':21, 'd':21, 'l':20, 'r':22}, 22:{'u':22, 'd':22, 'l':21, 'r':23}, 23:{'u':23, 'd':23, 'l':22, 'r':24}, 24:{'u':19, 'd':24, 'l':23, 'r':24}} # You need to define the initial transition probabilities for the Markov process. Set the probabilities for each transition as a **uniform distribution** leading to random action by the robot. # # > **Note:** As these are just starting values, the exact values of the transition probabilities are not actually all that important in terms of solving the RL problem. Also, notice that it does not matter how the taboo state transitions are encoded. 
The point of the RL algorithm is to learn the transition policy.

# Initial policy: a uniform distribution over the four actions for every
# ordinary state. Barrier (taboo) states and the goal get all-zero
# probabilities; no action is ever drawn from them via the policy.
policy = {0:{'u':0.25, 'd':0.25, 'l':0.25, 'r':0.25},
          1:{'u':0.25, 'd':0.25, 'l':0.25, 'r':0.25},
          2:{'u':0.25, 'd':0.25, 'l':0.25, 'r':0.25},
          3:{'u':0.25, 'd':0.25, 'l':0.25, 'r':0.25},
          4:{'u':0.25, 'd':0.25, 'l':0.25, 'r':0.25},
          5:{'u':0.25, 'd':0.25, 'l':0.25, 'r':0.25},
          6:{'u':0, 'd':0, 'l':0, 'r':0},###barrier
          7:{'u':0, 'd':0, 'l':0, 'r':0},###barrier
          8:{'u':0, 'd':0, 'l':0, 'r':0},###barrier
          9:{'u':0.25, 'd':0.25, 'l':0.25, 'r':0.25},
          10:{'u':0.25, 'd':0.25, 'l':0.25, 'r':0.25},
          11:{'u':0.25, 'd':0.25, 'l':0.25, 'r':0.25},
          12:{'u':0, 'd':0, 'l':0, 'r':0},#goal
          13:{'u':0.25, 'd':0.25, 'l':0.25, 'r':0.25},
          14:{'u':0.25, 'd':0.25, 'l':0.25, 'r':0.25},
          15:{'u':0.25, 'd':0.25, 'l':0.25, 'r':0.25},
          16:{'u':0, 'd':0, 'l':0, 'r':0},###barrier
          17:{'u':0, 'd':0, 'l':0, 'r':0},###barrier
          18:{'u':0, 'd':0, 'l':0, 'r':0},###barrier
          19:{'u':0.25, 'd':0.25, 'l':0.25, 'r':0.25},
          20:{'u':0.25, 'd':0.25, 'l':0.25, 'r':0.25},
          21:{'u':0.25, 'd':0.25, 'l':0.25, 'r':0.25},
          22:{'u':0.25, 'd':0.25, 'l':0.25, 'r':0.25},
          23:{'u':0.25, 'd':0.25, 'l':0.25, 'r':0.25},
          24:{'u':0.25, 'd':0.25, 'l':0.25, 'r':0.25}}

# The robot receives the following rewards:
# - 10 for entering position 12 (the goal state -- the reward dictionary below awards it on the two moves that enter state 12).
# - -1 for attempting to leave the grid. In other words, we penalize the robot for hitting the edges of the grid.
# - -0.1 for all other state transitions, which is the cost for the robot to move from one state to another. If we did not have this penalty, the robot could follow any random plan to the goal which did not hit the edges.
#
# This **reward structure is unknown to the RL agent**. The agent must **learn** the rewards by sampling the environment.
#
# In the code cell below encode your representation of this reward structure you will use in your simulated environment.
# Reward table: state -> {action: reward}.
#   +10  for the two moves that enter the goal state 12 (11 -> 'r', 13 -> 'l')
#   -1   for attempting to leave the grid or enter a barrier (state unchanged)
#   -0.1 for every other move (the cost of moving)
reward = {0:{'u':-1, 'd':-0.1, 'l':-1, 'r':-0.1},
          1:{'u':-1, 'd':-1, 'l':-0.1, 'r':-0.1},
          2:{'u':-1, 'd':-1, 'l':-0.1, 'r':-0.1},
          3:{'u':-1, 'd':-1, 'l':-0.1, 'r':-0.1},
          ## BUG FIX: state 4 is the top-right corner, so moving left (to
          ## state 3) is a legal move (-0.1) and moving right hits the wall
          ## (-1); the original had these two values swapped, which may
          ## explain the "weird" right-side asymmetry noted in the TD(0)
          ## answers below.
          4:{'u':-1, 'd':-0.1, 'l':-0.1, 'r':-1},
          5:{'u':-0.1, 'd':-0.1, 'l':-1, 'r':-1},
          6:{'u':0, 'd':0, 'l':0, 'r':0},###barrier
          7:{'u':0, 'd':0, 'l':0, 'r':0},###barrier
          8:{'u':0, 'd':0, 'l':0, 'r':0},###barrier
          9:{'u':-0.1, 'd':-0.1, 'l':-1, 'r':-1},
          10:{'u':-0.1, 'd':-0.1, 'l':-1, 'r':-0.1},
          11:{'u':-1, 'd':-1, 'l':-0.1, 'r':10},
          12:{'u':0, 'd':0, 'l':0, 'r':0},#goal
          13:{'u':-1, 'd':-1, 'l':10, 'r':-0.1},
          14:{'u':-0.1, 'd':-0.1, 'l':-0.1, 'r':-1},
          15:{'u':-0.1, 'd':-0.1, 'l':-1, 'r':-1},
          16:{'u':0, 'd':0, 'l':0, 'r':0},###barrier
          17:{'u':0, 'd':0, 'l':0, 'r':0},###barrier
          18:{'u':0, 'd':0, 'l':0, 'r':0},###barrier
          19:{'u':-0.1, 'd':-0.1, 'l':-1, 'r':-1},
          ## BUG FIX: state 20 is the bottom-left corner, so moving left hits
          ## the wall and must cost -1 (cf. the mirrored corner, state 24);
          ## the original charged only the ordinary -0.1 here.
          20:{'u':-0.1, 'd':-1, 'l':-1, 'r':-0.1},
          21:{'u':-1, 'd':-1, 'l':-0.1, 'r':-0.1},
          22:{'u':-1, 'd':-1, 'l':-0.1, 'r':-0.1},
          23:{'u':-1, 'd':-1, 'l':-0.1, 'r':-0.1},
          24:{'u':-0.1, 'd':-1, 'l':-0.1, 'r':-1}}

# You will find it useful to create a list of taboo states, which you can encode in the cell below.

# The barrier (taboo) states; never used as episode start states.
taboo = [6,7,8,16,17,18]

# ## TD(0) Policy Evaluation
#
# With your representations defined, you can now create and test functions to perform TD(0) **policy evaluation**.
#
# As a first step you will need a function to find the rewards and next state given a state and an action. You are welcome to start with the `state_values` function from the TD/Q-learning notebook. However, keep in mind that you must modify this code to correctly treat the taboo states of the barrier. Specifically, taboo states should not be visited.
#
# Execute your code to test it for each possible action from state 11.
# + def state_values(s, action, neighbors = neighbors, rewards = reward): """ Function simulates the environment returns s_prime and reward given s and action """ s_prime = neighbors[s][action] reward = rewards[s][action] return (s_prime,reward) ## Test the function for a in ['u', 'd', 'r', 'l']: print(state_values(11, a)) # - # Examine your results. Are the action values consistent with the transitions? # # ANS: No, they differ by the rewards values # Next, you need to create a function to compute the state values using the TD(0) algorithm. You should use the function you just created to find the rewards and next state given a state and action. You are welcome to use the `td_0_state_values` function from the TD/Q-learning notebook as a starting point. # # Execute your function for 1,000 episodes and examine the results. # + def td_0_state_values(policy, n_samps, goal, alpha = 0.2, gamma = 0.9): """ Function for TD(0) policy """ ## Initialize the state list and state values states = list(policy.keys()) v = [0]*len(list(policy.keys())) action_index = list(range(len(list(policy[0].keys())))) for _ in range(n_samps): s = nr.choice(states, size =1)[0] probs = list(policy[s].values()) if(s not in taboo+[goal]): a = list(policy[s].keys())[nr.choice(action_index, size = 1, p = probs)[0]] else: a = list(policy[s].keys())[nr.choice(action_index, size = 1)[0]] transistion = state_values(s, a) v[s] = v[s] + alpha * (transistion[1] + gamma * v[transistion[0]] - v[s]) return(v) # - nr.seed(345) np.round(np.array(td_0_state_values(policy, n_samps = 1000, goal = 12)).reshape((5,5)), 4) # Examine your results and answer the following questions to ensure you action value function operates correctly: # 1. Are the values of the taboo states 0? ANS: Yes # 2. Are the states with the highest values adjacent to the terminal state? ANS: Partialy, because weirdly the right side,13, is smaller than 10. # 3. 
Are the values of the states decreasing as the distance from the terminal state increases? ANS: Yes
#
# ## SARSA(0) Policy Improvement
#
# Now you will perform policy improvement using the SARSA(0) algorithm. You are welcome to start with the `select_a_prime` and `SARSA_0` functions from the TD/Q-learning notebooks.
#
# Execute your code for 1,000 episodes, and with $\alpha = 0.2$ and $\epsilon = 0.1$.

# +
import copy

def select_a_prime(s_prime, policy, action_index, greedy, goal):
    """Select the next action (a') for state s_prime.

    When `greedy` is True and s_prime is not the goal, the action is drawn
    using the probabilities stored in `policy`; otherwise it is drawn
    uniformly. Returns (index of the chosen action, action key).
    """
    ## Randomly select an action prime
    ## Make sure to handle the terminal state
    if(s_prime != goal and greedy):
        probs = list(policy[s_prime].values())
        a_prime_index = nr.choice(action_index, size = 1, p = probs)[0]
        a_prime = list(policy[s_prime].keys())[a_prime_index]
    else:
        ## Don't probability weight for terminal state or non-greedy selection
        a_prime_index = nr.choice(action_index, size = 1)[0]
        a_prime = list(policy[s_prime].keys())[a_prime_index]
    return(a_prime_index, a_prime)

def SARSA_0(policy, episodes, goal, alpha = 0.2, gamma = 0.9, epsilon = 0.1):
    """ Function to perform SARSA(0) control policy improvement.

    Runs `episodes` episodes from random non-terminal start states and
    applies the SARSA update to the action-value array Q
    (actions x states), which is returned.

    NOTE(review): `epsilon` is accepted but never used in this body --
    exploration comes entirely from the probabilities in `policy`. Confirm
    whether an epsilon-greedy branch was intended here.
    """
    ## Initialize the state list and action values
    states = list(policy.keys())
    n_states = len(states)
    ## Initialize possible actions and the action values
    action_index = list(range(len(list(policy[0].keys()))))
    Q = np.zeros((len(action_index),len(states)))
    current_policy = copy.deepcopy(policy)
    for _ in range(episodes): # Loop over the episodes
        ## sample a state at random ensuring it is not terminal state
        s = nr.choice(states, size = 1)[0]
        while(s in taboo+[goal]):
            s = nr.choice(states, size = 1)[0]
        ## Now choose action given policy
        a_index, a = select_a_prime(s, current_policy, action_index, True, goal)
        s_prime = float('inf') # Value of s_prime to start loop
        while(s_prime != goal): # Episode ends where get to terminal state
            ## The next state given the action
            s_prime, reward = state_values(s, a)
            a_prime_index, a_prime = select_a_prime(s_prime, current_policy, action_index, True, goal)
            ## Update the action values
            Q[a_index,s] = Q[a_index,s] + alpha * (reward + gamma * Q[a_prime_index,s_prime] - Q[a_index,s])
            ## Set action and state for next iteration
            a = a_prime
            a_index = a_prime_index
            s = s_prime
    return(Q)

# +
Q = SARSA_0(policy, 1000, goal = 12, alpha = 0.2, epsilon = 0.1)
for i in range(4):
    print(np.round(Q[i,:].reshape((5,5)), 4))
# -

# Examine the action values you have computed. Ensure that the action values are 0 for the goal and taboo states. Also check that the actions with the largest values for each state make sense in terms of reaching the goal.

# With the action value function completed, you will now create and test code to perform GPI with SARSA(0). You are welcome to use the `SARSA_0_GPI` function from the TD/Q-learning notebook as a starting point.
#
# Execute your code for 10 cycles of 100 episodes, with $\alpha = 0.2$, $\gamma = 0.9$ and $\epsilon = 0.01$, and examine the results.
def SARSA_0_GPI(policy, cycles, episodes, goal, alpha = 0.2, gamma = 0.9, epsilon = 0.1):
    """Generalized Policy Iteration (GPI) using SARSA(0).

    Alternates policy evaluation (SARSA(0) action values) with an
    epsilon-soft greedy improvement of the transition probabilities.

    Parameters
    ----------
    policy : dict
        state -> {action: probability}; the starting policy (not mutated).
    cycles : int
        Number of evaluation/improvement cycles.
    episodes : int
        Episodes per SARSA(0) evaluation.
    goal : int
        The terminal state.
    alpha, gamma, epsilon : float
        Learning rate, discount factor and exploration floor.

    Returns
    -------
    dict
        The improved policy.
    """
    ## iterate over GPI cycles
    current_policy = copy.deepcopy(policy)
    for _ in range(cycles):
        ## Evaluate the *current* policy with SARSA.
        ## BUG FIX: the original passed the initial `policy` every cycle, so
        ## the improvement steps never fed back into the evaluation. Also
        ## forward `gamma`, which was previously accepted but silently ignored.
        Q = SARSA_0(current_policy, episodes = episodes, goal = goal,
                    alpha = alpha, gamma = gamma, epsilon = epsilon)
        for s in list(current_policy.keys()): # iterate over all states
            ## Find the index of the action(s) with the largest Q value;
            ## there may be more than one.
            max_index = np.where(Q[:,s] == max(Q[:,s]))[0]
            ## Probabilities of transition.
            ## Need to allow for further exploration so don't let any
            ## transition probability be 0.
            ## Some gymnastics are required to ensure that the probabilities
            ## over the transitions actually add to exactly 1.0.
            neighbors_len = float(Q.shape[0])
            max_len = float(len(max_index))
            diff = round(neighbors_len - max_len,3)
            prob_for_policy = round(1.0/max_len,3)
            adjust = round((epsilon * (diff)), 3)
            prob_for_policy = prob_for_policy - adjust
            if(diff != 0.0):
                remainder = (1.0 - max_len * prob_for_policy)/diff
            else:
                remainder = epsilon
            for i, key in enumerate(current_policy[s]):
                ## Update policy: greedy action(s) share the high probability,
                ## all other actions keep a small exploration probability.
                if(i in max_index):
                    current_policy[s][key] = prob_for_policy
                else:
                    current_policy[s][key] = remainder
    return(current_policy)

# +
# Run GPI. epsilon = 0.01 per the exercise instructions above; the original
# call omitted it and therefore silently used the 0.1 default.
SARSA_0_Policy = SARSA_0_GPI(policy, goal=12, cycles = 10, episodes = 100, epsilon = 0.01)
SARSA_0_Policy
# -

# Verify that your results make sense? For example, starting at state 2 or 22, do the most probable actions follow a shortest path?
#
# ANS: Yes, both state 2 or 22 show the path to go left side.

# ## Apply Double Q-Learning
#
# As a next step, you will apply Double Q-learning(0) to the warehouse navigation problem. In the cell below create and test a function to perform Double Q-Learning for this problem. You are welcome to use the `double_Q_learning` function from the TD/Q-learning notebook as a starting point.
#
# Execute your code for 10 cycles of 500 episodes, with $\alpha = 0.2$, and $\gamma = 0.9$ and examine the results.
def Q_learning_0(policy, neighbors, rewards, episodes, goal, alpha = 0.2, gamma = 0.9):
    """Estimate action values with Q-learning(0) (off-policy TD control).

    Parameters
    ----------
    policy : dict mapping state -> {action: probability}; not mutated.
    neighbors : dict mapping state -> {action: successor state}.
    rewards : dict mapping state -> {action: reward}.
    episodes : number of episodes to sample.
    goal : terminal (goal) state index.
    alpha : learning rate.
    gamma : discount factor.

    Returns
    -------
    numpy array of shape (n_actions, n_states) of action values.
    """
    ## Initialize the state list and action values
    states = list(policy.keys())
    n_states = len(states)
    ## Initialize possible actions and the action values
    possible_actions = list(rewards[0].keys())
    action_index = list(range(len(list(policy[0].keys()))))
    Q = np.zeros((len(possible_actions), len(states)))
    current_policy = copy.deepcopy(policy)
    for _ in range(episodes):  # Loop over the episodes
        ## Sample an initial state at random, excluding taboo and goal states
        s = nr.choice(states, size = 1)[0]
        while(s in taboo + [goal]):
            s = nr.choice(states, size = 1)[0]
        ## Choose the first action following the policy
        a_index, a = select_a_prime(s, current_policy, action_index, True, goal)
        s_prime = n_states + 1  # Dummy value of s_prime to start the loop
        while(s_prime != goal):  # Episode ends when we reach the terminal state
            ## Get s_prime given s and a
            s_prime = neighbors[s][a]
            ## Index of the maximum action value for s_prime; ties are broken
            ## by random selection (this is the off-policy max backup).
            action_values = Q[:, s_prime]
            a_prime_index = nr.choice(np.where(action_values == max(action_values))[0], size = 1)[0]
            a_prime = possible_actions[a_prime_index]
            ## Lookup the reward
            reward = rewards[s][a]
            ## Update the action values
            Q[a_index, s] = Q[a_index, s] + alpha * (reward + gamma * Q[a_prime_index, s_prime] - Q[a_index, s])
            ## Set action and state for the next iteration
            a = a_prime
            a_index = a_prime_index
            s = s_prime
    return(Q)

# +
QQ = Q_learning_0(policy, neighbors, reward, episodes = 500, goal = 12)
for i in range(4):
    print(np.round(QQ[i, :].reshape((5, 5)), 4))
# -

def double_Q_learning_0(policy, neighbors, rewards, episodes, goal, alpha = 0.2, gamma = 0.9):
    """Estimate action values with Double Q-learning(0).

    Two action-value tables (Q1, Q2) are maintained; at each step one of them
    is updated at random, bootstrapping from the other, which reduces the
    maximization bias of plain Q-learning.

    Bug fix: the original docstring said "SARSA(0)" (copy/paste error).
    As in the original, only Q1 is returned.
    """
    ## Initialize the state list and action values
    states = list(policy.keys())
    n_states = len(states)
    ## Initialize possible actions and the two action-value tables
    possible_actions = list(rewards[0].keys())
    action_index = list(range(len(list(policy[0].keys()))))
    Q1 = np.zeros((len(possible_actions), len(states)))
    Q2 = np.zeros((len(possible_actions), len(states)))
    current_policy = copy.deepcopy(policy)
    for _ in range(episodes):  # Loop over the episodes
        ## Sample an initial state at random, excluding taboo and goal states
        s = nr.choice(states, size = 1)[0]
        while(s in taboo + [goal]):
            s = nr.choice(states, size = 1)[0]
        ## Choose the first action following the policy
        a_index, a = select_a_prime(s, current_policy, action_index, True, goal)
        s_prime = n_states + 1  # Dummy value of s_prime to start the loop
        while(s_prime != goal):  # Episode ends when we reach the terminal state
            ## Get s_prime given s and a
            s_prime = neighbors[s][a]
            ## Update one or the other action-value table at random
            if(nr.uniform() <= 0.5):
                ## Greedy action index w.r.t. Q1; ties broken at random
                action_values = Q1[:, s_prime]
                a_prime_index = nr.choice(np.where(action_values == max(action_values))[0], size = 1)[0]
                a_prime = possible_actions[a_prime_index]
                ## Lookup the reward
                reward = rewards[s][a]
                ## Update Q1, bootstrapping from Q2
                Q1[a_index, s] = Q1[a_index, s] + alpha * (reward + gamma * Q2[a_prime_index, s_prime] - Q1[a_index, s])
                ## Set action and state for the next iteration
                a = a_prime
                a_index = a_prime_index
                s = s_prime
            else:
                ## Greedy action index w.r.t. Q2; ties broken at random
                action_values = Q2[:, s_prime]
                a_prime_index = nr.choice(np.where(action_values == max(action_values))[0], size = 1)[0]
                a_prime = possible_actions[a_prime_index]
                ## Lookup the reward
                reward = rewards[s][a]
                ## Update Q2, bootstrapping from Q1
                Q2[a_index, s] = Q2[a_index, s] + alpha * (reward + gamma * Q1[a_prime_index, s_prime] - Q2[a_index, s])
                ## Set action and state for the next iteration
                a = a_prime
                a_index = a_prime_index
                s = s_prime
    return(Q1)

# +
double_Q = double_Q_learning_0(policy, neighbors, reward, 500, goal = 12)
for i in range(4):
    print(np.round(double_Q[i, :].reshape((5, 5)), 4))
# -

# Examine the action values you have computed. Ensure that the action values are 0 for the goal and taboo states. Also check that the actions with the largest values for each state make sense in terms of reaching the goal.

# With the action value function completed, you will now create and test code to perform GPI with Double Q-Learning(0). You are welcome to use the `double_Q_learning_0_GPI` function from the TD/Q-learning notebook as a starting point.
#
# Execute your code for 10 cycles of 500 episodes, with $\alpha = 0.2$, $\gamma = 0.9$ and $\epsilon = 0.01$, and examine the results.

def double_Q_learning_0_GPI(policy, neighbors, reward, cycles, episodes, goal, alpha = 0.2, gamma = 0.9, epsilon = 0.1):
    """GPI using Double Q-learning(0) for the policy-evaluation step.

    Returns the improved epsilon-soft policy (same structure as `policy`).
    """
    current_policy = copy.deepcopy(policy)
    for _ in range(cycles):  # iterate over GPI cycles
        ## Evaluate the CURRENT policy (bug fix: the original re-evaluated the
        ## initial `policy` each cycle, so the policy never actually iterated);
        ## also pass alpha/gamma through instead of silently using defaults.
        Q = double_Q_learning_0(current_policy, neighbors, reward,
                                episodes = episodes, goal = goal,
                                alpha = alpha, gamma = gamma)
        for s in list(current_policy.keys()):  # iterate over all states
            ## Index (or indices, on ties) of the action(s) with the largest Q value.
            max_index = np.where(Q[:, s] == max(Q[:, s]))[0]
            ## Epsilon-soft probabilities: no transition probability is ever 0.
            ## The rounding gymnastics make the probabilities add to exactly 1.0.
            neighbors_len = float(Q.shape[0])
            max_len = float(len(max_index))
            diff = round(neighbors_len - max_len, 3)
            prob_for_policy = round(1.0 / max_len, 3)
            adjust = round(epsilon * diff, 3)
            prob_for_policy = prob_for_policy - adjust
            if(diff != 0.0):
                remainder = (1.0 - max_len * prob_for_policy) / diff
            else:
                remainder = epsilon
            for i, key in enumerate(current_policy[s]):
                ## Greedy actions get the high probability; the rest share the remainder.
                if(i in max_index):
                    current_policy[s][key] = prob_for_policy
                else:
                    current_policy[s][key] = remainder
    return(current_policy)

Double_Q_0_Policy = double_Q_learning_0_GPI(policy, neighbors, reward, cycles = 10, episodes = 500, goal = 12)
Double_Q_0_Policy

# Verify that your results make sense? For example, starting at state 2 or 22, do the most probable actions follow a shortest path?
#
# ANS: Hmm, state 2 is somewhat odd, because it shows 'r' whereas it was 'l' with SARSA.
# It is still a shortest path, though.
# State 22 is 'l', which is the same as before.

# ## N-Step TD Learning
#
# Finally, you will apply N-Step TD learning and N-Step SARSA to the warehouse navigation problem. First create a function to perform N-step TD policy evaluation. You are welcome to start with the `TD_n` policy evaluation function from the TD/Q-Learning notebook.
#
# Test your function using 1,000 episodes, $n = 4$, $\gamma = 0.9$, and $\alpha = 0.2$.

# +
def TD_n(policy, episodes, n, goal, alpha = 0.2, gamma = 0.9, epsilon = 0.1):
    """Perform n-step TD policy evaluation.

    Returns a list of state values, one per state in `policy`.
    """
    ## Initialize the state list and the state values
    states = list(policy.keys())
    n_states = len(states)
    action_index = list(range(len(list(policy[0].keys()))))
    v = [0] * len(list(policy.keys()))
    current_policy = copy.deepcopy(policy)
    ## Sample an initial state at random, excluding taboo and goal states
    s = nr.choice(states, size = 1)[0]
    while(s in taboo + [goal]):
        s = nr.choice(states, size = 1)[0]
    for _ in range(episodes):  # Loop over the episodes
        T = float("inf")
        tau = 0
        reward_list = []
        t = 0
        while(tau != T - 1):  # Episode ends when the last update time is reached
            if(t < T):
                ## Choose an action given the policy
                probs = list(policy[s].values())
                a = list(policy[s].keys())[nr.choice(action_index, size = 1, p = probs)[0]]
                ## The next state and reward given the action
                s_prime, reward = state_values(s, a)
                reward_list.append(reward)  # append the reward to the list
                if(s_prime == goal):
                    T = t + 1  # We reached the terminal state
            tau = t - n + 1  # update the time step being updated
            if(tau >= 0):  # Check if enough time steps to compute the return
                ## Compute the return.
                ## The formula for the first index in the loop differs from
                ## Sutton and Barto but seems to be correct, at least for Python.
                G = 0.0
                for i in range(tau, min(tau + n - 1, T)):
                    G = G + gamma**(i - tau) * reward_list[i]
                ## Bootstrap when we are not yet at the terminal state
                if(tau + n < T):
                    G = G + gamma**n * v[s_prime]
                ## Update v
                v[s] = v[s] + alpha * (G - v[s])
            ## Set state for the next iteration
            if(s_prime != goal):
                s = s_prime
            t = t + 1
    return(v)
# -

np.round(np.array(TD_n(policy, episodes = 1000, n = 4, goal = 12, alpha = 0.2, gamma = 0.9)).reshape((5, 5)), 4)

# Verify that the result you obtained appears correct. Are the values of the goal and taboo states all 0? Do the state values decrease with distance from the goal?
#
# YES

# Now that you have an estimate of the best values for the number of steps and the learning rate, you can compute the action values using multi-step SARSA. In the cell below, create and test a function to compute the action values using N-step SARSA. You are welcome to use the `SARSA_n` function from the TD/Q-learning notebook as a starting point.
#
# Test your function by executing 4-step SARSA for 1,000 episodes with $\alpha = 0.2$ and $\gamma = 0.9$, using the optimum number of steps and learning rate you have determined.

def SARSA_n(policy, episodes, n, goal, alpha = 0.2, gamma = 0.9, epsilon = 0.1):
    """Perform n-step SARSA control policy improvement.

    Returns a numpy array of shape (n_actions, n_states) of action values.
    """
    ## Initialize the state list and action values
    states = list(policy.keys())
    n_states = len(states)
    action_index = list(range(len(list(policy[0].keys()))))
    Q = np.zeros((len(action_index), len(states)))
    current_policy = copy.deepcopy(policy)
    for _ in range(episodes):  # Loop over the episodes
        ## Sample a state at random, excluding taboo and goal states
        s = nr.choice(states, size = 1)[0]
        while(s in taboo + [goal]):
            s = nr.choice(states, size = 1)[0]
        ## Choose the first action given the policy
        a_index, a = select_a_prime(s, current_policy, action_index, True, goal)
        t = 0  # Initialize the time step count
        T = float("inf")
        tau = 0
        reward_list = []
        while(tau != T - 1):  # Episode ends when the last update time is reached
            if(t < T):
                ## The next state and reward given the action
                s_prime, reward = state_values(s, a)
                reward_list.append(reward)  # append the reward to the list
                if(s_prime == goal):
                    T = t + 1  # We reached the terminal state
                else:
                    ## Select and store the next action using the policy
                    a_prime_index, a_prime = select_a_prime(s_prime, current_policy, action_index, True, goal)
            tau = t - n + 1  # update the time step being updated
            if(tau >= 0):  # Check if enough time steps to compute the return
                ## Compute the return.
                ## The formula for the first index in the loop differs from
                ## Sutton and Barto but seems to be correct, at least for Python.
                G = 0.0
                for i in range(tau, min(tau + n, T)):
                    G = G + gamma**(i - tau) * reward_list[i]
                ## Bootstrap when we are not yet at the terminal state
                if(tau + n < T):
                    G = G + gamma**n * Q[a_prime_index, s_prime]
                ## Finally, update Q
                Q[a_index, s] = Q[a_index, s] + alpha * (G - Q[a_index, s])
            ## Set action and state for the next iteration
            if(s_prime != goal):
                s = s_prime
                a = a_prime
                a_index = a_prime_index
            ## increment t
            t = t + 1
    return(Q)

# +
Q = SARSA_n(policy, episodes = 1000, n = 4, goal = 12, alpha = 0.2, gamma = 0.9)
for i in range(4):
    print(np.round(Q[i, :].reshape((5, 5)), 4))
# -

# Verify that the results you have computed appear correct using the aforementioned criteria.
#
# Finally, create a function to use the GPI algorithm with N-step SARSA in the cell below. You are welcome to start with the `SARSA_n_GPI` function from the TD/Q-learning notebook.
#
# Execute your function using 4-step SARSA for 5 cycles of 500 episodes, with $\alpha = 0.2$, $\epsilon = 0.1$, and $\gamma = 0.9$.

# +
def SARSA_n_GPI(policy, n, cycles, episodes, goal, alpha = 0.2, gamma = 0.9, epsilon = 0.1):
    """GPI using n-step SARSA for the policy-evaluation step.

    Returns the improved epsilon-soft policy (same structure as `policy`).
    """
    current_policy = copy.deepcopy(policy)
    for _ in range(cycles):  # iterate over GPI cycles
        ## Evaluate the CURRENT policy with n-step SARSA (bug fix: the original
        ## re-evaluated the initial `policy` each cycle).
        Q = SARSA_n(current_policy, episodes, n, goal = goal,
                    alpha = alpha, gamma = gamma, epsilon = epsilon)
        for s in list(current_policy.keys()):  # iterate over all states
            ## Index (or indices, on ties) of the action(s) with the largest Q value.
            max_index = np.where(Q[:, s] == max(Q[:, s]))[0]
            ## Epsilon-soft probabilities: no transition probability is ever 0.
            ## The rounding gymnastics make the probabilities add to exactly 1.0.
            neighbors_len = float(Q.shape[0])
            max_len = float(len(max_index))
            diff = round(neighbors_len - max_len, 3)
            prob_for_policy = round(1.0 / max_len, 3)
            adjust = round(epsilon * diff, 3)
            prob_for_policy = prob_for_policy - adjust
            if(diff != 0.0):
                remainder = (1.0 - max_len * prob_for_policy) / diff
            else:
                remainder = epsilon
            for i, key in enumerate(current_policy[s]):
                ## Greedy actions get the high probability; the rest share the remainder.
                if(i in max_index):
                    current_policy[s][key] = prob_for_policy
                else:
                    current_policy[s][key] = remainder
    return(current_policy)
# -

# +
SARSA_N_Policy = SARSA_n_GPI(policy, n = 4, cycles = 5, episodes = 500, goal = 12, alpha = 0.2, epsilon = 0.1)
SARSA_N_Policy
# -

# Examine your results. Verify that the most probable paths to the goal from states 2 and 22 are the shortest possible.

# At states 2 and 22 it shows going left and right respectively,
# which indicates the shortest path correctly.
homeword/CS82A probabalistic programming/Homework/Homework8.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/Rishit-dagli/Deep-Learning-With-TensorFlow-Blog-series/blob/master/Part%203-Using%20Convolutional%20Neural%20Networks%20with%20TensorFlow/Convolutions_from_scratch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="tJTHvE8Qe5nM" # Let's explore how convolutions work by creating a basic convolution on a 2D Grey Scale image. First we can load the image by taking the 'ascent' image from scipy. It's a nice, built-in picture with lots of angles and lines. # + colab={} colab_type="code" id="DZ5OXYiolCUi" import cv2 import numpy as np from scipy import misc i = misc.ascent() # + [markdown] colab_type="text" id="SRIzxjWWfJjk" # Next, we can use the pyplot library to draw the image so we know what it looks like. # + colab={"base_uri": "https://localhost:8080/", "height": 269} colab_type="code" id="R4p0cfWcfIvi" outputId="b9c159d5-4c47-43c2-9892-77dc51b66a7f" import matplotlib.pyplot as plt plt.grid(False) plt.gray() plt.axis('off') plt.imshow(i) plt.show() # + [markdown] colab_type="text" id="C1mhZ_ZTfPWH" # The image is stored as a numpy array, so we can create the transformed image by just copying that array. Let's also get the dimensions of the image so we can loop over it later. # + colab={} colab_type="code" id="o5pxGq1SmJMD" i_transformed = np.copy(i) size_x = i_transformed.shape[0] size_y = i_transformed.shape[1] # + [markdown] colab_type="text" id="Y7PwNkiXfddd" # Now we can create a filter as a 3x3 array. 
# + colab={} colab_type="code" id="sN3imZannN5J" # This filter detects edges nicely # It creates a convolution that only passes through sharp edges and straight # lines. #Experiment with different values for fun effects. #filter = [ [0, 1, 0], [1, -4, 1], [0, 1, 0]] # A couple more filters to try for fun! filter = [ [-1, -2, -1], [0, 0, 0], [1, 2, 1]] #filter = [ [-1, 0, 1], [-2, 0, 2], [-1, 0, 1]] # If all the digits in the filter don't add up to 0 or 1, you # should probably do a weight to get it to do so # so, for example, if your weights are 1,1,1 1,2,1 1,1,1 # They add up to 10, so you would set a weight of .1 if you want to normalize them weight = 1 # + [markdown] colab_type="text" id="JQmm_iBufmCz" # Now let's create a convolution. We will iterate over the image, leaving a 1 pixel margin, and multiply out each of the neighbors of the current pixel by the value defined in the filter. # # i.e. the current pixel's neighbor above it and to the left will be multiplied by the top left item in the filter etc. etc. We'll then multiply the result by the weight, and then ensure the result is in the range 0-255 # # Finally we'll load the new value into the transformed image. 
# + colab={} colab_type="code" id="299uU2jAr90h"
# Hand-rolled 3x3 convolution over the interior of the image (1-pixel
# margin left untouched). For each pixel, the 3x3 neighborhood is weighted
# by the matching filter entry, scaled by `weight`, clamped to [0, 255]
# and written into the working copy.
for x in range(1, size_x - 1):
    for y in range(1, size_y - 1):
        acc = 0.0
        # filter rows correspond to the y offset, columns to the x offset
        for dy in (-1, 0, 1):
            for dx in (-1, 0, 1):
                acc = acc + i[x + dx, y + dy] * filter[dy + 1][dx + 1]
        acc = acc * weight
        # clamp the response into the valid 8-bit range
        i_transformed[x, y] = min(255, max(0, acc))

# + [markdown] colab_type="text" id="6XA--vgvgDEQ"
# Now we can plot the image to see the effect of the convolution!

# + colab={"base_uri": "https://localhost:8080/", "height": 269} colab_type="code" id="7oPhUPNhuGWC" outputId="727e7b3a-7299-4011-d5af-ff79b3312439"
# Plot the image. Note the size of the axes -- they are 512 by 512
plt.gray()
plt.grid(False)
plt.imshow(i_transformed)
#plt.axis('off')
plt.show()

# + [markdown] colab_type="text" id="xF0FPplsgHNh"
# This code will show a (2, 2) pooling. The idea here is to iterate over the image, and look at the pixel and it's immediate neighbors to the right, beneath, and right-beneath. Take the largest of them and load it into the new image. Thus the new image will be 1/4 the size of the old -- with the dimensions on X and Y being halved by this process. You'll see that the features get maintained despite this compression!
# + colab={"base_uri": "https://localhost:8080/", "height": 269} colab_type="code" id="kDHjf-ehaBqm" outputId="0943f115-bc3e-4986-a28d-67eeba66c828"
# (2, 2) max pooling: every output pixel is the maximum of a 2x2 patch of
# the convolved image, so each output dimension is half the input's.
new_x = size_x // 2
new_y = size_y // 2
newImage = np.zeros((new_x, new_y))
for x in range(0, size_x, 2):
    for y in range(0, size_y, 2):
        newImage[x // 2, y // 2] = i_transformed[x:x + 2, y:y + 2].max()

# Plot the image. Note the size of the axes -- now 256 pixels instead of 512
plt.gray()
plt.grid(False)
plt.imshow(newImage)
#plt.axis('off')
plt.show()
Part 4-Extending what Convolutional Neural Nets can do/Convolutions_from_scratch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/RashiG27/verbs-and-adjs/blob/master/task.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="hZLLtQ1T4SjF" colab_type="code" colab={} from bs4 import BeautifulSoup as bs from nltk.corpus import stopwords,wordnet from nltk.tokenize import sent_tokenize, word_tokenize import string from nltk.stem import WordNetLemmatizer from nltk.stem import PorterStemmer import requests import nltk import spacy # + id="iO2y5WLh9buC" colab_type="code" colab={} speech=requests.get("https://speakola.com/political/dr-a-p-j-abdul-kalam-vision-for-india-2011") # + id="MFBP7YZ1911O" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="48aa0519-dbcb-45fb-fd4f-258521553ec5" speech # + id="1fDdZqcs93pY" colab_type="code" colab={} soup=bs(speech.text,'html.parser') # + id="ycJcOk0O-G-9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 25792} outputId="aacc1f7a-61d1-4833-ecb5-c566f625ff7e" soup # + id="ibsBSbBx-Uxd" colab_type="code" colab={} text=[] for i in soup.find_all('p'): #print(i.text) text.append(i.text) # + id="uK1J2JN--c-g" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 245} outputId="373a66f5-c38f-48dd-ea7b-734aa1770b1e" text # + id="uyn0Yrry-eSG" colab_type="code" colab={} text=text[1:7] # + id="M19mQ8pnAZaM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 141} outputId="ee7d1034-ea99-41d9-d9be-9dc56b6004ac" text # + id="SBtEGxvSAa4v" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="7d2a51e6-0517-4453-a786-9168d31d32f0" fspeech=" ".join(str(x) for x in text) 
fspeech # + id="8LR-_CzrApSd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="cafb9a78-4d5f-422b-840d-b8f750b434ac" fspeech1=fspeech.translate(str.maketrans(' ',' ',string.punctuation)) fspeech1 # + id="TY9hZxeBBAGv" colab_type="code" colab={} nlp=spacy.load('en') # + id="mNLXFZVOBTkR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="aa17093c-04b3-4cec-8d2b-8e3d1f4faaac" p_data=nlp(fspeech) p_data # + id="1eA2jDMhBcqn" colab_type="code" colab={} verb=[] adj=[] # + id="ZGYSNIOfBsq6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 4254} outputId="35d0a05e-b2fc-499f-b33a-d5740d71ad78" for i in p_data: #print(i.pos_) if i.pos_ == 'VERB': verb.append(i) verb # + id="0KTys7MQB-e7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 503} outputId="fb483a5e-66ed-40c0-e33b-266f80003ba0" for i in p_data: if i.pos_ == 'ADJ': adj.append(i) adj # + id="OqYLCLOsCL-Y" colab_type="code" colab={} # + id="_22tHc1yCOCv" colab_type="code" colab={} # + id="qX_BcVh4CPs1" colab_type="code" colab={}
task.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:skLearn]
#     language: python
#     name: conda-env-skLearn-py
# ---

# Decision-tree experiments on the breast-cancer dataset: test/train accuracy
# as a function of tree depth, number of leaf nodes and training-set size,
# followed by a grid search over the tree hyper-parameters and a plot of the
# resulting feature importances.

from sklearn import tree
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.tree import DecisionTreeClassifier, export_graphviz
# NOTE(review): sklearn.externals.six was removed in scikit-learn >= 0.23;
# if upgrading, switch to io.StringIO.
from sklearn.externals.six import StringIO
from IPython.display import Image, display
import pydotplus
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder

# Load the breast-cancer dataset; `data`/`target` feed every cell below.
bc = load_breast_cancer()
data = bc.data
target = bc.target

# +
# Experiment 1: accuracy vs maximum tree depth, averaged over several
# random train/test splits to smooth out split-to-split variance.
NUM_TIMES_TO_RUN = 10
MAX_DEPTH = 12
sum_test_accuracy_list = np.array([0, 0])
sum_train_accuracy_list = np.array([0, 0])
for j in range(NUM_TIMES_TO_RUN):
    test_accuracy_list = []
    train_accuracy_list = []
    for i in range(1, MAX_DEPTH):
        clf = tree.DecisionTreeClassifier(max_depth=i)
        train_data, test_data, train_target, test_target = train_test_split(data, target, test_size=0.15)
        # Fit on the training split, then score against both splits
        clf.fit(train_data, train_target)
        test_accuracy_list.append([i, clf.score(test_data, test_target)])
        train_accuracy_list.append([i, clf.score(train_data, train_target)])
    sum_test_accuracy_list = sum_test_accuracy_list + np.array(test_accuracy_list)
    sum_train_accuracy_list = sum_train_accuracy_list + np.array(train_accuracy_list)
avg_test_prediction = sum_test_accuracy_list / NUM_TIMES_TO_RUN
avg_train_prediction = sum_train_accuracy_list / NUM_TIMES_TO_RUN
# -

plt.xlabel('Depth')
plt.ylabel('Accuracy')
plt.ylim((.8, 1))
plt.plot(avg_test_prediction[:, 0], avg_test_prediction[:, 1], label='Test')
plt.plot(avg_train_prediction[:, 0], avg_train_prediction[:, 1], label='Train')
plt.title('Tree accuracy vs depth')
plt.legend()
plt.show()

# +
# Experiment 2: accuracy vs maximum number of leaf nodes.
NUM_TIMES_TO_RUN = 100
MAX_LEAF_NODE = 30
sum_test_accuracy_list = np.array([0, 0])
sum_train_accuracy_list = np.array([0, 0])
for j in range(NUM_TIMES_TO_RUN):
    test_accuracy_list = []
    train_accuracy_list = []
    for i in range(2, MAX_LEAF_NODE):  # max_leaf_nodes must be >= 2
        clf = tree.DecisionTreeClassifier(max_leaf_nodes=i)
        train_data, test_data, train_target, test_target = train_test_split(data, target, test_size=0.15)
        clf.fit(train_data, train_target)
        test_accuracy_list.append([i, clf.score(test_data, test_target)])
        train_accuracy_list.append([i, clf.score(train_data, train_target)])
    sum_test_accuracy_list = sum_test_accuracy_list + np.array(test_accuracy_list)
    sum_train_accuracy_list = sum_train_accuracy_list + np.array(train_accuracy_list)
avg_test_prediction = sum_test_accuracy_list / NUM_TIMES_TO_RUN
avg_train_prediction = sum_train_accuracy_list / NUM_TIMES_TO_RUN

plt.xlabel('Number of Leaf Nodes')
plt.ylabel('Accuracy')
plt.ylim((.86, 1))
plt.plot(avg_test_prediction[:, 0], avg_test_prediction[:, 1], label='Test')
plt.plot(avg_train_prediction[:, 0], avg_train_prediction[:, 1], label='Train')
plt.title('Tree accuracy vs Leaf Nodes')
plt.legend()
plt.show()
# -

# +
# Experiment 3: accuracy vs training-set size (10% .. 90% of the data).
NUM_TIMES_TO_RUN = 100
TRAIN_SIZE = 10
sum_test_accuracy_list = np.array([0, 0])
sum_train_accuracy_list = np.array([0, 0])
for j in range(NUM_TIMES_TO_RUN):
    test_accuracy_list = []
    train_accuracy_list = []
    for i in range(1, TRAIN_SIZE):
        clf = tree.DecisionTreeClassifier(criterion='gini', max_depth=6, max_leaf_nodes=8)
        #clf = tree.DecisionTreeClassifier()
        train_data, test_data, train_target, test_target = train_test_split(data, target, test_size=1 - i / 10.0)
        # Bug fix: the original fitted the same classifier twice in a row;
        # a single fit is sufficient.
        clf.fit(train_data, train_target)
        test_accuracy_list.append([i * 10, clf.score(test_data, test_target)])
        train_accuracy_list.append([i * 10, clf.score(train_data, train_target)])
    sum_test_accuracy_list = sum_test_accuracy_list + np.array(test_accuracy_list)
    sum_train_accuracy_list = sum_train_accuracy_list + np.array(train_accuracy_list)
avg_test_prediction = sum_test_accuracy_list / NUM_TIMES_TO_RUN
avg_train_prediction = sum_train_accuracy_list / NUM_TIMES_TO_RUN

plt.xlabel('Training Size in percentage')
plt.ylabel('Accuracy')
plt.ylim((0.9, 1))
plt.plot(avg_test_prediction[:, 0], avg_test_prediction[:, 1], label='Test')
plt.plot(avg_train_prediction[:, 0], avg_train_prediction[:, 1], label='Train')
plt.title('Tree accuracy vs Training Size percentage')
plt.legend()
plt.show()
# -

# +
# Grid search over depth / criterion / leaf count with stratified 5-fold CV.
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score

train_data, test_data, train_target, test_target = train_test_split(data, target, test_size=0.25)
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=268)
param_grid = {'max_depth': np.arange(1, 15),
              'criterion': ['gini', 'entropy'],
              'max_leaf_nodes': np.arange(3, 20)}

# create a grid
grid_tree = GridSearchCV(DecisionTreeClassifier(), param_grid, scoring='accuracy', n_jobs=-1, cv=skf)

# training
# %time grid_tree.fit(train_data, train_target)

# let's see the best estimator
best_tree = grid_tree.best_estimator_
print(best_tree)
print("_" * 40)
# with its score
print("Cross-validated best score {}%".format(round(grid_tree.best_score_ * 100, 3)))
# score on test
predictions = best_tree.predict(test_data)
print("Test score: {}%".format(round(accuracy_score(y_true=test_target, y_pred=predictions) * 100, 3)))
# -

# +
# Feature importances of the best tree, sorted ascending for a horizontal
# bar chart.
import matplotlib.pyplot as plt

# Bug fix: the original read `iris.feature_names`, but `iris` is never
# defined in this notebook — the data loaded above is the breast-cancer
# dataset, bound to `bc`.  Wrap in np.array so fancy indexing works.
features_list = np.array(bc.feature_names)
feature_importance = best_tree.feature_importances_
sorted_idx = np.argsort(feature_importance)

plt.figure(figsize=(5, 7))
plt.barh(range(len(sorted_idx)), feature_importance[sorted_idx], align='center')
plt.yticks(range(len(sorted_idx)), features_list[sorted_idx])
plt.xlabel('Importance')
plt.title('Feature importances')
plt.draw()
plt.show()
# -
knn/.ipynb_checkpoints/BreastCancer-knn-ALL-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 2019 Novel Coronavirus (SARS-CoV-2) and COVID-19 Unpivoted Data # # The following script takes data from the repository of the 2019 Novel Coronavirus Visual Dashboard operated by Johns Hopkins University's Center for Systems Science and Engineering (JHU CSSE). It will apply necessary cleansing/reformatting to make it use in traditional relational databases and data visualization tools. import pandas as pd import os import datetime import pycountry import numpy from copy import deepcopy # + tags=["parameters"] # papermill parameters output_folder = "../output/" # - # Data until 22MAR2020 is stored in a cache. This is collated and reshaped data from previous days. pre_2203_data = pd.read_csv("https://s3-us-west-1.amazonaws.com/starschema.covid/CSSEGISandData_COVID-19_until_0322.csv",keep_default_na=False) pre_2203_data["Date"] = pd.to_datetime(pre_2203_data["Date"]) # Daily reports from and including 23MAR2020 are downloaded from the JHU CSSE GIS and Data Github repository. 
# Daily-report URL pattern and the first date covered by the daily-report download.
DAILY_REPORT_TEMPLATE = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{month:02d}-{day:02d}-{year}.csv"
DAILY_REPORTS_START = datetime.date(year=2020, month=3, day=23)


def report_dates():
    """Return every date from 23MAR2020 up to (but excluding) today."""
    n_days = (datetime.datetime.now().date() - DAILY_REPORTS_START).days
    return [DAILY_REPORTS_START + datetime.timedelta(n) for n in range(int(n_days))]


def urls():
    """Return the daily-report CSV URL for every date in report_dates().

    Fixed: the original referenced `template` and `dates`, which were locals of
    retrieve_and_merge(), so calling urls() raised NameError.
    """
    return [DAILY_REPORT_TEMPLATE.format(month=dt.month, day=dt.day, year=dt.year) for dt in report_dates()]


def retrieve_and_merge():
    """Download all daily reports, concatenate them and unpivot the case columns.

    Returns a long-format DataFrame with one row per (location, date, case
    type), with JHU's post-22MAR2020 column names renamed back to the legacy
    `Country/Region` / `Province/State` / `Long` names.
    """
    frames = []
    for dt in report_dates():
        # keep_default_na=False keeps strings like "NA" as text
        df = pd.read_csv(DAILY_REPORT_TEMPLATE.format(year=dt.year, month=dt.month, day=dt.day), keep_default_na=False)
        df["Date"] = dt
        frames.append(df)
    # One pd.concat instead of DataFrame.append inside the loop: append was
    # removed in pandas 2.0 and the repeated-append pattern is quadratic.
    res = pd.concat(frames, ignore_index=True)
    return res.melt(
        id_vars=[col for col in res.columns if col not in ["Confirmed", "Deaths", "Recovered", "Active"]],
        var_name="Case_Type",
        value_name="Cases",
    ).drop(["Last_Update"], axis=1).rename(
        columns={"Long_": "Long", "Country_Region": "Country/Region", "Province_State": "Province/State"}
    )


# ## Drop `Active` and `Recovered` from 22MAR2020 onwards
# As [JHU no longer reports `Active` and `Recovered` from 22MAR2020 onwards](https://github.com/starschema/COVID-19-data/issues/78), these can be dropped.

df = retrieve_and_merge()
df = df[~df["Case_Type"].isin(["Active", "Recovered"])]
df["Date"] = pd.to_datetime(df["Date"])
df = df.rename(columns={"Admin2": "County"})

cldf_us = df.loc[df["Country/Region"] == "US"]
cldf_nonus = df.loc[df["Country/Region"] != "US"]

# We filter the county-level data set for state data only to prevent DQ issues in JHU inputs that account for 'Recovered'/'Active' as states.
cldf_us = cldf_us[cldf_us["Province/State"].isin([s.name for s in pycountry.subdivisions.get(country_code="US")])]

# ## Data Quality
# We use `pycountry` to resolve geographies.
# A number of states have inconsistent naming or special characters, such as `Taiwan*`. These are normalised through a replacement `dict` with ISO3166-1 compliant names. Data is then aggregated for each division by date and case type.
# +
# Canonical country names: JHU inputs use several spellings per country; map
# them all onto ISO3166-1 compliant names before aggregation.
changed_names = {
    "Holy See": "Holy See (Vatican City State)",
    "Vatican City": "Holy See (Vatican City State)",
    "Hong Kong SAR": "Hong Kong",
    "Iran (Islamic Republic of)": "Iran, Islamic Republic of",
    "Iran": "Iran, Islamic Republic of",
    "Macao SAR": "Macao",
    "Macau": "Macao",
    "Republic of Korea": "Korea, Republic of",
    "South Korea": "Korea, Republic of",
    "Korea, South": "Korea, Republic of",
    "Republic of Moldova": "Moldova, Republic of",
    "Russia": "Russian Federation",
    "Saint Martin": "Sint Maarten (Dutch part)",
    "St. Martin": "Sint Maarten (Dutch part)",
    "Taipei and environs": "Taiwan, Province of China",
    "Vietnam": "Viet Nam",
    "occupied Palestinian territory": "Palestine, State of",
    "West Bank and Gaza": "Palestine, State of",
    "Taiwan*": "Taiwan, Province of China",
    "Congo (Brazzaville)": "Congo",
    "Congo (Kinshasa)": "Congo, The Democratic Republic of the",
    "Gambia, The": "Gambia",
    "The Gambia": "Gambia",
    "Tanzania": "Tanzania, United Republic of",
    "US": "United States",  # fixed: the original listed this key twice
    "Curacao": "Curaçao",
    "Brunei": "Brunei Darussalam",
    "Cote d'Ivoire": "Côte d'Ivoire",
    "Moldova": "Moldova, Republic of",
    "The Bahamas": "Bahamas",
    "Venezuela": "Venezuela, Bolivarian Republic of",
    "Bolivia": "Bolivia, Plurinational State of",
    "East Timor": "Timor-Leste",
    "Cape Verde": "Cabo Verde",
    "Laos": "Lao People's Democratic Republic",
    "Burma": "Myanmar",
}


def normalize_names(df):
    """Normalise country names, coerce Cases to int and aggregate per division.

    Groups by country/state/date/case type, summing case counts and keeping
    the first Lat/Long seen for each group.
    """
    df["Country/Region"] = df["Country/Region"].replace(changed_names)
    # keep_default_na=False leaves empty strings in Cases; treat them as zero
    df["Cases"] = df["Cases"].replace('', 0).astype(int)
    return df.groupby(
        by=["Country/Region", "Province/State", "Date", "Case_Type"], as_index=False
    ).agg({"Cases": "sum", "Long": "first", "Lat": "first"})
# -

cldf_nonus = normalize_names(cldf_nonus)
cldf_us["Country/Region"] = "United States"

# ## Normalize cruise ship names

cldf_nonus.loc[cldf_nonus["Country/Region"] == "Diamond Princess", "Province/State"] = "Diamond Princess"
cldf_nonus.loc[cldf_nonus["Country/Region"] == "Diamond Princess", "Country/Region"] = "Cruise Ship"

# ## Adding ISO3166-1 and ISO3166-2 identifiers
# To facilitate easy recognition, ISO3166-1 identifiers are added to all countries and ISO3166-2 identifiers are added where appropriate. This is the case where subregional data exists:
#
# * Australia
# * Canada
# * France (`France` for metropolitan France, separate regions for DOM/TOMs
# * PRC
# * US
# * UK (the `UK` province identifier encompasses only Great Britain and Northern Ireland, other dependencies reporting to the UK authorities are separate subdivisions)
# * The Kingdom of the Netherlands (`Netherlands` encompasses the constituent country of the Netherlands, and the other constituent countries register cases as separate provinces of the Kingdom of the Netherlands)


def resolve_iso3166_1(row):
    """Attach the ISO3166-1 alpha-2 code for the row's country.

    "Cruise Ship" is not a country; it (and any unresolvable name) gets an
    empty code. Fixed: the original used `is not "Cruise Ship"`, an identity
    comparison that is effectively always True for strings coming out of a
    DataFrame (and a SyntaxWarning on modern Python); value equality (!=) is
    intended. Also hoists the duplicated pycountry.countries.get call.
    """
    row["ISO3166-1"] = ""
    if row["Country/Region"] != "Cruise Ship":
        country = pycountry.countries.get(name=row["Country/Region"])
        if country:
            row["ISO3166-1"] = country.alpha_2
    return row


cldf_nonus = cldf_nonus.apply(resolve_iso3166_1, axis=1)
cldf_us["ISO3166-1"] = "US"

# We then encode level 2 IDs:

# +
# NOTE(review): "GUA" (Guadeloupe) and "CHA" (Channel Islands) are not ISO3166-2
# codes (ISO uses GP; the Channel Islands have none) — kept as-is; confirm
# downstream consumers expect these values.
fr_subdivisions = {"France": "FR", "French Guiana": "GF", "French Polynesia": "PF", "Guadeloupe": "GUA", "Mayotte": "YT", "Reunion": "RE", "Saint Barthelemy": "BL", "St Martin": "MF"}
nl_subdivisions = {"Netherlands": "NL", "Aruba": "AW", "Curacao": "CW"}
cn_subdivisions = {
    'Jilin': 'CN-JL', 'Xizang': 'CN-XZ', 'Anhui': 'CN-AH', 'Jiangsu': 'CN-JS',
    'Yunnan': 'CN-YN', 'Beijing': 'CN-BJ', 'Jiangxi': 'CN-JX', 'Zhejiang': 'CN-ZJ',
    'Chongqing': 'CN-CQ', 'Liaoning': 'CN-LN', 'Fujian': 'CN-FJ', 'Guangdong': 'CN-GD',
    'Inner Mongolia': 'CN-NM', 'Gansu': 'CN-GS', 'Ningxia': 'CN-NX', 'Guangxi': 'CN-GX',
    'Qinghai': 'CN-QH', 'Guizhou': 'CN-GZ', 'Sichuan': 'CN-SC', 'Henan': 'CN-HA',
    'Shandong': 'CN-SD', 'Hubei': 'CN-HB', 'Shanghai': 'CN-SH', 'Hebei': 'CN-HE',
    'Shaanxi': 'CN-SN', 'Hainan': 'CN-HI', 'Shanxi': 'CN-SX', 'Tianjin': 'CN-TJ',
    'Heilongjiang': 'CN-HL', 'Hunan': 'CN-HN', 'Xinjiang': 'CN-XJ', 'Tibet': "CN-XZ",
}
uk_subdivisions = {"United Kingdom": "UK", "Cayman Islands": "KY", "Channel Islands": "CHA", "Gibraltar": "GI", "Montserrat": "MS"}

subdivisions = {
    "AU": {subdivision.name: subdivision.code.replace("AU-", "") for subdivision in pycountry.subdivisions.get(country_code="AU")},
    "CA": {subdivision.name: subdivision.code.replace("CA-", "") for subdivision in pycountry.subdivisions.get(country_code="CA")},
    "US": {subdivision.name: subdivision.code.replace("US-", "") for subdivision in pycountry.subdivisions.get(country_code="US")},
    "GB": uk_subdivisions,
    "CN": cn_subdivisions,
    "NL": nl_subdivisions,
    "FR": fr_subdivisions,
}
# -

# +
countries_with_subdivisions = list(subdivisions.keys())


def resolve_iso3166_2(row):
    """Attach the ISO3166-2 subdivision code where subdivisions are tracked."""
    if row["ISO3166-1"] in countries_with_subdivisions:
        # dict.get returns None for unknown provinces, flagging unmapped names
        row["ISO3166-2"] = subdivisions[row["ISO3166-1"]].get(row["Province/State"])
    else:
        row["ISO3166-2"] = ""
    return row
# -

cldf_us = cldf_us.apply(resolve_iso3166_2, axis=1)
cldf_nonus = cldf_nonus.apply(resolve_iso3166_2, axis=1)

# ## Fixing county name inconsistencies
#
# See [Issue #128](https://github.com/starschema/COVID-19-data/issues/128#issue-590293662) and [Issue #145](https://github.com/starschema/COVID-19-data/issues/145) for details.
# County-name remappings for inconsistently reported US counties.
# NOTE(review): the "<NAME>a" -> "<NAME>" entry is an anonymisation artefact in
# this copy of the file — restore the real county names from upstream.
county_remappings = {
    "Walla Walla County": "Walla Walla",
    "<NAME>a": "<NAME>",
    "Elko County": "Elko",
    "Washington County": "Washington",
}

cldf_us["County"] = cldf_us["County"].replace(county_remappings)

# +
# Reference table mapping (state ISO3166-2 code, county name) -> FIPS/lat/long.
fips_mapping = pd.read_csv("https://s3-us-west-1.amazonaws.com/starschema.covid/US_County_FIPS_Mapping.csv", index_col=["ISO3166_2","COUNTY"])


def add_missing_fips(row):
    """Fill a row's missing FIPS/Lat/Long from the county reference table.

    Leaves the row untouched when nothing is missing or the county is unknown.
    """
    if row["FIPS"] == "" or row["Lat"] == "" or row["Long"] == "":
        key = (row["ISO3166-2"], row["County"])
        if key in fips_mapping.index:
            # one .loc lookup per row instead of the original three
            mapped = fips_mapping.loc[key]
            row["FIPS"] = mapped["FIPS"]
            row["Lat"] = mapped["LATITUDE"]
            row["Long"] = mapped["LONGITUDE"]
    return row
# -

cldf_us = cldf_us.apply(add_missing_fips, axis=1)
cldf_us["Lat"] = pd.to_numeric(cldf_us["Lat"])
cldf_us["Long"] = pd.to_numeric(cldf_us["Long"])

# Aggregate county-level rows (summing cases, averaging coordinates) and glue
# them back onto the state-level rows. pd.concat replaces the deprecated
# DataFrame.append (removed in pandas 2.0).
cldf_us = pd.concat(
    [
        cldf_us[cldf_us["County"] == ""],
        cldf_us[cldf_us["County"] != ""].groupby([
            "County", "Province/State", "Country/Region", "Date", "Case_Type", "ISO3166-1", "ISO3166-2"
        ]).agg({
            "Cases": "sum",
            "Lat": "mean",
            "Long": "mean",
            "FIPS": "first"
        }).reset_index(),
    ],
    sort=True,
)

# ## Calculating case changes

# Sort so that the per-group shift below compares consecutive dates.
cldf_nonus = cldf_nonus.sort_values(by=["Country/Region", "Province/State", "Case_Type", "Date"], ascending=True)
cldf_us = cldf_us.sort_values(by=["Country/Region", "Province/State", "County", "Case_Type", "Date"], ascending=True)

# Prepend the cached pre-23MAR2020 history for non-US geographies.
cldf_nonus = pd.concat([pre_2203_data[pre_2203_data["Country/Region"] != "US"], cldf_nonus], sort=True)

# Day-over-day change per location and case type.
cldf_nonus["Difference"] = cldf_nonus["Cases"] - cldf_nonus.groupby(["Country/Region", "Province/State", "Case_Type"])["Cases"].shift(periods=1)
cldf_us["Difference"] = cldf_us["Cases"] - cldf_us.groupby(["Country/Region", "Province/State", "County", "Case_Type"])["Cases"].shift(periods=1)

result = pd.concat([cldf_nonus, cldf_us])

# The US series starts on 2020-01-22, so its first Difference is the case count itself.
us_first_day = (result["Date"] == "2020-01-22") & (result["Country/Region"] == "United States")
result.loc[us_first_day, "Difference"] = result[us_first_day]["Cases"]

# Drop all records with 0 case and 0 differences. We do not need records prior any `Case_Type` events.
result = result[~(result.Cases.eq(0) & result.Difference.eq(0))]

result.groupby(["Date","Case_Type"]).sum()

# ## Adding timestamp
# Before we save the file locally, we add the `Last_Update_Date` in `UTC` time zone.

result["Last_Update_Date"] = datetime.datetime.utcnow()

# ## Output
# Finally, we store the output in the `output` folder as `JHU_COVID-19.csv` as an unindexed CSV file.

result.to_csv(output_folder + "JHU_COVID-19.csv", index=False, columns=["Country/Region", "Province/State", "County", "FIPS", "Date", "Case_Type", "Cases", "Long", "Lat", "ISO3166-1", "ISO3166-2", "Difference", "Last_Update_Date"])
notebooks/JHU_COVID-19.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="ose3gvP7TTTP" # # 11 Text Preprocessing and Augmentation # # In this notebook, we will work with text data. Firstly, we will learn how to perform preprocessing and visualization on text data. Then, we will try to adopt data augmentation on text data. The Enron email datasets will be used here to demonstrate how text mining/NLP techniques could be used for fraud analysis. Through the whole example, we will mainly use the following two NLP libraries: # # 1. [Texthero](https://pypi.org/project/texthero/) # # 2. [nlpaug](https://nlpaug.readthedocs.io/en/latest/) # + [markdown] id="GAFz86HDN1vR" # # 1. Data Background # # In 2000, [Enron](https://en.wikipedia.org/wiki/Enron) was one of the largest companies in the United States. By 2002, it had collapsed into bankruptcy due to widespread corporate fraud. In the resulting Federal investigation, a significant amount of typically confidential information entered into the public record, including tens of thousands of emails and detailed financial data for top executives. # # # The Enron fraud is the largest case of corporate fraud in American history. Founded in 1985, Enron Corporation went bankrupt by end of 2001 due to widespread corporate fraud and corruption. Before its fall, Fortune magazine had named Enron "America's most innovative company" for six consecutive years. So what happened? Who were the culprits? # # In this notebook, we are going to work with emails corpus from Enron employees. We will learn how to analyze text data for fraud analysis. 
# + id="DjHGFkFkOX9_"
basefn = "data//"  # directory that holds the Enron email CSV

import pandas as pd

df_corpus = pd.read_csv(basefn + "enron_emails_clean.csv")

# + colab={"base_uri": "https://localhost:8080/", "height": 197} id="WlRbu7X9PMFu" outputId="e692e83f-198c-400d-f3d9-87072bad8129"
df_corpus.head()

# + [markdown] id="LKq5EJQh32BH"
# #### Exact Word Match
# One simple approach to analyze text data is keyword based query. For example, look for any emails mentioning 'money'. Here, the query word could be any informative words.

# + colab={"base_uri": "https://localhost:8080/", "height": 137} id="KPFXfkLZSMf3" outputId="79542927-01d6-44fa-b338-28eac473b7f5"
# Select data that matches; na=False treats emails with missing content as non-matching
df_corpus.loc[df_corpus['content'].str.contains('money', na=False)].head(3)

# + [markdown] id="o3avab0C4ljL"
# Usually you want to search more than one term. For example, in fraud analysis, you may prepare a full **fraud word list** including terms that could potentially flag fraudulent clients and/or transactions.
#
# Here, we create a list containing the following words/terms:
#
# * 'enron stock'
# * 'sell stock'
# * 'stock bonus'
# * 'sell enron stock'.

# + colab={"base_uri": "https://localhost:8080/", "height": 107} id="NVUtvC0z4k98" outputId="363d5b22-19f3-4a46-b6d9-9957434a17a3"
# Create a list of terms to search for
searchfor = ['enron stock', 'sell stock', 'stock bonus', 'sell enron stock']

# '|'.join(searchfor) builds a regex alternation matching any of the terms
filtered_emails = df_corpus.loc[df_corpus['content'].str.contains('|'.join(searchfor), na=False)]

filtered_emails.head(2)

# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="JebZEPd65LAM" outputId="99b0bece-d805-47ce-96b1-3bfe813d69cb"
print("Number of returned fraud emails is {}".format(filtered_emails.shape[0]))

# + [markdown] id="okTd2eKQXcvh"
# The recall rate is quite low because the search keyword has to be exactly identical to the words in the emails to be found. For example, the email containing "SELL stock" will not be counted. In the following, we will use text preprocessing techniques from **texthero** to improve the recall rate.

# + [markdown] id="i45QsTidYhwd"
# # 2. Texthero
#
# <img src="https://texthero.org/img/T.png" alt="logo" style="width: 50px;"/>
#
# Texthero is a simple Python toolkit that helps you work with a text-based dataset. It provides quick and easy functionalities that let you preprocess, represent, map into vectors and visualize text data in just a couple of lines of code.
#
# Texthero is designed to be used on top of **pandas**, so it makes it easier to preprocess and analyze text-based Pandas Series or Dataframes.
#
# If you are working on an NLP project, Texthero can help you get things done faster than before and gives you more time to focus on important tasks.
#
# NOTE: The Texthero library is still in the beta version. You might face some bugs and pipelines might change. A faster and better version will be released and it will bring some major changes.

# + [markdown] id="f4lOM1O9Z710"
# Install Package
# ```
# pip install texthero
# ```

# + [markdown] id="3L8P_8zV-Y3z"
# Texthero has four useful modules that handle different functionalities that you can apply in your text-based dataset.
#
# - Preprocessing
#
# This module allows for the efficient pre-processing of text-based Pandas Series or DataFrames. It has different methods to clean your text dataset such as lowercase(), remove_html_tags() and remove_urls().
#
# - NLP
#
# This module has a few NLP tasks such as named_entities, noun_chunks, and so on.
#
# - Representation
#
# This module has different algorithms to map words into vectors such as TF-IDF, GloVe, Principal Component Analysis(PCA), and term_frequency.
#
# - Visualization
#
# The last module has three different methods to visualize the insights and statistics of a text-based Pandas DataFrame. It can plot a scatter plot and word cloud.
#
# In this section, we will focus on two parts: **preprocessing** and **Visualization**

# + id="Gg5J2jlA5noK"
import texthero as hero

# + [markdown] id="dZviD7sRwi3a"
# #### Text Preprocessing
#
# Texthero provides useful text preprocessing methods. For example,
# * Remove digits
# * Remove stopwords
# * Remove URLs
# * Tokenize
# * Remove HTML tags
#
# All the required inputs should be **Pandas series** or Pandas dataframe

# + colab={"base_uri": "https://localhost:8080/", "height": 52} id="KSSWP3LFxCFX" outputId="f96b12d5-9cf6-470d-9523-1cf395eac862"
# Strip every block of digits from the text.
sample = pd.Series("Hi my phone number is +86 12394 call me at 09:00 am")
digits_removed = hero.preprocessing.remove_digits(sample)
print(digits_removed)

# + colab={"base_uri": "https://localhost:8080/", "height": 52} id="fng8g9INxG-4" outputId="8ab538eb-1897-4c34-859e-23b49d274dec"
# Drop common English stop words.
sample = pd.Series("you need to know machine learning")
without_stopwords = hero.remove_stopwords(sample)
print(without_stopwords)

# + colab={"base_uri": "https://localhost:8080/", "height": 52} id="jiB4CSvDxIoL" outputId="435f1d55-8016-4a0e-f520-ea3d7848a8e1"
# Remove URLs embedded in the text.
sample = pd.Series("Go to https://spacy.io/ to read more articles you like")
without_urls = hero.remove_urls(sample)
print(without_urls)

# + colab={"base_uri": "https://localhost:8080/", "height": 52} id="E167An0HxMp2" outputId="30ec19db-063f-4904-9557-ca03c037c3ea"
# Split the text into a list of tokens.
sample = pd.Series(["You can think of Texthero as a tool to help you understand and work with text-based dataset. "])
tokenized = hero.tokenize(sample)
print(tokenized)

# + colab={"base_uri": "https://localhost:8080/", "height": 52} id="WrpWT02FxO9u" outputId="0b2921e2-d86d-4759-a0d2-eee5fee619ba"
# Strip HTML tags, keeping only the text content.
sample = pd.Series("<html><body><h2>hello world</h2></body></html>")
without_tags = hero.remove_html_tags(sample)
print(without_tags)

# + [markdown] id="Mc7UE26gFbHY"
# Texthero provide a simple interface named **clean()**.
#
# The clean() method runs seven functions when you pass a pandas series. These seven functions are:
#
# * lowercase(s): Lowercases all text.
# * remove_diacritics(): Removes all accents from strings.
# * remove_stopwords(): Removes all stop words.
# * remove_digits(): Removes all blocks of digits.
# * remove_punctuation(): Removes all string.punctuation (!"#$%&'()*+,-./:;<=>?@[]^_`{|}~).
# * fillna(s): Replaces unassigned values with empty spaces.
# * remove_whitespace(): Removes all white space between words
#
# Now we can see the cleaned news content.

# + id="pDFHTV6uFSF_"
# clean the news content by using clean method from hero package
df_corpus['clean_content'] = hero.clean(df_corpus['content'])

# + colab={"base_uri": "https://localhost:8080/", "height": 107} id="NkCBteGJPk1e" outputId="70efb8f7-b6d4-4fba-e560-73cc73b21b78"
# show unclean and clean news content side by side
df_corpus[['content','clean_content']].head(2)

# + [markdown] id="RegOU0iqwEG8"
# We can also modify the **clean()** function. Then, we can call the customized text clean functions.

# + id="D0SLqv8L57tW"
# create custom pipeline: only fill NAs, lowercase, strip whitespace and URLs
custom_pipeline = [hero.preprocessing.fillna,
                   hero.preprocessing.lowercase,
                   hero.preprocessing.remove_whitespace,
                   hero.preprocessing.remove_urls
                   ]

# + id="bsCAT1pcwTwY"
# Series.pipe feeds the raw content through hero.clean with the custom pipeline
df_corpus['clean_custom_content'] = df_corpus['content'].pipe(hero.clean, custom_pipeline)

# + [markdown] id="cksqGQPcbbCD"
# #### Search the email corpus again
#
# Here, we search the cleaned email corpus

# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="71PNxgRrxgCn" outputId="6bf27e94-d4de-497c-f2c8-6e8ac401e73a"
# Create a list of terms to search for
searchfor = ['enron stock', 'sell stock', 'stock bonus', 'sell enron stock']
filtered_emails = df_corpus.loc[df_corpus['clean_content'].str.contains('|'.join(searchfor), na=False)]
print("Number of returned fraud emails after text preprocessing is {}".format(filtered_emails.shape[0]))

# + [markdown] id="FucZrwt9SV7W"
# After text preprocessing, the recall rate is improved

# + [markdown] id="LlWSwRYRynRO"
# #### Visualization
#
# Then, let us explore some text visualization methods in texthero. Texthero contains different methods to visualize insights and statistics of a text-based Pandas DataFrame.
#
# * Top words
#
# If you want to know the top words in your text-based dataset, you can use the top_words() method from the visualization module. This method is useful if you want to see additional words that you can add to the stop words lists.
#
# * Wordclouds
#
# The wordcloud() method from the visualization module plots an image using WordCloud from the word_cloud package.

# + colab={"base_uri": "https://localhost:8080/", "height": 316} id="euExo_Shyi9X" outputId="8c300ef4-5407-4a1e-cd70-3f8ca72300ae"
import matplotlib.pyplot as plt

NUM_TOP_WORDS = 20
top_20 = hero.visualization.top_words(df_corpus.loc[:, 'clean_content']).head(NUM_TOP_WORDS)
top_20.plot.bar(rot=90, title="Top 20 words in corpus");
plt.show(block=True);

# + [markdown] id="gTE2w-An9IvU"
# #### Wordclouds
#
# The wordcloud() method from the visualization module plots an image using WordCloud from the word_cloud package.

# + colab={"base_uri": "https://localhost:8080/", "height": 574} id="SzIqzaWC9Jxg" outputId="10d3632d-35fe-4660-a37f-8436a3c522b6"
# Plot wordcloud image using WordCloud method
hero.wordcloud(df_corpus.loc[:, 'clean_content'], max_words=100)

# + [markdown] id="yFYudz_h4KWj"
# # 3. NLPAUG
#
# ![https://github.com/makcedward/nlpaug/blob/master/res/logo_small.png?raw=true](https://github.com/makcedward/nlpaug/blob/master/res/logo_small.png?raw=true)
#
# The more data we have, the better performance we can achieve. What is more, sampling more data from the minority class is one approach to address the imbalanced problem. However, it is very costly to annotate large amounts of training data. And in some applications, including fraud detection, it is impossible to obtain lots of data labeled as fraud. Therefore, proper data augmentation is useful to boost up your model performance.
#
# Due to the high complexity of language, it is more challenging to augment text compared to images, which can simply be cropped. Here, we will explore the library named nlpaug. This python library helps you with augmenting nlp for your machine learning projects.
#
# Provided Features listed as:
#
# 1. Generate synthetic data for improving model performance without manual effort
# 2. Simple, easy-to-use and lightweight library. Augment data in 3 lines of code
# 3. Plug and play to any neural network frameworks (e.g. PyTorch, TensorFlow)
# 4. Support textual and audio input

# + [markdown] id="AJITp4XqmxnD"
# Install Package

# + id="zC3qxxKsmz1-"
# ! pip install nlpaug

# + colab={"base_uri": "https://localhost:8080/", "height": 70} id="-tIzZTPAb_Ra" outputId="252d6cb5-dd93-479b-d0e3-a8f37fa05426"
# Tag emails matching the fraud terms (1 = suspected fraud, 0 = other)
df_corpus['Tag'] = 0
df_corpus.loc[df_corpus['clean_content'].str.contains('|'.join(searchfor), na=False), 'Tag'] = 1
df_corpus['Tag'].value_counts()

# + [markdown] id="oP1jysdcnrIc"
# This library nlpaug provides various textual augmenter functions including character augmenter, word augmenter and sentence augmenter.
#
# In this section, we will only explore word-level augmentation based on [WordNet](https://wordnet.princeton.edu/): substitute word by WordNet's synonym.
#
# You may find other frameworks [here](https://github.com/makcedward/nlpaug/blob/master/example/textual_augmenter.ipynb)

# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="plaiZiyw4ZcX" outputId="f55d5d81-eecb-4255-927a-0207ef412f05"
# Work only with the emails tagged as fraud-related above.
texts = df_corpus.loc[df_corpus.Tag==1, 'clean_content'].tolist()
# for better visualization, find the shortest email
# (key=len replaces the redundant `lambda word: len(word)`)
short_email = min(texts, key=len)
short_email

# + id="ZAJuzfsylvbZ"
import nlpaug.augmenter.word as naw

# + [markdown] id="6iJSybUeoiir"
# #### Install WordNet

# + colab={"base_uri": "https://localhost:8080/", "height": 87} id="TlsNImQWl8k3" outputId="16a3f12c-6fab-4e99-ff43-db432e52786f"
import nltk
nltk.download('averaged_perceptron_tagger')

# + colab={"base_uri": "https://localhost:8080/", "height": 158} id="l5I97N1N6DVK" outputId="2f6fc2fe-7532-411a-fc82-4dc3246bab42"
# Substitute words with WordNet synonyms.
aug = naw.SynonymAug(aug_src='wordnet')
augmented_texts = aug.augment(short_email, 5)  # 5 is the number of generated text
print("Original:")
print(short_email)
print("Augmented Texts:")
# iterate the list directly instead of indexing via range(len(...))
for augmented in augmented_texts:
    print(augmented)
11_Text Preprocessing and Augmentation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Controle de versão # + [markdown] slideshow={"slide_type": "slide"} # ## Objetivos # # - Entender o que um Sistema de Controle Versões (SCV) e o versionamento de código # - Compreender benefícios de SCVs para arquivos diversos (reprodutibilidade, auditabilidade, compartilhamento, entre outros) # - Ter uma visão geral sobre os SCVs mais comuns # - Entender o funcionamento do SCV _git_, bem como o workflow _stage_- _commit_- _push_. # - Compreender comandos básicos para criação e gerenciamento de repositórios locais e remotos # + [markdown] slideshow={"slide_type": "slide"} # ## Introdução # + [markdown] slideshow={"slide_type": "subslide"} # > Controle de versão é a _abordagem sistemática para registrar alterações realizadas em um arquivo ou conjunto de arquivos ao longo do tempo_. # # - Daremos uma visão geral sobre _sistemas de controle de versão_ (SCVs), do inglês _control version systems_; # - Meios eficientes de gerir arquivos em repositórios remotos; # - Noções de _Git_. # + [markdown] slideshow={"slide_type": "subslide"} # ## Benefícios de um SCV # # - Reprodutibilidade # # - Auditabilidade # # - Historicidade # # - Sincronia # # - Compartilhamento # + [markdown] slideshow={"slide_type": "subslide"} # ## Exemplos de SCVs # # Podem ser baseados em interface gráfica ou em linha de comando. Exemplos: # # - _Google Drive_: suíte de soluções bem conhecida; # - _Dropbox_: repositório de arquivos com versionamento automático; # - _Overleaf_: ferramenta para escrever artigos científicos com sistema embutido de versão; # - _HackMD_: ferramenta colaborativa para documentação. 
# + [markdown] slideshow={"slide_type": "subslide"} # - _Bazaar_ # - _Git_ # - _Mercurial_ # - _Subversion_ # - _SVN_ # + [markdown] slideshow={"slide_type": "subslide"} # Neste curso, aprenderemos sobre o _Git_. O _Git_ surgiu em 2005 para ser um sistema SCV: # # - rápido; # - eficiente; # - com suporte ao desenvolvimento não-linear (_multibranching_); # - completamente distribuído e # - capaz de manipular grandes projetos. # + [markdown] slideshow={"slide_type": "subslide"} # ## Processo de versionamento # # O _workflow_ de um SCV ocorre, em geral, nas seguintes etapas básicas: # # 1. Criar arquivos # 2. Manipular arquivos (alterar, deletar, adicionar) # 3. Atribuir o estado (_status_) dos arquivos # + [markdown] slideshow={"slide_type": "subslide"} # - A atribuição do _status_ é o que se define por _versão_ em um determinado tempo. # - Nos SCVs populares, versão é também chamada de _checkpoint_, _commit_ ou _time-point_. # - Para o _Git_, o _commit_ é a submissão de uma modificação de arquivo (nova versão de si). # + [markdown] slideshow={"slide_type": "subslide"} # ### _Masters_ e _branches_ # # O desenvolvimento de códigos pode ser ilustrado como uma planta (caule + ramos). # # - Caule (_master_): "linha-mestra" do código; # - Ramos (_branches_): são as melhorias (_features_) que incorporamos ao código com o tempo. # + [markdown] slideshow={"slide_type": "subslide"} # - A ideia de uma ou mais _branches_ é permitir que um programador trabalhe em múltiplas _features_ ao mesmo tempo e decida, posteriormente, se as _branches_ poderão ser fundidas na _master_ ou não. # + [markdown] slideshow={"slide_type": "subslide"} # - Este processo é conhecido como "fusão" (_merge_). # - A _master_, por si, é chamada de _master branch_ => desenvolvimento _linear_. # - Quando _branches_ adicionais coexistem => desenvolvimento _não-linear_. 
# + [markdown] slideshow={"slide_type": "subslide"} # ### _Master branch_ # # <img src="../figs/04/branch-linear.png"> # + [markdown] slideshow={"slide_type": "subslide"} # ### _Branches_ # # <img src="../figs/04/branch-nonlinear.png"> # + [markdown] slideshow={"slide_type": "subslide"} # ## Usando o _git_ # # Para começar, apontemos uma sutil diferença: # # - _Git_: é um SCV para gerenciar o histórico de códigos-fonte. # - _GitHub_: serviço de hospedagem para repositórios _Git_. # + [markdown] slideshow={"slide_type": "subslide"} # - Enquanto outros SCVs lidam com as "mudanças" feitas em cada arquivo ao longo do tempo, o _Git_ trata-os como um conjunto de imagens de um sistema de arquivos em miniatura. # + [markdown] slideshow={"slide_type": "subslide"} # - Toda vez que alteramos versões (_commit_), o _Git_ basicamente "tira uma foto de todos os arquivos e armazena uma referência para esse conjunto de arquivos". # + [markdown] slideshow={"slide_type": "subslide"} # - Para ser eficiente, se os arquivos não são alterados, o _Git_ não armazena o arquivo novamente, mas um link para o arquivo idêntico anterior já armazenado. Então, o _Git_ lida com _fluxo de estado dos arquivos_. # + [markdown] slideshow={"slide_type": "subslide"} # <img src="../figs/04/scv-delta.png" > # # Fluxo dos arquivos no tempo para um SCV usual e para o _Git_. # + [markdown] slideshow={"slide_type": "subslide"} # <img src="../figs/04/scv-git.png" > # # Armazenamento de arquivos como _snapshots_ (estado dos arquivos"). Fonte: Pro Git Book. # + [markdown] slideshow={"slide_type": "subslide"} # ### Estados de arquivos # # - Um repositório _git_ pode ser criado por inicialização em um diretório ou por clonagem de um repositório existente. # - O processo geral do _Git_ é baseado em três etapas: _Alterar > Preparar > Enviar_. 
# + [markdown] slideshow={"slide_type": "subslide"} # - Em um diretório local, arquivos são criados, modificados, então admitidos a uma área de preparação (_stage area_) e, posteriormente, enviados (_commit_) a um estado de "nova versão". # - Para submetê-los a um repositório remoto, "empurramos" (_push_) os arquivos para sincronizá-los com o repositório local. # + [markdown] slideshow={"slide_type": "subslide"} # ### Comandos básicos # # - Para inicializar um novo repositório: # # ``` # git init # ``` # + [markdown] slideshow={"slide_type": "subslide"} # - Para adicionar arquivos à área de preparação: # # ``` # git add # ``` # + [markdown] slideshow={"slide_type": "subslide"} # - Para enviar arquivos à nova versão: # # ``` # git commit # ``` # + [markdown] slideshow={"slide_type": "subslide"} # - Para saber o estado dos arquivos: # # ``` # git status # ``` # + [markdown] slideshow={"slide_type": "subslide"} # - Para gerar o histórico de mudanças: # # ``` # git log # ``` # + [markdown] slideshow={"slide_type": "subslide"} # <img src="../figs/04/git-commands.png"> # # Quadro de estado de arquivos e comandos do _Git_. # # + [markdown] slideshow={"slide_type": "subslide"} # ### Boas práticas do _commit_ # # - Escrever mensagem: # # ```none # git commit -m "Modificação realizada" # ``` # + [markdown] slideshow={"slide_type": "subslide"} # - Configurar editor preferido # # ```none # git config --global core.editor "editor_preferido" # ``` # + [markdown] slideshow={"slide_type": "subslide"} # As mensagens de um _commit_ devem seguir algumas diretrizes: # # 1. Dar significado: escreva mensagens objetivas, diretas e significativas. # 2. Resumir as alterações: um modelo sugerido é: # - 1a. linha: escreva um título com até 50 caracteres. # - 2a. linha: espaçamento. # - 3a. linha em diante: corpo da mensagem contendo o detalhamento das alterações. # + [markdown] slideshow={"slide_type": "subslide"} # 3. 
Usar o tempo verbal no presente: em geral, usamos o tempo verbal no indicativo ou no imperativo, ou mesmo gerúndio para especificar o título de um _commit_. Por exemplo: # - Fazendo algo... (_Doing, adding, including this_...) # - Faz algo... (_Do, add, include this_...) # + [markdown] slideshow={"slide_type": "subslide"} # # 4. Especificar arquivos: use `git add arq1 arq2 ...` para especificar os arquivos específicos relacionados ao _commit_. # # - Em Python, podemos usar _len('titulo')_ para contar se os o objeto _str_ 'titulo' possui 50 caracteres ou menos. # + [markdown] slideshow={"slide_type": "subslide"} # Exemplificamos abaixo dois tipos de mensagens que podem acompanhar um _commit_: # # - Mensagem inadequada # # ```none # "Estou alterando o sinal de + para -." # ``` # + [markdown] slideshow={"slide_type": "subslide"} # - Mensagem adequada # # ```none # "Corrige operação aritmética. # # Corrige símbolo de adição ('+') para subtração ('-') # na expressão 'a + b' da fórmula utilizada no cálculo do XLMX." # ``` # + [markdown] slideshow={"slide_type": "subslide"} # Em inglês, uma mensagem equivalente seria: # # ```none # "Fix arithmetic operator. # # Replaces symbol '+' with '-' at the expression 'a + b' in the formula to compute the XLMX. # ``` # + [markdown] slideshow={"slide_type": "subslide"} # - Para ignorar arquivos de serem rastreados, criamos um arquivo chamado `.gitignore` e neles acrescentamos todos os demais que não devem ser rastreados. # + [markdown] slideshow={"slide_type": "subslide"} # - Por exemplo, as instruções abaixo servem para criar o arquivo `.gitignore` e adicionar o arquivo _ignore.txt_ para não ser rastreável. 
# # ```bash # touch .gitignore; echo "ignore.txt" > .gitignore; cat .gitignore # ``` # + [markdown] slideshow={"slide_type": "subslide"} # ### Recuperação e comparação # # - Para criar um novo _commit_ que reverte as mudanças feitas na última versão a um estado anterior: # # ```none # git revert HEAD # ``` # - A partir do histórico de mudanças obtido por `git log`, podemos reverter o projeto inteiro para uma versão anterior especificando o SHA do projeto: # + [markdown] slideshow={"slide_type": "subslide"} # ```none # git checkout SHA # ``` # - No caso de recuperar a versão anterior de um único arquivo, especificamos o arquivo com: # + [markdown] slideshow={"slide_type": "subslide"} # ```none # git checkout SHA -- file # ``` # + [markdown] slideshow={"slide_type": "subslide"} # # [[SHA]](https://en.wikipedia.org/wiki/Secure_Hash_Algorithms) (_Secure Hash Algorithm_) é um conjunto de algoritmos desenvolvidos pelo americano National Institute of Standards and Technology (NIST) para encriptar arquivos e atribuir-lhes uma identidade para finalidades de verificação. SHA é um tipo de CHF (_cryptographic hash function_). # + [markdown] slideshow={"slide_type": "subslide"} # **Exemplo:** 2 _commits_ realizados em um projeto. # - O ponteiro (_HEAD_) mostra que o _commit_ com SHA `553ae56fb180ff630ba4026cbc1deb212e4efd42` é o estado mais atual do projeto. # - Para retornar o projeto ao estado imediatamente anterior, podemos fazer `git checkout 431d0e589e7366dc21d0e18144f72b67d6b4a148`. # + [markdown] slideshow={"slide_type": "subslide"} # ```none # commit 553ae56fb180ff630ba4026cbc1deb212e4efd42 (HEAD -> master) # Author: <NAME> <<EMAIL>> # Date: Tue Mar 16 16:06:50 2021 -0300 # # Fix ex5. Required to modify convertInputUnits to see pressure. # # commit <PASSWORD> # Author: <NAME> <<EMAIL>> # Date: Tue Mar 16 14:24:03 2021 -0300 # # ex5 removed files. 
# ``` # + [markdown] slideshow={"slide_type": "subslide"} # - Para comparar duas versões de arquivo podemos usar # # ``` # git diff # ``` # Entender o que está sendo comparado # - observar as indicações # - Comparar somente arquivos relevantes # + [markdown] slideshow={"slide_type": "subslide"} # ### Trabalhando com _branches_ # # - _Branches_ são abertas para incorporar novos elementos, funcionalidades ou componentes em um projeto de maneira a não interferir no tronco do projeto, que deve permanecer estável ao longo do tempo. # - São também usadas para evitar rupturas acidentais que danifiquem a operação normal das componentes de projeto. # + [markdown] slideshow={"slide_type": "subslide"} # - Para criar uma nova _branch_, usamos: # # ```none # git checkout -b nome_da_branch # ``` # + [markdown] slideshow={"slide_type": "subslide"} # - Para navegar entre as _branches_: # # ```none # git checkout nome_da_branch # ``` # + [markdown] slideshow={"slide_type": "subslide"} # - Para listar todas as _branches_ existentes: # # ```none # git branch # ``` # + [markdown] slideshow={"slide_type": "subslide"} # - Para deletar completamente uma branch: # # ```none # git branch -D nome_da_branch # ``` # + [markdown] slideshow={"slide_type": "subslide"} # ### Fusão # # - Para integrar as modificações propostas em uma _branch_ na _branch master_ realizamos uma espécie de "fusão". 
# - Por exemplo, para fundir uma _branch_ de origem, digamos _branch_A_, em outra de destino, digamos _branch_B_, primeiro devemos levar o ponteiro para a origem _branch_A_ com: # + [markdown] slideshow={"slide_type": "subslide"} # ```none # git checkout branch_A # ``` # Em seguida, a fundimos na _branch_B_ com: # # ```none # git merge branch_B # ``` # + [markdown] slideshow={"slide_type": "subslide"} # #### Conflitos # # - Se, por exemplo, um mesmo arquivo for alterado na _branch_A_, mas não na _branch_B_, ao tentar fundi-las, uma incompatibilidade existirá, de maneira que um erro por conflito de versões será produzido. # # - Para corrigir incompatibilidades, as partes modificadas do arquivo devem ser equalizadas ou substituídas por um novo arquivo. Em seguida, um novo _commit_ deve ser submetido. Eventualmente, para descatar uma fusão, usamos: # # ```none # git merge --abort # ``` # + [markdown] slideshow={"slide_type": "subslide"} # ##### Ferramentas para resolver conflitos # # Algumas ferramentas que ajudam a resolver conflitos gerados por _merge_ estão disponíveis no mercado, tais como # # - [KDiff3](http://kdiff3.sourceforge.net/) # - [Beyond Compare](https://www.scootersoftware.com/) # - [Meld](http://meldmerge.org/) e # - [P4Merge](https://www.perforce.com/products/helix-core-apps/merge-diff-tool-p4merge). # + [markdown] slideshow={"slide_type": "subslide"} # Para definir a ferramenta padrão para fusão, fazemos: # # ```none # git config --global merge.tool name_of_the_tool # ``` # e a lançamos com: # # ```none # git mergetool # ``` # + [markdown] slideshow={"slide_type": "subslide"} # #### Boas práticas # # Para trabalhar efetivamente com _branches_, algumas boas práticas recomendadas são: # # - Manter a _master_ limpa; # - Adicionar apenas uma nova _feature_ por _branch_; # - Usar nomes razoáveis para as _branches_. 
# + [markdown] slideshow={"slide_type": "subslide"} # ### Integração com repositórios # # Repositórios remotos são hoje amplamente utilizados como meios de hospedagem e compartilhamento de código, entre os quais estão o _GitHub_, _GitLab_ e o _Bitbucket_. Todavia, é possível que qualquer um crie seu próprio repositório em um servidor web (veja, por exemplo, este [link](https://opensource.com/life/16/8/how-construct-your-own-git-server-part-6)). # # - A seguir, damos exemplos de como utilizar o _GitHub_. Para realizar a comunicação entre um repositório local com seu homólogo remoto, ou vice-versa. # + [markdown] slideshow={"slide_type": "subslide"} # Para clonar um repositório remoto localmente, o comando se parece com: # # ``` # git clone https://github.com/user/repo.git # ``` # # onde _user_ é o nome do usuário no _GitHub_ e _repo_ é o nome do repositório. # + [markdown] slideshow={"slide_type": "subslide"} # - Para atualizar um repositório local tomando como base um remoto: # # ``` # git pull # ``` # Neste caso, é necessário que o histórico de modificações também seja compatível entre ambos. # + [markdown] slideshow={"slide_type": "subslide"} # - Para submeter versões após um _commit_ para o repositório remoto, utilizamos # # ``` # git push # ``` # # > No jargão da computação, _repo_ é uma redução de repositório. # # + [markdown] slideshow={"slide_type": "subslide"} # #### Vinculando o repositório local com o remoto # # Para que tenhamos um repositório local em sincronia com um remoto, podemos realizar os seguintes passos: # # - Inicialização e versionamento # # ```none # # # cd icd # git init # git add . # git commit # ``` # Acima, `icd` é o diretório onde o projeto está localizado em nosso disco local. Em seguida, inicializamos o rastreamento, adicionamos todos os arquivos contidos no diretório e atribuímos a versão. 
# + [markdown] slideshow={"slide_type": "subslide"}
# - Vinculação remota
#
# Nesta etapa, fazemos a vinculação com o repositório remoto no _GitHub_ e "empurramos" os arquivos para lá:
#
# ```none
# git remote add origin https://github.com/user/repo.git
# git push -u origin master
# ```
# + [markdown] slideshow={"slide_type": "subslide"}
# - Atualização local
#
# Caso alteremos os arquivos diretamente no _GitHub_, podemos atualizar o repositório local com:
#
# ```none
# git pull
# ```
# + [markdown] slideshow={"slide_type": "subslide"}
# ### _Pull requests_
#
# Caso queiramos contribuir com um projeto cuja propriedade seja de outro usuário, podemos requisitar acesso a ele no _GitHub_ com _pull requests_.
#
# Suponhamos que 3 desenvolvedores atuam em um projeto em 3 branches distintas. Se um deles fizer uma modificação e submetê-la à _branch master_ sem que os outros 2 acompanhem em tempo real, ambos podem atualizar suas _branches_ com
#
# ```
# git pull origin master
# ```
# + [markdown] slideshow={"slide_type": "subslide"}
# ### _Forks_
#
# Um _fork_ é o ato de _aforquilhar_ ("prender com forquilha ou garfo") um projeto, no sentido de copiar o repositório de um usuário no GitHub para você. O repositório original "forkado" é chamado de _upstream_.
# + [markdown] slideshow={"slide_type": "subslide"}
# Para trabalhar com a sua cópia "forkada", siga os seguintes passos:
#
# - Clone o _repo_ aforquilhado localmente:
#
# ```none
# git clone git@github.com:seu_user/forked_repo.git
# ```
# + [markdown] slideshow={"slide_type": "subslide"}
# - Adicione o _upstream_ à sua lista de repositórios remotos para receber as mudanças submetidas no _repo_ original:
#
# ```none
# git remote add upstream https://github.com/upstream_user/original_repo.git
# ```
# + [markdown] slideshow={"slide_type": "subslide"}
# - Verifique se ele foi, de fato, adicionado:
#
# ```none
# git remote -v
# ```
# + [markdown] slideshow={"slide_type": "subslide"}
# - Atualize o _repo_ aforquilhado trazendo do original as mudanças e _commits_ mais recentes, bem como todas as _branches_ (fazer uma "juntada"):
#
# ```none
# git fetch upstream
# ```
# + [markdown] slideshow={"slide_type": "subslide"}
# - Liste todas as _branches_, incluindo as do _upstream_:
#
# ```none
# git branch -va
# ```
# + [markdown] slideshow={"slide_type": "subslide"}
# - Direcione o ponteiro para a sua _master branch_:
#
# ```none
# git checkout master
# ```
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Tutorial interativo do _Git_
#
# Descrever em palavras o que os comandos _git_ realizam concretamente pode ser um pouco impreciso. A ferramenta
# [Learn Git Branching](https://learngitbranching.js.org/) foi criada com o propósito de ensinar o _git_ por meio de tutoriais interativos. Com ela, você pode praticar e entender comandos e simular o comportamento de um repositório real.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Controle de versão para dados
#
# - Ferramentas que lidam com controle de versão de grandes arquivos passaram a ser desenvolvidas.
# - Elas permitem que grandes dados sejam adicionados a repositórios e tenham seus estados controlados.
# - Algumas dessas ferramentas são: # - [CyberDuck](https://cyberduck.io) # - [DataLad](https://www.datalad.org) # - [_Git LFS_](https://git-lfs.github.com/) # - [_git-annex_](https://git-annex.branchable.com/) # + [markdown] slideshow={"slide_type": "subslide"} # Exemplos de ferramentas de _cloud computing_ para gestão de grandes arquivos e controle de dados são: # # - [Alibaba Cloud](https://www.alibabacloud.com) # - [AWS CLI](https://aws.amazon.com/cli/) # - [Google Cloud](https://cloud.google.com) # - [Microsoft Azure](https://azure.microsoft.com/en-us/) # + [markdown] slideshow={"slide_type": "slide"} # ## Exercícios # # - Criar repositório para o curso e organizá-lo como um projeto. # - Praticar comandos do _git_ na _sandbox_ do [Learn Git Branching](https://learngitbranching.js.org/). # + [markdown] slideshow={"slide_type": "slide"} # ## Referências # # - [[GitHub Docs]](https://docs.github.com/en) # # - [[Learn Git with Bitbucket]](https://www.atlassian.com/git/tutorials/learn-git-with-bitbucket-cloud) # # - [[Pro Git Book]](https://git-scm.com/book/en/v2) # # - [[The Turing Way]](https://the-turing-way.netlify.app/welcome.html)
_build/jupyter_execute/rise/04-cvs-git-rise.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os
import glob
import json
import pickle
from collections import defaultdict

import numpy as np
import pandas as pd
import seaborn as sb
from matplotlib import pyplot as plt

# +
# Reproduces, for a single run, the reward-vs-episodes comparison of
# epsilon-greedy, DQN and PS-SMAC (cf. Figure 3(a) of the paper, per the
# prints at the end). Expects the result files under
# 'example-results-1D-trinary-action-Sigmoid/' relative to the cwd.

# load PS-SMAC data
ps_smac_x, ps_smac_y = [], []
with open(os.path.join('example-results-1D-trinary-action-Sigmoid', 'smac3-output', 'run_1', 'validated_traj.json'), 'r') as fh:
    for line in fh.readlines()[1:]:  # skip first line as it is not needed
        ps_smac_data = json.loads(line)
        ps_smac_x.append(ps_smac_data['evaluations'])
        # cost is negated before plotting; presumably SMAC minimizes
        # cost = -reward (y-axis below is labelled 'Reward') -- TODO confirm
        ps_smac_y.append(ps_smac_data['cost']*-1)
# As the final incumbent most likely was found before the budget was exhausted, we need
# to append the last cost again at max_steps
ps_smac_x.append(10**4)
ps_smac_y.append(ps_smac_data['cost']*-1)

# load 0.1-greedy
fig = plt.figure(figsize=(15, 7.5))
with open(os.path.join('example-results-1D-trinary-action-Sigmoid', 'tabular', '0000_0.1-greedy-stats-1D3M-10000_eps-1_reps-seed_0.pkl'), 'rb') as fh:
    data = pickle.load(fh)

# Load Chainer Data (Only 1 repetition in the data)
file = os.path.join("example-results-1D-trinary-action-Sigmoid", "DQN", 'scores.txt')
chainer_data = pd.read_csv(file, sep='\t')

# Get the same style as plots in the paper
sb.set_style("darkgrid")
sb.set_context("paper", font_scale = 1, rc={
    "grid.linewidth": 4,
    'axes.labelsize': 32,
    "axes.titlesize": 32,
    "legend.fontsize": 32.0,
    'lines.linewidth': 4,
    'xtick.labelsize': 32.0,
    'ytick.labelsize': 32.0,
})

# plot
# where='post' holds each value until the next data point (step plot)
plt.step(np.arange(len(data)), data, c='orange', label=r'$\epsilon$-greedy', where='post')
plt.step(chainer_data['episodes'], chainer_data['mean'], c='b', label='DQN', where='post')
plt.step(ps_smac_x, ps_smac_y, c='green', label='PS-SMAC', where='post')
plt.xlabel('#Episodes')
plt.ylabel('Reward')
plt.ylim([4, 10])
plt.xlim([1, 10**4])
plt.semilogx()  # log-scale x-axis: episode counts span four orders of magnitude
plt.legend()
plt.show()

print('Here we plot the results for only 1 run!')
print('Comparable to results shown in Figure 3(a) of the paper.')
example_plots.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 线性代数:机器学习背后的优化原理 # # # 线性代数作为数学的一个分支,广泛应用于科学和工程中,掌握好线性代数对于理解和从事机器学习算法相关工作是很有必要的,尤其对于深度学习算法而言。因此,这个项目会从浅入深更好的帮助你学习与积累一些跟人工智能强相关的线性代数的知识。 # # 本项目内容理论知识部分参考[《DeepLearning》又名花书](https://book.douban.com/subject/27087503/)第二章,希望大家支持正版购买图书。 # # 若项目中的题目有困难没完成也没关系,我们鼓励你带着问题提交项目,评审人会给予你诸多帮助。 # # 所有选做题都可以不做,不影响项目通过。如果你做了,那么项目评审会帮你批改,也会因为选做部分做错而判定为不通过。 # ## 准备工作 # # 我们将讲解常用的线性代数知识,而学员需使用numpy来实现这些知识点(当然也可以自己写算法实现),还需要使用matplotlib完成规定图像习题,当然,本项目用到的python代码(或numpy的使用)课程中并未完全教授,所以需要学员对相应操作进行学习与查询,这在我们往后的人工智能学习之旅中是必不可少的一个技能,请大家珍惜此项目的练习机会。 # # 当然,这里提供官方的[numpy Quickstart](https://docs.scipy.org/doc/numpy/user/quickstart.html#)来帮助你更好的完成项目。 # # 本项目还需要使用LaTeX公式,以下两个链接供学习与使用: # # [Latex cheatsheet](https://www.authorea.com/users/77723/articles/110898-how-to-write-mathematical-equations-expressions-and-symbols-with-latex-a-cheatsheet) # # [aTeX Cookbook](http://www.personal.ceu.hu/tex/cookbook.html#inline) # # 首先,导入你所需的软件包。一般我们建议在工程开头导入**所有**需要的软件包。 # DONE: import library # %matplotlib inline import numpy as np import matplotlib.pyplot as plt # ## 1、标量,向量,矩阵,张量 # **首先,让我们回顾下基本的定义:** # # - 标量(scalar):形式而言,一个标量是一个单独的数,常用斜体的小写变量名称来表示。_v_ # # - 向量(vector):形式而言,一个向量是一列有序数,常用粗体的小写变量名称表示**v**,或者上面标记剪头$\vec{v}$ # # - 矩阵(matrix):形式而言,一个矩阵是一个二维数组,常用大写变量名称表示A,表示内部的元素则会使用$A_{i,j}$ # # - 张量(tensor):形式而言,一个张量是一个多维数组,常用粗体的大写字母变量名称表示**T**,表示内部的元素则会使用$A_{i,j,z}$ 等等 # # 用图片直观的显示区别如下 # <img src="images/diff.png" width="500"> # # **接下来让我们回顾下基本的运算:** # # - 加法 # <img src="images/add.png" width="500"> # # - 标量乘法 # <img src="images/scmu.png" width="400"> # # - 转置 # <img src="images/trans.png" width="370"> # # - 矩阵向量乘法(内积,人工智能中常见的拼写:matrix product 或者 dot product) # <img src="images/mul.png" width="570"> # # **线性方程组:** # # 
由矩阵乘法也演变出了我们最常见的线性方程组,已知矩阵与未知向量的乘积,等于另一个已知向量,通过此方程组可求解那个未知向量,一般写为x,具体如下表示。 # 等式左侧可以这么来理解: # <img src="images/axb.png" width="400"> # 列为具体的矩阵来看: # $$ # \begin{bmatrix} # A_{1,1} & A_{1,2} & \cdots & A_{1,n} \\\\ # A_{2,1} & A_{2,2} & \cdots & A_{2,n} \\\\ # \cdots & \cdots & \cdots & \cdots \\\\ # A_{m,1} & A_{m,2} & \cdots & A_{m,n} # \end{bmatrix} # \times # \begin{bmatrix} # x_1 \\\\ # x_2 \\\\ # \cdots \\\\ # x_n # \end{bmatrix} # = # \begin{bmatrix} # b_1 \\\\ # b_2 \\\\ # \cdots \\\\ # b_m # \end{bmatrix} # $$ # # 或者更简单的表示为 # # $$Ax=b$$ # # 既然有未知数,那么自然需要求解未知数,而我们的未知数需要满足所有方程,也不是一直都有解的,下面来列我们二维矩阵所组成的方程解的情况,若两条线平行不存在焦点,那么说明没有一个$x_1$, $x_2$同时满足两个方程,则此方程组无解,同理,若相交,则有一个解,若完全相等,则有无穷个解。 # <img src="images/axbsolu.png" width="570"> # ### 1.1、基本运算并绘图 # 例题 $\vec{v}$ + $\vec{w}$ # # $\hspace{1cm}\vec{v} = \begin{bmatrix} 1\\ 1\end{bmatrix}$ # # # $\hspace{1cm}\vec{w} = \begin{bmatrix} -2\\ 2\end{bmatrix}$ # # 结果需要先使用numpy计算向量运算结果,并用LaTeX公式表示: # # $\hspace{1cm}\vec{v}+\vec{w} = \begin{bmatrix} -1\\ 3\end{bmatrix}$ # # 并使用matlibplot绘制出(图表颜色样式不要求) # # <img src="images/add_e.png" width="300"> # # #### 1.1.1 # **根据上面例题展示,计算并绘制 $2\vec{v}$ - $\vec{w}$ 的结果** # # $\hspace{1cm}\vec{v} = \begin{bmatrix} 4\\ 1\end{bmatrix}$ # # # $\hspace{1cm}\vec{w} = \begin{bmatrix} -1\\ 2\end{bmatrix}$ # # + # 1.1.1 DONE:参考向量Lab # 定义向量v和向量w 计算2v-w v = np.array([4,1]) w = np.array([-1,2]) two_v = 2 * v neg_w = -w vw = two_v + neg_w # 使用Matplotlib进行绘图 # 设置绘图参考轴ax ax = plt.axes() # 绘制原点 红色 ax.plot(0,0,'or') # 绘制向量v 向量w 向量vw ax.arrow(0, 0, *two_v, color='b', linewidth=2.5, head_width=0.30, head_length=0.35) ax.arrow(two_v[0], two_v[1], *neg_w, linestyle='dotted', color='c', linewidth=2.5, head_width=0.30, head_length=0.35) ax.arrow(0, 0, *vw, color='k', linewidth=3.5,head_width=0.30, head_length=0.35) # 设置x轴的限度和刻度 plt.xlim(-10, 10) major_xticks = np.arange(-10, 10) ax.set_xticks(major_xticks) #设置y轴的限度和刻度 plt.ylim(-10, 10) major_yticks = np.arange(-10, 10) ax.set_yticks(major_yticks) # 绘制方格线 
plt.grid(b=True, which='major') # - # 1.1.1 LaTeX公式的解答为:(即图中黑色箭头) # $\hspace{1cm}\vec{2v}+\vec{w} = \begin{bmatrix} 9\\ 0\end{bmatrix}$ # 例题,方程组求解: # $$ # \begin{cases} # y = 2x + 1\\\\ # y = 6x - 2 # \end{cases} # $$ # 用matplotlib绘制图表(图表样式不要求) # <img src="images/2equ_solu.png" width="300"> # 由上可知此方程组有且仅有一个解 # # 需使用numpy(或自写算法)计算该解的结果,并用LaTeX公式表示出来(结果可以用小数或者分数展示) # $$ # \begin{cases} # x = \frac{3}{4} \\\\ # y = \frac{5}{2} # \end{cases} # $$ # # # #### 1.1.2 # **根据上面例题展示,绘制方程组,说明是否有解是否为唯一解,若有解需计算出方程组的解** # $$ # \begin{cases} # y = 2x + 1\\\\ # y = \frac{1}{10}x+6 # \end{cases} # $$ # + # 1.1.2 DONE:参考线性组合Lab # 注意这里plot输入参数是 X值的列表 Y值的列表 不是点(0,10)和点(1,21) plt.plot([0,10], [1,21], 'b', linewidth=3) # 点(0,1)和 点(10,21) plt.plot([0,10], [6,7], 'c-', linewidth=3) # 点(0,6)和 点(10,7) # 由图像可得:方程组有且仅有一解 所以可以直接计算 可以不用try防止其他情况 # 生成XY的系数矩阵vw 和 右侧的解的矩阵t vw = np.array([[2,-1], [1,-10]]) t = np.array([-1,-60]) # 使用np.linalg.solve方法求解 s = np.linalg.solve(vw, t) # 将解的情况进行绘制 plt.plot(s[0], s[1], 'ro', linewidth=3) plt.xlabel('Single Solution') plt.show() # print('解为:(', s[0], ',', s[1], ')') # - # 由上图可知:答案为1个解 # # 为了方便求X系数矩阵,Y系数矩阵,结果系数矩阵 把方程改写成 # $$ # \begin{cases} # 2x - y = -1\\\\ # x - 10y = -60 # \end{cases} # $$ # # 解为:(手工计算的分数...) # $$ # \begin{cases} # x = \frac{50}{19} \\\\ # y = \frac{119}{19} # \end{cases} # $$ # ### 1.2、说明题 # #### 1.2.1 # **使用numpy(或自写算法)说明$(AB)^{\text{T}} = B^\text{T}A^\text{T}$** # # **其中** # $$ # A=\begin{bmatrix} # 21 & 7 \\\\ # 15 & 42 \\\\ # 9 & 6 # \end{bmatrix}, # B=\begin{bmatrix} # 4 \\\\ # 33 # \end{bmatrix} # $$ # + # 1.2.1 DONE ''' 参考: 1.线性组合Lab中 矩阵的定义方法 2.矩阵乘法Lab中 矩阵转置(Lab中是DataFrame转置,其实类似的) 3.numpy.dot()方法 点乘运算 https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html?highlight=dot#numpy.dot 4.numpy.matmul()方法 矩阵乘法 https://docs.scipy.org/doc/numpy/reference/generated/numpy.matmul.html?highlight=matmul#numpy.matmul 有些好奇两者的区别,简单看了下 大致上是这样的 (线代大二学的有些忘...) 
做个笔记 dot可以用在一维矩阵相乘(即向量) matmul更适合两个二维矩阵相乘计算 如果有一个标量存在的矩阵/向量乘法 更应该用a*b 或者multiply(a, b)方法 我在这里采用了点乘dot()方法 用matmul()也可以 是不是有性能区别? ''' # 定义A矩阵 和 B矩阵 A = np.array([[21,7],[15,41],[9,6]]) B = np.array([4,33]) # A*B 的转置为 AB_mprod_t = np.dot(A, B).T print("A乘以B 的转置为:\n", AB_mprod_t) # B的转置 * A的转置 为 B_t = B.T A_t = A.T BtAt_mprod = np.dot(B_t, A_t) print("B的转置 乘以 A的转置为:\n", BtAt_mprod) # 判断俩矩阵是否相等 维度一样 且 每个元素一样 if((AB_mprod_t == BtAt_mprod).all() & AB_mprod_t.ndim == BtAt_mprod.ndim): print("等式成立!") else: print("有错误!") # - # #### 1.2.2 # **使用numpy(或自写算法)说明 $A ( B + C ) = AB + AC$ ** # # **其中** # $$ # A=\begin{bmatrix} # 9 & 3 \\\\ # 8 & 4 \\\\ # 7 & 6 # \end{bmatrix}, # B=\begin{bmatrix} # 5 \\\\ # 2 # \end{bmatrix}, # C=\begin{bmatrix} # 5 \\\\ # 7 # \end{bmatrix} # $$ # + # 1.2.2 DONE # 定义矩阵A、B、C A = np.array([[9,3],[8,4],[7,6]]) B = np.array([5,2]) C = np.array([5,7]) # 为了简便计算 这里先计算一些中间变量 B+C、AB、AC BC_sum = B + C AB_mprod = np.dot(A, B) AC_mprod = np.dot(A, C) # 求解 A(B+C) A_BCsum_mprod = np.dot(A, BC_sum) print("A(B+C) 的解为:\n", A_BCsum_mprod) # 求解 AB + AC AB_AC_sum = AB_mprod + AC_mprod print("AB+AC 的解为:\n", AB_AC_sum) # 判断俩矩阵是否相等 维度一样 且 每个元素一样 if((A_BCsum_mprod == AB_AC_sum).all() & A_BCsum_mprod.ndim == AB_AC_sum.ndim): print("等式成立!") else: print("有错误!") # - # ## 2、特殊矩阵 # - 单位矩阵 # # 如果选取任意一个向量和某矩阵相乘,该向量都不会改变,我们将这种保持n维向量不变的矩阵记为单位矩阵$I_n$ # # - 逆矩阵 # # 如果存在一个矩阵,使$A^{-1} A = I_n$,那么$A^{-1}$就是A的逆矩阵。 # # - 对角矩阵 # # 如果一个矩阵只有主对角线上还有非零元素,其他位置都是零,这个矩阵就是对角矩阵 # # - 对称矩阵 # # 如果一个矩阵的转置是和它自己相等的矩阵,即$A=A^{T}$,那么这个矩阵就是对称矩阵 # # - 正交矩阵 # # 行向量和列向量是分别标准正交(90度)的方阵,即$A^{T}A = AA^{T} = I_n$,又即$A^{-1} = A^{T}$,那么这种方阵就是正交矩阵 # # # # # ### 2.1、证明题 # # 通过LaTeX公式,结合上面所述概念,假设$A^{-1}$存在的情况下,证明$Ax=b$的解$x={A}^{-1}{b}$ # #### 回答: # --- # 证明: # # 1.将$Ax=b$式子的等号左右两边 分别左乘一个$A^{-1}$ 得到: # # $A^{-1}Ax$ = $A^{-1}b$ # # # 2.根据逆矩阵的定义:矩阵$A$和它的逆矩阵$A^{-1}$的乘积为与A同阶的单位矩阵$E$: # # $A^{-1}A = E$ # # # 3.则有: # # $Ex$ = $A^{-1}b$ # # 4.根据单位矩阵的定义:任何矩阵与单位矩阵相乘都等于本身 即$Ex$ = $x$ # # 得证: # # $x$ = $A^{-1}b$ # # --- # ### 
2.2、 计算题 # # #### 2.2.1 # # 通过numpy计算,再次验证2.1证明题 # $$ # \begin{cases} # y = 2x + 1\\\\ # y = \frac{1}{10}x+6 # \end{cases} # $$ # 并用LaTeX公式写出$A^{-1}$是多少(小数分数皆可) # + # 2.2.1 DONE # 参考numpy求转置 https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.inv.html?highlight=inverse%20matrix # 按照Ax=b的格式向量化方程组 (这里的x代表未知数 其实是一组解 即x和对应的y 注意区分) A = np.array([[2,-1],[1/10,-1]]) b = np.array([-1,-6]) # 使用np.linalg.inv求取A的逆矩阵 A_inv = np.linalg.inv(A) x = np.dot(A_inv, b) print("解为:", x) # 再次验证2.1 if ((x == s).all()): print("\n用x=A逆*b的解和上面solve求的解是一致的!可以从侧面验证2.1!") # - # $A^{-1}$为: # $$ # \left[ # \begin{matrix} # 0.52631579 & -0.52631579\\ # 0.05263158 & -1.05263158 # \end{matrix} # \right] # $$ # # 参考:使用$LaTeX$写矩阵 https://blog.csdn.net/bendanban/article/details/44221279 # #### 2.2.2 # # 1、请用numpy(或自写算法)实现一个6x6的对角矩阵,矩阵的对角线由3至8(含8)组成。 # # 2、计算第一问生成的对角矩阵与向量$[6,7,1,2,5,9]^{T}$的乘积 # + # 2.2.2 DONE # 实现6*6对角矩阵m 其对角线为3 4 5 6 7 8 # 参考:np.diag(v,k=0)方法 https://docs.scipy.org/doc/numpy/reference/generated/numpy.diag.html#numpy.diag M = np.diag(np.arange(3,9)) print("对角线矩阵为:\n", M) # 计算第一问对角矩阵与向量[6,7,1,2,5,9]转置的乘积 m = np.array([6,7,1,2,5,9]).T res = np.dot(M, m) print("结果为:\n", res) # - # ## 3、迹运算 # 迹运算返回的是矩阵对角元素的和,如图所示 # <img src="images/matrix.png" width="360"> # 写成数学公式为: # $$ \large Tr(A) = \sum_{i}A_{i,i}$$ # # **说明题:** # # 使用numpy验证 # $$ # \large Tr(ABC) = Tr(CAB) = Tr(BCA) # $$ # 其中 # $$ # A= # \begin{bmatrix} # 7 & 6 \\\\ # 29 & 3 # \end{bmatrix} # $$ # # $$ # B= # \begin{bmatrix} # 2 & -8 \\\\ # 9 & 10 # \end{bmatrix} # $$ # # $$ # C= # \begin{bmatrix} # 2 & 17 \\\\ # 1 & 5 # \end{bmatrix} # $$ # + # 3 DONE # 参考:np.trace()方法 https://docs.scipy.org/doc/numpy/reference/generated/numpy.trace.html?highlight=trace#numpy.trace # 定义矩阵A、B、C A = np.array([[7,6], [29,3]]) B = np.array([[2,-8], [9,10]]) C = np.array([[2,17], [1,5]]) # 计算A*B*C的迹 ABC_Tr 使用trace方法和matmul方法 ABC_Tr = np.trace(np.matmul(np.matmul(A, B), C)) # 计算C*A*B的迹 CAB_Tr CAB_Tr = np.trace(np.matmul(np.matmul(C, A), B)) 
# 计算B*C*A的迹 BCA_Tr BCA_Tr = np.trace(np.matmul(np.matmul(B, C), A)) # 输出结果以验证 print("Tr(ABC) = ", ABC_Tr) print("Tr(CAB) = ", CAB_Tr) print("Tr(BCA) = ", BCA_Tr) # - # ## 4、衡量向量以及矩阵的大小:范数与条件数 # # ### 范数的定义 # # 在线性代数等数学分支中,范数(Norm)是一个函数,其给予某向量空间(或矩阵)中的每个向量以长度或称之为大小。对于零向量,其长度为零。直观的说,向量或矩阵的范数越大,则我们可以说这个向量或矩阵也就越大。有时范数有很多更为常见的叫法,如绝对值其实便是一维向量空间中实数或复数的范数,范数的一般化定义:设$p\ge 1$,p-norm用以下来表示 # # # $$ \large {\Vert x \Vert}_{p} = \lgroup {\sum_{i}{\vert x_i \vert}^p }\rgroup ^{\frac{1}{p}}$$ # # 此处,当p=1时,我们称之曼哈顿范数(Manhattan Norm)。其来源是曼哈顿的出租车司机在四四方方的曼哈顿街道中从一点到另一点所需要走过的距离。也即我们所要讨论的L1范数。其表示某个向量中所有元素绝对值的和。 而当p=2时,则是我们最为常见的Euclidean norm。也称为Euclidean distance,中文叫欧几里得范数,也即我们要讨论的L2范数,他也经常被用来衡量向量的大小。 而当p=0时,严格的说此时p已不算是范数了,L0范数是指向量中非0的元素的个数,但很多人仍然称之为L0范数(Zero norm零范数)。 这三个范数有很多非常有意思的特征,尤其是在机器学习中的正则化(Regularization)以及稀疏编码(Sparse Coding)有非常有趣的应用,这个在进阶课程可以做更深入的了解。 # # **L0 范数** # $$ \large \Vert x \Vert = \sqrt[0]{\sum_i x_i^0} = \#(i|x_i \neq0) $$ # **L1 范数** # $$ \large {\Vert x \Vert}_{1} = \lgroup {\sum_{i}{\vert x_i \vert} }\rgroup $$ # **L2 范数** # $$ \large {\Vert x \Vert}_{2} = \lgroup {\sum_{i}{\vert x_i \vert}^2 }\rgroup ^{\frac{1}{2}}$$ # # 另外这里还存在特例: # 当 $ p -> \infty $ 时,我们称之为 $ L^{\infty} $范数,也被称为“maximum norm(max范数)”,这个范数表示向量中具有最大幅度的元素的绝对值: # # $$ \large {\Vert x \Vert}^{\infty} = \max_{i}{\vert x_i \vert} $$ # # [以上资料部分参考wiki](http://t.cn/RINHvvt) # # ### 4.1、计算向量的范数 # 编写一个函数来计算一下向量的各种范数。 # + # TODO 实现这里向量范数计算的函数,要求可以计算p = 0,1,2,3 ... 
无穷 情况下的范数 """ 计算向量的范数 参数 x: 向量 numpy数组 或者list数组 p: 范数的阶,int型整数或者None infty: 是否计算max范数,bool型变量,True的时候表示计算max范数,False的时候计算p范数 返回 向量的范数,float类型数值 hint: 1.你需要首先判断infty是True or False, 然后判断p 是否为零 2.注意int类型变量在计算时候需要规整为float类型 参考: 1.np.linalg.norm求范数 https://blog.csdn.net/hqh131360239/article/details/79061535 2.范数(norm)几种范数的简单介绍 https://blog.csdn.net/a493823882/article/details/80569888 """ def calc_Norm(x, p = 2, infty = False): if(infty == True): # 返回max范数 return float(np.linalg.norm(x, ord=np.inf)) elif(infty == False): # 返回p范数 return float(np.linalg.norm(x, ord=p)) # - # %run -i -e test.py LinearRegressionTestCase.test_calc_Norm # ### 4.2、计算矩阵的范数 # 我们也需要衡量矩阵的大小,对于矩阵大小的衡量在很多优化问题中是非常重要的。而在深度学习中,最常见的做法是使用Frobenius 范数(Frobenius norm),也称作矩阵的F范数,其定义如下: # # $$ \large {\Vert A \Vert}_{F} = \sqrt {\sum_{i,j}{\vert A_{i,j} \vert}^2 } $$ # # 我们这里继续来计算一下F范数 # + # TODO 实现这里矩阵Frobenius范数计算的函数 """ 计算向量的范数 参数 A: 给定的任意二维矩阵 list或者numpy数组形式 返回 矩阵的Frobenius范数,float类型数值 """ def calc_Frobenius_Norm(A): # ord参数设置为'fro' 即可求取矩阵的F范数 return float(np.linalg.norm(A, ord='fro')) # - # %run -i -e test.py LinearRegressionTestCase.test_calc_Frobenius_Norm # ### 4.3、计算矩阵的条件数 # 矩阵的条件数(condition number)是矩阵(或者它所描述的线性系统)的稳定性或者敏感度的度量,我们这里为了简化条件,这里只考虑矩阵是奇异矩阵的时候,如何计算以及理解条件数(condition number): # # 当矩阵A为奇异矩阵的时候,condition number为无限大;当矩阵A非奇异的时候,我们定义condition number如下: # # $$ \large \kappa{(A)} = {\Vert A \Vert}_F {\Vert A^{-1} \Vert}_F$$ # # [奇异矩阵,非奇异矩阵](https://zh.wikipedia.org/wiki/%E9%9D%9E%E5%A5%87%E5%BC%82%E6%96%B9%E9%98%B5) # # 计算矩阵的条件数 """ 计算矩阵的条件数 参数 A: 给定的任意二维矩阵 list或者numpy数组形式 返回 矩阵的condition number 参考: 1.numpy.linalg.cond方法 https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.cond.html?highlight=cond 注意!这里要求的是frobenius F范数计算的条件数!!! 参数p应该是'fro' 不能是默认或者其他值! """ def calc_Condition_Number(A): return float(np.linalg.cond(A, p='fro')) # %run -i -e test.py LinearRegressionTestCase.test_calc_Condition_Number # ### (选做)4.4、条件数的理解与应用 # # a. 
有如下两个2*2的非奇异矩阵A和B: # # $ A = \begin{bmatrix} # 1 &2 \\ # 3 &4 \\ # \end{bmatrix} $ # # $ B = \begin{bmatrix} # 1 &2 \\ # 2 &4.0001 \\ # \end{bmatrix} # $ # # 计算condition number(A), condition number(B); # # b. 根据上面构造的矩阵A,B分别计算线性系统方程组的解$w$: # # # A $ \begin{bmatrix}w_{a1}\\w_{a2}\\ \end{bmatrix} $ = $ \begin{bmatrix}1\\2\\ \end{bmatrix} $, # # B $ \begin{bmatrix}w_{b1}\\w_{b2}\\ \end{bmatrix} $ = $ \begin{bmatrix}1\\2\\ \end{bmatrix} $, # # A $ \begin{bmatrix}w_{a1}\\w_{a2}\\ \end{bmatrix} $ = $ \begin{bmatrix}{1.0001}\\{2.0001}\\ \end{bmatrix} $, # # B $ \begin{bmatrix}w_{b1}\\w_{b2}\\ \end{bmatrix} $ = $ \begin{bmatrix}{1.0001}\\{2.0001}\\ \end{bmatrix} $. # # # # c. 计算完成之后,比较condition number大小与线性系统稳定性之间的关系,并且给出规律性的总结; # + # 4.4的a # 定义A、B A = np.array([[1,2], [3,4]]) B = np.array([[1,2], [2,4.0001]]) # 计算 print('A的条件数为:',calc_Condition_Number(A)) print('B的条件数为:',calc_Condition_Number(B)) print("--------------------------------------------------\n") # 4.4的b b1 = np.array([1,2]) wa1 = np.dot(np.linalg.inv(A), b1) wb1 = np.dot(np.linalg.inv(B), b1) print("当目标为[1,2]时 系数矩阵为A矩阵时的解为:\n", wa1) print("当目标为[1,2]时 系数矩阵为B矩阵时的解为:\n", wb1) print('\n') b2 = np.array([1.0001,2.0001]) wa2 = np.dot(np.linalg.inv(A), b2) wb2 = np.dot(np.linalg.inv(B), b2) print("当目标为[1.0001,2.0001]时 系数矩阵为A矩阵时的解为:\n", wa2) print("当目标为[1.0001,2.0001]时 系数矩阵为B矩阵时的解为:\n", wb2) # - # #### 4.4的c 规律性总结: # # 已知: $A$和$B$是非奇异矩阵 都可逆 # # 对 $Xw = b$ 而言 # # 此处有 $\large \kappa{(A)} < \large \kappa{(B)}$ # # 当$b$产生很小的变动时(所有元素都增加了0.0001) # # $wa1$和$wa2$的变动 $<<$ $wb1$到$wb2$的变动 (差了十几个数量级) # # 即可简单理解成:一个矩阵的条件数越大,其线性系统越不稳定,容易变动。反之就越稳定。 # # > 参考:条件数百度百科-举例部分 对线性方程组而言 b的扰动引起x的变化https://baike.baidu.com/item/%E6%9D%A1%E4%BB%B6%E6%95%B0/5293168?fr=aladdin # # --- # d. 
**阅读与思考**: 考虑更为通用的一种情况,我们计算一个典型的线性回归系统: # # $$ Xw = b $$ # # 可以简单推导得出其闭式解为:$ w=(X^TX)^{−1}X^Tb $ ,如果 $X^TX$可逆 # # 推导过程: # # 1.等式两边乘以$X^T$ # $$ X^TXw = X^Tb $$ # 2.等式两边乘以$(X^TX)^{-1}$ # $$ (X^TX)^{-1}X^TXw = (X^TX)^{−1}X^Tb $$ # 3.因为$A^{-1}A = I$,两边约去即可得: # $$ w=(X^TX)^{−1}X^Tb $$ # # # 当我们需要拟合的数据X满足数据量远远小于特征数目的时候,也就是X矩阵的行数 << X矩阵的列数的时候,因为$X^TX$不是奇异矩阵,此时方程组不存在闭式解;那么我们该如何重新构造$X^TX$,使得该闭式解成立? # # hint1. 单位矩阵的condition number是最低的,是最为稳定的; # # hint2. 如果要使得该系统存在闭式解,那么就必须使得求逆运算是可以进行的,也就是说重新构造的$X^TX$必须是可逆的方阵; # # hint3. 重新构造的方式可以是在$X^TX$基础上进行加或者减或者乘除相关矩阵的操作; # # 一种可行的方式就是: # $$ w = (X^TX+\lambda I)^{−1}X^Tb $$ # # 实际上我们最为常用的[Ridge Regression](http://scikit-learn.org/stable/modules/linear_model.html)和 L2范数以及condition number之间某种程度上是可以相互推导的: # # 首先,Ridge Regression的损失函数为: # $$ J_w = min({\Vert Xw -y \Vert}^2 + \alpha {\Vert w \Vert}^2) $$ # 我们展开这个损失函数: # $$ {\Vert Xw -y \Vert}^2 + \alpha {\Vert w \Vert}^2 = (Xw -y)^T (Xw-y) + \alpha w^Tw$$ # 由于这里是一个凸函数,我们令导数等于零,即为最小值的解,求导可得: # $$ X^T (Xw-y) + \alpha w = 0 $$ # # 整理即可得到: # $$ w = (X^TX+\lambda I)^{−1}X^Tb $$ # ## 5、SVD # # [SVD](https://en.wikipedia.org/wiki/Singular-value_decomposition)是Singular value decomposition的缩写,称为奇异值分解,是分解矩阵的一种方式,会将矩阵分解为奇异向量(singular vector)和奇异值(singular value),分解的意义其实很明确,就是想将一个很大很复杂的矩阵,用更小更简单的几个子矩阵的相乘来表示,这些小矩阵描述的是矩阵的重要的特性。 # # 那么SVD具体的数学表达是什么呢? 
# # Suppose we have a matrix C. We can factor C into the product of three matrices:
# <img src="images/svd.png" width="480">
#
# $$\large C = UDV^{T}$$
#
#
# If C is an m x n matrix, then U is an m x m matrix, D is an m x n matrix and V is an
# n x n matrix. These factors are not arbitrary matrices: U and V are defined to be
# orthogonal matrices, and D is defined to be a diagonal matrix.
#
# The most common uses of the SVD are feature dimensionality reduction and low-rank
# matrix reconstruction: keep only the first k columns of U, D and VT (the white area
# in the figure) and recompute a new C, which is the rank-k reconstruction of the
# matrix. The technique is widely used in natural language processing
# ([LSA](https://en.wikipedia.org/wiki/Latent_semantic_analysis)) and in recommender
# systems ([SVD++, FM, FFM](https://tech.meituan.com/deep_understanding_of_ffm_principles_and_practices.html));
# see the links for further reading.
# <img src="images/svd_decompostion.png" width="480">
#
#
# The algorithm that actually computes U, D and V is not the focus of this project;
# we only need numpy to obtain the result. The exercises below walk through one
# application scenario of the SVD.
#
# Hint: we will need the [numpy.linalg](https://docs.scipy.org/doc/numpy-1.13.0/reference/routines.linalg.html) routines.
#
# ### 5.1 Use numpy to compute the singular value decomposition of an arbitrary matrix:

"""
Compute the singular value decomposition of an arbitrary matrix.

Parameters
    A: any 2-D matrix, given as a list of lists or a numpy array.

Returns
    The factors U, D, VT as returned directly by the numpy.linalg routines.
    (If you np.shape the returned U, D, VT you will notice the dimensions differ
    from the description above -- numpy returns D as a 1-D vector of singular
    values rather than an m x n diagonal matrix; just return what np computes.)

Reference:
    np.linalg.svd()
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.svd.html?highlight=svd#numpy.linalg.svd
"""
def calc_svd(A):
    # full_matrices=True -> U is (m, m) and VT is (n, n);
    # compute_uv=True    -> return U and VT in addition to the singular values.
    # The singular values D come back as a 1-D vector sorted in descending order.
    return np.linalg.svd(A, full_matrices=True, compute_uv=True)


# %run -i -e test.py LinearRegressionTestCase.test_calc_svd

# ### (Optional) 5.2 Use the singular value decomposition to reduce a matrix

# +
# DONE: use the SVD to reduce the dimensionality of a matrix.
"""
Reduce the dimensionality of a matrix via the SVD.

Parameters
    A:    any 2-D matrix (list or numpy array), shape (m, n)
    topk: the target dimensionality, (m, n) -> (m, topk)

Returns
    The reduced matrix, shape (m, topk).

hint
    1. The diagonal entries of D are sorted in decreasing order; they measure the
       importance of the corresponding column vectors of the factors U and V.
    2. "SVD dimensionality reduction" therefore means keeping only the topk
       largest diagonal entries when building the new, reduced matrix.
    3. (first topk columns of U) * (topk x topk diagonal matrix of the leading
       singular values) is the reduced matrix.
"""
def calc_svd_decompostion(A, topk = 2):
    # D is a 1-D vector of singular values in descending order, so the topk
    # most important components are simply D[:topk].
    U, D, VT = calc_svd(A)
    return U[:, :topk].dot(np.diag(D[:topk]))
# -

# %run -i -e test.py LinearRegressionTestCase.test_calc_svd_decompostion

# ### (Optional) 5.3 Reconstruct a matrix after SVD dimensionality reduction
"""
Reconstruct a matrix from its truncated SVD.

Parameters
    A:    any 2-D matrix (list or numpy array), shape (m, n)
    topk: the reduction dimensionality, (m, n) -> (m, topk)

Returns
    The reconstructed matrix after the rank-topk reduction, shape (m, n).

hint
    Besides producing a reduced matrix, the other common application is to
    reconstruct the matrix; the approach mirrors the previous exercise:
    1. take the corresponding topk vectors of U, D and V
    2. (first topk columns of U) * (topk diagonal matrix) * (first topk rows of
       V^T) -- note this is the transpose of V, because the decomposition
       yields V^T.
"""
def calc_svd_reconsitution(A, topk = 2):
    U, D, VT = calc_svd(A)
    # Rank-topk approximation: U_k @ diag(D_k) @ VT_k, shape (m, n).
    return U[:, :topk].dot(np.diag(D[:topk])).dot(VT[:topk, :])


# %run -i -e test.py LinearRegressionTestCase.test_calc_svd_reconsitution

# ### (Optional) 5.4 Frobenius-norm loss of the reconstruction at different sizes
#
# Given a matrix $A$ and its reconstructed matrix $A_k$ after an SVD reduction of
# size k, the F-norm loss is defined as:
#
# $$ \Large Loss_{F} = {\Vert A - A_k \Vert}_F $$
#
# Compute the F-norm loss of the reconstruction of the given matrix A at each
# reduction size, and plot the loss against the reduction size:

## Do not modify this cell!
import numpy as np
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt
# %matplotlib inline
A = load_boston()['data'] # load the boston house dataset
# NOTE(review): load_boston was removed in scikit-learn 1.2; this cell requires
# scikit-learn < 1.2 to run -- confirm the pinned environment.
print(A.shape)

# +
loss_hist = []
for topk in range(1,13):
    # 5.4 DONE
    ### 1. compute the rank-topk SVD reconstruction (uses calc_svd_reconsitution)
    A_k = calc_svd_reconsitution(A, topk)
    ### 2. compute the corresponding F-norm loss and store it in loss_hist
    loss_hist.append(np.linalg.norm(A - A_k, ord='fro'))

### plot the F loss against the reduction size
### x axis: the reduction size, y axis: the F loss
plt.plot(range(1,13),loss_hist,'r--')
plt.xlabel('decomposition size')
plt.ylabel('F Loss')
# -

# ### 5.5 A fun application of the SVD
# Two images are provided for this exercise, test_girl.jpg and test_boy.jpeg in the
# project folder. Pick one; you need to
# - load the chosen image with `PIL` ([docs](https://pillow.readthedocs.io/en/latest/reference/Image.html)), remembering to import the required package (module)
# - turn the image into a greyscale image with Image's [convert method](https://pillow.readthedocs.io/en/latest/reference/Image.html#PIL.Image.Image.convert)
# - convert the result to an np.array, using [Image.getdata](https://pillow.readthedocs.io/en/latest/reference/Image.html#PIL.Image.Image.getdata) to read each pixel's data. Note in particular that for a colour image, even after converting to greyscale each pixel still consists of the three RGB dimensions, so when calling getdata the band must be set to one colour index, e.g. band = 0 so only the R channel is used. This guarantees each pixel of the image occupies a single unit of storage.
# - converting to np.array destroyed the original image layout and produced 1-D data, so reshape the array back to the size the image should have; afterwards you can check the shape matches the size of the original greyscale image. Careful: an image size is (width, height), and width corresponds to the array's columns while height corresponds to its rows -- don't swap them.
# - run the calc_svd function implemented above on the np.array computed in the previous step, assigning the result to the variables U, D, VT
# - print the shapes of U, D and VT, paying particular attention to D's shape
# - once the U, VT, D variables are successfully in place, run the test program to see the effect

# +
# 5.5 DONE
# import the Image module from PIL https://pillow.readthedocs.io/en/latest/reference/Image.html
from PIL import Image

# open the image file https://pillow.readthedocs.io/en/latest/reference/Image.html
im2 = Image.open("test_girl.jpg")

# convert to greyscale with the .convert() method
# mode selects the colour mode: 'L' is 8-bit greyscale, '1' is bilevel, etc.
im2_gray = im2.convert(mode='L')

# convert to np.array, choosing to read only the R (red) channel of each pixel
im2_gray_array = np.array(im2_gray.getdata(band=0))

# restore the original dimensions using the .reshape() method and .size attribute
# the tuple must be reversed: size is width*height while shape is rows*columns;
# done with a slice (reference: https://www.cnblogs.com/debude/p/5368945.html)
reim2_gray_array = im2_gray_array.reshape(im2_gray.size[::-1])

# compute the singular value decomposition (the second image is chosen here)
(U,D,VT) = calc_svd(reim2_gray_array)

print("U的形状:", U.shape)
print("D的形状:", D.shape)
print("VT的形状:", VT.shape)
# -

# run this test program only once the U, D, V variables are in place; do not modify it
plt.figure(figsize=(16,6))
for i,topk in enumerate([5, 10, 15, 20, 30, 50]):
    reconstimg = np.matrix(U[:, :topk]) * np.diag(D[:topk]) * np.matrix(VT[:topk, :])
    plt.subplot(231+i)
    plt.imshow(reconstimg, cmap='gray')
    # BUG FIX: the title was "n = %s" % ((i+1)*5), which mislabels the 30- and
    # 50-component panels as n = 25 and n = 30; label with the actual topk.
    title = "n = %s" % topk
    plt.title(title)
plt.show()

# Further reading:
# 1. [Machine learning and optimisation](http://freemind.pluskid.org/series/mlopt/)
# 2. [The difference between PCA and SVD](https://www.zhihu.com/question/40043805/answer/138429562)
# 3. [SVD for dimensionality reduction](https://www.cnblogs.com/pinard/p/6251584.html)
# 4. [SVD in natural language processing](https://blog.csdn.net/pipisorry/article/details/42560331)
# 5. [SVD in recommender systems](https://tech.meituan.com/deep_understanding_of_ffm_principles_and_practices.html)
# 6. [《Elements of Statistical Learning》<NAME>, <NAME>, and <NAME>](https://web.stanford.edu/~hastie/ElemStatLearn//)
P3_Linear_Algebra/p3_Linear_Algebra.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import scipy as sp
# FIX: scipy.signal.blackman was a deprecated alias removed in SciPy 1.13;
# the canonical location is scipy.signal.windows.blackman (same signature).
from scipy.signal.windows import blackman
from scipy.fft import rfft, rfftfreq

import cospar
from everest import window
# %matplotlib inline

# +
from scipy.signal.windows import blackman
from scipy.fft import rfft, rfftfreq

def time_smooth(x, *ys, sampleFactor = 1, kind = 'linear'):
    """Resample each series in *ys* onto a uniform grid over [min(x), max(x)].

    Yields the uniform grid first, then each interpolated series in turn.
    sampleFactor scales the number of grid points relative to len(x).
    """
    yield (ix := np.linspace(np.min(x), np.max(x), round(len(x) * sampleFactor)))
    for y in ys:
        yield sp.interpolate.interp1d(x, y, kind = kind)(ix)

def time_fourier(x, *ys, sampleFactor = 1, interpKind = 'linear'):
    """Yield the one-sided frequency grid, then the Blackman-windowed
    amplitude spectrum of each series (resampled to a uniform grid first,
    since the FFT requires equal spacing)."""
    x, *ys = time_smooth(x, *ys, sampleFactor = sampleFactor, kind = interpKind)
    N = len(x)
    # Mean sample spacing of the (uniform) resampled grid.
    T = np.diff(x).mean()
    yield rfftfreq(N, T)[: N // 2]
    w = blackman(N)
    for y in ys:
        yield np.abs(rfft(y * w))[: N // 2]

# +
N = 600
T = 1.0 / 800.0
# x = np.sort(np.random.rand(N)) * N * T
x = np.linspace(0.0, N*T, N, endpoint=False)
y = np.sin(50.0 * 2.0*np.pi*x) + 0.5*np.sin(80.0 * 2.0*np.pi*x)

xf, yf = time_fourier(x, y, sampleFactor = 1)

window.plot.line(xf, np.log10(yf), size = (12, 6))

# +
N = 6000
T = 1.0 / 8000.0
# x = np.sort(np.random.rand(N)) * N * T
x = np.linspace(0.0, N*T, N, endpoint=False)
y = np.sin(50.0 * 2.0*np.pi*x) + 0.5*np.sin(80.0 * 2.0*np.pi*x)

xf, yf = time_fourier(x, y, sampleFactor = 1)

window.plot.line(xf, np.log10(yf), size = (12, 6))
# -

xf, yf = time_fourier(x, y, sampleFactor = 10)
window.plot.line(xf, np.log10(yf), size = (12, 6))

# +
import numpy as np
import scipy as sp
from scipy.signal.windows import blackman
from scipy.fft import rfft, rfftfreq

def time_smooth(x, *ys, sampleFactor = 1, kind = 'linear'):
    """Resample each series in *ys* onto a uniform grid over [min(x), max(x)]."""
    yield (ix := np.linspace(np.min(x), np.max(x), round(len(x) * sampleFactor)))
    for y in ys:
        yield sp.interpolate.interp1d(x, y, kind = kind)(ix)

def time_fourier(x, *ys):
    """Simplified variant: uniform resample, then yield the one-sided
    frequency grid and each Blackman-windowed amplitude spectrum."""
    x, *ys = time_smooth(x, *ys)
    N = len(x)
    # Sample spacing estimated as total span / N (slightly coarser than
    # the diff-mean estimate used in the variant above).
    T = np.ptp(x) / N
    yield rfftfreq(N, T)[: N // 2]
    w = blackman(N)
    for y in ys:
        yield np.abs(rfft(y * w))[: N // 2]
# -

# Number of sample points
N = 600
# sample spacing
T = 1.0 / 800.0
x = np.linspace(0.0, N*T, N, endpoint=False)
y = np.sin(50.0 * 2.0*np.pi*x) + 0.5*np.sin(80.0 * 2.0*np.pi*x)

xf, yf = time_fourier(x, y)

import cospar
from everest import window
# %matplotlib inline

window.plot.line(xf, np.log10(yf))

import os
os.getcwd()
analysis/cospar/working_011b_fourierpractice.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] papermill={} tags=["awesome-notebooks/Plotly/Plotly_Create_Leaderboard.ipynb"]
# <img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>
#

# + [markdown] papermill={} tags=["awesome-notebooks/Plotly/Plotly_Create_Leaderboard.ipynb"]
# # Plotly - Create Leaderboard
# <a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/Plotly/Plotly_Create_Leaderboard.ipynb" target="_parent"><img src="https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg"/></a>

# + [markdown] papermill={} tags=["awesome-notebooks/Plotly/Plotly_Create_Leaderboard.ipynb"]
# **Tags:** #plotly #chart #horizontalbar #dataviz #snippet #operations #image #html

# + [markdown] papermill={} tags=["naas", "awesome-notebooks/Plotly/Plotly_Create_Leaderboard.ipynb"]
# **Author:** [<NAME>](https://www.linkedin.com/in/ACoAABCNSioBW3YZHc2lBHVG0E_TXYWitQkmwog/)

# + [markdown] papermill={} tags=["awesome-notebooks/Plotly/Plotly_Create_Leaderboard.ipynb"]
# Learn more on the Plotly doc : https://plotly.com/python/horizontal-bar-charts/

# + [markdown] papermill={} tags=["awesome-notebooks/Plotly/Plotly_Create_Leaderboard.ipynb"]
# ## Input

# + [markdown] papermill={} tags=["awesome-notebooks/Plotly/Plotly_Create_Leaderboard.ipynb"]
# ### Import libraries

# + papermill={} tags=["awesome-notebooks/Plotly/Plotly_Create_Leaderboard.ipynb"]
import plotly.express as px
import pandas as pd

# + [markdown] papermill={} tags=["awesome-notebooks/Plotly/Plotly_Create_Leaderboard.ipynb"]
# ### Variables

# + papermill={} tags=["awesome-notebooks/Plotly/Plotly_Create_Leaderboard.ipynb"]
title = "Leaderboard"

# Output paths
output_image = f"{title}.png"
output_html = f"{title}.html"

# + [markdown] papermill={} tags=["awesome-notebooks/Plotly/Plotly_Create_Leaderboard.ipynb"]
# ## Model

# + [markdown] papermill={} tags=["awesome-notebooks/Plotly/Plotly_Create_Leaderboard.ipynb"]
# ### Get data model

# + papermill={} tags=["awesome-notebooks/Plotly/Plotly_Create_Leaderboard.ipynb"]
data = [
    {"LABEL": "A", "VALUE": 88},
    {"LABEL": "B", "VALUE": 12},
    {"LABEL": "C", "VALUE": 43},
    {"LABEL": "D", "VALUE": 43},
    {"LABEL": "E", "VALUE": 2},
    {"LABEL": "F", "VALUE": 87},
    {"LABEL": "G", "VALUE": 67},
    {"LABEL": "H", "VALUE": 111},
    {"LABEL": "I", "VALUE": 24},
    {"LABEL": "J", "VALUE": 123},
]

df = pd.DataFrame(data)
df = df.sort_values(by=["VALUE"], ascending=True) #Order will be reversed in plot
df

# + [markdown] papermill={} tags=["awesome-notebooks/Plotly/Plotly_Create_Leaderboard.ipynb"]
# ### Create the plot

# + papermill={} tags=["awesome-notebooks/Plotly/Plotly_Create_Leaderboard.ipynb"]
def create_barchart(df, label, value):
    """Build, display and return a horizontal Plotly bar chart of *value* per *label*.

    The chart subtitle shows the grand total of the value column, formatted
    with thousands separators.
    """
    last_value = '{:,.0f}'.format(df[value].sum())

    fig = px.bar(df,
                 y=label,
                 x=value,
                 orientation='h',
                 text=value)
    fig.update_layout(
        title=f"<b>Ranking by label</b><br><span style='font-size: 13px;'>Total value: {last_value}</span>",
        title_font=dict(family="Arial", size=18, color="black"),
        legend_title="Packs",
        legend_title_font=dict(family="Arial", size=11, color="black"),
        legend_font=dict(family="Arial", size=10, color="black"),
        font=dict(family="Arial", size=12, color="black"),
        plot_bgcolor="#ffffff",
        width=1200,
        height=800,
        xaxis_title=None,
        xaxis_showticklabels=False,
        yaxis_title=None,
        margin_pad=10,
        margin_t=100,
    )
    # Display (hide the Plotly mode bar)
    config = {'displayModeBar': False}
    fig.show(config=config)
    return fig

fig = create_barchart(df, "LABEL", "VALUE")

# + [markdown] papermill={} tags=["awesome-notebooks/Plotly/Plotly_Create_Leaderboard.ipynb"]
# ## Output

# + [markdown] papermill={} tags=["awesome-notebooks/Plotly/Plotly_Create_Leaderboard.ipynb"]
# ### Export in PNG and HTML

# + papermill={} tags=["awesome-notebooks/Plotly/Plotly_Create_Leaderboard.ipynb"]
fig.write_image(output_image, width=1200)
fig.write_html(output_html)

# + [markdown] papermill={} tags=["awesome-notebooks/Plotly/Plotly_Create_Leaderboard.ipynb"]
# ### Generate shareable assets

# + papermill={} tags=["awesome-notebooks/Plotly/Plotly_Create_Leaderboard.ipynb"]
# BUG FIX: naas was used below but never imported anywhere in this notebook,
# which raised a NameError when the cell ran.
import naas

link_image = naas.asset.add(output_image)
link_html = naas.asset.add(output_html, {"inline":True})

#-> Uncomment the line below to remove your assets
# naas.asset.delete(output_image)
# naas.asset.delete(output_html)
Plotly/Plotly_Create_Leaderboard.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Interactive 3D brownian motion with Vispy

# +
import numpy as np

from vispy import geometry
from vispy import scene
from vispy import app
import vispy
import vispy.app.qt as vispyqt

# NOTE(review): 'pyqt4' is a legacy backend; confirm it is still available in
# the target environment.
qtapp = vispy.app.use_app('pyqt4')
QtCore = qtapp.backend_module.QtCore
QtGui = qtapp.backend_module.QtGui

# +
class MainWindow(QtGui.QWidget):
    """Window showing spheres on a 3-D random walk, stepped by a slider."""

    def __init__(self):
        QtGui.QWidget.__init__(self, None)

        self.setMinimumSize(800, 600)
        self.setWindowTitle('Test')

        self.canvas = vispyqt.QtSceneCanvas(keys='interactive', app=qtapp,
                                            parent=self, bgcolor='white')
        self.canvas.measure_fps(1, self.show_fps)

        self.view = self.canvas.central_widget.add_view()
        self.view.camera = scene.cameras.TurntableCamera()

        self.spheres = []
        self.colors = ['red', 'red', 'green', 'green']

        # Generate random motion (per-step deltas in {-1, 0, 1}^3) for each sphere
        tmax = 100000
        self.pos = np.random.randint(-1, 2, size=(tmax, len(self.colors), 3))

        # Create spheres
        for i, color in enumerate(self.colors):
            position = self.pos[0, i]
            sphere = self.create_sphere(position, color)
            self.spheres.append(sphere)
            self.view.add(sphere)

        # Config slider widget according to random motions.
        # BUG FIX: the slot used to be named 'move', which shadowed the standard
        # QtGui.QWidget.move(x, y) method with an incompatible signature; it is
        # now a distinctly named slot.
        self.slider = QtGui.QSlider(QtCore.Qt.Horizontal, self)
        self.slider.valueChanged.connect(self.on_slider_change)
        self.slider.setRange(0, tmax)
        self.slider.setSingleStep(1)

        # Layout
        vlayout = QtGui.QVBoxLayout(self)
        self.setLayout(vlayout)
        vlayout.addWidget(self.canvas.native, 0)
        vlayout.addWidget(self.slider, 1)

        # FPS - Does not work :-(
        self.text = scene.visuals.Text('fsdfsdfsdfsdfsd', font_size=200,
                                       color='black', pos=[10, 10],
                                       anchor_x='center', anchor_y='baseline')
        self.canvas.draw_visual(self.text)

    def create_sphere(self, position, color):
        """Create one mesh sphere and place it at *position*."""
        mdata = geometry.create_sphere(64, 64, radius=0.5)
        sphere = scene.visuals.Mesh(meshdata=mdata, shading='flat', color=color)
        t = scene.transforms.AffineTransform()
        t.translate(position)
        sphere.transform = t
        return sphere

    def on_slider_change(self, t):
        """Translate every sphere by its random-walk delta for step *t*."""
        # BUG FIX: at t == 0 the old code indexed self.pos[-1], silently
        # wrapping to the last row of the walk; ignore the initial position.
        if t < 1:
            return
        for i, sphere in enumerate(self.spheres):
            sphere.transform.translate(self.pos[t - 1, i] / 5)

    def show_fps(self, fps):
        """Canvas FPS callback: print and display the current frame rate."""
        str_fps = "FPS : {}".format(fps)
        print(str_fps)
        self.text.text = str_fps
        self.canvas.update()


qtapp.create()
win = MainWindow()
win.show()
qtapp.run()
Visualization/Vispy_Brownian/notebook.ipynb